summaryrefslogtreecommitdiff
path: root/chromium/media/audio
diff options
context:
space:
mode:
authorZeno Albisser <zeno.albisser@digia.com>2013-08-15 21:46:11 +0200
committerZeno Albisser <zeno.albisser@digia.com>2013-08-15 21:46:11 +0200
commit679147eead574d186ebf3069647b4c23e8ccace6 (patch)
treefc247a0ac8ff119f7c8550879ebb6d3dd8d1ff69 /chromium/media/audio
downloadqtwebengine-chromium-679147eead574d186ebf3069647b4c23e8ccace6.tar.gz
Initial import.
Diffstat (limited to 'chromium/media/audio')
-rw-r--r--chromium/media/audio/OWNERS11
-rw-r--r--chromium/media/audio/agc_audio_stream.h208
-rw-r--r--chromium/media/audio/android/audio_manager_android.cc204
-rw-r--r--chromium/media/audio/android/audio_manager_android.h70
-rw-r--r--chromium/media/audio/android/opensles_input.cc311
-rw-r--r--chromium/media/audio/android/opensles_input.h88
-rw-r--r--chromium/media/audio/android/opensles_output.cc313
-rw-r--r--chromium/media/audio/android/opensles_output.h91
-rw-r--r--chromium/media/audio/android/opensles_util.h45
-rw-r--r--chromium/media/audio/async_socket_io_handler.h113
-rw-r--r--chromium/media/audio/async_socket_io_handler_posix.cc98
-rw-r--r--chromium/media/audio/async_socket_io_handler_unittest.cc168
-rw-r--r--chromium/media/audio/async_socket_io_handler_win.cc77
-rw-r--r--chromium/media/audio/audio_buffers_state.cc20
-rw-r--r--chromium/media/audio/audio_buffers_state.h32
-rw-r--r--chromium/media/audio/audio_device_name.cc18
-rw-r--r--chromium/media/audio/audio_device_name.h27
-rw-r--r--chromium/media/audio/audio_device_thread.cc203
-rw-r--r--chromium/media/audio/audio_device_thread.h114
-rw-r--r--chromium/media/audio/audio_input_controller.cc380
-rw-r--r--chromium/media/audio/audio_input_controller.h274
-rw-r--r--chromium/media/audio/audio_input_controller_unittest.cc235
-rw-r--r--chromium/media/audio/audio_input_device.cc315
-rw-r--r--chromium/media/audio/audio_input_device.h175
-rw-r--r--chromium/media/audio/audio_input_device_unittest.cc199
-rw-r--r--chromium/media/audio/audio_input_ipc.cc13
-rw-r--r--chromium/media/audio/audio_input_ipc.h88
-rw-r--r--chromium/media/audio/audio_input_unittest.cc185
-rw-r--r--chromium/media/audio/audio_input_volume_unittest.cc185
-rw-r--r--chromium/media/audio/audio_io.h172
-rw-r--r--chromium/media/audio/audio_low_latency_input_output_unittest.cc449
-rw-r--r--chromium/media/audio/audio_manager.cc42
-rw-r--r--chromium/media/audio/audio_manager.h150
-rw-r--r--chromium/media/audio/audio_manager_base.cc391
-rw-r--r--chromium/media/audio/audio_manager_base.h168
-rw-r--r--chromium/media/audio/audio_output_controller.cc399
-rw-r--r--chromium/media/audio/audio_output_controller.h245
-rw-r--r--chromium/media/audio/audio_output_controller_unittest.cc379
-rw-r--r--chromium/media/audio/audio_output_device.cc352
-rw-r--r--chromium/media/audio/audio_output_device.h185
-rw-r--r--chromium/media/audio/audio_output_device_unittest.cc294
-rw-r--r--chromium/media/audio/audio_output_dispatcher.cc28
-rw-r--r--chromium/media/audio/audio_output_dispatcher.h90
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.cc204
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.h101
-rw-r--r--chromium/media/audio/audio_output_ipc.cc13
-rw-r--r--chromium/media/audio/audio_output_ipc.h90
-rw-r--r--chromium/media/audio/audio_output_proxy.cc93
-rw-r--r--chromium/media/audio/audio_output_proxy.h66
-rw-r--r--chromium/media/audio/audio_output_proxy_unittest.cc741
-rw-r--r--chromium/media/audio/audio_output_resampler.cc395
-rw-r--r--chromium/media/audio/audio_output_resampler.h89
-rw-r--r--chromium/media/audio/audio_parameters.cc97
-rw-r--r--chromium/media/audio/audio_parameters.h124
-rw-r--r--chromium/media/audio/audio_parameters_unittest.cc168
-rw-r--r--chromium/media/audio/audio_power_monitor.cc94
-rw-r--r--chromium/media/audio/audio_power_monitor.h87
-rw-r--r--chromium/media/audio/audio_power_monitor_unittest.cc304
-rw-r--r--chromium/media/audio/audio_source_diverter.h40
-rw-r--r--chromium/media/audio/audio_util.cc99
-rw-r--r--chromium/media/audio/audio_util.h31
-rw-r--r--chromium/media/audio/clockless_audio_sink.cc107
-rw-r--r--chromium/media/audio/clockless_audio_sink.h55
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.cc133
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.h66
-rw-r--r--chromium/media/audio/cras/cras_input.cc283
-rw-r--r--chromium/media/audio/cras/cras_input.h108
-rw-r--r--chromium/media/audio/cras/cras_input_unittest.cc214
-rw-r--r--chromium/media/audio/cras/cras_unified.cc369
-rw-r--r--chromium/media/audio/cras/cras_unified.h122
-rw-r--r--chromium/media/audio/cras/cras_unified_unittest.cc157
-rw-r--r--chromium/media/audio/cross_process_notification.cc30
-rw-r--r--chromium/media/audio/cross_process_notification.h172
-rw-r--r--chromium/media/audio/cross_process_notification_posix.cc114
-rw-r--r--chromium/media/audio/cross_process_notification_unittest.cc462
-rw-r--r--chromium/media/audio/cross_process_notification_win.cc270
-rw-r--r--chromium/media/audio/fake_audio_consumer.cc162
-rw-r--r--chromium/media/audio/fake_audio_consumer.h54
-rw-r--r--chromium/media/audio/fake_audio_consumer_unittest.cc143
-rw-r--r--chromium/media/audio/fake_audio_input_stream.cc170
-rw-r--r--chromium/media/audio/fake_audio_input_stream.h76
-rw-r--r--chromium/media/audio/fake_audio_output_stream.cc67
-rw-r--r--chromium/media/audio/fake_audio_output_stream.h50
-rw-r--r--chromium/media/audio/ios/audio_manager_ios.h56
-rw-r--r--chromium/media/audio/ios/audio_manager_ios.mm140
-rw-r--r--chromium/media/audio/ios/audio_manager_ios_unittest.cc34
-rw-r--r--chromium/media/audio/ios/audio_session_util_ios.h17
-rw-r--r--chromium/media/audio/ios/audio_session_util_ios.mm40
-rw-r--r--chromium/media/audio/linux/alsa_input.cc340
-rw-r--r--chromium/media/audio/linux/alsa_input.h92
-rw-r--r--chromium/media/audio/linux/alsa_output.cc765
-rw-r--r--chromium/media/audio/linux/alsa_output.h228
-rw-r--r--chromium/media/audio/linux/alsa_output_unittest.cc868
-rw-r--r--chromium/media/audio/linux/alsa_util.cc200
-rw-r--r--chromium/media/audio/linux/alsa_util.h47
-rw-r--r--chromium/media/audio/linux/alsa_wrapper.cc173
-rw-r--r--chromium/media/audio/linux/alsa_wrapper.h81
-rw-r--r--chromium/media/audio/linux/audio_manager_linux.cc352
-rw-r--r--chromium/media/audio/linux/audio_manager_linux.h82
-rw-r--r--chromium/media/audio/mac/aggregate_device_manager.cc371
-rw-r--r--chromium/media/audio/mac/aggregate_device_manager.h58
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac.cc542
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac.h167
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac_unittest.cc219
-rw-r--r--chromium/media/audio/mac/audio_device_listener_mac.cc77
-rw-r--r--chromium/media/audio/mac/audio_device_listener_mac.h46
-rw-r--r--chromium/media/audio/mac/audio_device_listener_mac_unittest.cc87
-rw-r--r--chromium/media/audio/mac/audio_input_mac.cc231
-rw-r--r--chromium/media/audio/mac/audio_input_mac.h88
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac.cc664
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac.h169
-rw-r--r--chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc317
-rw-r--r--chromium/media/audio/mac/audio_low_latency_output_mac.cc416
-rw-r--r--chromium/media/audio/mac/audio_low_latency_output_mac.h115
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.cc610
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.h90
-rw-r--r--chromium/media/audio/mac/audio_synchronized_mac.cc977
-rw-r--r--chromium/media/audio/mac/audio_synchronized_mac.h216
-rw-r--r--chromium/media/audio/mac/audio_unified_mac.cc398
-rw-r--r--chromium/media/audio/mac/audio_unified_mac.h100
-rw-r--r--chromium/media/audio/mock_audio_manager.cc85
-rw-r--r--chromium/media/audio/mock_audio_manager.h71
-rw-r--r--chromium/media/audio/null_audio_sink.cc91
-rw-r--r--chromium/media/audio/null_audio_sink.h64
-rw-r--r--chromium/media/audio/openbsd/audio_manager_openbsd.cc154
-rw-r--r--chromium/media/audio/openbsd/audio_manager_openbsd.h54
-rw-r--r--chromium/media/audio/pulse/audio_manager_pulse.cc318
-rw-r--r--chromium/media/audio/pulse/audio_manager_pulse.h87
-rw-r--r--chromium/media/audio/pulse/pulse.sigs52
-rw-r--r--chromium/media/audio/pulse/pulse_input.cc292
-rw-r--r--chromium/media/audio/pulse/pulse_input.h85
-rw-r--r--chromium/media/audio/pulse/pulse_output.cc216
-rw-r--r--chromium/media/audio/pulse/pulse_output.h92
-rw-r--r--chromium/media/audio/pulse/pulse_stub_header.fragment8
-rw-r--r--chromium/media/audio/pulse/pulse_unified.cc292
-rw-r--r--chromium/media/audio/pulse/pulse_unified.h90
-rw-r--r--chromium/media/audio/pulse/pulse_util.cc315
-rw-r--r--chromium/media/audio/pulse/pulse_util.h80
-rw-r--r--chromium/media/audio/sample_rates.cc26
-rw-r--r--chromium/media/audio/sample_rates.h35
-rw-r--r--chromium/media/audio/scoped_loop_observer.cc47
-rw-r--r--chromium/media/audio/scoped_loop_observer.h50
-rw-r--r--chromium/media/audio/shared_memory_util.cc70
-rw-r--r--chromium/media/audio/shared_memory_util.h39
-rw-r--r--chromium/media/audio/simple_sources.cc73
-rw-r--r--chromium/media/audio/simple_sources.h53
-rw-r--r--chromium/media/audio/simple_sources_unittest.cc78
-rw-r--r--chromium/media/audio/test_audio_input_controller_factory.cc69
-rw-r--r--chromium/media/audio/test_audio_input_controller_factory.h121
-rw-r--r--chromium/media/audio/virtual_audio_input_stream.cc188
-rw-r--r--chromium/media/audio/virtual_audio_input_stream.h116
-rw-r--r--chromium/media/audio/virtual_audio_input_stream_unittest.cc358
-rw-r--r--chromium/media/audio/virtual_audio_output_stream.cc87
-rw-r--r--chromium/media/audio/virtual_audio_output_stream.h70
-rw-r--r--chromium/media/audio/virtual_audio_output_stream_unittest.cc122
-rw-r--r--chromium/media/audio/win/audio_device_listener_win.cc159
-rw-r--r--chromium/media/audio/win/audio_device_listener_win.h61
-rw-r--r--chromium/media/audio/win/audio_device_listener_win_unittest.cc103
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.cc641
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.h209
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win_unittest.cc405
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.cc685
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.h262
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win_unittest.cc703
-rw-r--r--chromium/media/audio/win/audio_manager_win.cc455
-rw-r--r--chromium/media/audio/win/audio_manager_win.h87
-rw-r--r--chromium/media/audio/win/audio_output_win_unittest.cc713
-rw-r--r--chromium/media/audio/win/audio_unified_win.cc1000
-rw-r--r--chromium/media/audio/win/audio_unified_win.h352
-rw-r--r--chromium/media/audio/win/audio_unified_win_unittest.cc366
-rw-r--r--chromium/media/audio/win/avrt_wrapper_win.cc64
-rw-r--r--chromium/media/audio/win/avrt_wrapper_win.h39
-rw-r--r--chromium/media/audio/win/core_audio_util_win.cc718
-rw-r--r--chromium/media/audio/win/core_audio_util_win.h191
-rw-r--r--chromium/media/audio/win/core_audio_util_win_unittest.cc453
-rw-r--r--chromium/media/audio/win/device_enumeration_win.cc177
-rw-r--r--chromium/media/audio/win/device_enumeration_win.h40
-rw-r--r--chromium/media/audio/win/wavein_input_win.cc316
-rw-r--r--chromium/media/audio/win/wavein_input_win.h131
-rw-r--r--chromium/media/audio/win/waveout_output_win.cc410
-rw-r--r--chromium/media/audio/win/waveout_output_win.h141
181 files changed, 36226 insertions, 0 deletions
diff --git a/chromium/media/audio/OWNERS b/chromium/media/audio/OWNERS
new file mode 100644
index 00000000000..17c8eccedba
--- /dev/null
+++ b/chromium/media/audio/OWNERS
@@ -0,0 +1,11 @@
+tommi@chromium.org
+
+# Linux/Pulse
+xians@chromium.org
+
+# Windows
+henrika@chromium.org
+
+# Mirroring (and related glue) OWNERS.
+justinlin@chromium.org
+miu@chromium.org
diff --git a/chromium/media/audio/agc_audio_stream.h b/chromium/media/audio/agc_audio_stream.h
new file mode 100644
index 00000000000..b289a0b15e9
--- /dev/null
+++ b/chromium/media/audio/agc_audio_stream.h
@@ -0,0 +1,208 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AGC_AUDIO_STREAM_H_
+#define MEDIA_AUDIO_AGC_AUDIO_STREAM_H_
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "base/timer/timer.h"
+#include "media/audio/audio_io.h"
+
+// The template based AgcAudioStream implements platform-independent parts
+// of the AudioInterface interface. Supported interfaces to pass as
+// AudioInterface are AudioInputStream and AudioOutputStream. Each platform-
+// dependent implementation should derive from this class.
+//
+// Usage example (on Windows):
+//
+// class WASAPIAudioInputStream : public AgcAudioStream<AudioInputStream> {
+// public:
+// WASAPIAudioInputStream();
+// ...
+// };
+//
+// Call flow example:
+//
+// 1) User creates AgcAudioStream<AudioInputStream>
+// 2) User calls AudioInputStream::SetAutomaticGainControl(true) =>
+// AGC usage is now initialized but not yet started.
+// 3) User calls AudioInputStream::Start() => implementation calls
+// AgcAudioStream<AudioInputStream>::StartAgc() which detects that AGC
+// is enabled and then starts the periodic AGC timer.
+// 4) Microphone volume samples are now taken and included in all
+// AudioInputCallback::OnData() callbacks.
+// 5) User calls AudioInputStream::Stop() => implementation calls
+// AgcAudioStream<AudioInputStream>::StopAgc() which stops the timer.
+//
+// Note that, calling AudioInputStream::SetAutomaticGainControl(false) while
+// AGC measurements are active will not have an effect until StopAgc(),
+// StartAgc() are called again since SetAutomaticGainControl() only sets
+// a state.
+//
+// Calling SetAutomaticGainControl(true) enables the AGC and StartAgc() starts
+// a periodic timer which calls QueryAndStoreNewMicrophoneVolume()
+// approximately once every second. QueryAndStoreNewMicrophoneVolume() asks
+// the actual microphone about its current volume level. This value is
+// normalized and stored so it can be read by GetAgcVolume() when the real-time
+// audio thread needs the value. The main idea behind this scheme is to avoid
+// accessing the audio hardware from the real-time audio thread and to ensure
+// that we don't take new microphone-level samples too often (~1 Hz is a
+// suitable compromise). The timer will be active until StopAgc() is called.
+//
+// This class should be created and destroyed on the audio manager thread and
+// a thread checker is added to ensure that this is the case (uses DCHECK).
+// All methods except GetAgcVolume() should be called on the creating thread
+// as well to ensure that thread safety is maintained. It will also guarantee
+// that the periodic timer runs on the audio manager thread.
+// |normalized_volume_|, which is updated by QueryAndStoreNewMicrophoneVolume()
+// and read in GetAgcVolume(), is protected by a lock to ensure that it can
+// be accessed from any real-time audio thread that needs it to update its
+// AGC volume.
+
+namespace media {
+
+template <typename AudioInterface>
+class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
+ public:
+ // Time between two successive timer events.
+ static const int kIntervalBetweenVolumeUpdatesMs = 1000;
+
+ AgcAudioStream()
+ : agc_is_enabled_(false), max_volume_(0.0), normalized_volume_(0.0) {
+ DVLOG(1) << __FUNCTION__;
+ }
+
+ virtual ~AgcAudioStream() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(1) << __FUNCTION__;
+ }
+
+ protected:
+ // Starts the periodic timer which periodically checks and updates the
+ // current microphone volume level.
+ // The timer is only started if AGC mode is first enabled using the
+ // SetAutomaticGainControl() method.
+ void StartAgc() {
+ DVLOG(1) << "StartAgc()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!agc_is_enabled_ || timer_.IsRunning())
+ return;
+
+ // Query and cache the volume to avoid sending 0 as volume to AGC at the
+ // beginning of the audio stream, otherwise AGC will try to raise the
+ // volume from 0.
+ QueryAndStoreNewMicrophoneVolume();
+
+ timer_.Start(FROM_HERE,
+ base::TimeDelta::FromMilliseconds(kIntervalBetweenVolumeUpdatesMs),
+ this, &AgcAudioStream::QueryAndStoreNewMicrophoneVolume);
+ }
+
+ // Stops the periodic timer which periodically checks and updates the
+ // current microphone volume level.
+ void StopAgc() {
+ DVLOG(1) << "StopAgc()";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (timer_.IsRunning())
+ timer_.Stop();
+ }
+
+ // Stores a new microphone volume level by checking the audio input device.
+ // Called on the audio manager thread.
+ void UpdateAgcVolume() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (!timer_.IsRunning())
+ return;
+
+ // We take new volume samples once every second when the AGC is enabled.
+ // To ensure that a new setting has an immediate effect, the new volume
+ // setting is cached here. It will ensure that the next OnData() callback
+ // will contain a new valid volume level. If this approach was not taken,
+ // we could report invalid volume levels to the client for a time period
+ // of up to one second.
+ QueryAndStoreNewMicrophoneVolume();
+ }
+
+ // Gets the latest stored volume level if AGC is enabled.
+ // Called at each capture callback on a real-time capture thread (platform
+ // dependent).
+ void GetAgcVolume(double* normalized_volume) {
+ base::AutoLock lock(lock_);
+ *normalized_volume = normalized_volume_;
+ }
+
+ private:
+ // Sets the automatic gain control (AGC) to on or off. When AGC is enabled,
+ // the microphone volume is queried periodically and the volume level can
+ // be read in each AudioInputCallback::OnData() callback and fed to the
+ // render-side AGC. User must call StartAgc() as well to start measuring
+ // the microphone level.
+ virtual void SetAutomaticGainControl(bool enabled) OVERRIDE {
+ DVLOG(1) << "SetAutomaticGainControl(enabled=" << enabled << ")";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ agc_is_enabled_ = enabled;
+ }
+
+ // Gets the current automatic gain control state.
+ virtual bool GetAutomaticGainControl() OVERRIDE {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return agc_is_enabled_;
+ }
+
+ // Takes a new microphone volume sample and stores it in |normalized_volume_|.
+ // Range is normalized to [0.0,1.0] or [0.0, 1.5] on Linux.
+ // This method is called periodically when AGC is enabled and always on the
+ // audio manager thread. We use it to read the current microphone level and
+ // to store it so it can be read by the main capture thread. By using this
+ // approach, we can avoid accessing audio hardware from a real-time audio
+ // thread and it leads to a more stable capture performance.
+ void QueryAndStoreNewMicrophoneVolume() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Cache the maximum volume if this is the first time we ask for it.
+ if (max_volume_ == 0.0)
+ max_volume_ = static_cast<AudioInterface*>(this)->GetMaxVolume();
+
+ // Retrieve the current volume level by asking the audio hardware.
+ // Range is normalized to [0.0,1.0] or [0.0, 1.5] on Linux.
+ if (max_volume_ != 0.0) {
+ double normalized_volume =
+ static_cast<AudioInterface*>(this)->GetVolume() / max_volume_;
+ base::AutoLock auto_lock(lock_);
+ normalized_volume_ = normalized_volume;
+ }
+ }
+
+ // Ensures that this class is created and destroyed on the same thread.
+ base::ThreadChecker thread_checker_;
+
+ // Repeating timer which cancels itself when it goes out of scope.
+ // Used to check the microphone volume periodically.
+ base::RepeatingTimer<AgcAudioStream<AudioInterface> > timer_;
+
+ // True when automatic gain control is enabled, false otherwise.
+ bool agc_is_enabled_;
+
+ // Stores the maximum volume which is used for normalization to a volume
+ // range of [0.0, 1.0].
+ double max_volume_;
+
+ // Contains last result of internal call to GetVolume(). We save resources
+ // by not querying the capture volume for each callback. Guarded by |lock_|.
+ // The range is normalized to [0.0, 1.0].
+ double normalized_volume_;
+
+ // Protects |normalized_volume_|.
+ base::Lock lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(AgcAudioStream<AudioInterface>);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AGC_AUDIO_STREAM_H_
diff --git a/chromium/media/audio/android/audio_manager_android.cc b/chromium/media/audio/android/audio_manager_android.cc
new file mode 100644
index 00000000000..69bd2b82cd4
--- /dev/null
+++ b/chromium/media/audio/android/audio_manager_android.cc
@@ -0,0 +1,204 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/android/audio_manager_android.h"
+
+#include "base/logging.h"
+#include "jni/AudioManagerAndroid_jni.h"
+#include "media/audio/android/opensles_input.h"
+#include "media/audio/android/opensles_output.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/fake_audio_input_stream.h"
+#include "media/base/channel_layout.h"
+
+namespace media {
+
+// Maximum number of output streams that can be open simultaneously.
+static const int kMaxOutputStreams = 10;
+
+// Android audio mode values passed to SetAudioMode(); presumably mirror
+// android.media.AudioManager.MODE_NORMAL / MODE_IN_COMMUNICATION — confirm.
+static const int kAudioModeNormal = 0x00000000;
+static const int kAudioModeInCommunication = 0x00000003;
+
+// Fallback buffer sizes (in frames) used when the Java layer reports a
+// non-positive minimum frame size (input) or as a lower bound (output).
+static const int kDefaultInputBufferSize = 1024;
+static const int kDefaultOutputBufferSize = 2048;
+
+// Factory returning the Android implementation of AudioManager.
+AudioManager* CreateAudioManager() {
+ return new AudioManagerAndroid();
+}
+
+AudioManagerAndroid::AudioManagerAndroid() {
+ SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+
+ // Create the Java-side AudioManagerAndroid helper over JNI and hold a
+ // global reference to it for the lifetime of this object.
+ j_audio_manager_.Reset(
+ Java_AudioManagerAndroid_createAudioManagerAndroid(
+ base::android::AttachCurrentThread(),
+ base::android::GetApplicationContext()));
+}
+
+AudioManagerAndroid::~AudioManagerAndroid() {
+ // Tear down base-class state (streams, threads) before members go away.
+ Shutdown();
+}
+
+// Always reports an available output device on Android.
+bool AudioManagerAndroid::HasAudioOutputDevices() {
+ return true;
+}
+
+// Always reports an available input device on Android.
+bool AudioManagerAndroid::HasAudioInputDevices() {
+ return true;
+}
+
+void AudioManagerAndroid::GetAudioInputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ // Only the default device is ever enumerated on Android.
+ device_names->push_front(
+ media::AudioDeviceName(kDefaultDeviceName, kDefaultDeviceId));
+}
+
+// Returns the preferred input parameters. |device_id| is ignored; the
+// result is fixed to stereo, 16-bit, at the native output sample rate.
+// Falls back to kDefaultInputBufferSize when the Java layer reports a
+// non-positive minimum frame size.
+AudioParameters AudioManagerAndroid::GetInputStreamParameters(
+ const std::string& device_id) {
+ int buffer_size = Java_AudioManagerAndroid_getMinInputFrameSize(
+ base::android::AttachCurrentThread(), GetNativeOutputSampleRate(), 2);
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ GetNativeOutputSampleRate(), 16,
+ buffer_size <= 0 ? kDefaultInputBufferSize : buffer_size);
+}
+
+AudioOutputStream* AudioManagerAndroid::MakeAudioOutputStream(
+ const AudioParameters& params, const std::string& input_device_id) {
+ // |input_device_id| is ignored; an empty id is forwarded to the base class.
+ AudioOutputStream* stream =
+ AudioManagerBase::MakeAudioOutputStream(params, std::string());
+ // When the first output stream comes up, switch the device into
+ // in-communication audio mode and start listening for headset changes.
+ // ReleaseOutputStream() undoes this when the last stream is released.
+ if (stream && output_stream_count() == 1) {
+ SetAudioMode(kAudioModeInCommunication);
+ RegisterHeadsetReceiver();
+ }
+ return stream;
+}
+
+// Plain pass-through to the base-class implementation.
+AudioInputStream* AudioManagerAndroid::MakeAudioInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ AudioInputStream* stream =
+ AudioManagerBase::MakeAudioInputStream(params, device_id);
+ return stream;
+}
+
+void AudioManagerAndroid::ReleaseOutputStream(AudioOutputStream* stream) {
+ AudioManagerBase::ReleaseOutputStream(stream);
+ // Once the last output stream is gone, undo the setup performed in
+ // MakeAudioOutputStream(): stop headset monitoring and restore normal mode.
+ if (!output_stream_count()) {
+ UnregisterHeadsetReceiver();
+ SetAudioMode(kAudioModeNormal);
+ }
+}
+
+// Plain pass-through to the base-class implementation.
+void AudioManagerAndroid::ReleaseInputStream(AudioInputStream* stream) {
+ AudioManagerBase::ReleaseInputStream(stream);
+}
+
+// Linear output streams are backed by OpenSL ES, same as low-latency ones.
+AudioOutputStream* AudioManagerAndroid::MakeLinearOutputStream(
+ const AudioParameters& params) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return new OpenSLESOutputStream(this, params);
+}
+
+// |input_device_id| is unused; both output paths map onto OpenSL ES.
+AudioOutputStream* AudioManagerAndroid::MakeLowLatencyOutputStream(
+ const AudioParameters& params, const std::string& input_device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ return new OpenSLESOutputStream(this, params);
+}
+
+// |device_id| is unused; linear input streams are backed by OpenSL ES.
+AudioInputStream* AudioManagerAndroid::MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return new OpenSLESInputStream(this, params);
+}
+
+// |device_id| is unused; low-latency input streams are backed by OpenSL ES.
+AudioInputStream* AudioManagerAndroid::MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ return new OpenSLESInputStream(this, params);
+}
+
+// Picks an output buffer size in frames: prefer the platform-reported
+// low-latency frame size when supported, otherwise ask Java for the minimum
+// frame size for |sample_rate|/|channels|, floored at kDefaultOutputBufferSize.
+int AudioManagerAndroid::GetOptimalOutputFrameSize(int sample_rate,
+ int channels) {
+ if (IsAudioLowLatencySupported()) {
+ return GetAudioLowLatencyOutputFrameSize();
+ } else {
+ return std::max(kDefaultOutputBufferSize,
+ Java_AudioManagerAndroid_getMinOutputFrameSize(
+ base::android::AttachCurrentThread(),
+ sample_rate, channels));
+ }
+}
+
+// Builds the preferred output parameters. Defaults to stereo/16-bit at the
+// native sample rate; if |input_params| is valid its settings win, and an
+// explicit user-configured buffer size overrides everything.
+AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) {
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ int sample_rate = GetNativeOutputSampleRate();
+ int buffer_size = GetOptimalOutputFrameSize(sample_rate, 2);
+ int bits_per_sample = 16;
+ int input_channels = 0;
+ if (input_params.IsValid()) {
+ // Use the client's input parameters if they are valid.
+ sample_rate = input_params.sample_rate();
+ bits_per_sample = input_params.bits_per_sample();
+ channel_layout = input_params.channel_layout();
+ input_channels = input_params.input_channels();
+ buffer_size = GetOptimalOutputFrameSize(
+ sample_rate, ChannelLayoutToChannelCount(channel_layout));
+ }
+
+ // A non-zero user-specified buffer size takes precedence over the
+ // computed optimal size.
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ buffer_size = user_buffer_size;
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
+ sample_rate, bits_per_sample, buffer_size);
+}
+
+// static
+// Registers the generated JNI bindings for the Java AudioManagerAndroid class.
+bool AudioManagerAndroid::RegisterAudioManager(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+// JNI wrapper: forwards |mode| to the Java AudioManagerAndroid helper.
+void AudioManagerAndroid::SetAudioMode(int mode) {
+ Java_AudioManagerAndroid_setMode(
+ base::android::AttachCurrentThread(),
+ j_audio_manager_.obj(), mode);
+}
+
+// JNI wrapper: asks the Java helper to start headset-plug monitoring.
+void AudioManagerAndroid::RegisterHeadsetReceiver() {
+ Java_AudioManagerAndroid_registerHeadsetReceiver(
+ base::android::AttachCurrentThread(),
+ j_audio_manager_.obj());
+}
+
+// JNI wrapper: asks the Java helper to stop headset-plug monitoring.
+void AudioManagerAndroid::UnregisterHeadsetReceiver() {
+ Java_AudioManagerAndroid_unregisterHeadsetReceiver(
+ base::android::AttachCurrentThread(),
+ j_audio_manager_.obj());
+}
+
+// JNI wrapper: returns the device's native output sample rate in Hz.
+int AudioManagerAndroid::GetNativeOutputSampleRate() {
+ return Java_AudioManagerAndroid_getNativeOutputSampleRate(
+ base::android::AttachCurrentThread(),
+ j_audio_manager_.obj());
+}
+
+// JNI wrapper: whether the platform reports low-latency audio support.
+bool AudioManagerAndroid::IsAudioLowLatencySupported() {
+ return Java_AudioManagerAndroid_isAudioLowLatencySupported(
+ base::android::AttachCurrentThread(),
+ j_audio_manager_.obj());
+}
+
+// JNI wrapper: the platform's preferred low-latency output frame size.
+int AudioManagerAndroid::GetAudioLowLatencyOutputFrameSize() {
+ return Java_AudioManagerAndroid_getAudioLowLatencyOutputFrameSize(
+ base::android::AttachCurrentThread(),
+ j_audio_manager_.obj());
+}
+
+} // namespace media
diff --git a/chromium/media/audio/android/audio_manager_android.h b/chromium/media/audio/android/audio_manager_android.h
new file mode 100644
index 00000000000..fa1c3736a35
--- /dev/null
+++ b/chromium/media/audio/android/audio_manager_android.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_ANDROID_AUDIO_MANAGER_ANDROID_H_
+#define MEDIA_AUDIO_ANDROID_AUDIO_MANAGER_ANDROID_H_
+
+#include "base/android/jni_android.h"
+#include "media/audio/audio_manager_base.h"
+
+namespace media {
+
+// Android implementation of AudioManager.
+class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
+ public:
+ AudioManagerAndroid();
+
+ // Implementation of AudioManager.
+ virtual bool HasAudioOutputDevices() OVERRIDE;
+ virtual bool HasAudioInputDevices() OVERRIDE;
+ virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
+ OVERRIDE;
+ virtual AudioParameters GetInputStreamParameters(
+ const std::string& device_id) OVERRIDE;
+
+ virtual AudioOutputStream* MakeAudioOutputStream(
+ const AudioParameters& params,
+ const std::string& input_device_id) OVERRIDE;
+ virtual AudioInputStream* MakeAudioInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE;
+ virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE;
+
+ // Implementation of AudioManagerBase.
+ virtual AudioOutputStream* MakeLinearOutputStream(
+ const AudioParameters& params) OVERRIDE;
+ virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& input_device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+
+ // Registers the generated JNI bindings for this class.
+ static bool RegisterAudioManager(JNIEnv* env);
+
+ protected:
+ virtual ~AudioManagerAndroid();
+
+ virtual AudioParameters GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) OVERRIDE;
+
+ private:
+ // Thin JNI wrappers around the Java AudioManagerAndroid helper object.
+ void SetAudioMode(int mode);
+ void RegisterHeadsetReceiver();
+ void UnregisterHeadsetReceiver();
+ int GetNativeOutputSampleRate();
+ bool IsAudioLowLatencySupported();
+ int GetAudioLowLatencyOutputFrameSize();
+ int GetOptimalOutputFrameSize(int sample_rate, int channels);
+
+ // Java AudioManager instance.
+ base::android::ScopedJavaGlobalRef<jobject> j_audio_manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioManagerAndroid);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_ANDROID_AUDIO_MANAGER_ANDROID_H_
diff --git a/chromium/media/audio/android/opensles_input.cc b/chromium/media/audio/android/opensles_input.cc
new file mode 100644
index 00000000000..15c3eac3726
--- /dev/null
+++ b/chromium/media/audio/android/opensles_input.cc
@@ -0,0 +1,311 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/android/opensles_input.h"
+
+#include "base/logging.h"
+#include "media/audio/android/audio_manager_android.h"
+
+// Evaluates |op| (an OpenSLES call returning SLresult); on failure logs the
+// expression and error code, then returns the macro's optional argument
+// (e.g. false) from the calling function.
+#define LOG_ON_FAILURE_AND_RETURN(op, ...) \
+  do { \
+    SLresult err = (op); \
+    if (err != SL_RESULT_SUCCESS) { \
+      DLOG(ERROR) << #op << " failed: " << err; \
+      return __VA_ARGS__; \
+    } \
+  } while (0)
+
+namespace media {
+
+// Captures the PCM format from |params| and computes the per-buffer size.
+// No OpenSLES objects are created and no buffers are allocated here; that
+// happens in Open().
+OpenSLESInputStream::OpenSLESInputStream(AudioManagerAndroid* audio_manager,
+                                         const AudioParameters& params)
+    : audio_manager_(audio_manager),
+      callback_(NULL),
+      recorder_(NULL),
+      simple_buffer_queue_(NULL),
+      active_queue_(0),
+      buffer_size_bytes_(0),
+      started_(false) {
+  format_.formatType = SL_DATAFORMAT_PCM;
+  format_.numChannels = static_cast<SLuint32>(params.channels());
+  // Provides sampling rate in milliHertz to OpenSLES.
+  format_.samplesPerSec = static_cast<SLuint32>(params.sample_rate() * 1000);
+  format_.bitsPerSample = params.bits_per_sample();
+  format_.containerSize = params.bits_per_sample();
+  format_.endianness = SL_BYTEORDER_LITTLEENDIAN;
+  // Only mono and stereo capture are supported.
+  if (format_.numChannels == 1)
+    format_.channelMask = SL_SPEAKER_FRONT_CENTER;
+  else if (format_.numChannels == 2)
+    format_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+  else
+    NOTREACHED() << "Unsupported number of channels: " << format_.numChannels;
+
+  buffer_size_bytes_ = params.GetBytesPerBuffer();
+
+  // Zero the buffer-pointer array; the buffers are allocated in Open().
+  memset(&audio_data_, 0, sizeof(audio_data_));
+}
+
+// Verifies that Close() ran first: all OpenSLES objects must already be
+// destroyed and the capture buffers freed.
+OpenSLESInputStream::~OpenSLESInputStream() {
+  DCHECK(!recorder_object_.Get());
+  DCHECK(!engine_object_.Get());
+  DCHECK(!recorder_);
+  DCHECK(!simple_buffer_queue_);
+  DCHECK(!audio_data_[0]);
+}
+
+// Creates the OpenSLES engine + recorder objects and allocates the capture
+// buffers. Returns false if the stream is already open or if any OpenSLES
+// call fails.
+bool OpenSLESInputStream::Open() {
+  // A live engine object means Open() has already succeeded once.
+  if (engine_object_.Get())
+    return false;
+
+  if (!CreateRecorder())
+    return false;
+
+  SetupAudioBuffer();
+
+  return true;
+}
+
+// Begins capturing. |callback| receives recorded buffers via OnData() from
+// an OpenSLES-internal thread. Requires a successful Open(); no-op if the
+// stream is already started. Errors are reported through HandleError().
+void OpenSLESInputStream::Start(AudioInputCallback* callback) {
+  DCHECK(callback);
+  DCHECK(recorder_);
+  DCHECK(simple_buffer_queue_);
+  if (started_)
+    return;
+
+  // Enable the flags before streaming.
+  callback_ = callback;
+  active_queue_ = 0;
+  started_ = true;
+
+  SLresult err = SL_RESULT_UNKNOWN_ERROR;
+  // Enqueues |kNumOfQueuesInBuffer| zero buffers to get the ball rolling.
+  // NOTE(review): the buffers from SetupAudioBuffer() are not actually
+  // zero-filled; the recorder overwrites them before the data is delivered.
+  for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
+    err = (*simple_buffer_queue_)->Enqueue(
+        simple_buffer_queue_,
+        audio_data_[i],
+        buffer_size_bytes_);
+    if (SL_RESULT_SUCCESS != err) {
+      HandleError(err);
+      return;
+    }
+  }
+
+  // Start the recording by setting the state to |SL_RECORDSTATE_RECORDING|.
+  err = (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_RECORDING);
+  if (SL_RESULT_SUCCESS != err)
+    HandleError(err);
+}
+
+// Stops capturing and discards any queued buffers. Safe to call when not
+// started. NOTE(review): if SetRecordState() or Clear() fails, the macro
+// returns early and |started_| remains true — confirm this is intended.
+void OpenSLESInputStream::Stop() {
+  if (!started_)
+    return;
+
+  // Stop recording by setting the record state to |SL_RECORDSTATE_STOPPED|.
+  LOG_ON_FAILURE_AND_RETURN(
+      (*recorder_)->SetRecordState(recorder_,
+                                   SL_RECORDSTATE_STOPPED));
+
+  // Clear the buffer queue to get rid of old data when resuming recording.
+  LOG_ON_FAILURE_AND_RETURN(
+      (*simple_buffer_queue_)->Clear(simple_buffer_queue_));
+
+  started_ = false;
+}
+
+// Stops the stream, destroys the OpenSLES objects in reverse creation order,
+// frees the capture buffers and returns |this| to the audio manager, which
+// owns (and deletes) the stream. Do not use |this| after calling Close().
+void OpenSLESInputStream::Close() {
+  // Stop the stream if it is still recording.
+  Stop();
+
+  // Explicitly free the player objects and invalidate their associated
+  // interfaces. They have to be done in the correct order.
+  recorder_object_.Reset();
+  engine_object_.Reset();
+  // Interface pointers obtained from the destroyed objects are now dangling.
+  simple_buffer_queue_ = NULL;
+  recorder_ = NULL;
+
+  ReleaseAudioBuffer();
+
+  audio_manager_->ReleaseInputStream(this);
+}
+
+// Volume control and automatic gain control are not implemented for the
+// OpenSLES input path; these stubs satisfy the AudioInputStream interface.
+double OpenSLESInputStream::GetMaxVolume() {
+  NOTIMPLEMENTED();
+  return 0.0;
+}
+
+void OpenSLESInputStream::SetVolume(double volume) {
+  NOTIMPLEMENTED();
+}
+
+double OpenSLESInputStream::GetVolume() {
+  NOTIMPLEMENTED();
+  return 0.0;
+}
+
+void OpenSLESInputStream::SetAutomaticGainControl(bool enabled) {
+  NOTIMPLEMENTED();
+}
+
+bool OpenSLESInputStream::GetAutomaticGainControl() {
+  NOTIMPLEMENTED();
+  return false;
+}
+
+// Builds the OpenSLES object graph for capture: engine -> audio recorder
+// (mic source, buffer-queue sink), configures the Android recording preset,
+// fetches the record and buffer-queue interfaces and registers the data
+// callback. Returns false on the first failing OpenSLES call.
+bool OpenSLESInputStream::CreateRecorder() {
+  // Initializes the engine object with specific option. After working with the
+  // object, we need to free the object and its resources.
+  SLEngineOption option[] = {
+    { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) }
+  };
+  LOG_ON_FAILURE_AND_RETURN(slCreateEngine(engine_object_.Receive(),
+                                           1,
+                                           option,
+                                           0,
+                                           NULL,
+                                           NULL),
+                            false);
+
+  // Realize the SL engine object in synchronous mode.
+  LOG_ON_FAILURE_AND_RETURN(engine_object_->Realize(engine_object_.Get(),
+                                                    SL_BOOLEAN_FALSE),
+                            false);
+
+  // Get the SL engine interface which is implicit.
+  SLEngineItf engine;
+  LOG_ON_FAILURE_AND_RETURN(engine_object_->GetInterface(engine_object_.Get(),
+                                                         SL_IID_ENGINE,
+                                                         &engine),
+                            false);
+
+  // Audio source configuration: the default audio input device (microphone).
+  SLDataLocator_IODevice mic_locator = {
+    SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
+    SL_DEFAULTDEVICEID_AUDIOINPUT, NULL
+  };
+  SLDataSource audio_source = { &mic_locator, NULL };
+
+  // Audio sink configuration: recorded PCM lands in our buffer queue.
+  SLDataLocator_AndroidSimpleBufferQueue buffer_queue = {
+    SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,  // Locator type.
+    static_cast<SLuint32>(kNumOfQueuesInBuffer)  // Number of buffers.
+  };
+  SLDataSink audio_sink = { &buffer_queue, &format_ };
+
+  // Create an audio recorder.
+  const SLInterfaceID interface_id[] = {
+    SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+    SL_IID_ANDROIDCONFIGURATION
+  };
+  const SLboolean interface_required[] = {
+    SL_BOOLEAN_TRUE,
+    SL_BOOLEAN_TRUE
+  };
+  // Create AudioRecorder and specify SL_IID_ANDROIDCONFIGURATION.
+  LOG_ON_FAILURE_AND_RETURN(
+      (*engine)->CreateAudioRecorder(engine,
+                                     recorder_object_.Receive(),
+                                     &audio_source,
+                                     &audio_sink,
+                                     arraysize(interface_id),
+                                     interface_id,
+                                     interface_required),
+      false);
+
+  SLAndroidConfigurationItf recorder_config;
+  LOG_ON_FAILURE_AND_RETURN(
+      recorder_object_->GetInterface(recorder_object_.Get(),
+                                     SL_IID_ANDROIDCONFIGURATION,
+                                     &recorder_config),
+      false);
+
+  // Select the voice-communication preset; must happen before Realize().
+  SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
+  LOG_ON_FAILURE_AND_RETURN(
+      (*recorder_config)->SetConfiguration(recorder_config,
+                                           SL_ANDROID_KEY_RECORDING_PRESET,
+                                           &stream_type, sizeof(SLint32)),
+      false);
+
+  // Realize the recorder object in synchronous mode.
+  LOG_ON_FAILURE_AND_RETURN(
+      recorder_object_->Realize(recorder_object_.Get(),
+                                SL_BOOLEAN_FALSE),
+      false);
+
+  // Get an implicit recorder interface.
+  LOG_ON_FAILURE_AND_RETURN(
+      recorder_object_->GetInterface(recorder_object_.Get(),
+                                     SL_IID_RECORD,
+                                     &recorder_),
+      false);
+
+  // Get the simple buffer queue interface.
+  LOG_ON_FAILURE_AND_RETURN(
+      recorder_object_->GetInterface(recorder_object_.Get(),
+                                     SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+                                     &simple_buffer_queue_),
+      false);
+
+  // Register the input callback for the simple buffer queue.
+  // This callback will be called when receiving new data from the device.
+  LOG_ON_FAILURE_AND_RETURN(
+      (*simple_buffer_queue_)->RegisterCallback(simple_buffer_queue_,
+                                                SimpleBufferQueueCallback,
+                                                this),
+      false);
+
+  return true;
+}
+
+// Static trampoline invoked by OpenSLES (on an internal thread) each time a
+// capture buffer has been filled; |instance| is the stream registered in
+// CreateRecorder().
+void OpenSLESInputStream::SimpleBufferQueueCallback(
+    SLAndroidSimpleBufferQueueItf buffer_queue, void* instance) {
+  OpenSLESInputStream* stream =
+      reinterpret_cast<OpenSLESInputStream*>(instance);
+  stream->ReadBufferQueue();
+}
+
+// Delivers the just-filled buffer to the client and re-enqueues it for the
+// next capture round. Runs on the OpenSLES callback thread.
+void OpenSLESInputStream::ReadBufferQueue() {
+  // Ignore late callbacks delivered after Stop().
+  if (!started_)
+    return;
+
+  // TODO(xians): Get an accurate delay estimation.
+  callback_->OnData(this,
+                    audio_data_[active_queue_],
+                    buffer_size_bytes_,
+                    buffer_size_bytes_,
+                    0.0);
+
+  // Done with this buffer. Send it to device for recording.
+  SLresult err = (*simple_buffer_queue_)->Enqueue(
+      simple_buffer_queue_,
+      audio_data_[active_queue_],
+      buffer_size_bytes_);
+  if (SL_RESULT_SUCCESS != err)
+    HandleError(err);
+
+  // Advance to the next buffer in round-robin order.
+  active_queue_ = (active_queue_ + 1) % kNumOfQueuesInBuffer;
+}
+
+// Allocates one |buffer_size_bytes_| capture buffer per queue slot. The
+// buffers are uninitialized; the recorder fills them before delivery.
+void OpenSLESInputStream::SetupAudioBuffer() {
+  DCHECK(!audio_data_[0]);
+  for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
+    audio_data_[i] = new uint8[buffer_size_bytes_];
+  }
+}
+
+// Frees the capture buffers allocated by SetupAudioBuffer(). Safe to call
+// when the buffers were never allocated.
+void OpenSLESInputStream::ReleaseAudioBuffer() {
+  if (audio_data_[0]) {
+    for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
+      delete [] audio_data_[i];
+      audio_data_[i] = NULL;
+    }
+  }
+}
+
+// Logs an OpenSLES failure and forwards it to the client, if one is attached
+// (|callback_| is NULL until Start()). Uses DLOG(ERROR) rather than
+// DLOG(FATAL) so a recoverable device error does not abort debug builds;
+// this also matches OpenSLESOutputStream::HandleError().
+void OpenSLESInputStream::HandleError(SLresult error) {
+  DLOG(ERROR) << "OpenSLES Input error " << error;
+  if (callback_)
+    callback_->OnError(this);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/android/opensles_input.h b/chromium/media/audio/android/opensles_input.h
new file mode 100644
index 00000000000..9743992fc65
--- /dev/null
+++ b/chromium/media/audio/android/opensles_input.h
@@ -0,0 +1,88 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_ANDROID_OPENSLES_INPUT_H_
+#define MEDIA_AUDIO_ANDROID_OPENSLES_INPUT_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+
+#include "base/compiler_specific.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/android/opensles_util.h"
+
+namespace media {
+
+class AudioManagerAndroid;
+
+// Implements PCM audio input support for Android using the OpenSLES API.
+class OpenSLESInputStream : public AudioInputStream {
+ public:
+  // Number of buffers cycled through the OpenSLES simple buffer queue.
+  static const int kNumOfQueuesInBuffer = 2;
+
+  OpenSLESInputStream(AudioManagerAndroid* manager,
+                      const AudioParameters& params);
+
+  virtual ~OpenSLESInputStream();
+
+  // Implementation of AudioInputStream.
+  virtual bool Open() OVERRIDE;
+  virtual void Start(AudioInputCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  virtual double GetMaxVolume() OVERRIDE;
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual double GetVolume() OVERRIDE;
+  virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
+  virtual bool GetAutomaticGainControl() OVERRIDE;
+
+ private:
+  // Creates the OpenSLES engine/recorder objects and registers the
+  // buffer-queue callback. Returns false on any OpenSLES failure.
+  bool CreateRecorder();
+
+  // Trampoline invoked by OpenSLES when a capture buffer has been filled;
+  // |instance| is the OpenSLESInputStream passed at registration.
+  static void SimpleBufferQueueCallback(
+      SLAndroidSimpleBufferQueueItf buffer_queue, void* instance);
+
+  // Delivers the filled buffer to |callback_| and re-enqueues it.
+  void ReadBufferQueue();
+
+  // Called from Open(). Allocates the capture buffers.
+  void SetupAudioBuffer();
+
+  // Called from Close(). Frees the capture buffers.
+  void ReleaseAudioBuffer();
+
+  // If OpenSLES reports an error this function handles it and passes it to
+  // the attached AudioInputCallback::OnError().
+  void HandleError(SLresult error);
+
+  // The manager that owns this stream; notified in Close().
+  AudioManagerAndroid* audio_manager_;
+
+  // Client sink for recorded data; set in Start(), NULL before that.
+  AudioInputCallback* callback_;
+
+  // Shared engine interfaces for the app.
+  media::ScopedSLObjectItf recorder_object_;
+  media::ScopedSLObjectItf engine_object_;
+
+  SLRecordItf recorder_;
+
+  // Buffer queue recorder interface.
+  SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
+
+  // PCM capture format derived from the AudioParameters.
+  SLDataFormat_PCM format_;
+
+  // Audio buffers that are allocated in Open() (via SetupAudioBuffer()) based
+  // on info from audio parameters.
+  uint8* audio_data_[kNumOfQueuesInBuffer];
+
+  // Index of the buffer expected to be filled next (round-robin).
+  int active_queue_;
+  int buffer_size_bytes_;
+
+  bool started_;
+
+  DISALLOW_COPY_AND_ASSIGN(OpenSLESInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_ANDROID_OPENSLES_INPUT_H_
diff --git a/chromium/media/audio/android/opensles_output.cc b/chromium/media/audio/android/opensles_output.cc
new file mode 100644
index 00000000000..c6d455715d9
--- /dev/null
+++ b/chromium/media/audio/android/opensles_output.cc
@@ -0,0 +1,313 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/android/opensles_output.h"
+
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "media/audio/android/audio_manager_android.h"
+
+// Evaluates |op| (an OpenSLES call returning SLresult); on failure logs the
+// expression and error code, then returns the macro's optional argument
+// (e.g. false) from the calling function.
+#define LOG_ON_FAILURE_AND_RETURN(op, ...) \
+  do { \
+    SLresult err = (op); \
+    if (err != SL_RESULT_SUCCESS) { \
+      DLOG(ERROR) << #op << " failed: " << err; \
+      return __VA_ARGS__; \
+    } \
+  } while (0)
+
+namespace media {
+
+// Captures the PCM format from |params|, computes the per-buffer size and
+// creates the AudioBus used to fetch data from the client. No OpenSLES
+// objects are created and no buffers are allocated here; see Open().
+OpenSLESOutputStream::OpenSLESOutputStream(AudioManagerAndroid* manager,
+                                           const AudioParameters& params)
+    : audio_manager_(manager),
+      callback_(NULL),
+      player_(NULL),
+      simple_buffer_queue_(NULL),
+      active_queue_(0),
+      buffer_size_bytes_(0),
+      started_(false),
+      volume_(1.0) {
+  format_.formatType = SL_DATAFORMAT_PCM;
+  format_.numChannels = static_cast<SLuint32>(params.channels());
+  // Provides sampling rate in milliHertz to OpenSLES.
+  format_.samplesPerSec = static_cast<SLuint32>(params.sample_rate() * 1000);
+  format_.bitsPerSample = params.bits_per_sample();
+  format_.containerSize = params.bits_per_sample();
+  format_.endianness = SL_BYTEORDER_LITTLEENDIAN;
+  // Only mono and stereo playback are supported.
+  if (format_.numChannels == 1)
+    format_.channelMask = SL_SPEAKER_FRONT_CENTER;
+  else if (format_.numChannels == 2)
+    format_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+  else
+    NOTREACHED() << "Unsupported number of channels: " << format_.numChannels;
+
+  buffer_size_bytes_ = params.GetBytesPerBuffer();
+  audio_bus_ = AudioBus::Create(params);
+
+  // Zero the buffer-pointer array; the buffers are allocated in Open().
+  memset(&audio_data_, 0, sizeof(audio_data_));
+}
+
+// Verifies that Close() ran first: all OpenSLES objects must already be
+// destroyed and the playback buffers freed.
+OpenSLESOutputStream::~OpenSLESOutputStream() {
+  DCHECK(!engine_object_.Get());
+  DCHECK(!player_object_.Get());
+  DCHECK(!output_mixer_.Get());
+  DCHECK(!player_);
+  DCHECK(!simple_buffer_queue_);
+  DCHECK(!audio_data_[0]);
+}
+
+// Creates the OpenSLES engine/mixer/player objects and allocates the
+// playback buffers. Returns false if the stream is already open or if any
+// OpenSLES call fails.
+bool OpenSLESOutputStream::Open() {
+  // A live engine object means Open() has already succeeded once.
+  if (engine_object_.Get())
+    return false;
+
+  if (!CreatePlayer())
+    return false;
+
+  SetupAudioBuffer();
+
+  return true;
+}
+
+// Begins playback. |callback| supplies audio data via OnMoreData() from an
+// OpenSLES-internal thread. Requires a successful Open(); no-op if already
+// started.
+void OpenSLESOutputStream::Start(AudioSourceCallback* callback) {
+  DCHECK(callback);
+  DCHECK(player_);
+  DCHECK(simple_buffer_queue_);
+  if (started_)
+    return;
+
+  // Enable the flags before streaming.
+  callback_ = callback;
+  active_queue_ = 0;
+  started_ = true;
+
+  // Avoid start-up glitches by filling up one buffer queue before starting
+  // the stream.
+  FillBufferQueue();
+
+  // Start streaming data by setting the play state to |SL_PLAYSTATE_PLAYING|.
+  LOG_ON_FAILURE_AND_RETURN(
+      (*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING));
+}
+
+// Stops playback and discards any queued buffers. Safe to call when not
+// started. |started_| is cleared first so a concurrent buffer-queue callback
+// bails out early in FillBufferQueue().
+void OpenSLESOutputStream::Stop() {
+  if (!started_)
+    return;
+
+  started_ = false;
+  // Stop playing by setting the play state to |SL_PLAYSTATE_STOPPED|.
+  LOG_ON_FAILURE_AND_RETURN(
+      (*player_)->SetPlayState(player_, SL_PLAYSTATE_STOPPED));
+
+  // Clear the buffer queue so that the old data won't be played when
+  // resuming playing.
+  LOG_ON_FAILURE_AND_RETURN(
+      (*simple_buffer_queue_)->Clear(simple_buffer_queue_));
+}
+
+// Stops the stream, destroys the OpenSLES objects in reverse creation order,
+// frees the playback buffers and returns |this| to the audio manager, which
+// owns (and deletes) the stream. Do not use |this| after calling Close().
+void OpenSLESOutputStream::Close() {
+  // Stop the stream if it is still playing.
+  Stop();
+
+  // Explicitly free the player objects and invalidate their associated
+  // interfaces. They have to be done in the correct order.
+  player_object_.Reset();
+  output_mixer_.Reset();
+  engine_object_.Reset();
+  // Interface pointers obtained from the destroyed objects are now dangling.
+  simple_buffer_queue_ = NULL;
+  player_ = NULL;
+
+  ReleaseAudioBuffer();
+
+  audio_manager_->ReleaseOutputStream(this);
+}
+
+// Stores the software volume scale applied in FillBufferQueue().
+// Out-of-range values (outside [0.0, 1.0]) are silently ignored.
+void OpenSLESOutputStream::SetVolume(double volume) {
+  float volume_float = static_cast<float>(volume);
+  if (volume_float < 0.0f || volume_float > 1.0f) {
+    return;
+  }
+  volume_ = volume_float;
+}
+
+// Returns the last volume accepted by SetVolume() (1.0 by default).
+void OpenSLESOutputStream::GetVolume(double* volume) {
+  *volume = static_cast<double>(volume_);
+}
+
+// Builds the OpenSLES object graph for playback: engine -> output mix ->
+// audio player (buffer-queue source, mixer sink), configures the Android
+// stream type, fetches the play and buffer-queue interfaces and registers
+// the data callback. Returns false on the first failing OpenSLES call.
+bool OpenSLESOutputStream::CreatePlayer() {
+  // Initializes the engine object with specific option. After working with the
+  // object, we need to free the object and its resources.
+  SLEngineOption option[] = {
+    { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) }
+  };
+  LOG_ON_FAILURE_AND_RETURN(
+      slCreateEngine(engine_object_.Receive(), 1, option, 0, NULL, NULL),
+      false);
+
+  // Realize the SL engine object in synchronous mode.
+  LOG_ON_FAILURE_AND_RETURN(
+      engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE),
+      false);
+
+  // Get the SL engine interface which is implicit.
+  SLEngineItf engine;
+  LOG_ON_FAILURE_AND_RETURN(
+      engine_object_->GetInterface(engine_object_.Get(),
+                                   SL_IID_ENGINE,
+                                   &engine),
+      false);
+
+  // Create output mixer object to be used by the player.
+  LOG_ON_FAILURE_AND_RETURN(
+      (*engine)->CreateOutputMix(engine,
+                                 output_mixer_.Receive(),
+                                 0,
+                                 NULL,
+                                 NULL),
+      false);
+
+  // Realizing the output mix object in synchronous mode.
+  LOG_ON_FAILURE_AND_RETURN(
+      output_mixer_->Realize(output_mixer_.Get(), SL_BOOLEAN_FALSE),
+      false);
+
+  // Audio source configuration.
+  SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
+    SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+    static_cast<SLuint32>(kNumOfQueuesInBuffer)
+  };
+  SLDataSource audio_source = { &simple_buffer_queue, &format_ };
+
+  // Audio sink configuration.
+  SLDataLocator_OutputMix locator_output_mix = {
+    SL_DATALOCATOR_OUTPUTMIX, output_mixer_.Get()
+  };
+  SLDataSink audio_sink = { &locator_output_mix, NULL };
+
+  // Create an audio player.
+  const SLInterfaceID interface_id[] = {
+    SL_IID_BUFFERQUEUE,
+    SL_IID_VOLUME,
+    SL_IID_ANDROIDCONFIGURATION
+  };
+  const SLboolean interface_required[] = {
+    SL_BOOLEAN_TRUE,
+    SL_BOOLEAN_TRUE,
+    SL_BOOLEAN_TRUE
+  };
+  LOG_ON_FAILURE_AND_RETURN(
+      (*engine)->CreateAudioPlayer(engine,
+                                   player_object_.Receive(),
+                                   &audio_source,
+                                   &audio_sink,
+                                   arraysize(interface_id),
+                                   interface_id,
+                                   interface_required),
+      false);
+
+  // Retrieve the SL_IID_ANDROIDCONFIGURATION interface so the Android stream
+  // type can be set before the player is realized.
+  SLAndroidConfigurationItf player_config;
+  LOG_ON_FAILURE_AND_RETURN(
+      player_object_->GetInterface(player_object_.Get(),
+                                   SL_IID_ANDROIDCONFIGURATION,
+                                   &player_config),
+      false);
+
+  SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
+  LOG_ON_FAILURE_AND_RETURN(
+      (*player_config)->SetConfiguration(player_config,
+                                         SL_ANDROID_KEY_STREAM_TYPE,
+                                         &stream_type, sizeof(SLint32)),
+      false);
+
+  // Realize the player object in synchronous mode.
+  LOG_ON_FAILURE_AND_RETURN(
+      player_object_->Realize(player_object_.Get(), SL_BOOLEAN_FALSE),
+      false);
+
+  // Get an implicit player interface.
+  LOG_ON_FAILURE_AND_RETURN(
+      player_object_->GetInterface(player_object_.Get(), SL_IID_PLAY, &player_),
+      false);
+
+  // Get the simple buffer queue interface.
+  LOG_ON_FAILURE_AND_RETURN(
+      player_object_->GetInterface(player_object_.Get(),
+                                   SL_IID_BUFFERQUEUE,
+                                   &simple_buffer_queue_),
+      false);
+
+  // Register the input callback for the simple buffer queue.
+  // This callback will be called when the soundcard needs data.
+  LOG_ON_FAILURE_AND_RETURN(
+      (*simple_buffer_queue_)->RegisterCallback(simple_buffer_queue_,
+                                                SimpleBufferQueueCallback,
+                                                this),
+      false);
+
+  return true;
+}
+
+// Static trampoline invoked by OpenSLES (on an internal thread) each time the
+// player needs another buffer; |instance| is the stream registered in
+// CreatePlayer().
+void OpenSLESOutputStream::SimpleBufferQueueCallback(
+    SLAndroidSimpleBufferQueueItf buffer_queue, void* instance) {
+  OpenSLESOutputStream* stream =
+      reinterpret_cast<OpenSLESOutputStream*>(instance);
+  stream->FillBufferQueue();
+}
+
+// Pulls one buffer of audio from the client, applies the software volume,
+// interleaves it into the active buffer and enqueues it for playback. Runs
+// on the OpenSLES callback thread (and once synchronously from Start()).
+void OpenSLESOutputStream::FillBufferQueue() {
+  // Ignore late callbacks delivered after Stop().
+  if (!started_)
+    return;
+
+  TRACE_EVENT0("audio", "OpenSLESOutputStream::FillBufferQueue");
+  // Read data from the registered client source.
+  // TODO(xians): Get an accurate delay estimation.
+  uint32 hardware_delay = buffer_size_bytes_;
+  int frames_filled = callback_->OnMoreData(
+      audio_bus_.get(), AudioBuffersState(0, hardware_delay));
+  if (frames_filled <= 0)
+    return;  // Audio source is shutting down, or halted on error.
+  int num_filled_bytes =
+      frames_filled * audio_bus_->channels() * format_.bitsPerSample / 8;
+  DCHECK_LE(static_cast<size_t>(num_filled_bytes), buffer_size_bytes_);
+  // Note: If this ever changes to output raw float the data must be clipped and
+  // sanitized since it may come from an untrusted source such as NaCl.
+  audio_bus_->Scale(volume_);
+  audio_bus_->ToInterleaved(
+      frames_filled, format_.bitsPerSample / 8, audio_data_[active_queue_]);
+
+  // Enqueue the buffer for playback.
+  SLresult err = (*simple_buffer_queue_)->Enqueue(
+      simple_buffer_queue_,
+      audio_data_[active_queue_],
+      num_filled_bytes);
+  if (SL_RESULT_SUCCESS != err)
+    HandleError(err);
+
+  // Advance to the next buffer in round-robin order.
+  active_queue_ = (active_queue_ + 1) % kNumOfQueuesInBuffer;
+}
+
+// Allocates one |buffer_size_bytes_| playback buffer per queue slot. The
+// buffers are written by FillBufferQueue() before being enqueued.
+void OpenSLESOutputStream::SetupAudioBuffer() {
+  DCHECK(!audio_data_[0]);
+  for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
+    audio_data_[i] = new uint8[buffer_size_bytes_];
+  }
+}
+
+// Frees the playback buffers allocated by SetupAudioBuffer(). Safe to call
+// when the buffers were never allocated.
+void OpenSLESOutputStream::ReleaseAudioBuffer() {
+  if (audio_data_[0]) {
+    for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
+      delete [] audio_data_[i];
+      audio_data_[i] = NULL;
+    }
+  }
+}
+
+// Logs an OpenSLES failure and forwards it to the client, if one is attached
+// (|callback_| is NULL until Start()).
+void OpenSLESOutputStream::HandleError(SLresult error) {
+  DLOG(ERROR) << "OpenSLES Output error " << error;
+  if (callback_)
+    callback_->OnError(this);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/android/opensles_output.h b/chromium/media/audio/android/opensles_output.h
new file mode 100644
index 00000000000..f505b5165cd
--- /dev/null
+++ b/chromium/media/audio/android/opensles_output.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_ANDROID_OPENSLES_OUTPUT_H_
+#define MEDIA_AUDIO_ANDROID_OPENSLES_OUTPUT_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+
+#include "base/compiler_specific.h"
+#include "media/audio/android/opensles_util.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerAndroid;
+
+// Implements PCM audio output support for Android using the OpenSLES API.
+class OpenSLESOutputStream : public AudioOutputStream {
+ public:
+  // Number of buffers cycled through the OpenSLES simple buffer queue.
+  static const int kNumOfQueuesInBuffer = 2;
+
+  OpenSLESOutputStream(AudioManagerAndroid* manager,
+                       const AudioParameters& params);
+
+  virtual ~OpenSLESOutputStream();
+
+  // Implementation of AudioOutputStream.
+  virtual bool Open() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual void GetVolume(double* volume) OVERRIDE;
+
+ private:
+  // Creates the OpenSLES engine/mixer/player objects and registers the
+  // buffer-queue callback. Returns false on any OpenSLES failure.
+  bool CreatePlayer();
+
+  // Trampoline invoked by OpenSLES when the player needs more data;
+  // |instance| is the OpenSLESOutputStream passed at registration.
+  static void SimpleBufferQueueCallback(
+      SLAndroidSimpleBufferQueueItf buffer_queue, void* instance);
+
+  // Pulls data from |callback_| and enqueues one buffer for playback.
+  void FillBufferQueue();
+
+  // Called from Open(). Allocates the playback buffers.
+  void SetupAudioBuffer();
+
+  // Called from Close(). Frees the playback buffers.
+  void ReleaseAudioBuffer();
+
+  // If OpenSLES reports an error this function handles it and passes it to
+  // the attached AudioSourceCallback::OnError().
+  void HandleError(SLresult error);
+
+  // The manager that owns this stream; notified in Close().
+  AudioManagerAndroid* audio_manager_;
+
+  // Client source of audio data; set in Start(), NULL before that.
+  AudioSourceCallback* callback_;
+
+  // Shared engine interfaces for the app.
+  media::ScopedSLObjectItf engine_object_;
+  media::ScopedSLObjectItf player_object_;
+  media::ScopedSLObjectItf output_mixer_;
+
+  SLPlayItf player_;
+
+  // Buffer queue player interface.
+  SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
+
+  // PCM playback format derived from the AudioParameters.
+  SLDataFormat_PCM format_;
+
+  // Audio buffer arrays that are allocated in Open() via SetupAudioBuffer().
+  uint8* audio_data_[kNumOfQueuesInBuffer];
+
+  // Index of the buffer to be filled and enqueued next (round-robin).
+  int active_queue_;
+  size_t buffer_size_bytes_;
+
+  bool started_;
+
+  // Volume level from 0 to 1.
+  float volume_;
+
+  // Container for retrieving data from AudioSourceCallback::OnMoreData().
+  scoped_ptr<AudioBus> audio_bus_;
+
+  DISALLOW_COPY_AND_ASSIGN(OpenSLESOutputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_ANDROID_OPENSLES_OUTPUT_H_
diff --git a/chromium/media/audio/android/opensles_util.h b/chromium/media/audio/android/opensles_util.h
new file mode 100644
index 00000000000..bc6d0410af9
--- /dev/null
+++ b/chromium/media/audio/android/opensles_util.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_ANDROID_OPENSLES_UTIL_H_
+#define MEDIA_AUDIO_ANDROID_OPENSLES_UTIL_H_
+
+#include <SLES/OpenSLES.h>
+
+#include "base/logging.h"
+
+namespace media {
+
+// RAII owner for an OpenSLES object handle (e.g. SLObjectItf): calls
+// (*obj)->Destroy(obj) when reset or destroyed. |SLType| is the handle type
+// and |SLDerefType| the pointed-to interface table used by operator->.
+template <typename SLType, typename SLDerefType>
+class ScopedSLObject {
+ public:
+  ScopedSLObject() : obj_(NULL) {}
+
+  ~ScopedSLObject() { Reset(); }
+
+  // Returns the address of the (empty) handle for use as an out-parameter of
+  // an OpenSLES creation function; DCHECKs that no object is already held.
+  SLType* Receive() {
+    DCHECK(!obj_);
+    return &obj_;
+  }
+
+  // Dereferences the held handle to reach its interface table.
+  SLDerefType operator->() { return *obj_; }
+
+  SLType Get() const { return obj_; }
+
+  // Destroys the held object, if any, and clears the handle.
+  void Reset() {
+    if (obj_) {
+      (*obj_)->Destroy(obj_);
+      obj_ = NULL;
+    }
+  }
+
+ private:
+  SLType obj_;
+};
+
+typedef ScopedSLObject<SLObjectItf, const SLObjectItf_*> ScopedSLObjectItf;
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_ANDROID_OPENSLES_UTIL_H_
diff --git a/chromium/media/audio/async_socket_io_handler.h b/chromium/media/audio/async_socket_io_handler.h
new file mode 100644
index 00000000000..cc7185eb243
--- /dev/null
+++ b/chromium/media/audio/async_socket_io_handler.h
@@ -0,0 +1,113 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_ASYNC_SOCKET_IO_HANDLER_H_
+#define MEDIA_AUDIO_ASYNC_SOCKET_IO_HANDLER_H_
+
+#include "base/message_loop/message_loop.h"
+#include "base/sync_socket.h"
+#include "base/threading/non_thread_safe.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// The message loop callback interface is different based on platforms:
+// completion-port IOHandler on Windows, file-descriptor Watcher on POSIX.
+#if defined(OS_WIN)
+typedef base::MessageLoopForIO::IOHandler MessageLoopIOHandler;
+#elif defined(OS_POSIX)
+typedef base::MessageLoopForIO::Watcher MessageLoopIOHandler;
+#endif
+
+// Extends the CancelableSyncSocket class to allow reading from a socket
+// asynchronously on a TYPE_IO message loop thread. This makes it easy to share
+// a thread that uses a message loop (e.g. for IPC and other things) and not
+// require a separate thread to read from the socket.
+//
+// Example usage (also see the unit tests):
+//
+// class SocketReader {
+// public:
+// SocketReader(base::CancelableSyncSocket* socket)
+// : socket_(socket), buffer_() {
+// io_handler.Initialize(socket_->handle(),
+// base::Bind(&SocketReader::OnDataAvailable,
+//                           base::Unretained(this)));
+// }
+//
+// void AsyncRead() {
+// CHECK(io_handler.Read(&buffer_[0], sizeof(buffer_)));
+// }
+//
+// private:
+// void OnDataAvailable(int bytes_read) {
+// if (ProcessData(&buffer_[0], bytes_read)) {
+// // Issue another read.
+// CHECK(io_handler.Read(&buffer_[0], sizeof(buffer_)));
+// }
+// }
+//
+// media::AsyncSocketIoHandler io_handler;
+// base::CancelableSyncSocket* socket_;
+// char buffer_[kBufferSize];
+// };
+//
+class MEDIA_EXPORT AsyncSocketIoHandler
+    : public NON_EXPORTED_BASE(base::NonThreadSafe),
+      public NON_EXPORTED_BASE(MessageLoopIOHandler) {
+ public:
+  AsyncSocketIoHandler();
+  virtual ~AsyncSocketIoHandler();
+
+  // Type definition for the callback. The parameter tells how many
+  // bytes were read and is 0 if an error occurred.
+  typedef base::Callback<void(int)> ReadCompleteCallback;
+
+  // Initializes the AsyncSocketIoHandler by hooking it up to the current
+  // thread's message loop (must be TYPE_IO), to do async reads from the socket
+  // on the current thread. The |callback| will be invoked whenever a Read()
+  // has completed.
+  bool Initialize(base::SyncSocket::Handle socket,
+                  const ReadCompleteCallback& callback);
+
+  // Attempts to read from the socket. The return value will be |false|
+  // if an error occurred and |true| if data was read or a pending read
+  // was issued. Regardless of async or sync operation, the
+  // ReadCompleteCallback (see above) will be called when data is available.
+  bool Read(char* buffer, int buffer_len);
+
+ private:
+#if defined(OS_WIN)
+  // Implementation of IOHandler on Windows.
+  virtual void OnIOCompleted(base::MessageLoopForIO::IOContext* context,
+                             DWORD bytes_transfered,
+                             DWORD error) OVERRIDE;
+#elif defined(OS_POSIX)
+  // Implementation of base::MessageLoopForIO::Watcher.
+  // Write readiness is ignored; this handler only performs reads.
+  virtual void OnFileCanWriteWithoutBlocking(int socket) OVERRIDE {}
+  virtual void OnFileCanReadWithoutBlocking(int socket) OVERRIDE;
+
+  // Starts watching |socket_| for readability if not already watching.
+  void EnsureWatchingSocket();
+#endif
+
+  base::SyncSocket::Handle socket_;
+#if defined(OS_WIN)
+  base::MessageLoopForIO::IOContext* context_;
+  bool is_pending_;
+#elif defined(OS_POSIX)
+  base::MessageLoopForIO::FileDescriptorWatcher socket_watcher_;
+  // |pending_buffer_| and |pending_buffer_len_| are valid only between
+  // Read() and OnFileCanReadWithoutBlocking().
+  char* pending_buffer_;
+  int pending_buffer_len_;
+  // |true| iff the message loop is watching the socket for IO events.
+  bool is_watching_;
+#endif
+  ReadCompleteCallback read_complete_;
+
+  DISALLOW_COPY_AND_ASSIGN(AsyncSocketIoHandler);
+};
+
+} // namespace media.
+
+#endif // MEDIA_AUDIO_ASYNC_SOCKET_IO_HANDLER_H_
diff --git a/chromium/media/audio/async_socket_io_handler_posix.cc b/chromium/media/audio/async_socket_io_handler_posix.cc
new file mode 100644
index 00000000000..be8f3708cb7
--- /dev/null
+++ b/chromium/media/audio/async_socket_io_handler_posix.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/async_socket_io_handler.h"
+
+#include <fcntl.h>
+
+#include "base/posix/eintr_wrapper.h"
+
+namespace media {
+
+// Starts in an uninitialized state; Initialize() must be called before Read().
+AsyncSocketIoHandler::AsyncSocketIoHandler()
+    : socket_(base::SyncSocket::kInvalidHandle),
+      pending_buffer_(NULL),
+      pending_buffer_len_(0),
+      is_watching_(false) {
+}
+
+// Must be destroyed on the same thread it was (re)attached to; the owned
+// FileDescriptorWatcher stops watching on destruction.
+AsyncSocketIoHandler::~AsyncSocketIoHandler() {
+  DCHECK(CalledOnValidThread());
+}
+
+// Message-loop notification that |socket_| is readable. If a Read() is
+// pending, completes it and invokes the client callback (0 signals EOF or
+// error); otherwise stops watching to avoid busy-notifying the loop.
+void AsyncSocketIoHandler::OnFileCanReadWithoutBlocking(int socket) {
+  DCHECK(CalledOnValidThread());
+  DCHECK_EQ(socket, socket_);
+  DCHECK(!read_complete_.is_null());
+
+  if (pending_buffer_) {
+    int bytes_read = HANDLE_EINTR(read(socket_, pending_buffer_,
+                                       pending_buffer_len_));
+    DCHECK_GE(bytes_read, 0);
+    // Clear the pending state before running the callback so the callback
+    // may issue another Read().
+    pending_buffer_ = NULL;
+    pending_buffer_len_ = 0;
+    read_complete_.Run(bytes_read > 0 ? bytes_read : 0);
+  } else {
+    // We're getting notifications that we can read from the socket while
+    // we're not waiting for data.  In order to not starve the message loop,
+    // let's stop watching the fd and restart the watch when Read() is called.
+    is_watching_ = false;
+    socket_watcher_.StopWatchingFileDescriptor();
+  }
+}
+
+// Attempts a non-blocking read into |buffer|. If data is available now the
+// callback runs synchronously; if the read would block (EAGAIN) the request
+// is parked until OnFileCanReadWithoutBlocking(). Only one read may be
+// outstanding at a time.
+bool AsyncSocketIoHandler::Read(char* buffer, int buffer_len) {
+  DCHECK(CalledOnValidThread());
+  DCHECK(!read_complete_.is_null());
+  DCHECK(!pending_buffer_);
+
+  EnsureWatchingSocket();
+
+  int bytes_read = HANDLE_EINTR(read(socket_, buffer, buffer_len));
+  if (bytes_read < 0) {
+    if (errno == EAGAIN) {
+      // No data yet; remember the buffer and wait for the fd watcher.
+      pending_buffer_ = buffer;
+      pending_buffer_len_ = buffer_len;
+    } else {
+      NOTREACHED() << "read(): " << errno;
+      return false;
+    }
+  } else {
+    // Data (or EOF, bytes_read == 0) was available immediately.
+    read_complete_.Run(bytes_read);
+  }
+  return true;
+}
+
+// Adopts |socket| (not owned), stores |callback| and switches the socket to
+// non-blocking mode so Read() can be used asynchronously. Must be called
+// once, before any Read(); re-binds thread affinity to the calling thread.
+bool AsyncSocketIoHandler::Initialize(base::SyncSocket::Handle socket,
+                                      const ReadCompleteCallback& callback) {
+  DCHECK_EQ(socket_, base::SyncSocket::kInvalidHandle);
+
+  DetachFromThread();
+
+  socket_ = socket;
+  read_complete_ = callback;
+
+  // SyncSocket is blocking by default, so let's convert it to non-blocking.
+  int value = fcntl(socket, F_GETFL);
+  if (value == -1) {
+    // F_GETFL failed; without the current flags we cannot safely update them.
+    NOTREACHED();
+    return false;
+  }
+  if (!(value & O_NONBLOCK)) {
+    // Set the socket to be non-blocking so we can do async reads.
+    // OR in O_NONBLOCK rather than overwriting the flags, so other file
+    // status flags (e.g. O_APPEND) are preserved.
+    if (fcntl(socket, F_SETFL, value | O_NONBLOCK) == -1) {
+      NOTREACHED();
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Starts a persistent read watch on |socket_| with the current thread's
+// TYPE_IO message loop, if not already watching and the socket is valid.
+void AsyncSocketIoHandler::EnsureWatchingSocket() {
+  DCHECK(CalledOnValidThread());
+  if (!is_watching_ && socket_ != base::SyncSocket::kInvalidHandle) {
+    is_watching_ = base::MessageLoopForIO::current()->WatchFileDescriptor(
+        socket_, true, base::MessageLoopForIO::WATCH_READ,
+        &socket_watcher_, this);
+  }
+}
+
+} // namespace media.
diff --git a/chromium/media/audio/async_socket_io_handler_unittest.cc b/chromium/media/audio/async_socket_io_handler_unittest.cc
new file mode 100644
index 00000000000..ae971464dbc
--- /dev/null
+++ b/chromium/media/audio/async_socket_io_handler_unittest.cc
@@ -0,0 +1,168 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/async_socket_io_handler.h"
+
+#include "base/bind.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+const char kAsyncSocketIoTestString[] = "Hello, AsyncSocketIoHandler";
+const size_t kAsyncSocketIoTestStringLength =
+ arraysize(kAsyncSocketIoTestString);
+
+class TestSocketReader {
+ public:
+ // Set |number_of_reads_before_quit| to >0 when you expect a specific number
+ // of Read operations to complete. Once that number is reached, the current
+ // message loop will be Quit(). Set |number_of_reads_before_quit| to -1 if
+ // callbacks should not be counted.
+ TestSocketReader(base::CancelableSyncSocket* socket,
+ int number_of_reads_before_quit,
+ bool issue_reads_from_callback,
+ bool expect_eof)
+ : socket_(socket), buffer_(),
+ number_of_reads_before_quit_(number_of_reads_before_quit),
+ callbacks_received_(0),
+ issue_reads_from_callback_(issue_reads_from_callback),
+ expect_eof_(expect_eof) {
+ io_handler.Initialize(socket_->handle(),
+ base::Bind(&TestSocketReader::OnRead,
+ base::Unretained(this)));
+ }
+ ~TestSocketReader() {}
+
+ bool IssueRead() {
+ return io_handler.Read(&buffer_[0], sizeof(buffer_));
+ }
+
+ const char* buffer() const { return &buffer_[0]; }
+
+ int callbacks_received() const { return callbacks_received_; }
+
+ private:
+ void OnRead(int bytes_read) {
+ if (!expect_eof_) {
+ EXPECT_GT(bytes_read, 0);
+ } else {
+ EXPECT_GE(bytes_read, 0);
+ }
+ ++callbacks_received_;
+ if (number_of_reads_before_quit_ == callbacks_received_) {
+ base::MessageLoop::current()->Quit();
+ } else if (issue_reads_from_callback_) {
+ IssueRead();
+ }
+ }
+
+ media::AsyncSocketIoHandler io_handler;
+ base::CancelableSyncSocket* socket_; // Ownership lies outside the class.
+ char buffer_[kAsyncSocketIoTestStringLength];
+ int number_of_reads_before_quit_;
+ int callbacks_received_;
+ bool issue_reads_from_callback_;
+ bool expect_eof_;
+};
+
+// Workaround to be able to use a base::Closure for sending data.
+// Send() returns int but a closure must return void.
+void SendData(base::CancelableSyncSocket* socket,
+ const void* buffer,
+ size_t length) {
+ socket->Send(buffer, length);
+}
+
+} // end namespace.
+
+// Tests doing a pending read from a socket and use an IO handler to get
+// notified of data.
+TEST(AsyncSocketIoHandlerTest, AsynchronousReadWithMessageLoop) {
+ base::MessageLoopForIO loop;
+
+ base::CancelableSyncSocket pair[2];
+ ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
+
+ TestSocketReader reader(&pair[0], 1, false, false);
+ EXPECT_TRUE(reader.IssueRead());
+
+ pair[1].Send(kAsyncSocketIoTestString, kAsyncSocketIoTestStringLength);
+ base::MessageLoop::current()->Run();
+ EXPECT_EQ(strcmp(reader.buffer(), kAsyncSocketIoTestString), 0);
+ EXPECT_EQ(1, reader.callbacks_received());
+}
+
+// Tests doing a read from a socket when we know that there is data in the
+// socket. Here we want to make sure that any async 'can read' notifications
+// won't trip us off and that the synchronous case works as well.
+TEST(AsyncSocketIoHandlerTest, SynchronousReadWithMessageLoop) {
+ base::MessageLoopForIO loop;
+
+ base::CancelableSyncSocket pair[2];
+ ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
+
+ TestSocketReader reader(&pair[0], -1, false, false);
+
+ pair[1].Send(kAsyncSocketIoTestString, kAsyncSocketIoTestStringLength);
+ base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
+ base::MessageLoop::QuitClosure(),
+ base::TimeDelta::FromMilliseconds(100));
+ base::MessageLoop::current()->Run();
+
+ EXPECT_TRUE(reader.IssueRead());
+ EXPECT_EQ(strcmp(reader.buffer(), kAsyncSocketIoTestString), 0);
+ // We've now verified that the read happened synchronously, but it's not
+ // guaranteed that the callback has been issued since the callback will be
+ // called asynchronously even though the read may have been done.
+ // So we call RunUntilIdle() to allow any event notifications or APC's on
+ // Windows, to execute before checking the count of how many callbacks we've
+ // received.
+ base::MessageLoop::current()->RunUntilIdle();
+ EXPECT_EQ(1, reader.callbacks_received());
+}
+
+// Calls Read() from within a callback to test that simple read "loops" work.
+TEST(AsyncSocketIoHandlerTest, ReadFromCallback) {
+ base::MessageLoopForIO loop;
+
+ base::CancelableSyncSocket pair[2];
+ ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
+
+ const int kReadOperationCount = 10;
+ TestSocketReader reader(&pair[0], kReadOperationCount, true, false);
+ EXPECT_TRUE(reader.IssueRead());
+
+ // Issue sends on an interval to satisfy the Read() requirements.
+ int64 milliseconds = 0;
+ for (int i = 0; i < kReadOperationCount; ++i) {
+ base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
+ base::Bind(&SendData, &pair[1], kAsyncSocketIoTestString,
+ kAsyncSocketIoTestStringLength),
+ base::TimeDelta::FromMilliseconds(milliseconds));
+ milliseconds += 10;
+ }
+
+ base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
+ base::MessageLoop::QuitClosure(),
+ base::TimeDelta::FromMilliseconds(100 + milliseconds));
+
+ base::MessageLoop::current()->Run();
+ EXPECT_EQ(kReadOperationCount, reader.callbacks_received());
+}
+
+// Calls Read() then close other end, check that a correct callback is received.
+TEST(AsyncSocketIoHandlerTest, ReadThenClose) {
+ base::MessageLoopForIO loop;
+
+ base::CancelableSyncSocket pair[2];
+ ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
+
+ const int kReadOperationCount = 1;
+ TestSocketReader reader(&pair[0], kReadOperationCount, false, true);
+ EXPECT_TRUE(reader.IssueRead());
+
+ pair[1].Close();
+
+ base::MessageLoop::current()->Run();
+ EXPECT_EQ(kReadOperationCount, reader.callbacks_received());
+}
diff --git a/chromium/media/audio/async_socket_io_handler_win.cc b/chromium/media/audio/async_socket_io_handler_win.cc
new file mode 100644
index 00000000000..ea6bd4ad0d5
--- /dev/null
+++ b/chromium/media/audio/async_socket_io_handler_win.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/async_socket_io_handler.h"
+
+namespace media {
+
+AsyncSocketIoHandler::AsyncSocketIoHandler()
+ : socket_(base::SyncSocket::kInvalidHandle),
+ context_(NULL),
+ is_pending_(false) {}
+
+AsyncSocketIoHandler::~AsyncSocketIoHandler() {
+ // We need to be deleted on the correct thread to avoid racing with the
+ // message loop thread.
+ DCHECK(CalledOnValidThread());
+
+ if (context_) {
+ if (is_pending_) {
+ // Make the context be deleted by the message pump when done.
+ context_->handler = NULL;
+ } else {
+ delete context_;
+ }
+ }
+}
+
+// Implementation of IOHandler on Windows.
+void AsyncSocketIoHandler::OnIOCompleted(
+ base::MessageLoopForIO::IOContext* context,
+ DWORD bytes_transfered,
+ DWORD error) {
+ DCHECK(CalledOnValidThread());
+ DCHECK_EQ(context_, context);
+ DCHECK(!read_complete_.is_null());
+ is_pending_ = false;
+ read_complete_.Run(error == ERROR_SUCCESS ? bytes_transfered : 0);
+}
+
+bool AsyncSocketIoHandler::Read(char* buffer, int buffer_len) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!read_complete_.is_null());
+ DCHECK(!is_pending_);
+ DCHECK_NE(socket_, base::SyncSocket::kInvalidHandle);
+
+ DWORD bytes_read = 0;
+ BOOL ok = ::ReadFile(socket_, buffer, buffer_len, &bytes_read,
+ &context_->overlapped);
+ // The completion port will be signaled regardless of completing the read
+ // straight away or asynchronously (ERROR_IO_PENDING). OnIOCompleted() will
+ // be called regardless and we don't need to explicitly run the callback
+ // in the case where ok is FALSE and GLE==ERROR_IO_PENDING.
+ is_pending_ = !ok && (GetLastError() == ERROR_IO_PENDING);
+ return ok || is_pending_;
+}
+
+bool AsyncSocketIoHandler::Initialize(base::SyncSocket::Handle socket,
+ const ReadCompleteCallback& callback) {
+ DCHECK(!context_);
+ DCHECK_EQ(socket_, base::SyncSocket::kInvalidHandle);
+
+ DetachFromThread();
+
+ socket_ = socket;
+ read_complete_ = callback;
+
+ base::MessageLoopForIO::current()->RegisterIOHandler(socket, this);
+
+ context_ = new base::MessageLoopForIO::IOContext();
+ context_->handler = this;
+ memset(&context_->overlapped, 0, sizeof(context_->overlapped));
+
+ return true;
+}
+
+} // namespace media.
diff --git a/chromium/media/audio/audio_buffers_state.cc b/chromium/media/audio/audio_buffers_state.cc
new file mode 100644
index 00000000000..6c4f9501605
--- /dev/null
+++ b/chromium/media/audio/audio_buffers_state.cc
@@ -0,0 +1,20 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_buffers_state.h"
+
+namespace media {
+
+AudioBuffersState::AudioBuffersState()
+ : pending_bytes(0),
+ hardware_delay_bytes(0) {
+}
+
+AudioBuffersState::AudioBuffersState(int pending_bytes,
+ int hardware_delay_bytes)
+ : pending_bytes(pending_bytes),
+ hardware_delay_bytes(hardware_delay_bytes) {
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_buffers_state.h b/chromium/media/audio/audio_buffers_state.h
new file mode 100644
index 00000000000..79244aeb1a5
--- /dev/null
+++ b/chromium/media/audio/audio_buffers_state.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_BUFFERS_STATE_H_
+#define MEDIA_AUDIO_AUDIO_BUFFERS_STATE_H_
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// AudioBuffersState struct stores current state of audio buffers.
+// It is used for audio synchronization.
+struct MEDIA_EXPORT AudioBuffersState {
+ AudioBuffersState();
+ AudioBuffersState(int pending_bytes, int hardware_delay_bytes);
+
+ int total_bytes() {
+ return pending_bytes + hardware_delay_bytes;
+ }
+
+ // Number of bytes we currently have in our software buffer.
+ int pending_bytes;
+
+ // Number of bytes that have been written to the device, but haven't
+ // been played yet.
+ int hardware_delay_bytes;
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_BUFFERS_STATE_H_
diff --git a/chromium/media/audio/audio_device_name.cc b/chromium/media/audio/audio_device_name.cc
new file mode 100644
index 00000000000..02bb03f4aea
--- /dev/null
+++ b/chromium/media/audio/audio_device_name.cc
@@ -0,0 +1,18 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_device_name.h"
+
+namespace media {
+
+AudioDeviceName::AudioDeviceName() {}
+
+AudioDeviceName::AudioDeviceName(const std::string& device_name,
+ const std::string& unique_id)
+ : device_name(device_name),
+ unique_id(unique_id) {
+}
+
+} // namespace media
+
diff --git a/chromium/media/audio/audio_device_name.h b/chromium/media/audio/audio_device_name.h
new file mode 100644
index 00000000000..aa3cca0da8b
--- /dev/null
+++ b/chromium/media/audio/audio_device_name.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_DEVICE_NAME_H_
+#define MEDIA_AUDIO_AUDIO_DEVICE_NAME_H_
+
+#include <list>
+#include <string>
+#include "media/base/media_export.h"
+
+namespace media {
+
+struct MEDIA_EXPORT AudioDeviceName {
+ AudioDeviceName();
+ AudioDeviceName(const std::string& device_name,
+ const std::string& unique_id);
+
+ std::string device_name; // Friendly name of the device.
+ std::string unique_id; // Unique identifier for the device.
+};
+
+typedef std::list<AudioDeviceName> AudioDeviceNames;
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_DEVICE_NAME_H_
diff --git a/chromium/media/audio/audio_device_thread.cc b/chromium/media/audio/audio_device_thread.cc
new file mode 100644
index 00000000000..d5c1bbcebdc
--- /dev/null
+++ b/chromium/media/audio/audio_device_thread.cc
@@ -0,0 +1,203 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_device_thread.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/aligned_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "media/audio/audio_util.h"
+#include "media/base/audio_bus.h"
+
+using base::PlatformThread;
+
+namespace media {
+
+// The actual worker thread implementation. It's very bare bones and much
+// simpler than SimpleThread (no synchronization in Start, etc) and supports
+// joining the thread handle asynchronously via a provided message loop even
+// after the Thread object itself has been deleted.
+class AudioDeviceThread::Thread
+ : public PlatformThread::Delegate,
+ public base::RefCountedThreadSafe<AudioDeviceThread::Thread> {
+ public:
+ Thread(AudioDeviceThread::Callback* callback,
+ base::SyncSocket::Handle socket,
+ const char* thread_name);
+
+ void Start();
+
+ // Stops the thread. If |loop_for_join| is non-NULL, the function posts
+ // a task to join (close) the thread handle later instead of waiting for
+ // the thread. If loop_for_join is NULL, then the function waits
+ // synchronously for the thread to terminate.
+ void Stop(base::MessageLoop* loop_for_join);
+
+ private:
+ friend class base::RefCountedThreadSafe<AudioDeviceThread::Thread>;
+ virtual ~Thread();
+
+ // Overrides from PlatformThread::Delegate.
+ virtual void ThreadMain() OVERRIDE;
+
+ // Runs the loop that reads from the socket.
+ void Run();
+
+ private:
+ base::PlatformThreadHandle thread_;
+ AudioDeviceThread::Callback* callback_;
+ base::CancelableSyncSocket socket_;
+ base::Lock callback_lock_;
+ const char* thread_name_;
+
+ DISALLOW_COPY_AND_ASSIGN(Thread);
+};
+
+// AudioDeviceThread implementation
+
+AudioDeviceThread::AudioDeviceThread() {
+}
+
+AudioDeviceThread::~AudioDeviceThread() { DCHECK(!thread_.get()); }
+
+void AudioDeviceThread::Start(AudioDeviceThread::Callback* callback,
+ base::SyncSocket::Handle socket,
+ const char* thread_name) {
+ base::AutoLock auto_lock(thread_lock_);
+ CHECK(thread_.get() == NULL);
+ thread_ = new AudioDeviceThread::Thread(callback, socket, thread_name);
+ thread_->Start();
+}
+
+void AudioDeviceThread::Stop(base::MessageLoop* loop_for_join) {
+ base::AutoLock auto_lock(thread_lock_);
+ if (thread_.get()) {
+ thread_->Stop(loop_for_join);
+ thread_ = NULL;
+ }
+}
+
+bool AudioDeviceThread::IsStopped() {
+ base::AutoLock auto_lock(thread_lock_);
+ return thread_.get() == NULL;
+}
+
+// AudioDeviceThread::Thread implementation
+AudioDeviceThread::Thread::Thread(AudioDeviceThread::Callback* callback,
+ base::SyncSocket::Handle socket,
+ const char* thread_name)
+ : thread_(),
+ callback_(callback),
+ socket_(socket),
+ thread_name_(thread_name) {
+}
+
+AudioDeviceThread::Thread::~Thread() {
+ DCHECK(thread_.is_null());
+}
+
+void AudioDeviceThread::Thread::Start() {
+ base::AutoLock auto_lock(callback_lock_);
+ DCHECK(thread_.is_null());
+  // This reference will be released when the thread exits.
+ AddRef();
+
+ PlatformThread::CreateWithPriority(0, this, &thread_,
+ base::kThreadPriority_RealtimeAudio);
+ CHECK(!thread_.is_null());
+}
+
+void AudioDeviceThread::Thread::Stop(base::MessageLoop* loop_for_join) {
+ socket_.Shutdown();
+
+ base::PlatformThreadHandle thread = base::PlatformThreadHandle();
+
+ { // NOLINT
+ base::AutoLock auto_lock(callback_lock_);
+ callback_ = NULL;
+ std::swap(thread, thread_);
+ }
+
+ if (!thread.is_null()) {
+ if (loop_for_join) {
+ loop_for_join->PostTask(FROM_HERE,
+ base::Bind(&base::PlatformThread::Join, thread));
+ } else {
+ base::PlatformThread::Join(thread);
+ }
+ }
+}
+
+void AudioDeviceThread::Thread::ThreadMain() {
+ PlatformThread::SetName(thread_name_);
+
+ // Singleton access is safe from this thread as long as callback is non-NULL.
+ // The callback is the only point where the thread calls out to 'unknown' code
+ // that might touch singletons and the lifetime of the callback is controlled
+ // by another thread on which singleton access is OK as well.
+ base::ThreadRestrictions::SetSingletonAllowed(true);
+
+ { // NOLINT
+ base::AutoLock auto_lock(callback_lock_);
+ if (callback_)
+ callback_->InitializeOnAudioThread();
+ }
+
+ Run();
+
+ // Release the reference for the thread. Note that after this, the Thread
+ // instance will most likely be deleted.
+ Release();
+}
+
+void AudioDeviceThread::Thread::Run() {
+ while (true) {
+ int pending_data = 0;
+ size_t bytes_read = socket_.Receive(&pending_data, sizeof(pending_data));
+ if (bytes_read != sizeof(pending_data)) {
+ DCHECK_EQ(bytes_read, 0U);
+ break;
+ }
+
+ base::AutoLock auto_lock(callback_lock_);
+ if (callback_)
+ callback_->Process(pending_data);
+ }
+}
+
+// AudioDeviceThread::Callback implementation
+
+AudioDeviceThread::Callback::Callback(
+ const AudioParameters& audio_parameters,
+ base::SharedMemoryHandle memory,
+ int memory_length,
+ int total_segments)
+ : audio_parameters_(audio_parameters),
+ samples_per_ms_(audio_parameters.sample_rate() / 1000),
+ bytes_per_ms_(audio_parameters.channels() *
+ (audio_parameters_.bits_per_sample() / 8) *
+ samples_per_ms_),
+ shared_memory_(memory, false),
+ memory_length_(memory_length),
+ total_segments_(total_segments) {
+ CHECK_NE(bytes_per_ms_, 0); // Catch division by zero early.
+ CHECK_NE(samples_per_ms_, 0);
+ CHECK_GT(total_segments_, 0);
+ CHECK_EQ(memory_length_ % total_segments_, 0);
+ segment_length_ = memory_length_ / total_segments_;
+}
+
+AudioDeviceThread::Callback::~Callback() {}
+
+void AudioDeviceThread::Callback::InitializeOnAudioThread() {
+ MapSharedMemory();
+ CHECK(shared_memory_.memory());
+}
+
+} // namespace media.
diff --git a/chromium/media/audio/audio_device_thread.h b/chromium/media/audio/audio_device_thread.h
new file mode 100644
index 00000000000..976f88359ba
--- /dev/null
+++ b/chromium/media/audio/audio_device_thread.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_
+#define MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "base/sync_socket.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/shared_memory_util.h"
+#include "media/base/media_export.h"
+
+namespace base {
+class MessageLoop;
+}
+
+namespace media {
+class AudioBus;
+
+// Data transfer between browser and render process uses a combination
+// of sync sockets and shared memory. To read from the socket and render
+// data, we use a worker thread, a.k.a. the AudioDeviceThread, which reads
+// data from the browser via the socket and fills the shared memory from the
+// audio thread via the AudioDeviceThread::Callback interface/class.
+// For more details see the documentation in audio_device.h.
+//
+// TODO(tommi): Multiple audio input/output device instances should be able to
+// share the same thread instead of spinning one per instance.
+class MEDIA_EXPORT AudioDeviceThread {
+ public:
+ // This is the callback interface/base class that Audio[Output|Input]Device
+ // implements to render input/output data. The callbacks run on the
+ // thread owned by AudioDeviceThread.
+ class Callback {
+ public:
+ Callback(const AudioParameters& audio_parameters,
+ base::SharedMemoryHandle memory,
+ int memory_length,
+ int total_segments);
+ virtual ~Callback();
+
+ // One time initialization for the callback object on the audio thread.
+ void InitializeOnAudioThread();
+
+ // Derived implementations must call shared_memory_.Map appropriately
+ // before Process can be called.
+ virtual void MapSharedMemory() = 0;
+
+ // Called whenever we receive notifications about pending data.
+ virtual void Process(int pending_data) = 0;
+
+ protected:
+ // Protected so that derived classes can access directly.
+ // The variables are 'const' since values are calculated/set in the
+ // constructor and must never change.
+ const AudioParameters audio_parameters_;
+ const int samples_per_ms_;
+ const int bytes_per_ms_;
+
+ base::SharedMemory shared_memory_;
+ const int memory_length_;
+ const int total_segments_;
+ int segment_length_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Callback);
+ };
+
+ AudioDeviceThread();
+ ~AudioDeviceThread();
+
+ // Starts the audio thread. The thread must not already be running.
+ void Start(AudioDeviceThread::Callback* callback,
+ base::SyncSocket::Handle socket,
+ const char* thread_name);
+
+ // This tells the audio thread to stop and clean up the data.
+ // The method can stop the thread synchronously or asynchronously.
+ // In the latter case, the thread will still be running after Stop()
+ // returns, but the callback pointer is cleared so no further callbacks will
+ // be made (IOW after Stop() returns, it is safe to delete the callback).
+ // The |loop_for_join| parameter is required for asynchronous operation
+ // in order to join the worker thread and close the thread handle later via a
+ // posted task.
+ // If set to NULL, function will wait for the thread to exit before returning.
+ void Stop(base::MessageLoop* loop_for_join);
+
+ // Returns true if the thread is stopped or stopping.
+ bool IsStopped();
+
+ private:
+ // Our own private SimpleThread override. We implement this in a
+ // private class so that we get the following benefits:
+ // 1) AudioDeviceThread doesn't expose SimpleThread methods.
+ // I.e. the caller can't call Start()/Stop() - which would be bad.
+ // 2) We override ThreadMain to add additional on-thread initialization
+ // while still synchronized with SimpleThread::Start() to provide
+ // reliable initialization.
+ class Thread;
+
+ base::Lock thread_lock_;
+ scoped_refptr<AudioDeviceThread::Thread> thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioDeviceThread);
+};
+
+} // namespace media.
+
+#endif // MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_
diff --git a/chromium/media/audio/audio_input_controller.cc b/chromium/media/audio/audio_input_controller.cc
new file mode 100644
index 00000000000..31e137e2b17
--- /dev/null
+++ b/chromium/media/audio/audio_input_controller.cc
@@ -0,0 +1,380 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_input_controller.h"
+
+#include "base/bind.h"
+#include "base/threading/thread_restrictions.h"
+#include "media/base/limits.h"
+#include "media/base/scoped_histogram_timer.h"
+
+namespace {
+const int kMaxInputChannels = 2;
+
+// TODO(henrika): remove usage of timers and add support for proper
+// notification of when the input device is removed. This was originally added
+// to resolve http://crbug.com/79936 for Windows platforms. This then caused
+// breakage (very hard to repro bugs!) on other platforms: See
+// http://crbug.com/226327 and http://crbug.com/230972.
+const int kTimerResetIntervalSeconds = 1;
+#if defined(OS_IOS)
+// The first callback on iOS is received after the current background
+// audio has faded away.
+const int kTimerInitialIntervalSeconds = 4;
+#else
+// We have received reports that the timer can be too trigger happy on some
+// Mac devices and the initial timer interval has therefore been increased
+// from 1 second to 5 seconds.
+const int kTimerInitialIntervalSeconds = 5;
+#endif // defined(OS_IOS)
+}
+
+namespace media {
+
+// static
+AudioInputController::Factory* AudioInputController::factory_ = NULL;
+
+AudioInputController::AudioInputController(EventHandler* handler,
+ SyncWriter* sync_writer)
+ : creator_loop_(base::MessageLoopProxy::current()),
+ handler_(handler),
+ stream_(NULL),
+ data_is_active_(false),
+ state_(kEmpty),
+ sync_writer_(sync_writer),
+ max_volume_(0.0) {
+ DCHECK(creator_loop_.get());
+}
+
+AudioInputController::~AudioInputController() {
+ DCHECK(kClosed == state_ || kCreated == state_ || kEmpty == state_);
+}
+
+// static
+scoped_refptr<AudioInputController> AudioInputController::Create(
+ AudioManager* audio_manager,
+ EventHandler* event_handler,
+ const AudioParameters& params,
+ const std::string& device_id) {
+ DCHECK(audio_manager);
+
+ if (!params.IsValid() || (params.channels() > kMaxInputChannels))
+ return NULL;
+
+ if (factory_)
+ return factory_->Create(audio_manager, event_handler, params);
+
+ scoped_refptr<AudioInputController> controller(new AudioInputController(
+ event_handler, NULL));
+
+ controller->message_loop_ = audio_manager->GetMessageLoop();
+
+ // Create and open a new audio input stream from the existing
+ // audio-device thread.
+ if (!controller->message_loop_->PostTask(FROM_HERE,
+ base::Bind(&AudioInputController::DoCreate, controller,
+ base::Unretained(audio_manager), params, device_id))) {
+ controller = NULL;
+ }
+
+ return controller;
+}
+
+// static
+scoped_refptr<AudioInputController> AudioInputController::CreateLowLatency(
+ AudioManager* audio_manager,
+ EventHandler* event_handler,
+ const AudioParameters& params,
+ const std::string& device_id,
+ SyncWriter* sync_writer) {
+ DCHECK(audio_manager);
+ DCHECK(sync_writer);
+
+ if (!params.IsValid() || (params.channels() > kMaxInputChannels))
+ return NULL;
+
+ // Create the AudioInputController object and ensure that it runs on
+ // the audio-manager thread.
+ scoped_refptr<AudioInputController> controller(new AudioInputController(
+ event_handler, sync_writer));
+ controller->message_loop_ = audio_manager->GetMessageLoop();
+
+ // Create and open a new audio input stream from the existing
+ // audio-device thread. Use the provided audio-input device.
+ if (!controller->message_loop_->PostTask(FROM_HERE,
+ base::Bind(&AudioInputController::DoCreate, controller,
+ base::Unretained(audio_manager), params, device_id))) {
+ controller = NULL;
+ }
+
+ return controller;
+}
+
+// static
+scoped_refptr<AudioInputController> AudioInputController::CreateForStream(
+ const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ EventHandler* event_handler,
+ AudioInputStream* stream,
+ SyncWriter* sync_writer) {
+ DCHECK(sync_writer);
+ DCHECK(stream);
+
+ // Create the AudioInputController object and ensure that it runs on
+ // the audio-manager thread.
+ scoped_refptr<AudioInputController> controller(new AudioInputController(
+ event_handler, sync_writer));
+ controller->message_loop_ = message_loop;
+
+ // TODO(miu): See TODO at top of file. Until that's resolved, we need to
+ // disable the error auto-detection here (since the audio mirroring
+ // implementation will reliably report error and close events). Note, of
+ // course, that we're assuming CreateForStream() has been called for the audio
+ // mirroring use case only.
+ if (!controller->message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioInputController::DoCreateForStream, controller,
+ stream, false))) {
+ controller = NULL;
+ }
+
+ return controller;
+}
+
+void AudioInputController::Record() {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &AudioInputController::DoRecord, this));
+}
+
+void AudioInputController::Close(const base::Closure& closed_task) {
+ DCHECK(!closed_task.is_null());
+ DCHECK(creator_loop_->BelongsToCurrentThread());
+
+ message_loop_->PostTaskAndReply(
+ FROM_HERE, base::Bind(&AudioInputController::DoClose, this), closed_task);
+}
+
+void AudioInputController::SetVolume(double volume) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &AudioInputController::DoSetVolume, this, volume));
+}
+
+void AudioInputController::SetAutomaticGainControl(bool enabled) {
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &AudioInputController::DoSetAutomaticGainControl, this, enabled));
+}
+
+void AudioInputController::DoCreate(AudioManager* audio_manager,
+ const AudioParameters& params,
+ const std::string& device_id) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.CreateTime");
+ // TODO(miu): See TODO at top of file. Until that's resolved, assume all
+ // platform audio input requires the |no_data_timer_| be used to auto-detect
+ // errors. In reality, probably only Windows and IOS need to be treated as
+ // unreliable here.
+ DoCreateForStream(audio_manager->MakeAudioInputStream(params, device_id),
+ true);
+}
+
+void AudioInputController::DoCreateForStream(
+ AudioInputStream* stream_to_control, bool enable_nodata_timer) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ DCHECK(!stream_);
+ stream_ = stream_to_control;
+
+ if (!stream_) {
+ handler_->OnError(this);
+ return;
+ }
+
+ if (stream_ && !stream_->Open()) {
+ stream_->Close();
+ stream_ = NULL;
+ handler_->OnError(this);
+ return;
+ }
+
+ DCHECK(!no_data_timer_.get());
+ if (enable_nodata_timer) {
+ // Create the data timer which will call DoCheckForNoData(). The timer
+ // is started in DoRecord() and restarted in each DoCheckForNoData()
+ // callback.
+ no_data_timer_.reset(new base::Timer(
+ FROM_HERE, base::TimeDelta::FromSeconds(kTimerInitialIntervalSeconds),
+ base::Bind(&AudioInputController::DoCheckForNoData,
+ base::Unretained(this)), false));
+ } else {
+ DVLOG(1) << "Disabled: timer check for no data.";
+ }
+
+ state_ = kCreated;
+ handler_->OnCreated(this);
+}
+
+void AudioInputController::DoRecord() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.RecordTime");
+
+ if (state_ != kCreated)
+ return;
+
+ {
+ base::AutoLock auto_lock(lock_);
+ state_ = kRecording;
+ }
+
+ if (no_data_timer_) {
+ // Start the data timer. Once |kTimerResetIntervalSeconds| have passed,
+ // a callback to DoCheckForNoData() is made.
+ no_data_timer_->Reset();
+ }
+
+ stream_->Start(this);
+ handler_->OnRecording(this);
+}
+
+void AudioInputController::DoClose() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.CloseTime");
+
+ // Delete the timer on the same thread that created it.
+ no_data_timer_.reset();
+
+ if (state_ != kClosed) {
+ DoStopCloseAndClearStream(NULL);
+ SetDataIsActive(false);
+
+ if (LowLatencyMode()) {
+ sync_writer_->Close();
+ }
+
+ state_ = kClosed;
+ }
+}
+
+void AudioInputController::DoReportError() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ handler_->OnError(this);
+}
+
+void AudioInputController::DoSetVolume(double volume) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK_GE(volume, 0);
+ DCHECK_LE(volume, 1.0);
+
+ if (state_ != kCreated && state_ != kRecording)
+ return;
+
+ // Only ask for the maximum volume at first call and use cached value
+ // for remaining function calls.
+ if (!max_volume_) {
+ max_volume_ = stream_->GetMaxVolume();
+ }
+
+ if (max_volume_ == 0.0) {
+ DLOG(WARNING) << "Failed to access input volume control";
+ return;
+ }
+
+ // Set the stream volume and scale to a range matched to the platform.
+ stream_->SetVolume(max_volume_ * volume);
+}
+
+void AudioInputController::DoSetAutomaticGainControl(bool enabled) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK_NE(state_, kRecording);
+
+ // Ensure that the AGC state only can be modified before streaming starts.
+ if (state_ != kCreated || state_ == kRecording)
+ return;
+
+ stream_->SetAutomaticGainControl(enabled);
+}
+
+void AudioInputController::DoCheckForNoData() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ if (!GetDataIsActive()) {
+ // The data-is-active marker will be false only if it has been more than
+ // one second since a data packet was recorded. This can happen if a
+ // capture device has been removed or disabled.
+ handler_->OnError(this);
+ return;
+ }
+
+ // Mark data as non-active. The flag will be re-enabled in OnData() each
+ // time a data packet is received. Hence, under normal conditions, the
+ // flag will only be disabled during a very short period.
+ SetDataIsActive(false);
+
+ // Restart the timer to ensure that we check the flag again in
+ // |kTimerResetIntervalSeconds|.
+ no_data_timer_->Start(
+ FROM_HERE, base::TimeDelta::FromSeconds(kTimerResetIntervalSeconds),
+ base::Bind(&AudioInputController::DoCheckForNoData,
+ base::Unretained(this)));
+}
+
+void AudioInputController::OnData(AudioInputStream* stream, const uint8* data,
+ uint32 size, uint32 hardware_delay_bytes,
+ double volume) {
+ {
+ base::AutoLock auto_lock(lock_);
+ if (state_ != kRecording)
+ return;
+ }
+
+ // Mark data as active to ensure that the periodic calls to
+ // DoCheckForNoData() does not report an error to the event handler.
+ SetDataIsActive(true);
+
+ // Use SyncSocket if we are in a low-latency mode.
+ if (LowLatencyMode()) {
+ sync_writer_->Write(data, size, volume);
+ sync_writer_->UpdateRecordedBytes(hardware_delay_bytes);
+ return;
+ }
+
+ handler_->OnData(this, data, size);
+}
+
+void AudioInputController::OnClose(AudioInputStream* stream) {
+ DVLOG(1) << "AudioInputController::OnClose()";
+ // TODO(satish): Sometimes the device driver closes the input stream without
+  // us asking for it (maybe if the device was unplugged?). Check how to handle
+ // such cases here.
+}
+
+void AudioInputController::OnError(AudioInputStream* stream) {
+ // Handle error on the audio-manager thread.
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &AudioInputController::DoReportError, this));
+}
+
+void AudioInputController::DoStopCloseAndClearStream(
+ base::WaitableEvent *done) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // Allow calling unconditionally and bail if we don't have a stream to close.
+ if (stream_ != NULL) {
+ stream_->Stop();
+ stream_->Close();
+ stream_ = NULL;
+ }
+
+ // Should be last in the method, do not touch "this" from here on.
+ if (done != NULL)
+ done->Signal();
+}
+
+void AudioInputController::SetDataIsActive(bool enabled) {
+ base::subtle::Release_Store(&data_is_active_, enabled);
+}
+
+bool AudioInputController::GetDataIsActive() {
+ return (base::subtle::Acquire_Load(&data_is_active_) != false);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_input_controller.h b/chromium/media/audio/audio_input_controller.h
new file mode 100644
index 00000000000..586d47703a1
--- /dev/null
+++ b/chromium/media/audio/audio_input_controller.h
@@ -0,0 +1,274 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_INPUT_CONTROLLER_H_
+#define MEDIA_AUDIO_AUDIO_INPUT_CONTROLLER_H_
+
+#include <string>
+#include "base/atomicops.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/timer/timer.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager_base.h"
+
+// An AudioInputController controls an AudioInputStream and records data
+// from this input stream. The two main methods are Record() and Close() and
+// they are both executed on the audio thread which is injected by the two
+// alternative factory methods, Create() or CreateLowLatency().
+//
+// All public methods of AudioInputController are non-blocking.
+//
+// Here is a state diagram for the AudioInputController:
+//
+// .--> [ Closed / Error ] <--.
+// | |
+// | |
+// [ Created ] ----------> [ Recording ]
+// ^
+// |
+// *[ Empty ]
+//
+// * Initial state
+//
+// State sequences (assuming low-latency):
+//
+// [Creating Thread] [Audio Thread]
+//
+// User AudioInputController EventHandler
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+//   CreateLowLatency() ==>       DoCreate()
+// AudioManager::MakeAudioInputStream()
+// AudioInputStream::Open()
+// .- - - - - - - - - - - - -> OnError()
+// create the data timer
+// .-------------------------> OnCreated()
+// kCreated
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// Record() ==> DoRecord()
+// AudioInputStream::Start()
+// .-------------------------> OnRecording()
+// start the data timer
+// kRecording
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+// Close() ==> DoClose()
+// delete the data timer
+// state_ = kClosed
+// AudioInputStream::Stop()
+// AudioInputStream::Close()
+// SyncWriter::Close()
+// Closure::Run() <-----------------.
+// (closure-task)
+//
+// The audio thread itself is owned by the AudioManager that the
+// AudioInputController holds a reference to. When performing tasks on the
+// audio thread, the controller must not add or release references to the
+// AudioManager or itself (since it in turn holds a reference to the manager).
+//
+namespace media {
+
+class MEDIA_EXPORT AudioInputController
+ : public base::RefCountedThreadSafe<AudioInputController>,
+ public AudioInputStream::AudioInputCallback {
+ public:
+ // An event handler that receives events from the AudioInputController. The
+ // following methods are all called on the audio thread.
+ class MEDIA_EXPORT EventHandler {
+ public:
+ virtual void OnCreated(AudioInputController* controller) = 0;
+ virtual void OnRecording(AudioInputController* controller) = 0;
+ virtual void OnError(AudioInputController* controller) = 0;
+ virtual void OnData(AudioInputController* controller, const uint8* data,
+ uint32 size) = 0;
+
+ protected:
+ virtual ~EventHandler() {}
+ };
+
+ // A synchronous writer interface used by AudioInputController for
+ // synchronous writing.
+ class SyncWriter {
+ public:
+ virtual ~SyncWriter() {}
+
+    // Notify the synchronous writer about the number of bytes in the
+    // soundcard that have been recorded.
+ virtual void UpdateRecordedBytes(uint32 bytes) = 0;
+
+ // Write certain amount of data from |data|. This method returns
+ // number of written bytes.
+ virtual uint32 Write(const void* data, uint32 size, double volume) = 0;
+
+ // Close this synchronous writer.
+ virtual void Close() = 0;
+ };
+
+ // AudioInputController::Create() can use the currently registered Factory
+ // to create the AudioInputController. Factory is intended for testing only.
+ class Factory {
+ public:
+ virtual AudioInputController* Create(AudioManager* audio_manager,
+ EventHandler* event_handler,
+ AudioParameters params) = 0;
+ protected:
+ virtual ~Factory() {}
+ };
+
+ // Factory method for creating an AudioInputController.
+ // The audio device will be created on the audio thread, and when that is
+ // done, the event handler will receive an OnCreated() call from that same
+ // thread. |device_id| is the unique ID of the audio device to be opened.
+ static scoped_refptr<AudioInputController> Create(
+ AudioManager* audio_manager,
+ EventHandler* event_handler,
+ const AudioParameters& params,
+ const std::string& device_id);
+
+ // Sets the factory used by the static method Create(). AudioInputController
+ // does not take ownership of |factory|. A value of NULL results in an
+ // AudioInputController being created directly.
+ static void set_factory_for_testing(Factory* factory) { factory_ = factory; }
+ AudioInputStream* stream_for_testing() { return stream_; }
+
+ // Factory method for creating an AudioInputController for low-latency mode.
+ // The audio device will be created on the audio thread, and when that is
+ // done, the event handler will receive an OnCreated() call from that same
+ // thread.
+ static scoped_refptr<AudioInputController> CreateLowLatency(
+ AudioManager* audio_manager,
+ EventHandler* event_handler,
+ const AudioParameters& params,
+ const std::string& device_id,
+ // External synchronous writer for audio controller.
+ SyncWriter* sync_writer);
+
+ // Factory method for creating an AudioInputController for low-latency mode,
+ // taking ownership of |stream|. The stream will be opened on the audio
+ // thread, and when that is done, the event handler will receive an
+ // OnCreated() call from that same thread.
+ static scoped_refptr<AudioInputController> CreateForStream(
+ const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ EventHandler* event_handler,
+ AudioInputStream* stream,
+ // External synchronous writer for audio controller.
+ SyncWriter* sync_writer);
+
+ // Starts recording using the created audio input stream.
+ // This method is called on the creator thread.
+ virtual void Record();
+
+ // Closes the audio input stream. The state is changed and the resources
+ // are freed on the audio thread. |closed_task| is then executed on the thread
+ // that called Close().
+ // Callbacks (EventHandler and SyncWriter) must exist until |closed_task|
+ // is called.
+ // It is safe to call this method more than once. Calls after the first one
+ // will have no effect.
+ // This method trampolines to the audio thread.
+ virtual void Close(const base::Closure& closed_task);
+
+ // Sets the capture volume of the input stream. The value 0.0 corresponds
+ // to muted and 1.0 to maximum volume.
+ virtual void SetVolume(double volume);
+
+ // Sets the Automatic Gain Control (AGC) state of the input stream.
+ // Changing the AGC state is not supported while recording is active.
+ virtual void SetAutomaticGainControl(bool enabled);
+
+ // AudioInputCallback implementation. Threading details depends on the
+ // device-specific implementation.
+ virtual void OnData(AudioInputStream* stream, const uint8* src, uint32 size,
+ uint32 hardware_delay_bytes, double volume) OVERRIDE;
+ virtual void OnClose(AudioInputStream* stream) OVERRIDE;
+ virtual void OnError(AudioInputStream* stream) OVERRIDE;
+
+ bool LowLatencyMode() const { return sync_writer_ != NULL; }
+
+ protected:
+ friend class base::RefCountedThreadSafe<AudioInputController>;
+
+ // Internal state of the source.
+ enum State {
+ kEmpty,
+ kCreated,
+ kRecording,
+ kClosed,
+ kError
+ };
+
+ AudioInputController(EventHandler* handler, SyncWriter* sync_writer);
+ virtual ~AudioInputController();
+
+ // Methods called on the audio thread (owned by the AudioManager).
+ void DoCreate(AudioManager* audio_manager, const AudioParameters& params,
+ const std::string& device_id);
+ void DoCreateForStream(AudioInputStream* stream_to_control,
+ bool enable_nodata_timer);
+ void DoRecord();
+ void DoClose();
+ void DoReportError();
+ void DoSetVolume(double volume);
+ void DoSetAutomaticGainControl(bool enabled);
+
+ // Method which ensures that OnError() is triggered when data recording
+ // times out. Called on the audio thread.
+ void DoCheckForNoData();
+
+  // Helper method that stops, closes, and NULLs out |stream_|.
+ // Signals event when done if the event is not NULL.
+ void DoStopCloseAndClearStream(base::WaitableEvent* done);
+
+ void SetDataIsActive(bool enabled);
+ bool GetDataIsActive();
+
+ // Gives access to the message loop of the creating thread.
+ scoped_refptr<base::MessageLoopProxy> creator_loop_;
+
+ // The message loop of audio-manager thread that this object runs on.
+ scoped_refptr<base::MessageLoopProxy> message_loop_;
+
+ // Contains the AudioInputController::EventHandler which receives state
+ // notifications from this class.
+ EventHandler* handler_;
+
+ // Pointer to the audio input stream object.
+ AudioInputStream* stream_;
+
+ // |no_data_timer_| is used to call OnError() when we stop receiving
+ // OnData() calls without an OnClose() call. This can occur
+ // when an audio input device is unplugged whilst recording on Windows.
+ // See http://crbug.com/79936 for details.
+ // This member is only touched by the audio thread.
+ scoped_ptr<base::Timer> no_data_timer_;
+
+ // This flag is used to signal that we are receiving OnData() calls, i.e,
+ // that data is active. It can be touched by the audio thread and by the
+ // low-level audio thread which calls OnData(). E.g. on Windows, the
+ // low-level audio thread is called wasapi_capture_thread.
+ base::subtle::Atomic32 data_is_active_;
+
+ // |state_| is written on the audio thread and is read on the hardware audio
+ // thread. These operations need to be locked. But lock is not required for
+ // reading on the audio input controller thread.
+ State state_;
+
+ base::Lock lock_;
+
+ // SyncWriter is used only in low-latency mode for synchronous writing.
+ SyncWriter* sync_writer_;
+
+ static Factory* factory_;
+
+ double max_volume_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioInputController);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_INPUT_CONTROLLER_H_
diff --git a/chromium/media/audio/audio_input_controller_unittest.cc b/chromium/media/audio/audio_input_controller_unittest.cc
new file mode 100644
index 00000000000..b96ef3ad016
--- /dev/null
+++ b/chromium/media/audio/audio_input_controller_unittest.cc
@@ -0,0 +1,235 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_timeouts.h"
+#include "media/audio/audio_input_controller.h"
+#include "media/audio/audio_manager_base.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Exactly;
+using ::testing::InvokeWithoutArgs;
+using ::testing::NotNull;
+
+namespace media {
+
+static const int kSampleRate = AudioParameters::kAudioCDSampleRate;
+static const int kBitsPerSample = 16;
+static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+static const int kSamplesPerPacket = kSampleRate / 10;
+
+// Posts base::MessageLoop::QuitClosure() on specified message loop.
+ACTION_P(QuitMessageLoop, loop_or_proxy) {
+ loop_or_proxy->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+}
+
+// Posts base::MessageLoop::QuitClosure() on specified message loop after a
+// certain number of calls given by |limit|.
+ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop_or_proxy) {
+ if (++*count >= limit) {
+ loop_or_proxy->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+ }
+}
+
+// Closes the AudioInputController synchronously.
+static void CloseAudioController(AudioInputController* controller) {
+ controller->Close(base::MessageLoop::QuitClosure());
+ base::MessageLoop::current()->Run();
+}
+
+class MockAudioInputControllerEventHandler
+ : public AudioInputController::EventHandler {
+ public:
+ MockAudioInputControllerEventHandler() {}
+
+ MOCK_METHOD1(OnCreated, void(AudioInputController* controller));
+ MOCK_METHOD1(OnRecording, void(AudioInputController* controller));
+ MOCK_METHOD1(OnError, void(AudioInputController* controller));
+ MOCK_METHOD3(OnData, void(AudioInputController* controller,
+ const uint8* data, uint32 size));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAudioInputControllerEventHandler);
+};
+
+// Test fixture.
+class AudioInputControllerTest : public testing::Test {
+ public:
+ AudioInputControllerTest() {}
+ virtual ~AudioInputControllerTest() {}
+
+ protected:
+ base::MessageLoop message_loop_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioInputControllerTest);
+};
+
+// Test AudioInputController for create and close without recording audio.
+TEST_F(AudioInputControllerTest, CreateAndClose) {
+ MockAudioInputControllerEventHandler event_handler;
+
+ // OnCreated() will be posted once.
+ EXPECT_CALL(event_handler, OnCreated(NotNull()))
+ .WillOnce(QuitMessageLoop(&message_loop_));
+
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
+ kSampleRate, kBitsPerSample, kSamplesPerPacket);
+ scoped_refptr<AudioInputController> controller =
+ AudioInputController::Create(audio_manager.get(), &event_handler, params,
+ AudioManagerBase::kDefaultDeviceId);
+ ASSERT_TRUE(controller.get());
+
+ // Wait for OnCreated() to fire.
+ message_loop_.Run();
+
+ // Close the AudioInputController synchronously.
+ CloseAudioController(controller.get());
+}
+
+// Test a normal call sequence of create, record and close.
+TEST_F(AudioInputControllerTest, RecordAndClose) {
+ MockAudioInputControllerEventHandler event_handler;
+ int count = 0;
+
+ // OnCreated() will be called once.
+ EXPECT_CALL(event_handler, OnCreated(NotNull()))
+ .Times(Exactly(1));
+
+ // OnRecording() will be called only once.
+ EXPECT_CALL(event_handler, OnRecording(NotNull()))
+ .Times(Exactly(1));
+
+  // OnData() shall be called at least ten times.
+ EXPECT_CALL(event_handler, OnData(NotNull(), NotNull(), _))
+ .Times(AtLeast(10))
+ .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10,
+ message_loop_.message_loop_proxy()));
+
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
+ kSampleRate, kBitsPerSample, kSamplesPerPacket);
+
+ // Creating the AudioInputController should render an OnCreated() call.
+ scoped_refptr<AudioInputController> controller =
+ AudioInputController::Create(audio_manager.get(), &event_handler, params,
+ AudioManagerBase::kDefaultDeviceId);
+ ASSERT_TRUE(controller.get());
+
+ // Start recording and trigger one OnRecording() call.
+ controller->Record();
+
+ // Record and wait until ten OnData() callbacks are received.
+ message_loop_.Run();
+
+ // Close the AudioInputController synchronously.
+ CloseAudioController(controller.get());
+}
+
+// Test that the AudioInputController reports an error when the input stream
+// stops without an OnClose() callback. This can happen when the underlying
+// audio layer stops feeding data as a result of a removed microphone device.
+TEST_F(AudioInputControllerTest, RecordAndError) {
+ MockAudioInputControllerEventHandler event_handler;
+ int count = 0;
+
+ // OnCreated() will be called once.
+ EXPECT_CALL(event_handler, OnCreated(NotNull()))
+ .Times(Exactly(1));
+
+ // OnRecording() will be called only once.
+ EXPECT_CALL(event_handler, OnRecording(NotNull()))
+ .Times(Exactly(1));
+
+  // OnData() shall be called at least ten times.
+ EXPECT_CALL(event_handler, OnData(NotNull(), NotNull(), _))
+ .Times(AtLeast(10))
+ .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10,
+ message_loop_.message_loop_proxy()));
+
+ // OnError() will be called after the data stream stops while the
+ // controller is in a recording state.
+ EXPECT_CALL(event_handler, OnError(NotNull()))
+ .Times(Exactly(1))
+ .WillOnce(QuitMessageLoop(&message_loop_));
+
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
+ kSampleRate, kBitsPerSample, kSamplesPerPacket);
+
+ // Creating the AudioInputController should render an OnCreated() call.
+ scoped_refptr<AudioInputController> controller =
+ AudioInputController::Create(audio_manager.get(), &event_handler, params,
+ AudioManagerBase::kDefaultDeviceId);
+ ASSERT_TRUE(controller.get());
+
+ // Start recording and trigger one OnRecording() call.
+ controller->Record();
+
+ // Record and wait until ten OnData() callbacks are received.
+ message_loop_.Run();
+
+ // Stop the stream and verify that OnError() is posted.
+ AudioInputStream* stream = controller->stream_for_testing();
+ stream->Stop();
+ message_loop_.Run();
+
+ // Close the AudioInputController synchronously.
+ CloseAudioController(controller.get());
+}
+
+// Test that AudioInputController rejects insanely large packet sizes.
+TEST_F(AudioInputControllerTest, SamplesPerPacketTooLarge) {
+ // Create an audio device with a very large packet size.
+ MockAudioInputControllerEventHandler event_handler;
+
+ // OnCreated() shall not be called in this test.
+ EXPECT_CALL(event_handler, OnCreated(NotNull()))
+ .Times(Exactly(0));
+
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
+ kSampleRate, kBitsPerSample, kSamplesPerPacket * 1000);
+ scoped_refptr<AudioInputController> controller =
+ AudioInputController::Create(audio_manager.get(), &event_handler, params,
+ AudioManagerBase::kDefaultDeviceId);
+ ASSERT_FALSE(controller.get());
+}
+
+// Test calling AudioInputController::Close multiple times.
+TEST_F(AudioInputControllerTest, CloseTwice) {
+ MockAudioInputControllerEventHandler event_handler;
+
+  // OnCreated() will be called only once.
+ EXPECT_CALL(event_handler, OnCreated(NotNull()));
+
+ // OnRecording() will be called only once.
+ EXPECT_CALL(event_handler, OnRecording(NotNull()))
+ .Times(Exactly(1));
+
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
+ kSampleRate, kBitsPerSample, kSamplesPerPacket);
+ scoped_refptr<AudioInputController> controller =
+ AudioInputController::Create(audio_manager.get(), &event_handler, params,
+ AudioManagerBase::kDefaultDeviceId);
+ ASSERT_TRUE(controller.get());
+
+ controller->Record();
+
+ controller->Close(base::MessageLoop::QuitClosure());
+ base::MessageLoop::current()->Run();
+
+ controller->Close(base::MessageLoop::QuitClosure());
+ base::MessageLoop::current()->Run();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_input_device.cc b/chromium/media/audio/audio_input_device.cc
new file mode 100644
index 00000000000..87fd57143cd
--- /dev/null
+++ b/chromium/media/audio/audio_input_device.cc
@@ -0,0 +1,315 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_input_device.h"
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/base/audio_bus.h"
+
+namespace media {
+
+// The number of shared memory buffer segments indicated to browser process
+// in order to avoid data overwriting. This number can be any positive number,
+// dependent how fast the renderer process can pick up captured data from
+// shared memory.
+static const int kRequestedSharedMemoryCount = 10;
+
+// Takes care of invoking the capture callback on the audio thread.
+// An instance of this class is created for each capture stream in
+// OnLowLatencyCreated().
+class AudioInputDevice::AudioThreadCallback
+ : public AudioDeviceThread::Callback {
+ public:
+ AudioThreadCallback(const AudioParameters& audio_parameters,
+ base::SharedMemoryHandle memory,
+ int memory_length,
+ int total_segments,
+ CaptureCallback* capture_callback);
+ virtual ~AudioThreadCallback();
+
+ virtual void MapSharedMemory() OVERRIDE;
+
+ // Called whenever we receive notifications about pending data.
+ virtual void Process(int pending_data) OVERRIDE;
+
+ private:
+ int current_segment_id_;
+ CaptureCallback* capture_callback_;
+ scoped_ptr<AudioBus> audio_bus_;
+ DISALLOW_COPY_AND_ASSIGN(AudioThreadCallback);
+};
+
+AudioInputDevice::AudioInputDevice(
+ scoped_ptr<AudioInputIPC> ipc,
+ const scoped_refptr<base::MessageLoopProxy>& io_loop)
+ : ScopedLoopObserver(io_loop),
+ callback_(NULL),
+ ipc_(ipc.Pass()),
+ state_(IDLE),
+ session_id_(0),
+ agc_is_enabled_(false),
+ stopping_hack_(false) {
+ CHECK(ipc_);
+
+ // The correctness of the code depends on the relative values assigned in the
+ // State enum.
+ COMPILE_ASSERT(IPC_CLOSED < IDLE, invalid_enum_value_assignment_0);
+ COMPILE_ASSERT(IDLE < CREATING_STREAM, invalid_enum_value_assignment_1);
+ COMPILE_ASSERT(CREATING_STREAM < RECORDING, invalid_enum_value_assignment_2);
+}
+
+void AudioInputDevice::Initialize(const AudioParameters& params,
+ CaptureCallback* callback,
+ int session_id) {
+ DCHECK(params.IsValid());
+ DCHECK(!callback_);
+ DCHECK_EQ(0, session_id_);
+ audio_parameters_ = params;
+ callback_ = callback;
+ session_id_ = session_id;
+}
+
+void AudioInputDevice::Start() {
+ DCHECK(callback_) << "Initialize hasn't been called";
+ DVLOG(1) << "Start()";
+ message_loop()->PostTask(FROM_HERE,
+ base::Bind(&AudioInputDevice::StartUpOnIOThread, this));
+}
+
+void AudioInputDevice::Stop() {
+ DVLOG(1) << "Stop()";
+
+ {
+ base::AutoLock auto_lock(audio_thread_lock_);
+ audio_thread_.Stop(base::MessageLoop::current());
+ stopping_hack_ = true;
+ }
+
+ message_loop()->PostTask(FROM_HERE,
+ base::Bind(&AudioInputDevice::ShutDownOnIOThread, this));
+}
+
+void AudioInputDevice::SetVolume(double volume) {
+ if (volume < 0 || volume > 1.0) {
+ DLOG(ERROR) << "Invalid volume value specified";
+ return;
+ }
+
+ message_loop()->PostTask(FROM_HERE,
+ base::Bind(&AudioInputDevice::SetVolumeOnIOThread, this, volume));
+}
+
+void AudioInputDevice::SetAutomaticGainControl(bool enabled) {
+ DVLOG(1) << "SetAutomaticGainControl(enabled=" << enabled << ")";
+ message_loop()->PostTask(FROM_HERE,
+ base::Bind(&AudioInputDevice::SetAutomaticGainControlOnIOThread,
+ this, enabled));
+}
+
+void AudioInputDevice::OnStreamCreated(
+ base::SharedMemoryHandle handle,
+ base::SyncSocket::Handle socket_handle,
+ int length,
+ int total_segments) {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+#if defined(OS_WIN)
+ DCHECK(handle);
+ DCHECK(socket_handle);
+#else
+ DCHECK_GE(handle.fd, 0);
+ DCHECK_GE(socket_handle, 0);
+#endif
+ DCHECK_GT(length, 0);
+
+ if (state_ != CREATING_STREAM)
+ return;
+
+ base::AutoLock auto_lock(audio_thread_lock_);
+ // TODO(miu): See TODO in OnStreamCreated method for AudioOutputDevice.
+ // Interface changes need to be made; likely, after AudioInputDevice is merged
+ // into AudioOutputDevice (http://crbug.com/179597).
+ if (stopping_hack_)
+ return;
+
+ DCHECK(audio_thread_.IsStopped());
+ audio_callback_.reset(
+ new AudioInputDevice::AudioThreadCallback(
+ audio_parameters_, handle, length, total_segments, callback_));
+ audio_thread_.Start(audio_callback_.get(), socket_handle, "AudioInputDevice");
+
+ state_ = RECORDING;
+ ipc_->RecordStream();
+}
+
+void AudioInputDevice::OnVolume(double volume) {
+ NOTIMPLEMENTED();
+}
+
+void AudioInputDevice::OnStateChanged(
+ AudioInputIPCDelegate::State state) {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+
+ // Do nothing if the stream has been closed.
+ if (state_ < CREATING_STREAM)
+ return;
+
+ // TODO(miu): Clean-up inconsistent and incomplete handling here.
+ // http://crbug.com/180640
+ switch (state) {
+ case AudioInputIPCDelegate::kStopped:
+ ShutDownOnIOThread();
+ break;
+ case AudioInputIPCDelegate::kRecording:
+ NOTIMPLEMENTED();
+ break;
+ case AudioInputIPCDelegate::kError:
+ DLOG(WARNING) << "AudioInputDevice::OnStateChanged(kError)";
+ // Don't dereference the callback object if the audio thread
+ // is stopped or stopping. That could mean that the callback
+ // object has been deleted.
+ // TODO(tommi): Add an explicit contract for clearing the callback
+ // object. Possibly require calling Initialize again or provide
+ // a callback object via Start() and clear it in Stop().
+ if (!audio_thread_.IsStopped())
+ callback_->OnCaptureError();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+void AudioInputDevice::OnIPCClosed() {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+ state_ = IPC_CLOSED;
+ ipc_.reset();
+}
+
+AudioInputDevice::~AudioInputDevice() {
+ // TODO(henrika): The current design requires that the user calls
+ // Stop before deleting this class.
+ DCHECK(audio_thread_.IsStopped());
+}
+
+void AudioInputDevice::StartUpOnIOThread() {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+
+ // Make sure we don't call Start() more than once.
+ if (state_ != IDLE)
+ return;
+
+ if (session_id_ <= 0) {
+ DLOG(WARNING) << "Invalid session id for the input stream " << session_id_;
+ return;
+ }
+
+ state_ = CREATING_STREAM;
+ ipc_->CreateStream(this, session_id_, audio_parameters_,
+ agc_is_enabled_, kRequestedSharedMemoryCount);
+}
+
+void AudioInputDevice::ShutDownOnIOThread() {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+
+ // Close the stream, if we haven't already.
+ if (state_ >= CREATING_STREAM) {
+ ipc_->CloseStream();
+ state_ = IDLE;
+ agc_is_enabled_ = false;
+ }
+
+ // We can run into an issue where ShutDownOnIOThread is called right after
+ // OnStreamCreated is called in cases where Start/Stop are called before we
+ // get the OnStreamCreated callback. To handle that corner case, we call
+ // Stop(). In most cases, the thread will already be stopped.
+ //
+  // Another situation is when the IO thread goes away before Stop() is called
+  // in which case, we cannot use the message loop to close the thread handle
+  // and cannot rely on the main thread existing either.
+ base::AutoLock auto_lock_(audio_thread_lock_);
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+ audio_thread_.Stop(NULL);
+ audio_callback_.reset();
+ stopping_hack_ = false;
+}
+
+void AudioInputDevice::SetVolumeOnIOThread(double volume) {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+ if (state_ >= CREATING_STREAM)
+ ipc_->SetVolume(volume);
+}
+
+void AudioInputDevice::SetAutomaticGainControlOnIOThread(bool enabled) {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+
+ if (state_ >= CREATING_STREAM) {
+ DLOG(WARNING) << "The AGC state can not be modified after starting.";
+ return;
+ }
+
+ // We simply store the new AGC setting here. This value will be used when
+ // a new stream is initialized and by GetAutomaticGainControl().
+ agc_is_enabled_ = enabled;
+}
+
+void AudioInputDevice::WillDestroyCurrentMessageLoop() {
+ LOG(ERROR) << "IO loop going away before the input device has been stopped";
+ ShutDownOnIOThread();
+}
+
+// AudioInputDevice::AudioThreadCallback
+AudioInputDevice::AudioThreadCallback::AudioThreadCallback(
+ const AudioParameters& audio_parameters,
+ base::SharedMemoryHandle memory,
+ int memory_length,
+ int total_segments,
+ CaptureCallback* capture_callback)
+ : AudioDeviceThread::Callback(audio_parameters, memory, memory_length,
+ total_segments),
+ current_segment_id_(0),
+ capture_callback_(capture_callback) {
+ audio_bus_ = AudioBus::Create(audio_parameters_);
+}
+
+AudioInputDevice::AudioThreadCallback::~AudioThreadCallback() {
+}
+
+void AudioInputDevice::AudioThreadCallback::MapSharedMemory() {
+ shared_memory_.Map(memory_length_);
+}
+
+void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
+ // The shared memory represents parameters, size of the data buffer and the
+ // actual data buffer containing audio data. Map the memory into this
+ // structure and parse out parameters and the data area.
+ uint8* ptr = static_cast<uint8*>(shared_memory_.memory());
+ ptr += current_segment_id_ * segment_length_;
+ AudioInputBuffer* buffer = reinterpret_cast<AudioInputBuffer*>(ptr);
+ DCHECK_EQ(buffer->params.size,
+ segment_length_ - sizeof(AudioInputBufferParameters));
+ double volume = buffer->params.volume;
+
+ int audio_delay_milliseconds = pending_data / bytes_per_ms_;
+ int16* memory = reinterpret_cast<int16*>(&buffer->audio[0]);
+ const int bytes_per_sample = sizeof(memory[0]);
+
+ if (++current_segment_id_ >= total_segments_)
+ current_segment_id_ = 0;
+
+ // Deinterleave each channel and convert to 32-bit floating-point
+ // with nominal range -1.0 -> +1.0.
+ audio_bus_->FromInterleaved(memory, audio_bus_->frames(), bytes_per_sample);
+
+ // Deliver captured data to the client in floating point format
+ // and update the audio-delay measurement.
+ capture_callback_->Capture(audio_bus_.get(),
+ audio_delay_milliseconds, volume);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_input_device.h b/chromium/media/audio/audio_input_device.h
new file mode 100644
index 00000000000..bb7d0ff4f71
--- /dev/null
+++ b/chromium/media/audio/audio_input_device.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Low-latency audio capturing class utilizing audio input stream provided
+// by a server (browser) process by use of an IPC interface.
+//
+// Relationship of classes:
+//
+// AudioInputController AudioInputDevice
+// ^ ^
+// | |
+// v IPC v
+// AudioInputRendererHost <-----------> AudioInputIPC
+// ^ (AudioInputMessageFilter)
+// |
+// v
+// AudioInputDeviceManager
+//
+// Transportation of audio samples from the browser to the render process
+// is done by using shared memory in combination with a SyncSocket.
+// The AudioInputDevice user registers an AudioInputDevice::CaptureCallback by
+// calling Initialize(). The callback will be called with recorded audio from
+// the underlying audio layers.
+// The session ID is used by the AudioInputRendererHost to start the device
+// referenced by this ID.
+//
+// State sequences:
+//
+// Start -> InitializeOnIOThread -> CreateStream ->
+// <- OnStreamCreated <-
+// -> StartOnIOThread -> PlayStream ->
+//
+//
+// AudioInputDevice::Capture => low latency audio transport on audio thread =>
+// |
+// Stop --> ShutDownOnIOThread ------> CloseStream -> Close
+//
+// This class depends on two threads to function:
+//
+// 1. An IO thread.
+// This thread is used to asynchronously process Start/Stop etc operations
+// that are available via the public interface. The public methods are
+// asynchronous and simply post a task to the IO thread to actually perform
+// the work.
+// 2. Audio transport thread.
+// Responsible for calling the CaptureCallback and feed audio samples from
+// the server side audio layer using a socket and shared memory.
+//
+// Implementation notes:
+// - The user must call Stop() before deleting the class instance.
+
+#ifndef MEDIA_AUDIO_AUDIO_INPUT_DEVICE_H_
+#define MEDIA_AUDIO_AUDIO_INPUT_DEVICE_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "media/audio/audio_device_thread.h"
+#include "media/audio/audio_input_ipc.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/scoped_loop_observer.h"
+#include "media/base/audio_capturer_source.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// TODO(henrika): This class is based on the AudioOutputDevice class and it has
+// many components in common. Investigate potential for re-factoring.
+// See http://crbug.com/179597.
+// TODO(henrika): Add support for event handling (e.g. OnStateChanged,
+// OnCaptureStopped etc.) and ensure that we can deliver these notifications
+// to any clients using this class.
+class MEDIA_EXPORT AudioInputDevice
+ : NON_EXPORTED_BASE(public AudioCapturerSource),
+ NON_EXPORTED_BASE(public AudioInputIPCDelegate),
+ NON_EXPORTED_BASE(public ScopedLoopObserver) {
+ public:
+ // NOTE: Clients must call Initialize() before using.
+ AudioInputDevice(scoped_ptr<AudioInputIPC> ipc,
+ const scoped_refptr<base::MessageLoopProxy>& io_loop);
+
+ // AudioCapturerSource implementation.
+ virtual void Initialize(const AudioParameters& params,
+ CaptureCallback* callback,
+ int session_id) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
+
+ protected:
+ friend class base::RefCountedThreadSafe<AudioInputDevice>;
+ virtual ~AudioInputDevice();
+
+ // Methods called on IO thread ----------------------------------------------
+ // AudioInputIPCDelegate implementation.
+ virtual void OnStreamCreated(base::SharedMemoryHandle handle,
+ base::SyncSocket::Handle socket_handle,
+ int length,
+ int total_segments) OVERRIDE;
+ virtual void OnVolume(double volume) OVERRIDE;
+ virtual void OnStateChanged(
+ AudioInputIPCDelegate::State state) OVERRIDE;
+ virtual void OnIPCClosed() OVERRIDE;
+
+ private:
+ // Note: The ordering of members in this enum is critical to correct behavior!
+ enum State {
+ IPC_CLOSED, // No more IPCs can take place.
+ IDLE, // Not started.
+ CREATING_STREAM, // Waiting for OnStreamCreated() to be called back.
+ RECORDING, // Receiving audio data.
+ };
+
+ // Methods called on IO thread ----------------------------------------------
+  // The following methods are tasks posted on the IO thread that need to
+  // be executed on that thread. They interact with AudioInputMessageFilter and
+  // send IPC messages on that thread.
+ void StartUpOnIOThread();
+ void ShutDownOnIOThread();
+ void SetVolumeOnIOThread(double volume);
+ void SetAutomaticGainControlOnIOThread(bool enabled);
+
+ // base::MessageLoop::DestructionObserver implementation for the IO loop.
+ // If the IO loop dies before we do, we shut down the audio thread from here.
+ virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
+
+ AudioParameters audio_parameters_;
+
+ CaptureCallback* callback_;
+
+ // A pointer to the IPC layer that takes care of sending requests over to
+ // the AudioInputRendererHost. Only valid when state_ != IPC_CLOSED and must
+ // only be accessed on the IO thread.
+ scoped_ptr<AudioInputIPC> ipc_;
+
+ // Current state (must only be accessed from the IO thread). See comments for
+ // State enum above.
+ State state_;
+
+ // The media session ID used to identify which input device to be started.
+ // Only modified in Initialize() and ShutDownOnIOThread().
+ int session_id_;
+
+ // Stores the Automatic Gain Control state. Default is false.
+ // Only modified on the IO thread.
+ bool agc_is_enabled_;
+
+ // Our audio thread callback class. See source file for details.
+ class AudioThreadCallback;
+
+ // In order to avoid a race between OnStreamCreated and Stop(), we use this
+ // guard to control stopping and starting the audio thread.
+ base::Lock audio_thread_lock_;
+ AudioDeviceThread audio_thread_;
+ scoped_ptr<AudioInputDevice::AudioThreadCallback> audio_callback_;
+
+ // Temporary hack to ignore OnStreamCreated() due to the user calling Stop()
+ // so we don't start the audio thread pointing to a potentially freed
+ // |callback_|.
+ //
+ // TODO(miu): Replace this by changing AudioCapturerSource to accept the
+ // callback via Start(). See http://crbug.com/151051 for details.
+ bool stopping_hack_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AudioInputDevice);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_INPUT_DEVICE_H_
diff --git a/chromium/media/audio/audio_input_device_unittest.cc b/chromium/media/audio/audio_input_device_unittest.cc
new file mode 100644
index 00000000000..dc211a48a93
--- /dev/null
+++ b/chromium/media/audio/audio_input_device_unittest.cc
@@ -0,0 +1,199 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/environment.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_manager_base.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/wavein_input_win.h"
+#endif
+
+namespace media {
+
+// Test fixture which allows us to override the default enumeration API on
+// Windows.
+class AudioInputDeviceTest
+ : public ::testing::Test {
+ protected:
+ AudioInputDeviceTest()
+ : audio_manager_(AudioManager::Create())
+#if defined(OS_WIN)
+ , com_init_(base::win::ScopedCOMInitializer::kMTA)
+#endif
+ {
+ }
+
+#if defined(OS_WIN)
+ bool SetMMDeviceEnumeration() {
+ AudioManagerWin* amw = static_cast<AudioManagerWin*>(audio_manager_.get());
+ // Windows Wave is used as default if Windows XP was detected =>
+ // return false since MMDevice is not supported on XP.
+ if (amw->enumeration_type() == AudioManagerWin::kWaveEnumeration)
+ return false;
+
+ amw->SetEnumerationType(AudioManagerWin::kMMDeviceEnumeration);
+ return true;
+ }
+
+ void SetWaveEnumeration() {
+ AudioManagerWin* amw = static_cast<AudioManagerWin*>(audio_manager_.get());
+ amw->SetEnumerationType(AudioManagerWin::kWaveEnumeration);
+ }
+
+ std::string GetDeviceIdFromPCMWaveInAudioInputStream(
+ const std::string& device_id) {
+ AudioManagerWin* amw = static_cast<AudioManagerWin*>(audio_manager_.get());
+ AudioParameters parameters(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ AudioParameters::kAudioCDSampleRate, 16,
+ 1024);
+ scoped_ptr<PCMWaveInAudioInputStream> stream(
+ static_cast<PCMWaveInAudioInputStream*>(
+ amw->CreatePCMWaveInAudioInputStream(parameters, device_id)));
+ return stream.get() ? stream->device_id_ : std::string();
+ }
+#endif
+
+ // Helper method which verifies that the device list starts with a valid
+ // default record followed by non-default device names.
+ static void CheckDeviceNames(const AudioDeviceNames& device_names) {
+ if (!device_names.empty()) {
+ AudioDeviceNames::const_iterator it = device_names.begin();
+
+ // The first device in the list should always be the default device.
+ EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceName),
+ it->device_name);
+ EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceId), it->unique_id);
+ ++it;
+
+ // Other devices should have non-empty name and id and should not contain
+ // default name or id.
+ while (it != device_names.end()) {
+ EXPECT_FALSE(it->device_name.empty());
+ EXPECT_FALSE(it->unique_id.empty());
+ EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceName),
+ it->device_name);
+ EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId),
+ it->unique_id);
+ ++it;
+ }
+ } else {
+ // Log a warning so we can see the status on the build bots. No need to
+ // break the test though since this does successfully test the code and
+ // some failure cases.
+ LOG(WARNING) << "No input devices detected";
+ }
+ }
+
+ bool CanRunAudioTest() {
+ return audio_manager_->HasAudioInputDevices();
+ }
+
+ scoped_ptr<AudioManager> audio_manager_;
+
+#if defined(OS_WIN)
+ // The MMDevice API requires COM to be initialized on the current thread.
+ base::win::ScopedCOMInitializer com_init_;
+#endif
+};
+
+// Test that devices can be enumerated.
+TEST_F(AudioInputDeviceTest, EnumerateDevices) {
+ if (!CanRunAudioTest())
+ return;
+
+ AudioDeviceNames device_names;
+ audio_manager_->GetAudioInputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+}
+
+// Run additional tests for Windows since enumeration can be done using
+// two different APIs. MMDevice is default for Vista and higher and Wave
+// is default for XP and lower.
+#if defined(OS_WIN)
+
+// Override default enumeration API and force usage of Windows MMDevice.
+// This test will only run on Windows Vista and higher.
+TEST_F(AudioInputDeviceTest, EnumerateDevicesWinMMDevice) {
+ if (!CanRunAudioTest())
+ return;
+
+ AudioDeviceNames device_names;
+ if (!SetMMDeviceEnumeration()) {
+ // Usage of MMDevice will fail on XP and lower.
+ LOG(WARNING) << "MM device enumeration is not supported.";
+ return;
+ }
+ audio_manager_->GetAudioInputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+}
+
+// Override default enumeration API and force usage of Windows Wave.
+// This test will run on Windows XP, Windows Vista and Windows 7.
+TEST_F(AudioInputDeviceTest, EnumerateDevicesWinWave) {
+ if (!CanRunAudioTest())
+ return;
+
+ AudioDeviceNames device_names;
+ SetWaveEnumeration();
+ audio_manager_->GetAudioInputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+}
+
+TEST_F(AudioInputDeviceTest, WinXPDeviceIdUnchanged) {
+ if (!CanRunAudioTest())
+ return;
+
+ AudioDeviceNames xp_device_names;
+ SetWaveEnumeration();
+ audio_manager_->GetAudioInputDeviceNames(&xp_device_names);
+ CheckDeviceNames(xp_device_names);
+
+ // Device ID should remain unchanged, including the default device ID.
+ for (AudioDeviceNames::iterator i = xp_device_names.begin();
+ i != xp_device_names.end(); ++i) {
+ EXPECT_EQ(i->unique_id,
+ GetDeviceIdFromPCMWaveInAudioInputStream(i->unique_id));
+ }
+}
+
+TEST_F(AudioInputDeviceTest, ConvertToWinXPDeviceId) {
+ if (!CanRunAudioTest())
+ return;
+
+ if (!SetMMDeviceEnumeration()) {
+ // Usage of MMDevice will fail on XP and lower.
+ LOG(WARNING) << "MM device enumeration is not supported.";
+ return;
+ }
+
+ AudioDeviceNames device_names;
+ audio_manager_->GetAudioInputDeviceNames(&device_names);
+ CheckDeviceNames(device_names);
+
+ for (AudioDeviceNames::iterator i = device_names.begin();
+ i != device_names.end(); ++i) {
+ std::string converted_id =
+ GetDeviceIdFromPCMWaveInAudioInputStream(i->unique_id);
+ if (i == device_names.begin()) {
+ // The first in the list is the default device ID, which should not be
+ // changed when passed to PCMWaveInAudioInputStream.
+ EXPECT_EQ(i->unique_id, converted_id);
+ } else {
+ // MMDevice-style device IDs should be converted to WaveIn-style device
+ // IDs.
+ EXPECT_NE(i->unique_id, converted_id);
+ }
+ }
+}
+
+#endif
+
+} // namespace media
diff --git a/chromium/media/audio/audio_input_ipc.cc b/chromium/media/audio/audio_input_ipc.cc
new file mode 100644
index 00000000000..69253b057a3
--- /dev/null
+++ b/chromium/media/audio/audio_input_ipc.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_input_ipc.h"
+
+namespace media {
+
+AudioInputIPCDelegate::~AudioInputIPCDelegate() {}
+
+AudioInputIPC::~AudioInputIPC() {}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_input_ipc.h b/chromium/media/audio/audio_input_ipc.h
new file mode 100644
index 00000000000..0e6f2c34c4c
--- /dev/null
+++ b/chromium/media/audio/audio_input_ipc.h
@@ -0,0 +1,88 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_INPUT_IPC_H_
+#define MEDIA_AUDIO_AUDIO_INPUT_IPC_H_
+
+#include "base/memory/shared_memory.h"
+#include "base/sync_socket.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Contains IPC notifications for the state of the server side
+// (AudioInputController) audio state changes and when an AudioInputController
+// has been created. Implemented by AudioInputDevice.
+class MEDIA_EXPORT AudioInputIPCDelegate {
+ public:
+ // Valid states for the input stream.
+ enum State {
+ kRecording,
+ kStopped,
+ kError
+ };
+
+ // Called when an AudioInputController has been created.
+ // The shared memory |handle| points to a memory section that's used to
+ // transfer data between the AudioInputDevice and AudioInputController
+ // objects. The implementation of OnStreamCreated takes ownership.
+ // The |socket_handle| is used by the AudioInputController to signal
+ // notifications that more data is available and can optionally provide
+ // parameter changes back. The AudioInputDevice must read from this socket
+ // and process the shared memory whenever data is read from the socket.
+ virtual void OnStreamCreated(base::SharedMemoryHandle handle,
+ base::SyncSocket::Handle socket_handle,
+ int length,
+ int total_segments) = 0;
+
+ // Called when state of an audio stream has changed.
+ virtual void OnStateChanged(State state) = 0;
+
+ // Called when the input stream volume has changed.
+ virtual void OnVolume(double volume) = 0;
+
+ // Called when the AudioInputIPC object is going away and/or when the
+ // IPC channel has been closed and no more IPC requests can be made.
+ // Implementations should delete their owned AudioInputIPC instance
+ // immediately.
+ virtual void OnIPCClosed() = 0;
+
+ protected:
+ virtual ~AudioInputIPCDelegate();
+};
+
+// Provides IPC functionality for an AudioInputIPCDelegate (e.g., an
+// AudioInputDevice). The implementation should asynchronously deliver the
+// messages to an AudioInputController object (or create one in the case of
+// CreateStream()), that may live in a separate process.
+class MEDIA_EXPORT AudioInputIPC {
+ public:
+ virtual ~AudioInputIPC();
+
+ // Sends a request to create an AudioInputController object in the peer
+ // process, and configures it to use the specified audio |params|. The
+  // |total_segments| indicates the number of equal-length segments in the
+  // shared memory buffer. Once the stream has been created, the implementation will
+ // notify |delegate| by calling OnStreamCreated().
+ virtual void CreateStream(AudioInputIPCDelegate* delegate,
+ int session_id,
+ const AudioParameters& params,
+ bool automatic_gain_control,
+ uint32 total_segments) = 0;
+
+ // Corresponds to a call to AudioInputController::Record() on the server side.
+ virtual void RecordStream() = 0;
+
+ // Sets the volume of the audio stream.
+ virtual void SetVolume(double volume) = 0;
+
+ // Closes the audio stream, which should shut down the corresponding
+ // AudioInputController in the peer process.
+ virtual void CloseStream() = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_INPUT_IPC_H_
diff --git a/chromium/media/audio/audio_input_unittest.cc b/chromium/media/audio/audio_input_unittest.cc
new file mode 100644
index 00000000000..8adb746ee86
--- /dev/null
+++ b/chromium/media/audio/audio_input_unittest.cc
@@ -0,0 +1,185 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/environment.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/platform_thread.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager_base.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const int kSamplingRate = 8000;
+static const int kSamplesPerPacket = kSamplingRate / 20;
+
+// This class allows us to find out if the callbacks are occurring as
+// expected and if any error has been reported.
+class TestInputCallback : public AudioInputStream::AudioInputCallback {
+ public:
+ explicit TestInputCallback(int max_data_bytes)
+ : callback_count_(0),
+ had_error_(0),
+ max_data_bytes_(max_data_bytes) {
+ }
+ virtual void OnData(AudioInputStream* stream,
+ const uint8* data,
+ uint32 size,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE {
+ ++callback_count_;
+ // Read the first byte to make sure memory is good.
+ if (size) {
+ ASSERT_LE(static_cast<int>(size), max_data_bytes_);
+ int value = data[0];
+ EXPECT_GE(value, 0);
+ }
+ }
+ virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
+ virtual void OnError(AudioInputStream* stream) OVERRIDE {
+ ++had_error_;
+ }
+ // Returns how many times OnData() has been called.
+ int callback_count() const {
+ return callback_count_;
+ }
+ // Returns how many times the OnError callback was called.
+ int had_error() const {
+ return had_error_;
+ }
+
+ private:
+ int callback_count_;
+ int had_error_;
+ int max_data_bytes_;
+};
+
+static bool CanRunAudioTests(AudioManager* audio_man) {
+ bool has_input = audio_man->HasAudioInputDevices();
+
+ if (!has_input)
+ LOG(WARNING) << "No input devices detected";
+
+ return has_input;
+}
+
+static AudioInputStream* CreateTestAudioInputStream(AudioManager* audio_man) {
+ AudioInputStream* ais = audio_man->MakeAudioInputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ kSamplingRate, 16, kSamplesPerPacket),
+ AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(NULL != ais);
+ return ais;
+}
+
+// Test that AudioInputStream rejects out of range parameters.
+TEST(AudioInputTest, SanityOnMakeParams) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!CanRunAudioTests(audio_man.get()))
+ return;
+
+ AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
+ EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_7_1, 8000, 16,
+ kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16,
+ kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80,
+ kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80,
+ 1000 * kSamplesPerPacket),
+ AudioManagerBase::kDefaultDeviceId));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16,
+ kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16,
+ kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, -16,
+ kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 16, -1024),
+ AudioManagerBase::kDefaultDeviceId));
+}
+
+// Test create and close of an AudioInputStream without recording audio.
+TEST(AudioInputTest, CreateAndClose) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!CanRunAudioTests(audio_man.get()))
+ return;
+ AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
+ ais->Close();
+}
+
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
+// This test is failing on ARM linux: http://crbug.com/238490
+#define MAYBE_OpenAndClose DISABLED_OpenAndClose
+#else
+#define MAYBE_OpenAndClose OpenAndClose
+#endif
+// Test create, open and close of an AudioInputStream without recording audio.
+TEST(AudioInputTest, MAYBE_OpenAndClose) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!CanRunAudioTests(audio_man.get()))
+ return;
+ AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
+ EXPECT_TRUE(ais->Open());
+ ais->Close();
+}
+
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
+// This test is failing on ARM linux: http://crbug.com/238490
+#define MAYBE_OpenStopAndClose DISABLED_OpenStopAndClose
+#else
+#define MAYBE_OpenStopAndClose OpenStopAndClose
+#endif
+// Test create, open, stop and close of an AudioInputStream without recording.
+TEST(AudioInputTest, MAYBE_OpenStopAndClose) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!CanRunAudioTests(audio_man.get()))
+ return;
+ AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
+ EXPECT_TRUE(ais->Open());
+ ais->Stop();
+ ais->Close();
+}
+
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
+// This test is failing on ARM linux: http://crbug.com/238490
+#define MAYBE_Record DISABLED_Record
+#else
+#define MAYBE_Record Record
+#endif
+// Test a normal recording sequence using an AudioInputStream.
+TEST(AudioInputTest, MAYBE_Record) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!CanRunAudioTests(audio_man.get()))
+ return;
+ base::MessageLoop message_loop(base::MessageLoop::TYPE_DEFAULT);
+ AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
+ EXPECT_TRUE(ais->Open());
+
+ TestInputCallback test_callback(kSamplesPerPacket * 4);
+ ais->Start(&test_callback);
+ // Verify at least 500ms worth of audio was recorded, after giving sufficient
+ // extra time.
+ message_loop.PostDelayedTask(
+ FROM_HERE,
+ base::MessageLoop::QuitClosure(),
+ base::TimeDelta::FromMilliseconds(690));
+ message_loop.Run();
+ EXPECT_GE(test_callback.callback_count(), 1);
+ EXPECT_FALSE(test_callback.had_error());
+
+ ais->Stop();
+ ais->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_input_volume_unittest.cc b/chromium/media/audio/audio_input_volume_unittest.cc
new file mode 100644
index 00000000000..570c045570e
--- /dev/null
+++ b/chromium/media/audio/audio_input_volume_unittest.cc
@@ -0,0 +1,185 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/win/core_audio_util_win.h"
+#endif
+
+namespace media {
+
+double GetVolumeAfterSetVolumeOnLinux(AudioInputStream* ais,
+ double target_volume) {
+  // SetVolume() is asynchronous on Linux, so we need to keep trying until
+  // the SetVolume() operation is done.
+ static const int kTimesToRun = 10;
+ double volume = 0.0;
+ for (int i = 0; i < kTimesToRun; ++i) {
+ volume = ais->GetVolume();
+ if (volume == target_volume)
+ break;
+
+ // Sleep 100ms to wait for the operation.
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
+ }
+
+ return volume;
+}
+
+class AudioInputVolumeTest : public ::testing::Test {
+ protected:
+ AudioInputVolumeTest()
+ : audio_manager_(AudioManager::Create())
+#if defined(OS_WIN)
+ , com_init_(base::win::ScopedCOMInitializer::kMTA)
+#endif
+ {
+ }
+
+ bool CanRunAudioTests() {
+#if defined(OS_WIN)
+ // TODO(henrika): add support for volume control on Windows XP as well.
+ // For now, we might as well signal false already here to avoid running
+ // these tests on Windows XP.
+ if (!CoreAudioUtil::IsSupported())
+ return false;
+#endif
+ if (!audio_manager_)
+ return false;
+
+ return audio_manager_->HasAudioInputDevices();
+ }
+
+ // Helper method which checks if the stream has volume support.
+ bool HasDeviceVolumeControl(AudioInputStream* stream) {
+ if (!stream)
+ return false;
+
+ return (stream->GetMaxVolume() != 0.0);
+ }
+
+ AudioInputStream* CreateAndOpenStream(const std::string& device_id) {
+ const AudioParameters& params =
+ audio_manager_->GetInputStreamParameters(device_id);
+ AudioInputStream* ais = audio_manager_->MakeAudioInputStream(
+ params, device_id);
+ EXPECT_TRUE(NULL != ais);
+
+#if defined(OS_LINUX) || defined(OS_OPENBSD)
+ // Some linux devices do not support our settings, we may fail to open
+ // those devices.
+ if (!ais->Open()) {
+ // Default device should always be able to be opened.
+ EXPECT_TRUE(AudioManagerBase::kDefaultDeviceId != device_id);
+ ais->Close();
+ ais = NULL;
+ }
+#elif defined(OS_WIN) || defined(OS_MACOSX)
+ EXPECT_TRUE(ais->Open());
+#endif
+
+ return ais;
+ }
+
+ scoped_ptr<AudioManager> audio_manager_;
+
+#if defined(OS_WIN)
+ base::win::ScopedCOMInitializer com_init_;
+#endif
+};
+
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
+// Currently failing on linux ARM bot: http://crbug.com/238490
+#define MAYBE_InputVolumeTest DISABLED_InputVolumeTest
+#else
+#define MAYBE_InputVolumeTest InputVolumeTest
+#endif
+
+TEST_F(AudioInputVolumeTest, MAYBE_InputVolumeTest) {
+ if (!CanRunAudioTests())
+ return;
+
+ // Retrieve a list of all available input devices.
+ AudioDeviceNames device_names;
+ audio_manager_->GetAudioInputDeviceNames(&device_names);
+ if (device_names.empty()) {
+ LOG(WARNING) << "Could not find any available input device";
+ return;
+ }
+
+ // Scan all available input devices and repeat the same test for all of them.
+ for (AudioDeviceNames::const_iterator it = device_names.begin();
+ it != device_names.end();
+ ++it) {
+ AudioInputStream* ais = CreateAndOpenStream(it->unique_id);
+ if (!ais) {
+ DLOG(WARNING) << "Failed to open stream for device " << it->unique_id;
+ continue;
+ }
+
+ if (!HasDeviceVolumeControl(ais)) {
+ DLOG(WARNING) << "Device: " << it->unique_id
+ << ", does not have volume control.";
+ ais->Close();
+ continue;
+ }
+
+ double max_volume = ais->GetMaxVolume();
+ EXPECT_GT(max_volume, 0.0);
+
+ // Store the current input-device volume level.
+ double original_volume = ais->GetVolume();
+ EXPECT_GE(original_volume, 0.0);
+#if defined(OS_WIN) || defined(OS_MACOSX)
+ // Note that |original_volume| can be higher than |max_volume| on Linux.
+ EXPECT_LE(original_volume, max_volume);
+#endif
+
+    // Set the volume to the maximum level.
+ ais->SetVolume(max_volume);
+ double current_volume = ais->GetVolume();
+ EXPECT_EQ(max_volume, current_volume);
+
+    // Set the volume to the minimum level (=0).
+ double new_volume = 0.0;
+ ais->SetVolume(new_volume);
+#if defined(OS_LINUX)
+ current_volume = GetVolumeAfterSetVolumeOnLinux(ais, new_volume);
+#else
+ current_volume = ais->GetVolume();
+#endif
+ EXPECT_EQ(new_volume, current_volume);
+
+ // Set the volume to the mid level (50% of max).
+ // Verify that the absolute error is small enough.
+ new_volume = max_volume / 2;
+ ais->SetVolume(new_volume);
+#if defined(OS_LINUX)
+ current_volume = GetVolumeAfterSetVolumeOnLinux(ais, new_volume);
+#else
+ current_volume = ais->GetVolume();
+#endif
+ EXPECT_LT(current_volume, max_volume);
+ EXPECT_GT(current_volume, 0);
+ EXPECT_NEAR(current_volume, new_volume, 0.25 * max_volume);
+
+ // Restores the volume to the original value.
+ ais->SetVolume(original_volume);
+ current_volume = ais->GetVolume();
+ EXPECT_EQ(original_volume, current_volume);
+
+ ais->Close();
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_io.h b/chromium/media/audio/audio_io.h
new file mode 100644
index 00000000000..473af0d512f
--- /dev/null
+++ b/chromium/media/audio/audio_io.h
@@ -0,0 +1,172 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_IO_H_
+#define MEDIA_AUDIO_AUDIO_IO_H_
+
+#include "base/basictypes.h"
+#include "media/audio/audio_buffers_state.h"
+#include "media/base/audio_bus.h"
+
+// Low-level audio output support. To make sound there are 3 objects involved:
+// - AudioSource : produces audio samples on a pull model. Implements
+// the AudioSourceCallback interface.
+// - AudioOutputStream : uses the AudioSource to render audio on a given
+// channel, format and sample frequency configuration. Data from the
+// AudioSource is delivered in a 'pull' model.
+// - AudioManager : factory for the AudioOutputStream objects, manager
+// of the hardware resources and mixer control.
+//
+// The number and configuration of AudioOutputStream does not need to match the
+// physically available hardware resources. For example you can have:
+//
+// MonoPCMSource1 --> MonoPCMStream1 --> | | --> audio left channel
+// StereoPCMSource -> StereoPCMStream -> | mixer |
+// MonoPCMSource2 --> MonoPCMStream2 --> | | --> audio right channel
+//
+// This facility's objective is mix and render audio with low overhead using
+// the OS basic audio support, abstracting as much as possible the
+// idiosyncrasies of each platform. Non-goals:
+// - Positional, 3d audio
+// - Dependence on non-default libraries such as DirectX 9, 10, XAudio
+// - Digital signal processing or effects
+// - Extra features if a specific hardware is installed (EAX, X-fi)
+//
+// The primary client of this facility is audio coming from several tabs.
+// Specifically for this case we avoid supporting complex formats such as MP3
+// or WMA. Complex format decoding should be done by the renderers.
+
+
+// Models an audio stream that gets rendered to the audio hardware output.
+// Because we support more audio streams than physically available channels
+// a given AudioOutputStream might or might not talk directly to hardware.
+// An audio stream allocates several buffers for audio data and calls
+// AudioSourceCallback::OnMoreData() periodically to fill these buffers,
+// as the data is written to the audio device. Size of each packet is determined
+// by |samples_per_packet| specified in AudioParameters when the stream is
+// created.
+
+namespace media {
+
+class MEDIA_EXPORT AudioOutputStream {
+ public:
+ // Audio sources must implement AudioSourceCallback. This interface will be
+ // called in a random thread which very likely is a high priority thread. Do
+ // not rely on using this thread TLS or make calls that alter the thread
+ // itself such as creating Windows or initializing COM.
+ class MEDIA_EXPORT AudioSourceCallback {
+ public:
+ // Provide more data by fully filling |dest|. The source will return
+ // the number of frames it filled. |buffers_state| contains current state
+ // of the buffers, and can be used by the source to calculate delay.
+ virtual int OnMoreData(AudioBus* dest,
+ AudioBuffersState buffers_state) = 0;
+
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) = 0;
+
+ // There was an error while playing a buffer. Audio source cannot be
+ // destroyed yet. No direct action needed by the AudioStream, but it is
+    // a good place to stop accumulating sound data since it is likely that
+ // playback will not continue.
+ virtual void OnError(AudioOutputStream* stream) = 0;
+
+ protected:
+ virtual ~AudioSourceCallback() {}
+ };
+
+ virtual ~AudioOutputStream() {}
+
+ // Open the stream. false is returned if the stream cannot be opened. Open()
+ // must always be followed by a call to Close() even if Open() fails.
+ virtual bool Open() = 0;
+
+ // Starts playing audio and generating AudioSourceCallback::OnMoreData().
+ // Since implementor of AudioOutputStream may have internal buffers, right
+ // after calling this method initial buffers are fetched.
+ //
+ // The output stream does not take ownership of this callback.
+ virtual void Start(AudioSourceCallback* callback) = 0;
+
+ // Stops playing audio. Effect might not be instantaneous as the hardware
+ // might have locked audio data that is processing.
+ virtual void Stop() = 0;
+
+ // Sets the relative volume, with range [0.0, 1.0] inclusive.
+ virtual void SetVolume(double volume) = 0;
+
+ // Gets the relative volume, with range [0.0, 1.0] inclusive.
+ virtual void GetVolume(double* volume) = 0;
+
+  // Close the stream. After calling this method, the object should not be
+  // used anymore.
+ virtual void Close() = 0;
+};
+
+// Models an audio sink receiving recorded audio from the audio driver.
+class MEDIA_EXPORT AudioInputStream {
+ public:
+ class MEDIA_EXPORT AudioInputCallback {
+ public:
+ // Called by the audio recorder when a full packet of audio data is
+ // available. This is called from a special audio thread and the
+ // implementation should return as soon as possible.
+ virtual void OnData(AudioInputStream* stream, const uint8* src,
+ uint32 size, uint32 hardware_delay_bytes,
+ double volume) = 0;
+
+ // The stream is done with this callback, the last call received by this
+ // audio sink.
+ virtual void OnClose(AudioInputStream* stream) = 0;
+
+ // There was an error while recording audio. The audio sink cannot be
+ // destroyed yet. No direct action needed by the AudioInputStream, but it
+    // is a good place to stop accumulating sound data since it is likely that
+ // recording will not continue.
+ virtual void OnError(AudioInputStream* stream) = 0;
+
+ protected:
+ virtual ~AudioInputCallback() {}
+ };
+
+ virtual ~AudioInputStream() {}
+
+  // Opens the stream and prepares it for recording. Call Start() to actually
+ // begin recording.
+ virtual bool Open() = 0;
+
+ // Starts recording audio and generating AudioInputCallback::OnData().
+ // The input stream does not take ownership of this callback.
+ virtual void Start(AudioInputCallback* callback) = 0;
+
+ // Stops recording audio. Effect might not be instantaneous as there could be
+ // pending audio callbacks in the queue which will be issued first before
+ // recording stops.
+ virtual void Stop() = 0;
+
+ // Close the stream. This also generates AudioInputCallback::OnClose(). This
+ // should be the last call made on this object.
+ virtual void Close() = 0;
+
+ // Returns the maximum microphone analog volume or 0.0 if device does not
+ // have volume control.
+ virtual double GetMaxVolume() = 0;
+
+ // Sets the microphone analog volume, with range [0, max_volume] inclusive.
+ virtual void SetVolume(double volume) = 0;
+
+ // Returns the microphone analog volume, with range [0, max_volume] inclusive.
+ virtual double GetVolume() = 0;
+
+ // Sets the Automatic Gain Control (AGC) state.
+ virtual void SetAutomaticGainControl(bool enabled) = 0;
+
+ // Returns the Automatic Gain Control (AGC) state.
+ virtual bool GetAutomaticGainControl() = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_IO_H_
diff --git a/chromium/media/audio/audio_low_latency_input_output_unittest.cc b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
new file mode 100644
index 00000000000..33729c45a04
--- /dev/null
+++ b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
@@ -0,0 +1,449 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/environment.h"
+#include "base/file_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
+#include "base/synchronization/lock.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/base/seekable_buffer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_LINUX) || defined(OS_OPENBSD)
+#include "media/audio/linux/audio_manager_linux.h"
+#elif defined(OS_MACOSX)
+#include "media/audio/mac/audio_manager_mac.h"
+#elif defined(OS_WIN)
+#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+#elif defined(OS_ANDROID)
+#include "media/audio/android/audio_manager_android.h"
+#endif
+
+namespace media {
+
+#if defined(OS_LINUX) || defined(OS_OPENBSD)
+typedef AudioManagerLinux AudioManagerAnyPlatform;
+#elif defined(OS_MACOSX)
+typedef AudioManagerMac AudioManagerAnyPlatform;
+#elif defined(OS_WIN)
+typedef AudioManagerWin AudioManagerAnyPlatform;
+#elif defined(OS_ANDROID)
+typedef AudioManagerAndroid AudioManagerAnyPlatform;
+#endif
+
+// Limits the number of delay measurements we can store in an array and
+// then write to file at end of the WASAPIAudioInputOutputFullDuplex test.
+static const size_t kMaxDelayMeasurements = 1000;
+
+// Name of the output text file. The output file will be stored in the
+// directory containing media_unittests.exe.
+// Example: \src\build\Debug\audio_delay_values_ms.txt.
+// See comments for the WASAPIAudioInputOutputFullDuplex test for more details
+// about the file format.
+static const char kDelayValuesFileName[] = "audio_delay_values_ms.txt";
+
+// Contains delay values which are reported during the full-duplex test.
+// Total delay = |buffer_delay_ms| + |input_delay_ms| + |output_delay_ms|.
+struct AudioDelayState {
+ AudioDelayState()
+ : delta_time_ms(0),
+ buffer_delay_ms(0),
+ input_delay_ms(0),
+ output_delay_ms(0) {
+ }
+
+ // Time in milliseconds since last delay report. Typical value is ~10 [ms].
+ int delta_time_ms;
+
+ // Size of internal sync buffer. Typical value is ~0 [ms].
+ int buffer_delay_ms;
+
+ // Reported capture/input delay. Typical value is ~10 [ms].
+ int input_delay_ms;
+
+ // Reported render/output delay. Typical value is ~40 [ms].
+ int output_delay_ms;
+};
+
+// This class mocks the platform specific audio manager and overrides
+// the GetMessageLoop() method to ensure that we can run our tests on
+// the main thread instead of the audio thread.
+class MockAudioManager : public AudioManagerAnyPlatform {
+ public:
+ MockAudioManager() {}
+ virtual ~MockAudioManager() {}
+
+ virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
+ return base::MessageLoop::current()->message_loop_proxy();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAudioManager);
+};
+
+// Test fixture class.
+class AudioLowLatencyInputOutputTest : public testing::Test {
+ protected:
+ AudioLowLatencyInputOutputTest() {}
+
+ virtual ~AudioLowLatencyInputOutputTest() {}
+
+ AudioManager* audio_manager() { return &mock_audio_manager_; }
+ base::MessageLoopForUI* message_loop() { return &message_loop_; }
+
+ // Convenience method which ensures that we are not running on the build
+ // bots and that at least one valid input and output device can be found.
+ bool CanRunAudioTests() {
+ bool input = audio_manager()->HasAudioInputDevices();
+ bool output = audio_manager()->HasAudioOutputDevices();
+ LOG_IF(WARNING, !input) << "No input device detected.";
+ LOG_IF(WARNING, !output) << "No output device detected.";
+ return input && output;
+ }
+
+ private:
+ base::MessageLoopForUI message_loop_;
+ MockAudioManager mock_audio_manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioLowLatencyInputOutputTest);
+};
+
+// This audio source/sink implementation should be used for manual tests
+// only since delay measurements are stored on an output text file.
+// All incoming/recorded audio packets are stored in an intermediate media
+// buffer which the renderer reads from when it needs audio for playout.
+// The total effect is that recorded audio is played out in loop back using
+// a sync buffer as temporary storage.
+class FullDuplexAudioSinkSource
+ : public AudioInputStream::AudioInputCallback,
+ public AudioOutputStream::AudioSourceCallback {
+ public:
+ FullDuplexAudioSinkSource(int sample_rate,
+ int samples_per_packet,
+ int channels)
+ : sample_rate_(sample_rate),
+ samples_per_packet_(samples_per_packet),
+ channels_(channels),
+ input_elements_to_write_(0),
+ output_elements_to_write_(0),
+ previous_write_time_(base::TimeTicks::Now()) {
+ // Size in bytes of each audio frame (4 bytes for 16-bit stereo PCM).
+ frame_size_ = (16 / 8) * channels_;
+
+ // Start with the smallest possible buffer size. It will be increased
+ // dynamically during the test if required.
+ buffer_.reset(
+ new media::SeekableBuffer(0, samples_per_packet_ * frame_size_));
+
+ frames_to_ms_ = static_cast<double>(1000.0 / sample_rate_);
+ delay_states_.reset(new AudioDelayState[kMaxDelayMeasurements]);
+ }
+
+ virtual ~FullDuplexAudioSinkSource() {
+ // Get complete file path to output file in the directory containing
+ // media_unittests.exe. Example: src/build/Debug/audio_delay_values_ms.txt.
+ base::FilePath file_name;
+ EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
+ file_name = file_name.AppendASCII(kDelayValuesFileName);
+
+ FILE* text_file = file_util::OpenFile(file_name, "wt");
+ DLOG_IF(ERROR, !text_file) << "Failed to open log file.";
+ LOG(INFO) << ">> Output file " << file_name.value() << " has been created.";
+
+ // Write the array which contains time-stamps, buffer size and
+ // audio delays values to a text file.
+ size_t elements_written = 0;
+ while (elements_written <
+ std::min(input_elements_to_write_, output_elements_to_write_)) {
+ const AudioDelayState state = delay_states_[elements_written];
+ fprintf(text_file, "%d %d %d %d\n",
+ state.delta_time_ms,
+ state.buffer_delay_ms,
+ state.input_delay_ms,
+ state.output_delay_ms);
+ ++elements_written;
+ }
+
+ file_util::CloseFile(text_file);
+ }
+
+ // AudioInputStream::AudioInputCallback.
+ virtual void OnData(AudioInputStream* stream,
+ const uint8* src, uint32 size,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE {
+ base::AutoLock lock(lock_);
+
+ // Update three components in the AudioDelayState for this recorded
+ // audio packet.
+ const base::TimeTicks now_time = base::TimeTicks::Now();
+ const int diff = (now_time - previous_write_time_).InMilliseconds();
+ previous_write_time_ = now_time;
+ if (input_elements_to_write_ < kMaxDelayMeasurements) {
+ delay_states_[input_elements_to_write_].delta_time_ms = diff;
+ delay_states_[input_elements_to_write_].buffer_delay_ms =
+ BytesToMilliseconds(buffer_->forward_bytes());
+ delay_states_[input_elements_to_write_].input_delay_ms =
+ BytesToMilliseconds(hardware_delay_bytes);
+ ++input_elements_to_write_;
+ }
+
+ // Store the captured audio packet in a seekable media buffer.
+ if (!buffer_->Append(src, size)) {
+ // An attempt to write outside the buffer limits has been made.
+ // Double the buffer capacity to ensure that we have a buffer large
+ // enough to handle the current sample test scenario.
+ buffer_->set_forward_capacity(2 * buffer_->forward_capacity());
+ buffer_->Clear();
+ }
+ }
+
+ virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
+ virtual void OnError(AudioInputStream* stream) OVERRIDE {}
+
+ // AudioOutputStream::AudioSourceCallback.
+ virtual int OnMoreData(AudioBus* audio_bus,
+ AudioBuffersState buffers_state) OVERRIDE {
+ base::AutoLock lock(lock_);
+
+ // Update one component in the AudioDelayState for the packet
+ // which is about to be played out.
+ if (output_elements_to_write_ < kMaxDelayMeasurements) {
+ int output_delay_bytes = buffers_state.hardware_delay_bytes;
+#if defined(OS_WIN)
+ // Special fix for Windows in combination with Wave where the
+ // pending bytes field of the audio buffer state is used to
+ // report the delay.
+ if (!CoreAudioUtil::IsSupported()) {
+ output_delay_bytes = buffers_state.pending_bytes;
+ }
+#endif
+ delay_states_[output_elements_to_write_].output_delay_ms =
+ BytesToMilliseconds(output_delay_bytes);
+ ++output_elements_to_write_;
+ }
+
+ int size;
+ const uint8* source;
+ // Read the data from the seekable media buffer which contains
+ // captured data at the same size and sample rate as the output side.
+ if (buffer_->GetCurrentChunk(&source, &size) && size > 0) {
+ EXPECT_EQ(channels_, audio_bus->channels());
+ size = std::min(audio_bus->frames() * frame_size_, size);
+ EXPECT_EQ(static_cast<size_t>(size) % sizeof(*audio_bus->channel(0)), 0U);
+ audio_bus->FromInterleaved(
+ source, size / frame_size_, frame_size_ / channels_);
+ buffer_->Seek(size);
+ return size / frame_size_;
+ }
+
+ return 0;
+ }
+
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) OVERRIDE {
+ NOTREACHED();
+ return 0;
+ }
+
+ virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
+
+ protected:
+ // Converts from bytes to milliseconds taking the sample rate and size
+ // of an audio frame into account.
+ int BytesToMilliseconds(uint32 delay_bytes) const {
+ return static_cast<int>((delay_bytes / frame_size_) * frames_to_ms_ + 0.5);
+ }
+
+ private:
+ base::Lock lock_;
+ scoped_ptr<media::SeekableBuffer> buffer_;
+ int sample_rate_;
+ int samples_per_packet_;
+ int channels_;
+ int frame_size_;
+ double frames_to_ms_;
+ scoped_ptr<AudioDelayState[]> delay_states_;
+ size_t input_elements_to_write_;
+ size_t output_elements_to_write_;
+ base::TimeTicks previous_write_time_;
+};
+
+class AudioInputStreamTraits {
+ public:
+ typedef AudioInputStream StreamType;
+
+ static AudioParameters GetDefaultAudioStreamParameters(
+ AudioManager* audio_manager) {
+ return audio_manager->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId);
+ }
+
+ static StreamType* CreateStream(AudioManager* audio_manager,
+ const AudioParameters& params) {
+ return audio_manager->MakeAudioInputStream(params,
+ AudioManagerBase::kDefaultDeviceId);
+ }
+};
+
+class AudioOutputStreamTraits {
+ public:
+ typedef AudioOutputStream StreamType;
+
+ static AudioParameters GetDefaultAudioStreamParameters(
+ AudioManager* audio_manager) {
+ return audio_manager->GetDefaultOutputStreamParameters();
+ }
+
+ static StreamType* CreateStream(AudioManager* audio_manager,
+ const AudioParameters& params) {
+ return audio_manager->MakeAudioOutputStream(params, std::string());
+ }
+};
+
+// Traits template holding a trait of StreamType. It encapsulates
+// AudioInputStream and AudioOutputStream stream types.
+template <typename StreamTraits>
+class StreamWrapper {
+ public:
+ typedef typename StreamTraits::StreamType StreamType;
+
+ explicit StreamWrapper(AudioManager* audio_manager)
+ :
+ audio_manager_(audio_manager),
+ format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
+#if defined(OS_ANDROID)
+ channel_layout_(CHANNEL_LAYOUT_MONO),
+#else
+ channel_layout_(CHANNEL_LAYOUT_STEREO),
+#endif
+ bits_per_sample_(16) {
+ // Use the preferred sample rate.
+ const AudioParameters& params =
+ StreamTraits::GetDefaultAudioStreamParameters(audio_manager_);
+ sample_rate_ = params.sample_rate();
+
+ // Use the preferred buffer size. Note that the input side uses the same
+ // size as the output side in this implementation.
+ samples_per_packet_ = params.frames_per_buffer();
+ }
+
+ virtual ~StreamWrapper() {}
+
+ // Creates an Audio[Input|Output]Stream stream object using default
+ // parameters.
+ StreamType* Create() {
+ return CreateStream();
+ }
+
+ int channels() const {
+ return ChannelLayoutToChannelCount(channel_layout_);
+ }
+ int bits_per_sample() const { return bits_per_sample_; }
+ int sample_rate() const { return sample_rate_; }
+ int samples_per_packet() const { return samples_per_packet_; }
+
+ private:
+ StreamType* CreateStream() {
+ StreamType* stream = StreamTraits::CreateStream(audio_manager_,
+ AudioParameters(format_, channel_layout_, sample_rate_,
+ bits_per_sample_, samples_per_packet_));
+ EXPECT_TRUE(stream);
+ return stream;
+ }
+
+ AudioManager* audio_manager_;
+ AudioParameters::Format format_;
+ ChannelLayout channel_layout_;
+ int bits_per_sample_;
+ int sample_rate_;
+ int samples_per_packet_;
+};
+
+typedef StreamWrapper<AudioInputStreamTraits> AudioInputStreamWrapper;
+typedef StreamWrapper<AudioOutputStreamTraits> AudioOutputStreamWrapper;
+
+// This test is intended for manual tests and should only be enabled
+// when it is required to make a real-time test of audio in full duplex and
+// at the same time create a text file which contains measured delay values.
+// The file can later be analyzed off line using e.g. MATLAB.
+// MATLAB example:
+// D=load('audio_delay_values_ms.txt');
+// x=cumsum(D(:,1));
+// plot(x, D(:,2), x, D(:,3), x, D(:,4), x, D(:,2)+D(:,3)+D(:,4));
+// axis([0, max(x), 0, max(D(:,2)+D(:,3)+D(:,4))+10]);
+// legend('buffer delay','input delay','output delay','total delay');
+// xlabel('time [msec]')
+// ylabel('delay [msec]')
+// title('Full-duplex audio delay measurement');
+TEST_F(AudioLowLatencyInputOutputTest, DISABLED_FullDuplexDelayMeasurement) {
+ if (!CanRunAudioTests())
+ return;
+
+ AudioInputStreamWrapper aisw(audio_manager());
+ AudioInputStream* ais = aisw.Create();
+ EXPECT_TRUE(ais);
+
+ AudioOutputStreamWrapper aosw(audio_manager());
+ AudioOutputStream* aos = aosw.Create();
+ EXPECT_TRUE(aos);
+
+ // This test only supports identical parameters in both directions.
+ // TODO(henrika): it is possible to cut delay here by using different
+ // buffer sizes for input and output.
+ if (aisw.sample_rate() != aosw.sample_rate() ||
+ aisw.samples_per_packet() != aosw.samples_per_packet() ||
+ aisw.channels()!= aosw.channels() ||
+ aisw.bits_per_sample() != aosw.bits_per_sample()) {
+ LOG(ERROR) << "This test requires symmetric input and output parameters. "
+ "Ensure that sample rate and number of channels are identical in "
+ "both directions";
+ aos->Close();
+ ais->Close();
+ return;
+ }
+
+ EXPECT_TRUE(ais->Open());
+ EXPECT_TRUE(aos->Open());
+
+ FullDuplexAudioSinkSource full_duplex(
+ aisw.sample_rate(), aisw.samples_per_packet(), aisw.channels());
+
+ LOG(INFO) << ">> You should now be able to hear yourself in loopback...";
+ DLOG(INFO) << " sample_rate : " << aisw.sample_rate();
+ DLOG(INFO) << " samples_per_packet: " << aisw.samples_per_packet();
+ DLOG(INFO) << " channels : " << aisw.channels();
+
+ ais->Start(&full_duplex);
+ aos->Start(&full_duplex);
+
+ // Wait for approximately 10 seconds. The user shall hear his own voice
+ // in loop back during this time. At the same time, delay recordings are
+ // performed and stored in the output text file.
+ message_loop()->PostDelayedTask(FROM_HERE,
+ base::MessageLoop::QuitClosure(), TestTimeouts::action_timeout());
+ message_loop()->Run();
+
+ aos->Stop();
+ ais->Stop();
+
+ // All Close() operations that run on the mocked audio thread,
+ // should be synchronous and not post additional close tasks to
+  // the mocked audio thread. Hence, there is no need to call
+ // message_loop()->RunUntilIdle() after the Close() methods.
+ aos->Close();
+ ais->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_manager.cc b/chromium/media/audio/audio_manager.cc
new file mode 100644
index 00000000000..3f49a45ad87
--- /dev/null
+++ b/chromium/media/audio/audio_manager.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_manager.h"
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+
+namespace media {
+namespace {
+AudioManager* g_last_created = NULL;
+}
+
+// Forward declaration of the platform specific AudioManager factory function.
+AudioManager* CreateAudioManager();
+
+AudioManager::AudioManager() {
+}
+
+AudioManager::~AudioManager() {
+ CHECK(g_last_created == NULL || g_last_created == this);
+ g_last_created = NULL;
+}
+
+// static
+AudioManager* AudioManager::Create() {
+ CHECK(g_last_created == NULL);
+ g_last_created = CreateAudioManager();
+ return g_last_created;
+}
+
+// static
+AudioManager* AudioManager::Get() {
+ return g_last_created;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_manager.h b/chromium/media/audio/audio_manager.h
new file mode 100644
index 00000000000..cc5b95c8197
--- /dev/null
+++ b/chromium/media/audio/audio_manager.h
@@ -0,0 +1,150 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_MANAGER_H_
+#define MEDIA_AUDIO_AUDIO_MANAGER_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/string16.h"
+#include "media/audio/audio_device_name.h"
+#include "media/audio/audio_parameters.h"
+
+namespace base {
+class MessageLoop;
+class MessageLoopProxy;
+}
+
+namespace media {
+
+class AudioInputStream;
+class AudioOutputStream;
+
+// Manages all audio resources. In particular it owns the AudioOutputStream
+// objects. Provides some convenience functions that avoid the need to provide
+// iterators over the existing streams.
+class MEDIA_EXPORT AudioManager {
+ public:
+ virtual ~AudioManager();
+
+ // Use to construct the audio manager.
+ // NOTE: There should only be one instance.
+ static AudioManager* Create();
+
+ // Returns the pointer to the last created instance, or NULL if not yet
+ // created. This is a utility method for the code outside of media directory,
+ // like src/chrome.
+ static AudioManager* Get();
+
+ // Returns true if the OS reports existence of audio devices. This does not
+ // guarantee that the existing devices support all formats and sample rates.
+ virtual bool HasAudioOutputDevices() = 0;
+
+ // Returns true if the OS reports existence of audio recording devices. This
+ // does not guarantee that the existing devices support all formats and
+ // sample rates.
+ virtual bool HasAudioInputDevices() = 0;
+
+ // Returns a human readable string for the model/make of the active audio
+ // input device for this computer.
+ virtual string16 GetAudioInputDeviceModel() = 0;
+
+ // Opens the platform default audio input settings UI.
+ // Note: This could invoke an external application/preferences pane, so
+ // ideally must not be called from the UI thread or other time sensitive
+ // threads to avoid blocking the rest of the application.
+ virtual void ShowAudioInputSettings() = 0;
+
+ // Appends a list of available input devices. It is not guaranteed that
+ // all the devices in the list support all formats and sample rates for
+ // recording.
+ virtual void GetAudioInputDeviceNames(AudioDeviceNames* device_names) = 0;
+
+ // Factory for all the supported stream formats. |params| defines parameters
+ // of the audio stream to be created.
+ //
+  // |params.samples_per_packet| is the requested buffer allocation which the
+ // audio source thinks it can usually fill without blocking. Internally two
+ // or three buffers are created, one will be locked for playback and one will
+ // be ready to be filled in the call to AudioSourceCallback::OnMoreData().
+ //
+ // Returns NULL if the combination of the parameters is not supported, or if
+ // we have reached some other platform specific limit.
+ //
+ // |params.format| can be set to AUDIO_PCM_LOW_LATENCY and that has two
+ // effects:
+  //  1- Instead of triple buffering, the audio will be double buffered.
+ // 2- A low latency driver or alternative audio subsystem will be used when
+ // available.
+ //
+ // Do not free the returned AudioOutputStream. It is owned by AudioManager.
+ virtual AudioOutputStream* MakeAudioOutputStream(
+ const AudioParameters& params, const std::string& input_device_id) = 0;
+
+ // Creates new audio output proxy. A proxy implements
+ // AudioOutputStream interface, but unlike regular output stream
+ // created with MakeAudioOutputStream() it opens device only when a
+ // sound is actually playing.
+ virtual AudioOutputStream* MakeAudioOutputStreamProxy(
+ const AudioParameters& params, const std::string& input_device_id) = 0;
+
+ // Factory to create audio recording streams.
+ // |channels| can be 1 or 2.
+ // |sample_rate| is in hertz and can be any value supported by the platform.
+ // |bits_per_sample| can be any value supported by the platform.
+  // |samples_per_packet| is a frame count and can be 0 to |sample_rate|,
+ // with 0 suggesting that the implementation use a default value for that
+ // platform.
+ // Returns NULL if the combination of the parameters is not supported, or if
+ // we have reached some other platform specific limit.
+ //
+ // Do not free the returned AudioInputStream. It is owned by AudioManager.
+ // When you are done with it, call |Stop()| and |Close()| to release it.
+ virtual AudioInputStream* MakeAudioInputStream(
+ const AudioParameters& params, const std::string& device_id) = 0;
+
+ // Returns message loop used for audio IO.
+ virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() = 0;
+
+ // Heavyweight tasks should use GetWorkerLoop() instead of GetMessageLoop().
+ // On most platforms they are the same, but some share the UI loop with the
+ // audio IO loop.
+ virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() = 0;
+
+ // Allows clients to listen for device state changes; e.g. preferred sample
+ // rate or channel layout changes. The typical response to receiving this
+ // callback is to recreate the stream.
+ class AudioDeviceListener {
+ public:
+ virtual void OnDeviceChange() = 0;
+ };
+
+ virtual void AddOutputDeviceChangeListener(AudioDeviceListener* listener) = 0;
+ virtual void RemoveOutputDeviceChangeListener(
+ AudioDeviceListener* listener) = 0;
+
+ // Returns the default output hardware audio parameters for opening output
+ // streams. It is a convenience interface to
+ // AudioManagerBase::GetPreferredOutputStreamParameters and each AudioManager
+ // does not need their own implementation to this interface.
+ virtual AudioParameters GetDefaultOutputStreamParameters() = 0;
+
+ // Returns the input hardware audio parameters of the specific device
+ // for opening input streams. Each AudioManager needs to implement their own
+ // version of this interface.
+ virtual AudioParameters GetInputStreamParameters(
+ const std::string& device_id) = 0;
+
+ protected:
+ AudioManager();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioManager);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_MANAGER_H_
diff --git a/chromium/media/audio/audio_manager_base.cc b/chromium/media/audio/audio_manager_base.cc
new file mode 100644
index 00000000000..db77f004e38
--- /dev/null
+++ b/chromium/media/audio/audio_manager_base.cc
@@ -0,0 +1,391 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_manager_base.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/command_line.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "media/audio/audio_output_dispatcher_impl.h"
+#include "media/audio/audio_output_proxy.h"
+#include "media/audio/audio_output_resampler.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/fake_audio_input_stream.h"
+#include "media/audio/fake_audio_output_stream.h"
+#include "media/base/media_switches.h"
+
+namespace media {
+
+// Idle timeout (seconds) used to build the close-delay handed to the output
+// dispatchers in MakeAudioOutputStreamProxy().
+static const int kStreamCloseDelaySeconds = 5;
+
+// Default maximum number of output streams that can be open simultaneously
+// for all platforms.
+static const int kDefaultMaxOutputStreams = 16;
+
+// Default maximum number of input streams that can be open simultaneously
+// for all platforms.
+static const int kDefaultMaxInputStreams = 16;
+
+// Input stream requests asking for more channels than this are rejected by
+// MakeAudioInputStream().
+static const int kMaxInputChannels = 2;
+
+// Definitions for the generic "default" device name/id constants declared in
+// audio_manager_base.h.
+const char AudioManagerBase::kDefaultDeviceName[] = "Default";
+const char AudioManagerBase::kDefaultDeviceId[] = "default";
+
+// Pairs the values that identify an output dispatcher (input params, output
+// params, input device id) with the dispatcher instance cached for them in
+// |output_dispatchers_|.
+struct AudioManagerBase::DispatcherParams {
+  DispatcherParams(const AudioParameters& input,
+                   const AudioParameters& output,
+                   const std::string& device_id)
+      : input_params(input),
+        output_params(output),
+        input_device_id(device_id) {}
+  ~DispatcherParams() {}
+
+  const AudioParameters input_params;
+  const AudioParameters output_params;
+  const std::string input_device_id;
+  // Filled in after construction, once the dispatcher has been created (see
+  // MakeAudioOutputStreamProxy()).
+  scoped_refptr<AudioOutputDispatcher> dispatcher;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DispatcherParams);
+};
+
+// Predicate used with std::find_if to locate an existing DispatcherParams
+// entry in |output_dispatchers_| that matches a requested one.
+class AudioManagerBase::CompareByParams {
+ public:
+  explicit CompareByParams(const DispatcherParams* dispatcher)
+      : dispatcher_(dispatcher) {}
+  bool operator()(DispatcherParams* dispatcher_in) const {
+    // We will reuse the existing dispatcher when:
+    // 1) Unified IO is not used, input_params and output_params of the
+    //    existing dispatcher are the same as the requested dispatcher.
+    // 2) Unified IO is used, input_params, output_params and input_device_id
+    //    of the existing dispatcher are the same as the request dispatcher.
+    return (dispatcher_->input_params == dispatcher_in->input_params &&
+            dispatcher_->output_params == dispatcher_in->output_params &&
+            (!dispatcher_->input_params.input_channels() ||
+             dispatcher_->input_device_id == dispatcher_in->input_device_id));
+  }
+
+ private:
+  // Not owned; the candidate parameters being searched for.
+  const DispatcherParams* dispatcher_;
+};
+
+// Creates the dedicated audio thread and selects the message loop used for
+// audio tasks. On Mac OS X the shared UI loop may be used instead of the
+// dedicated thread's loop (see below).
+AudioManagerBase::AudioManagerBase()
+    : max_num_output_streams_(kDefaultMaxOutputStreams),
+      max_num_input_streams_(kDefaultMaxInputStreams),
+      num_output_streams_(0),
+      num_input_streams_(0),
+      // TODO(dalecurtis): Switch this to an ObserverListThreadSafe, so we don't
+      // block the UI thread when swapping devices.
+      output_listeners_(
+          ObserverList<AudioDeviceListener>::NOTIFY_EXISTING_ONLY),
+      audio_thread_(new base::Thread("AudioThread")) {
+#if defined(OS_WIN)
+  // Initialize COM on the audio thread (multi-threaded apartment).
+  audio_thread_->init_com_with_mta(true);
+#elif defined(OS_MACOSX)
+  // CoreAudio calls must occur on the main thread of the process, which in our
+  // case is sadly the browser UI thread. Failure to execute calls on the right
+  // thread leads to crashes and odd behavior. See http://crbug.com/158170.
+  // TODO(dalecurtis): We should require the message loop to be passed in.
+  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+  if (!cmd_line->HasSwitch(switches::kDisableMainThreadAudio) &&
+      base::MessageLoopProxy::current().get() &&
+      base::MessageLoop::current()->IsType(base::MessageLoop::TYPE_UI)) {
+    message_loop_ = base::MessageLoopProxy::current();
+    return;
+  }
+#endif
+
+  // Start the dedicated thread now and use its loop for all audio tasks.
+  CHECK(audio_thread_->Start());
+  message_loop_ = audio_thread_->message_loop_proxy();
+}
+
+AudioManagerBase::~AudioManagerBase() {
+  // The platform specific AudioManager implementation must have already
+  // stopped the audio thread. Otherwise, we may destroy audio streams before
+  // stopping the thread, resulting an unexpected behavior.
+  // This way we make sure activities of the audio streams are all stopped
+  // before we destroy them.
+  // Shutdown() resets |audio_thread_|, so a non-NULL value here means it was
+  // never called.
+  CHECK(!audio_thread_.get());
+  // All the output streams should have been deleted.
+  DCHECK_EQ(0, num_output_streams_);
+  // All the input streams should have been deleted.
+  DCHECK_EQ(0, num_input_streams_);
+}
+
+// Default implementation: no input device model string is available.
+string16 AudioManagerBase::GetAudioInputDeviceModel() {
+  return string16();
+}
+
+scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetMessageLoop() {
+  return message_loop_;
+}
+
+// May differ from GetMessageLoop() when the constructor chose to share the UI
+// loop (OS X); the worker loop always belongs to the dedicated audio thread.
+scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetWorkerLoop() {
+  // Lazily start the worker thread.
+  if (!audio_thread_->IsRunning())
+    CHECK(audio_thread_->Start());
+
+  return audio_thread_->message_loop_proxy();
+}
+
+// Creates a platform output stream for the requested format, enforcing the
+// open-stream limit. Returns NULL for invalid parameters, when the limit is
+// reached, or for an unsupported format.
+AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
+    const AudioParameters& params,
+    const std::string& input_device_id) {
+  // TODO(miu): Fix ~50 call points across several unit test modules to call
+  // this method on the audio thread, then uncomment the following:
+  // DCHECK(message_loop_->BelongsToCurrentThread());
+
+  if (!params.IsValid()) {
+    DLOG(ERROR) << "Audio parameters are invalid";
+    return NULL;
+  }
+
+  // Limit the number of audio streams opened. This is to prevent using
+  // excessive resources for a large number of audio streams. More
+  // importantly it prevents instability on certain systems.
+  // See bug: http://crbug.com/30242.
+  if (num_output_streams_ >= max_num_output_streams_) {
+    DLOG(ERROR) << "Number of opened output audio streams "
+                << num_output_streams_
+                << " exceed the max allowed number "
+                << max_num_output_streams_;
+    return NULL;
+  }
+
+  // Dispatch on format to the platform-specific factory methods.
+  AudioOutputStream* stream;
+  switch (params.format()) {
+    case AudioParameters::AUDIO_PCM_LINEAR:
+      stream = MakeLinearOutputStream(params);
+      break;
+    case AudioParameters::AUDIO_PCM_LOW_LATENCY:
+      stream = MakeLowLatencyOutputStream(params, input_device_id);
+      break;
+    case AudioParameters::AUDIO_FAKE:
+      stream = FakeAudioOutputStream::MakeFakeStream(this, params);
+      break;
+    default:
+      stream = NULL;
+      break;
+  }
+
+  if (stream) {
+    ++num_output_streams_;
+  }
+
+  return stream;
+}
+
+// Creates a platform input stream for the requested format. Returns NULL for
+// invalid parameters (including more than kMaxInputChannels channels or an
+// empty device id), when the open-stream limit is reached, or for an
+// unsupported format.
+AudioInputStream* AudioManagerBase::MakeAudioInputStream(
+    const AudioParameters& params, const std::string& device_id) {
+  // TODO(miu): Fix ~20 call points across several unit test modules to call
+  // this method on the audio thread, then uncomment the following:
+  // DCHECK(message_loop_->BelongsToCurrentThread());
+
+  if (!params.IsValid() || (params.channels() > kMaxInputChannels) ||
+      device_id.empty()) {
+    DLOG(ERROR) << "Audio parameters are invalid for device " << device_id;
+    return NULL;
+  }
+
+  if (num_input_streams_ >= max_num_input_streams_) {
+    DLOG(ERROR) << "Number of opened input audio streams "
+                << num_input_streams_
+                << " exceed the max allowed number " << max_num_input_streams_;
+    return NULL;
+  }
+
+  // Dispatch on format to the platform-specific factory methods.
+  AudioInputStream* stream;
+  switch (params.format()) {
+    case AudioParameters::AUDIO_PCM_LINEAR:
+      stream = MakeLinearInputStream(params, device_id);
+      break;
+    case AudioParameters::AUDIO_PCM_LOW_LATENCY:
+      stream = MakeLowLatencyInputStream(params, device_id);
+      break;
+    case AudioParameters::AUDIO_FAKE:
+      stream = FakeAudioInputStream::MakeFakeStream(this, params);
+      break;
+    default:
+      stream = NULL;
+      break;
+  }
+
+  if (stream) {
+    ++num_input_streams_;
+  }
+
+  return stream;
+}
+
+// Returns an AudioOutputProxy backed by a (possibly shared) dispatcher.
+// Dispatchers are cached in |output_dispatchers_| and reused when an entry
+// with matching parameters already exists (see CompareByParams). Must be
+// called on the audio thread.
+AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
+    const AudioParameters& params, const std::string& input_device_id) {
+#if defined(OS_IOS)
+  // IOS implements audio input only.
+  NOTIMPLEMENTED();
+  return NULL;
+#else
+  DCHECK(message_loop_->BelongsToCurrentThread());
+
+  // If we're not using AudioOutputResampler our output parameters are the same
+  // as our input parameters.
+  AudioParameters output_params = params;
+  if (params.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
+    output_params = GetPreferredOutputStreamParameters(params);
+
+    // Ensure we only pass on valid output parameters.
+    if (!output_params.IsValid()) {
+      // We've received invalid audio output parameters, so switch to a mock
+      // output device based on the input parameters. This may happen if the OS
+      // provided us junk values for the hardware configuration.
+      LOG(ERROR) << "Invalid audio output parameters received; using fake "
+                 << "audio path. Channels: " << output_params.channels() << ", "
+                 << "Sample Rate: " << output_params.sample_rate() << ", "
+                 << "Bits Per Sample: " << output_params.bits_per_sample()
+                 << ", Frames Per Buffer: "
+                 << output_params.frames_per_buffer();
+
+      // Tell the AudioManager to create a fake output device.
+      output_params = AudioParameters(
+          AudioParameters::AUDIO_FAKE, params.channel_layout(),
+          params.sample_rate(), params.bits_per_sample(),
+          params.frames_per_buffer());
+    }
+  }
+
+  DispatcherParams* dispatcher_params =
+      new DispatcherParams(params, output_params, input_device_id);
+
+  // Reuse an existing dispatcher with identical parameters if there is one.
+  AudioOutputDispatchers::iterator it =
+      std::find_if(output_dispatchers_.begin(), output_dispatchers_.end(),
+                   CompareByParams(dispatcher_params));
+  if (it != output_dispatchers_.end()) {
+    delete dispatcher_params;
+    return new AudioOutputProxy((*it)->dispatcher.get());
+  }
+
+  // No match: create a new dispatcher. Low-latency/linear formats go through
+  // the resampler; the fake format uses the plain dispatcher.
+  const base::TimeDelta kCloseDelay =
+      base::TimeDelta::FromSeconds(kStreamCloseDelaySeconds);
+  scoped_refptr<AudioOutputDispatcher> dispatcher;
+  if (output_params.format() != AudioParameters::AUDIO_FAKE) {
+    dispatcher = new AudioOutputResampler(this, params, output_params,
+                                          input_device_id, kCloseDelay);
+  } else {
+    dispatcher = new AudioOutputDispatcherImpl(this, output_params,
+                                               input_device_id, kCloseDelay);
+  }
+
+  dispatcher_params->dispatcher = dispatcher;
+  output_dispatchers_.push_back(dispatcher_params);
+  return new AudioOutputProxy(dispatcher.get());
+#endif  // defined(OS_IOS)
+}
+
+// No-op by default; platform AudioManagers may override.
+void AudioManagerBase::ShowAudioInputSettings() {
+}
+
+// No-op by default; platform AudioManagers that can enumerate devices
+// override this and fill in |device_names|.
+void AudioManagerBase::GetAudioInputDeviceNames(
+    media::AudioDeviceNames* device_names) {
+}
+
+// Decrements the open-stream count and destroys the stream.
+void AudioManagerBase::ReleaseOutputStream(AudioOutputStream* stream) {
+  DCHECK(stream);
+  // TODO(xians) : Have a clearer destruction path for the AudioOutputStream.
+  // For example, pass the ownership to AudioManager so it can delete the
+  // streams.
+  --num_output_streams_;
+  delete stream;
+}
+
+// Decrements the open-stream count and destroys the stream.
+void AudioManagerBase::ReleaseInputStream(AudioInputStream* stream) {
+  DCHECK(stream);
+  // TODO(xians) : Have a clearer destruction path for the AudioInputStream.
+  --num_input_streams_;
+  delete stream;
+}
+
+// Idempotent: later calls find |audio_thread_| already NULL and return early.
+void AudioManagerBase::Shutdown() {
+  // To avoid running into deadlocks while we stop the thread, shut it down
+  // via a local variable while not holding the audio thread lock.
+  scoped_ptr<base::Thread> audio_thread;
+  {
+    base::AutoLock lock(audio_thread_lock_);
+    audio_thread_.swap(audio_thread);
+  }
+
+  if (!audio_thread)
+    return;
+
+  // Only true when we're sharing the UI message loop with the browser. The UI
+  // loop is no longer running at this time and browser destruction is imminent.
+  if (message_loop_->BelongsToCurrentThread()) {
+    ShutdownOnAudioThread();
+  } else {
+    message_loop_->PostTask(FROM_HERE, base::Bind(
+        &AudioManagerBase::ShutdownOnAudioThread, base::Unretained(this)));
+  }
+
+  // Stop() will wait for any posted messages to be processed first.
+  audio_thread->Stop();
+}
+
+// Shuts down and releases every cached output dispatcher.
+void AudioManagerBase::ShutdownOnAudioThread() {
+// IOS implements audio input only.
+#if defined(OS_IOS)
+  return;
+#else
+  // This should always be running on the audio thread, but since we've cleared
+  // the audio_thread_ member pointer when we get here, we can't verify exactly
+  // what thread we're running on. The method is not public though and only
+  // called from one place, so we'll leave it at that.
+  AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
+  for (; it != output_dispatchers_.end(); ++it) {
+    scoped_refptr<AudioOutputDispatcher>& dispatcher = (*it)->dispatcher;
+    if (dispatcher.get()) {
+      dispatcher->Shutdown();
+      // All AudioOutputProxies must have been freed before Shutdown is called.
+      // If they still exist, things will go bad. They have direct pointers to
+      // both physical audio stream objects that belong to the dispatcher as
+      // well as the message loop of the audio thread that will soon go away.
+      // So, better crash now than later.
+      DCHECK(dispatcher->HasOneRef()) << "AudioOutputProxies are still alive";
+      dispatcher = NULL;
+    }
+  }
+
+  output_dispatchers_.clear();
+#endif  // defined(OS_IOS)
+}
+
+// Listener (un)registration and notification must all happen on the audio
+// manager's message loop (enforced by the DCHECKs below).
+void AudioManagerBase::AddOutputDeviceChangeListener(
+    AudioDeviceListener* listener) {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  output_listeners_.AddObserver(listener);
+}
+
+void AudioManagerBase::RemoveOutputDeviceChangeListener(
+    AudioDeviceListener* listener) {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  output_listeners_.RemoveObserver(listener);
+}
+
+// Invokes OnDeviceChange() on every registered output listener.
+void AudioManagerBase::NotifyAllOutputDeviceChangeListeners() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  DVLOG(1) << "Firing OnDeviceChange() notifications.";
+  FOR_EACH_OBSERVER(AudioDeviceListener, output_listeners_, OnDeviceChange());
+}
+
+// Convenience wrapper: asks the platform for its preferred output parameters
+// given no (invalid) input parameters.
+AudioParameters AudioManagerBase::GetDefaultOutputStreamParameters() {
+  return GetPreferredOutputStreamParameters(AudioParameters());
+}
+
+// Base implementation is NOTREACHED(); platforms that support audio input
+// must override this.
+AudioParameters AudioManagerBase::GetInputStreamParameters(
+    const std::string& device_id) {
+  NOTREACHED();
+  return AudioParameters();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_manager_base.h b/chromium/media/audio/audio_manager_base.h
new file mode 100644
index 00000000000..8b34d9fcf94
--- /dev/null
+++ b/chromium/media/audio/audio_manager_base.h
@@ -0,0 +1,168 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_MANAGER_BASE_H_
+#define MEDIA_AUDIO_AUDIO_MANAGER_BASE_H_
+
+#include <string>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "base/observer_list.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_manager.h"
+
+#include "media/audio/audio_output_dispatcher.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#endif
+
+namespace base {
+class Thread;
+}
+
+namespace media {
+
+class AudioOutputDispatcher;
+
+// AudioManagerBase provides AudioManager functions common for all platforms.
+// AudioManagerBase provides AudioManager functions common for all platforms.
+class MEDIA_EXPORT AudioManagerBase : public AudioManager {
+ public:
+  // Name of the generic "default" device.
+  static const char kDefaultDeviceName[];
+  // Unique Id of the generic "default" device.
+  static const char kDefaultDeviceId[];
+
+  virtual ~AudioManagerBase();
+
+  virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
+  virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() OVERRIDE;
+
+  virtual string16 GetAudioInputDeviceModel() OVERRIDE;
+
+  virtual void ShowAudioInputSettings() OVERRIDE;
+
+  virtual void GetAudioInputDeviceNames(
+      media::AudioDeviceNames* device_names) OVERRIDE;
+
+  virtual AudioOutputStream* MakeAudioOutputStream(
+      const AudioParameters& params,
+      const std::string& input_device_id) OVERRIDE;
+
+  virtual AudioInputStream* MakeAudioInputStream(
+      const AudioParameters& params, const std::string& device_id) OVERRIDE;
+
+  virtual AudioOutputStream* MakeAudioOutputStreamProxy(
+      const AudioParameters& params,
+      const std::string& input_device_id) OVERRIDE;
+
+  // Called internally by the audio stream when it has been closed.
+  virtual void ReleaseOutputStream(AudioOutputStream* stream);
+  virtual void ReleaseInputStream(AudioInputStream* stream);
+
+  // Creates the output stream for the |AUDIO_PCM_LINEAR| format. The legacy
+  // name is also from |AUDIO_PCM_LINEAR|.
+  virtual AudioOutputStream* MakeLinearOutputStream(
+      const AudioParameters& params) = 0;
+
+  // Creates the output stream for the |AUDIO_PCM_LOW_LATENCY| format.
+  // |input_device_id| is used by unified IO to open the correct input device.
+  virtual AudioOutputStream* MakeLowLatencyOutputStream(
+      const AudioParameters& params, const std::string& input_device_id) = 0;
+
+  // Creates the input stream for the |AUDIO_PCM_LINEAR| format. The legacy
+  // name is also from |AUDIO_PCM_LINEAR|.
+  virtual AudioInputStream* MakeLinearInputStream(
+      const AudioParameters& params, const std::string& device_id) = 0;
+
+  // Creates the input stream for the |AUDIO_PCM_LOW_LATENCY| format.
+  virtual AudioInputStream* MakeLowLatencyInputStream(
+      const AudioParameters& params, const std::string& device_id) = 0;
+
+  // Listeners will be notified on the AudioManager::GetMessageLoop() loop.
+  virtual void AddOutputDeviceChangeListener(
+      AudioDeviceListener* listener) OVERRIDE;
+  virtual void RemoveOutputDeviceChangeListener(
+      AudioDeviceListener* listener) OVERRIDE;
+
+  virtual AudioParameters GetDefaultOutputStreamParameters() OVERRIDE;
+  virtual AudioParameters GetInputStreamParameters(
+      const std::string& device_id) OVERRIDE;
+
+ protected:
+  AudioManagerBase();
+
+
+  // Shuts down the audio thread and releases all the audio output dispatchers
+  // on the audio thread. All audio streams should be freed before Shutdown()
+  // is called. This must be called in the destructor of every AudioManagerBase
+  // implementation.
+  void Shutdown();
+
+  void SetMaxOutputStreamsAllowed(int max) { max_num_output_streams_ = max; }
+
+  // Called by each platform specific AudioManager to notify output state change
+  // listeners that a state change has occurred. Must be called from the audio
+  // thread.
+  void NotifyAllOutputDeviceChangeListeners();
+
+  // Returns the preferred hardware audio output parameters for opening output
+  // streams. If the users inject a valid |input_params|, each AudioManager
+  // will decide if they should return the values from |input_params| or the
+  // default hardware values. If the |input_params| is invalid, it will return
+  // the default hardware audio parameters.
+  virtual AudioParameters GetPreferredOutputStreamParameters(
+      const AudioParameters& input_params) = 0;
+
+  // Get number of input or output streams.
+  int input_stream_count() { return num_input_streams_; }
+  int output_stream_count() { return num_output_streams_; }
+
+ private:
+  struct DispatcherParams;
+  typedef ScopedVector<DispatcherParams> AudioOutputDispatchers;
+
+  class CompareByParams;
+
+  // Called by Shutdown().
+  void ShutdownOnAudioThread();
+
+  // Max number of open output streams, modified by
+  // SetMaxOutputStreamsAllowed().
+  int max_num_output_streams_;
+
+  // Max number of open input streams.
+  int max_num_input_streams_;
+
+  // Number of currently open output streams.
+  int num_output_streams_;
+
+  // Number of currently open input streams.
+  int num_input_streams_;
+
+  // Track output state change listeners.
+  ObserverList<AudioDeviceListener> output_listeners_;
+
+  // Thread used to interact with audio streams created by this audio manager.
+  scoped_ptr<base::Thread> audio_thread_;
+  // Guards |audio_thread_| against concurrent access during Shutdown().
+  mutable base::Lock audio_thread_lock_;
+
+  // The message loop of the audio thread this object runs on. Used for internal
+  // tasks which run on the audio thread even after Shutdown() has been started
+  // and GetMessageLoop() starts returning NULL.
+  scoped_refptr<base::MessageLoopProxy> message_loop_;
+
+  // Map of cached AudioOutputDispatcher instances. Must only be touched
+  // from the audio thread (no locking).
+  AudioOutputDispatchers output_dispatchers_;
+
+  DISALLOW_COPY_AND_ASSIGN(AudioManagerBase);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_MANAGER_BASE_H_
diff --git a/chromium/media/audio/audio_output_controller.cc b/chromium/media/audio/audio_output_controller.cc
new file mode 100644
index 00000000000..f7f4cf8240b
--- /dev/null
+++ b/chromium/media/audio/audio_output_controller.cc
@@ -0,0 +1,399 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_output_controller.h"
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/shared_memory_util.h"
+#include "media/base/scoped_histogram_timer.h"
+
+using base::Time;
+using base::TimeDelta;
+
+namespace media {
+
+// Time constant for AudioPowerMonitor. See AudioPowerMonitor ctor comments for
+// semantics. This value was arbitrarily chosen, but seems to work well.
+static const int kPowerMeasurementTimeConstantMillis = 10;
+
+// Desired frequency of calls to EventHandler::OnPowerMeasured() for reporting
+// power levels in the audio signal.
+static const int kPowerMeasurementsPerSecond = 30;
+
+// Polling-related constants.
+// Definitions for the static members declared in audio_output_controller.h.
+const int AudioOutputController::kPollNumAttempts = 3;
+const int AudioOutputController::kPollPauseInMilliseconds = 3;
+
+// Construction is lightweight; the underlying output stream is created later
+// by DoCreate() on the audio manager's message loop.
+AudioOutputController::AudioOutputController(AudioManager* audio_manager,
+                                             EventHandler* handler,
+                                             const AudioParameters& params,
+                                             const std::string& input_device_id,
+                                             SyncReader* sync_reader)
+    : audio_manager_(audio_manager),
+      params_(params),
+      handler_(handler),
+      input_device_id_(input_device_id),
+      stream_(NULL),
+      diverting_to_stream_(NULL),
+      volume_(1.0),
+      state_(kEmpty),
+      num_allowed_io_(0),
+      sync_reader_(sync_reader),
+      message_loop_(audio_manager->GetMessageLoop()),
+      number_polling_attempts_left_(0),
+      power_monitor_(
+          params.sample_rate(),
+          TimeDelta::FromMilliseconds(kPowerMeasurementTimeConstantMillis)) {
+  DCHECK(audio_manager);
+  DCHECK(handler_);
+  DCHECK(sync_reader_);
+  DCHECK(message_loop_.get());
+}
+
+AudioOutputController::~AudioOutputController() {
+  // Close() must have completed (state kClosed) before the last reference
+  // goes away.
+  DCHECK_EQ(kClosed, state_);
+}
+
+// Factory: constructs a controller and posts DoCreate() to the audio
+// manager's message loop; returns NULL if |params| is invalid or
+// |audio_manager| is NULL.
+// static
+scoped_refptr<AudioOutputController> AudioOutputController::Create(
+    AudioManager* audio_manager,
+    EventHandler* event_handler,
+    const AudioParameters& params,
+    const std::string& input_device_id,
+    SyncReader* sync_reader) {
+  DCHECK(audio_manager);
+  DCHECK(sync_reader);
+
+  if (!params.IsValid() || !audio_manager)
+    return NULL;
+
+  scoped_refptr<AudioOutputController> controller(new AudioOutputController(
+      audio_manager, event_handler, params, input_device_id, sync_reader));
+  controller->message_loop_->PostTask(FROM_HERE, base::Bind(
+      &AudioOutputController::DoCreate, controller, false));
+  return controller;
+}
+
+// The public control methods below are non-blocking; each posts its Do*()
+// counterpart to the audio manager's message loop.
+void AudioOutputController::Play() {
+  message_loop_->PostTask(FROM_HERE, base::Bind(
+      &AudioOutputController::DoPlay, this));
+}
+
+void AudioOutputController::Pause() {
+  message_loop_->PostTask(FROM_HERE, base::Bind(
+      &AudioOutputController::DoPause, this));
+}
+
+// |closed_task| is run (via PostTaskAndReply) after DoClose() has finished.
+void AudioOutputController::Close(const base::Closure& closed_task) {
+  DCHECK(!closed_task.is_null());
+  message_loop_->PostTaskAndReply(FROM_HERE, base::Bind(
+      &AudioOutputController::DoClose, this), closed_task);
+}
+
+void AudioOutputController::SetVolume(double volume) {
+  message_loop_->PostTask(FROM_HERE, base::Bind(
+      &AudioOutputController::DoSetVolume, this, volume));
+}
+
+// Creates and opens the output stream (or adopts |diverting_to_stream_|).
+// |is_for_device_change| suppresses the OnCreated() callback when the stream
+// is being recreated after a device change.
+void AudioOutputController::DoCreate(bool is_for_device_change) {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.CreateTime");
+
+  // Close() can be called before DoCreate() is executed.
+  if (state_ == kClosed)
+    return;
+
+  DoStopCloseAndClearStream();  // Calls RemoveOutputDeviceChangeListener().
+  DCHECK_EQ(kEmpty, state_);
+
+  stream_ = diverting_to_stream_ ? diverting_to_stream_ :
+      audio_manager_->MakeAudioOutputStreamProxy(params_, input_device_id_);
+  if (!stream_) {
+    state_ = kError;
+    handler_->OnError();
+    return;
+  }
+
+  if (!stream_->Open()) {
+    DoStopCloseAndClearStream();
+    state_ = kError;
+    handler_->OnError();
+    return;
+  }
+
+  // Everything started okay, so re-register for state change callbacks if
+  // stream_ was created via AudioManager.
+  if (stream_ != diverting_to_stream_)
+    audio_manager_->AddOutputDeviceChangeListener(this);
+
+  // We have successfully opened the stream. Set the initial volume.
+  stream_->SetVolume(volume_);
+
+  // Finally set the state to kCreated.
+  state_ = kCreated;
+
+  // And then report we have been created if we haven't done so already.
+  if (!is_for_device_change)
+    handler_->OnCreated();
+}
+
+// Transitions kCreated/kPaused -> kPlaying, starts power monitoring, and
+// starts the stream.
+void AudioOutputController::DoPlay() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.PlayTime");
+
+  // We can start from created or paused state.
+  if (state_ != kCreated && state_ != kPaused)
+    return;
+
+  // Ask for first packet.
+  sync_reader_->UpdatePendingBytes(0);
+
+  state_ = kPlaying;
+
+  power_monitor_.Reset();
+  power_poll_callback_.Reset(
+      base::Bind(&AudioOutputController::ReportPowerMeasurementPeriodically,
+                 this));
+  // Run the callback to send an initial notification that we're starting in
+  // silence, and to schedule periodic callbacks.
+  power_poll_callback_.callback().Run();
+
+  // We start the AudioOutputStream lazily.
+  AllowEntryToOnMoreIOData();
+  stream_->Start(this);
+
+  handler_->OnPlaying();
+}
+
+// Sends the current power reading to the handler and reschedules itself so
+// measurements arrive roughly kPowerMeasurementsPerSecond times per second.
+void AudioOutputController::ReportPowerMeasurementPeriodically() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  const std::pair<float, bool>& reading =
+      power_monitor_.ReadCurrentPowerAndClip();
+  handler_->OnPowerMeasured(reading.first, reading.second);
+  message_loop_->PostDelayedTask(
+      FROM_HERE, power_poll_callback_.callback(),
+      TimeDelta::FromSeconds(1) / kPowerMeasurementsPerSecond);
+}
+
+// Stops the physical stream and power polling; kPlaying -> kPaused. No-op in
+// any other state.
+void AudioOutputController::StopStream() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+
+  if (state_ == kPlaying) {
+    stream_->Stop();
+    DisallowEntryToOnMoreIOData();
+
+    power_poll_callback_.Cancel();
+
+    state_ = kPaused;
+  }
+}
+
+// Pauses playback: stops the stream, notifies the reader and the handler.
+void AudioOutputController::DoPause() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.PauseTime");
+
+  StopStream();
+
+  if (state_ != kPaused)
+    return;
+
+  // Send a special pause mark to the low-latency audio thread.
+  sync_reader_->UpdatePendingBytes(kPauseMark);
+
+  // Paused means silence follows.
+  handler_->OnPowerMeasured(AudioPowerMonitor::zero_power(), false);
+
+  handler_->OnPaused();
+}
+
+// Final teardown on the audio loop; idempotent once state_ is kClosed.
+void AudioOutputController::DoClose() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.CloseTime");
+
+  if (state_ != kClosed) {
+    DoStopCloseAndClearStream();
+    sync_reader_->Close();
+    state_ = kClosed;
+  }
+}
+
+void AudioOutputController::DoSetVolume(double volume) {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+
+  // Saves the volume to a member first. We may not be able to set the volume
+  // right away but when the stream is created we'll set the volume.
+  volume_ = volume;
+
+  switch (state_) {
+    case kCreated:
+    case kPlaying:
+    case kPaused:
+      stream_->SetVolume(volume_);
+      break;
+    default:
+      return;
+  }
+}
+
+// Forwards a stream error to the handler unless the controller was already
+// closed.
+void AudioOutputController::DoReportError() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  if (state_ != kClosed)
+    handler_->OnError();
+}
+
+// Output-only variant of the data callback: forwards to OnMoreIOData() with a
+// NULL source bus.
+int AudioOutputController::OnMoreData(AudioBus* dest,
+                                      AudioBuffersState buffers_state) {
+  return OnMoreIOData(NULL, dest, buffers_state);
+}
+
+// Pulls |frames| of audio from the SyncReader into |dest|, updates the
+// reader's pending-byte count, and feeds the power monitor. Guarded against
+// re-entry via the Allow/DisallowEntryToOnMoreIOData pair.
+int AudioOutputController::OnMoreIOData(AudioBus* source,
+                                        AudioBus* dest,
+                                        AudioBuffersState buffers_state) {
+  DisallowEntryToOnMoreIOData();
+  TRACE_EVENT0("audio", "AudioOutputController::OnMoreIOData");
+
+  // The OS level audio APIs on Linux and Windows all have problems requesting
+  // data on a fixed interval. Sometimes they will issue calls back to back
+  // which can cause glitching, so wait until the renderer is ready.
+  //
+  // We also need to wait when diverting since the virtual stream will call this
+  // multiple times without waiting.
+  //
+  // NEVER wait on OSX unless a virtual stream is connected, otherwise we can
+  // end up hanging the entire OS.
+  //
+  // See many bugs for context behind this decision: http://crbug.com/170498,
+  // http://crbug.com/171651, http://crbug.com/174985, and more.
+#if defined(OS_WIN) || defined(OS_LINUX)
+  const bool kShouldBlock = true;
+#else
+  const bool kShouldBlock = diverting_to_stream_ != NULL;
+#endif
+
+  const int frames = sync_reader_->Read(kShouldBlock, source, dest);
+  DCHECK_LE(0, frames);
+  sync_reader_->UpdatePendingBytes(
+      buffers_state.total_bytes() + frames * params_.GetBytesPerFrame());
+
+  power_monitor_.Scan(*dest, frames);
+
+  AllowEntryToOnMoreIOData();
+  return frames;
+}
+
+// Posts DoReportError() so the error is handled on the controller's loop.
+void AudioOutputController::OnError(AudioOutputStream* stream) {
+  // Handle error on the audio controller thread.
+  message_loop_->PostTask(FROM_HERE, base::Bind(
+      &AudioOutputController::DoReportError, this));
+}
+
+// Stops and closes |stream_| (if any), unregisters device-change callbacks,
+// and resets state_ to kEmpty.
+void AudioOutputController::DoStopCloseAndClearStream() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+
+  // Allow calling unconditionally and bail if we don't have a stream_ to close.
+  if (stream_) {
+    // De-register from state change callbacks if stream_ was created via
+    // AudioManager.
+    if (stream_ != diverting_to_stream_)
+      audio_manager_->RemoveOutputDeviceChangeListener(this);
+
+    StopStream();
+    stream_->Close();
+    if (stream_ == diverting_to_stream_)
+      diverting_to_stream_ = NULL;
+    stream_ = NULL;
+  }
+
+  state_ = kEmpty;
+}
+
+// Recreates the stream in response to a device change and restores the
+// pre-change playback state.
+void AudioOutputController::OnDeviceChange() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.DeviceChangeTime");
+
+  // TODO(dalecurtis): Notify the renderer side that a device change has
+  // occurred. Currently querying the hardware information here will lead to
+  // crashes on OSX. See http://crbug.com/158170.
+
+  // Recreate the stream (DoCreate() will first shut down an existing stream).
+  // Exit if we ran into an error.
+  const State original_state = state_;
+  DoCreate(true);
+  if (!stream_ || state_ == kError)
+    return;
+
+  // Get us back to the original state or an equivalent state.
+  switch (original_state) {
+    case kPlaying:
+      DoPlay();
+      return;
+    case kCreated:
+    case kPaused:
+      // From the outside these two states are equivalent.
+      return;
+    default:
+      NOTREACHED() << "Invalid original state.";
+  }
+}
+
+// Returns the parameters this controller was constructed with.
+const AudioParameters& AudioOutputController::GetAudioParameters() {
+  return params_;
+}
+
+// Diverting support: the public Start/StopDiverting() methods are
+// non-blocking and post the corresponding Do*() task to the audio loop.
+void AudioOutputController::StartDiverting(AudioOutputStream* to_stream) {
+  message_loop_->PostTask(
+      FROM_HERE,
+      base::Bind(&AudioOutputController::DoStartDiverting, this, to_stream));
+}
+
+void AudioOutputController::StopDiverting() {
+  message_loop_->PostTask(
+      FROM_HERE, base::Bind(&AudioOutputController::DoStopDiverting, this));
+}
+
+void AudioOutputController::DoStartDiverting(AudioOutputStream* to_stream) {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+
+  if (state_ == kClosed)
+    return;
+
+  DCHECK(!diverting_to_stream_);
+  diverting_to_stream_ = to_stream;
+  // Note: OnDeviceChange() will engage the "re-create" process, which will
+  // detect and use the alternate AudioOutputStream rather than create a new one
+  // via AudioManager.
+  OnDeviceChange();
+}
+
+void AudioOutputController::DoStopDiverting() {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+
+  if (state_ == kClosed)
+    return;
+
+  // Note: OnDeviceChange() will cause the existing stream (the consumer of the
+  // diverted audio data) to be closed, and diverting_to_stream_ will be set
+  // back to NULL.
+  OnDeviceChange();
+  DCHECK(!diverting_to_stream_);
+}
+
+// Re-entrancy guard for OnMoreIOData(): exactly one entry may be outstanding
+// at a time, tracked with the atomic ref count |num_allowed_io_|.
+void AudioOutputController::AllowEntryToOnMoreIOData() {
+  DCHECK(base::AtomicRefCountIsZero(&num_allowed_io_));
+  base::AtomicRefCountInc(&num_allowed_io_);
+}
+
+void AudioOutputController::DisallowEntryToOnMoreIOData() {
+  const bool is_zero = !base::AtomicRefCountDec(&num_allowed_io_);
+  DCHECK(is_zero);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_output_controller.h b/chromium/media/audio/audio_output_controller.h
new file mode 100644
index 00000000000..38a2c03f590
--- /dev/null
+++ b/chromium/media/audio/audio_output_controller.h
@@ -0,0 +1,245 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_CONTROLLER_H_
+#define MEDIA_AUDIO_AUDIO_OUTPUT_CONTROLLER_H_
+
+#include "base/atomic_ref_count.h"
+#include "base/callback.h"
+#include "base/cancelable_callback.h"
+#include "base/memory/ref_counted.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_power_monitor.h"
+#include "media/audio/audio_source_diverter.h"
+#include "media/audio/simple_sources.h"
+#include "media/base/media_export.h"
+
+// An AudioOutputController controls an AudioOutputStream and provides data
+// to this output stream. It has an important function that it executes
+// audio operations like play, pause, stop, etc. on a separate thread,
+// namely the audio manager thread.
+//
+// All the public methods of AudioOutputController are non-blocking.
+// The actual operations are performed on the audio manager thread.
+//
+// Here is a state transition diagram for the AudioOutputController:
+//
+// *[ Empty ] --> [ Created ] --> [ Playing ] -------.
+// | | | ^ |
+// | | | | |
+// | | | | v
+// | | | `----- [ Paused ]
+// | | | |
+// | v v |
+// `-----------> [ Closed ] <-----------'
+//
+// * Initial state
+//
+// At any time after reaching the Created state but before Closed, the
+// AudioOutputController may be notified of a device change via
+// OnDeviceChange(). As the OnDeviceChange() is processed, state transitions
+// will occur, ultimately ending up in an equivalent pre-call state. E.g., if
+// the state was Paused, the new state will be Created, since these states are
+// all functionally equivalent and require a Play() call to continue to the next
+// state.
+//
+// The AudioOutputStream can request data from the AudioOutputController via the
+// AudioSourceCallback interface. AudioOutputController uses the SyncReader
+// passed to it via construction to synchronously fulfill this read request.
+//
+
+namespace media {
+
+class MEDIA_EXPORT AudioOutputController
+ : public base::RefCountedThreadSafe<AudioOutputController>,
+ public AudioOutputStream::AudioSourceCallback,
+ public AudioSourceDiverter,
+ NON_EXPORTED_BASE(public AudioManager::AudioDeviceListener) {
+ public:
+ // An event handler that receives events from the AudioOutputController. The
+ // following methods are called on the audio manager thread.
+ class MEDIA_EXPORT EventHandler {
+ public:
+ virtual void OnCreated() = 0;
+ virtual void OnPlaying() = 0;
+ virtual void OnPowerMeasured(float power_dbfs, bool clipped) = 0;
+ virtual void OnPaused() = 0;
+ virtual void OnError() = 0;
+ virtual void OnDeviceChange(int new_buffer_size, int new_sample_rate) = 0;
+
+ protected:
+ virtual ~EventHandler() {}
+ };
+
+ // A synchronous reader interface used by AudioOutputController for
+ // synchronous reading.
+ // TODO(crogers): find a better name for this class and the Read() method
+ // now that it can handle synchronized I/O.
+ class SyncReader {
+ public:
+ virtual ~SyncReader() {}
+
+  // Notifies the synchronous reader of the number of bytes in the
+  // AudioOutputController that have not yet been played. The SyncReader uses
+  // this to prepare more data and perform synchronization.
+ virtual void UpdatePendingBytes(uint32 bytes) = 0;
+
+ // Attempt to completely fill |dest|, return the actual number of frames
+ // that could be read. |source| may optionally be provided for input data.
+ // If |block| is specified, the Read() will block until data is available
+ // or a timeout is reached.
+ virtual int Read(bool block, const AudioBus* source, AudioBus* dest) = 0;
+
+ // Close this synchronous reader.
+ virtual void Close() = 0;
+ };
+
+ // Factory method for creating an AudioOutputController.
+ // This also creates and opens an AudioOutputStream on the audio manager
+ // thread, and if this is successful, the |event_handler| will receive an
+ // OnCreated() call from the same audio manager thread. |audio_manager| must
+ // outlive AudioOutputController.
+ static scoped_refptr<AudioOutputController> Create(
+ AudioManager* audio_manager, EventHandler* event_handler,
+ const AudioParameters& params, const std::string& input_device_id,
+ SyncReader* sync_reader);
+
+ // Methods to control playback of the stream.
+
+ // Starts the playback of this audio output stream.
+ void Play();
+
+ // Pause this audio output stream.
+ void Pause();
+
+ // Closes the audio output stream. The state is changed and the resources
+ // are freed on the audio manager thread. closed_task is executed after that.
+ // Callbacks (EventHandler and SyncReader) must exist until closed_task is
+ // called.
+ //
+ // It is safe to call this method more than once. Calls after the first one
+ // will have no effect.
+ void Close(const base::Closure& closed_task);
+
+ // Sets the volume of the audio output stream.
+ void SetVolume(double volume);
+
+ // AudioSourceCallback implementation.
+ virtual int OnMoreData(AudioBus* dest,
+ AudioBuffersState buffers_state) OVERRIDE;
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) OVERRIDE;
+ virtual void OnError(AudioOutputStream* stream) OVERRIDE;
+
+ // AudioDeviceListener implementation. When called AudioOutputController will
+ // shutdown the existing |stream_|, transition to the kRecreating state,
+ // create a new stream, and then transition back to an equivalent state prior
+ // to being called.
+ virtual void OnDeviceChange() OVERRIDE;
+
+ // AudioSourceDiverter implementation.
+ virtual const AudioParameters& GetAudioParameters() OVERRIDE;
+ virtual void StartDiverting(AudioOutputStream* to_stream) OVERRIDE;
+ virtual void StopDiverting() OVERRIDE;
+
+ protected:
+ // Internal state of the source.
+ enum State {
+ kEmpty,
+ kCreated,
+ kPlaying,
+ kPaused,
+ kClosed,
+ kError,
+ };
+
+ friend class base::RefCountedThreadSafe<AudioOutputController>;
+ virtual ~AudioOutputController();
+
+ private:
+ // We are polling sync reader if data became available.
+ static const int kPollNumAttempts;
+ static const int kPollPauseInMilliseconds;
+
+ AudioOutputController(AudioManager* audio_manager, EventHandler* handler,
+ const AudioParameters& params,
+ const std::string& input_device_id,
+ SyncReader* sync_reader);
+
+ // The following methods are executed on the audio manager thread.
+ void DoCreate(bool is_for_device_change);
+ void DoPlay();
+ void DoPause();
+ void DoClose();
+ void DoSetVolume(double volume);
+ void DoReportError();
+ void DoStartDiverting(AudioOutputStream* to_stream);
+ void DoStopDiverting();
+
+ // Calls EventHandler::OnPowerMeasured() with the current power level and then
+ // schedules itself to be called again later.
+ void ReportPowerMeasurementPeriodically();
+
+ // Helper method that stops the physical stream.
+ void StopStream();
+
+ // Helper method that stops, closes, and NULLs |*stream_|.
+ void DoStopCloseAndClearStream();
+
+ // Sanity-check that entry/exit to OnMoreIOData() by the hardware audio thread
+ // happens only between AudioOutputStream::Start() and Stop().
+ void AllowEntryToOnMoreIOData();
+ void DisallowEntryToOnMoreIOData();
+
+ AudioManager* const audio_manager_;
+ const AudioParameters params_;
+ EventHandler* const handler_;
+
+ // Used by the unified IO to open the correct input device.
+ std::string input_device_id_;
+
+ AudioOutputStream* stream_;
+
+ // When non-NULL, audio is being diverted to this stream.
+ AudioOutputStream* diverting_to_stream_;
+
+ // The current volume of the audio stream.
+ double volume_;
+
+ // |state_| is written on the audio manager thread and is read on the
+ // hardware audio thread. These operations need to be locked. But lock
+ // is not required for reading on the audio manager thread.
+ State state_;
+
+ // Binary semaphore, used to ensure that only one thread enters the
+ // OnMoreIOData() method, and only when it is valid to do so. This is for
+ // sanity-checking the behavior of platform implementations of
+ // AudioOutputStream. In other words, multiple contention is not expected,
+ // nor in the design here.
+ base::AtomicRefCount num_allowed_io_;
+
+ // SyncReader is used only in low latency mode for synchronous reading.
+ SyncReader* const sync_reader_;
+
+ // The message loop of audio manager thread that this object runs on.
+ const scoped_refptr<base::MessageLoopProxy> message_loop_;
+
+ // When starting stream we wait for data to become available.
+ // Number of times left.
+ int number_polling_attempts_left_;
+
+ // Scans audio samples from OnMoreIOData() as input to compute power levels.
+ AudioPowerMonitor power_monitor_;
+
+ // Periodic callback to report power levels during playback.
+ base::CancelableClosure power_poll_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioOutputController);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_OUTPUT_CONTROLLER_H_
diff --git a/chromium/media/audio/audio_output_controller_unittest.cc b/chromium/media/audio/audio_output_controller_unittest.cc
new file mode 100644
index 00000000000..128cc07716f
--- /dev/null
+++ b/chromium/media/audio/audio_output_controller_unittest.cc
@@ -0,0 +1,379 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/environment.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "media/audio/audio_output_controller.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::Invoke;
+using ::testing::NotNull;
+using ::testing::Return;
+
+namespace media {
+
+static const int kSampleRate = AudioParameters::kAudioCDSampleRate;
+static const int kBitsPerSample = 16;
+static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+static const int kSamplesPerPacket = kSampleRate / 100;
+static const int kHardwareBufferSize = kSamplesPerPacket *
+ ChannelLayoutToChannelCount(kChannelLayout) * kBitsPerSample / 8;
+static const double kTestVolume = 0.25;
+
+class MockAudioOutputControllerEventHandler
+ : public AudioOutputController::EventHandler {
+ public:
+ MockAudioOutputControllerEventHandler() {}
+
+ MOCK_METHOD0(OnCreated, void());
+ MOCK_METHOD0(OnPlaying, void());
+ MOCK_METHOD2(OnPowerMeasured, void(float power_dbfs, bool clipped));
+ MOCK_METHOD0(OnPaused, void());
+ MOCK_METHOD0(OnError, void());
+ MOCK_METHOD2(OnDeviceChange, void(int new_buffer_size, int new_sample_rate));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAudioOutputControllerEventHandler);
+};
+
+class MockAudioOutputControllerSyncReader
+ : public AudioOutputController::SyncReader {
+ public:
+ MockAudioOutputControllerSyncReader() {}
+
+ MOCK_METHOD1(UpdatePendingBytes, void(uint32 bytes));
+ MOCK_METHOD3(Read, int(bool block, const AudioBus* source, AudioBus* dest));
+ MOCK_METHOD0(Close, void());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAudioOutputControllerSyncReader);
+};
+
+class MockAudioOutputStream : public AudioOutputStream {
+ public:
+ MOCK_METHOD0(Open, bool());
+ MOCK_METHOD1(Start, void(AudioSourceCallback* callback));
+ MOCK_METHOD0(Stop, void());
+ MOCK_METHOD1(SetVolume, void(double volume));
+ MOCK_METHOD1(GetVolume, void(double* volume));
+ MOCK_METHOD0(Close, void());
+
+ // Set/get the callback passed to Start().
+ AudioSourceCallback* callback() const { return callback_; }
+ void SetCallback(AudioSourceCallback* asc) { callback_ = asc; }
+
+ private:
+ AudioSourceCallback* callback_;
+};
+
+ACTION_P(SignalEvent, event) {
+ event->Signal();
+}
+
+static const float kBufferNonZeroData = 1.0f;
+ACTION(PopulateBuffer) {
+ arg2->Zero();
+ // Note: To confirm the buffer will be populated in these tests, it's
+ // sufficient that only the first float in channel 0 is set to the value.
+ arg2->channel(0)[0] = kBufferNonZeroData;
+}
+
+class AudioOutputControllerTest : public testing::Test {
+ public:
+ AudioOutputControllerTest()
+ : audio_manager_(AudioManager::Create()),
+ create_event_(false, false),
+ play_event_(false, false),
+ read_event_(false, false),
+ pause_event_(false, false) {
+ }
+
+ virtual ~AudioOutputControllerTest() {
+ }
+
+ protected:
+ void Create(int samples_per_packet) {
+ EXPECT_FALSE(create_event_.IsSignaled());
+ EXPECT_FALSE(play_event_.IsSignaled());
+ EXPECT_FALSE(read_event_.IsSignaled());
+ EXPECT_FALSE(pause_event_.IsSignaled());
+
+ params_ = AudioParameters(
+ AudioParameters::AUDIO_FAKE, kChannelLayout,
+ kSampleRate, kBitsPerSample, samples_per_packet);
+
+ if (params_.IsValid()) {
+ EXPECT_CALL(mock_event_handler_, OnCreated())
+ .WillOnce(SignalEvent(&create_event_));
+ }
+
+ controller_ = AudioOutputController::Create(
+ audio_manager_.get(), &mock_event_handler_, params_, std::string(),
+ &mock_sync_reader_);
+ if (controller_.get())
+ controller_->SetVolume(kTestVolume);
+
+ EXPECT_EQ(params_.IsValid(), controller_.get() != NULL);
+ }
+
+ void Play() {
+ // Expect the event handler to receive one OnPlaying() call and one or more
+ // OnPowerMeasured() calls.
+ EXPECT_CALL(mock_event_handler_, OnPlaying())
+ .WillOnce(SignalEvent(&play_event_));
+ EXPECT_CALL(mock_event_handler_, OnPowerMeasured(_, false))
+ .Times(AtLeast(1));
+
+ // During playback, the mock pretends to provide audio data rendered and
+ // sent from the render process.
+ EXPECT_CALL(mock_sync_reader_, UpdatePendingBytes(_))
+ .Times(AtLeast(1));
+ EXPECT_CALL(mock_sync_reader_, Read(_, _, _))
+ .WillRepeatedly(DoAll(PopulateBuffer(),
+ SignalEvent(&read_event_),
+ Return(params_.frames_per_buffer())));
+ controller_->Play();
+ }
+
+ void Pause() {
+ // Expect the event handler to receive one OnPaused() call.
+ EXPECT_CALL(mock_event_handler_, OnPaused())
+ .WillOnce(SignalEvent(&pause_event_));
+
+ controller_->Pause();
+ }
+
+ void ChangeDevice() {
+    // Expect the event handler to receive one OnPlaying() call and no
+    // OnPaused() call.
+ EXPECT_CALL(mock_event_handler_, OnPlaying())
+ .WillOnce(SignalEvent(&play_event_));
+ EXPECT_CALL(mock_event_handler_, OnPaused())
+ .Times(0);
+
+ // Simulate a device change event to AudioOutputController from the
+ // AudioManager.
+ audio_manager_->GetMessageLoop()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioOutputController::OnDeviceChange, controller_));
+ }
+
+ void Divert(bool was_playing, int num_times_to_be_started) {
+ if (was_playing) {
+ // Expect the handler to receive one OnPlaying() call as a result of the
+ // stream switching.
+ EXPECT_CALL(mock_event_handler_, OnPlaying())
+ .WillOnce(SignalEvent(&play_event_));
+ }
+
+ EXPECT_CALL(mock_stream_, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(mock_stream_, SetVolume(kTestVolume));
+ if (num_times_to_be_started > 0) {
+ EXPECT_CALL(mock_stream_, Start(NotNull()))
+ .Times(num_times_to_be_started)
+ .WillRepeatedly(
+ Invoke(&mock_stream_, &MockAudioOutputStream::SetCallback));
+ EXPECT_CALL(mock_stream_, Stop())
+ .Times(num_times_to_be_started);
+ }
+
+ controller_->StartDiverting(&mock_stream_);
+ }
+
+ void ReadDivertedAudioData() {
+ scoped_ptr<AudioBus> dest = AudioBus::Create(params_);
+ ASSERT_TRUE(!!mock_stream_.callback());
+ const int frames_read =
+ mock_stream_.callback()->OnMoreData(dest.get(), AudioBuffersState());
+ EXPECT_LT(0, frames_read);
+ EXPECT_EQ(kBufferNonZeroData, dest->channel(0)[0]);
+ }
+
+ void Revert(bool was_playing) {
+ if (was_playing) {
+ // Expect the handler to receive one OnPlaying() call as a result of the
+ // stream switching back.
+ EXPECT_CALL(mock_event_handler_, OnPlaying())
+ .WillOnce(SignalEvent(&play_event_));
+ }
+
+ EXPECT_CALL(mock_stream_, Close());
+
+ controller_->StopDiverting();
+ }
+
+ void Close() {
+ EXPECT_CALL(mock_sync_reader_, Close());
+
+ controller_->Close(base::MessageLoop::QuitClosure());
+ base::MessageLoop::current()->Run();
+ }
+
+ // These help make test sequences more readable.
+ void DivertNeverPlaying() { Divert(false, 0); }
+ void DivertWillEventuallyBeTwicePlayed() { Divert(false, 2); }
+ void DivertWhilePlaying() { Divert(true, 1); }
+ void RevertWasNotPlaying() { Revert(false); }
+ void RevertWhilePlaying() { Revert(true); }
+
+ // These synchronize the main thread with key events taking place on other
+ // threads.
+ void WaitForCreate() { create_event_.Wait(); }
+ void WaitForPlay() { play_event_.Wait(); }
+ void WaitForReads() {
+ // Note: Arbitrarily chosen, but more iterations causes tests to take
+ // significantly more time.
+ static const int kNumIterations = 3;
+ for (int i = 0; i < kNumIterations; ++i) {
+ read_event_.Wait();
+ }
+ }
+ void WaitForPause() { pause_event_.Wait(); }
+
+ private:
+ base::MessageLoopForIO message_loop_;
+ scoped_ptr<AudioManager> audio_manager_;
+ MockAudioOutputControllerEventHandler mock_event_handler_;
+ MockAudioOutputControllerSyncReader mock_sync_reader_;
+ MockAudioOutputStream mock_stream_;
+ base::WaitableEvent create_event_;
+ base::WaitableEvent play_event_;
+ base::WaitableEvent read_event_;
+ base::WaitableEvent pause_event_;
+ AudioParameters params_;
+ scoped_refptr<AudioOutputController> controller_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioOutputControllerTest);
+};
+
+TEST_F(AudioOutputControllerTest, CreateAndClose) {
+ Create(kSamplesPerPacket);
+ Close();
+}
+
+TEST_F(AudioOutputControllerTest, HardwareBufferTooLarge) {
+ Create(kSamplesPerPacket * 1000);
+}
+
+TEST_F(AudioOutputControllerTest, PlayAndClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ Play();
+ WaitForPlay();
+ WaitForReads();
+ Close();
+}
+
+TEST_F(AudioOutputControllerTest, PlayPauseClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ Play();
+ WaitForPlay();
+ WaitForReads();
+ Pause();
+ WaitForPause();
+ Close();
+}
+
+TEST_F(AudioOutputControllerTest, PlayPausePlayClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ Play();
+ WaitForPlay();
+ WaitForReads();
+ Pause();
+ WaitForPause();
+ Play();
+ WaitForPlay();
+ Close();
+}
+
+TEST_F(AudioOutputControllerTest, PlayDeviceChangeClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ Play();
+ WaitForPlay();
+ WaitForReads();
+ ChangeDevice();
+ WaitForPlay();
+ WaitForReads();
+ Close();
+}
+
+TEST_F(AudioOutputControllerTest, PlayDivertRevertClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ Play();
+ WaitForPlay();
+ WaitForReads();
+ DivertWhilePlaying();
+ WaitForPlay();
+ ReadDivertedAudioData();
+ RevertWhilePlaying();
+ WaitForPlay();
+ WaitForReads();
+ Close();
+}
+
+TEST_F(AudioOutputControllerTest, PlayDivertRevertDivertRevertClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ Play();
+ WaitForPlay();
+ WaitForReads();
+ DivertWhilePlaying();
+ WaitForPlay();
+ ReadDivertedAudioData();
+ RevertWhilePlaying();
+ WaitForPlay();
+ WaitForReads();
+ DivertWhilePlaying();
+ WaitForPlay();
+ ReadDivertedAudioData();
+ RevertWhilePlaying();
+ WaitForPlay();
+ WaitForReads();
+ Close();
+}
+
+TEST_F(AudioOutputControllerTest, DivertPlayPausePlayRevertClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ DivertWillEventuallyBeTwicePlayed();
+ Play();
+ WaitForPlay();
+ ReadDivertedAudioData();
+ Pause();
+ WaitForPause();
+ Play();
+ WaitForPlay();
+ ReadDivertedAudioData();
+ RevertWhilePlaying();
+ WaitForPlay();
+ WaitForReads();
+ Close();
+}
+
+TEST_F(AudioOutputControllerTest, DivertRevertClose) {
+ Create(kSamplesPerPacket);
+ WaitForCreate();
+ DivertNeverPlaying();
+ RevertWasNotPlaying();
+ Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_output_device.cc b/chromium/media/audio/audio_output_device.cc
new file mode 100644
index 00000000000..0c406cab0d6
--- /dev/null
+++ b/chromium/media/audio/audio_output_device.cc
@@ -0,0 +1,352 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_output_device.h"
+
+#include "base/basictypes.h"
+#include "base/debug/trace_event.h"
+#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
+#include "media/audio/audio_output_controller.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/shared_memory_util.h"
+#include "media/base/limits.h"
+
+namespace media {
+
+// Takes care of invoking the render callback on the audio thread.
+// An instance of this class is created for each output stream in
+// OnStreamCreated().
+class AudioOutputDevice::AudioThreadCallback
+ : public AudioDeviceThread::Callback {
+ public:
+ AudioThreadCallback(const AudioParameters& audio_parameters,
+ base::SharedMemoryHandle memory,
+ int memory_length,
+ AudioRendererSink::RenderCallback* render_callback);
+ virtual ~AudioThreadCallback();
+
+ virtual void MapSharedMemory() OVERRIDE;
+
+ // Called whenever we receive notifications about pending data.
+ virtual void Process(int pending_data) OVERRIDE;
+
+ private:
+ AudioRendererSink::RenderCallback* render_callback_;
+ scoped_ptr<AudioBus> input_bus_;
+ scoped_ptr<AudioBus> output_bus_;
+ DISALLOW_COPY_AND_ASSIGN(AudioThreadCallback);
+};
+
+AudioOutputDevice::AudioOutputDevice(
+ scoped_ptr<AudioOutputIPC> ipc,
+ const scoped_refptr<base::MessageLoopProxy>& io_loop)
+ : ScopedLoopObserver(io_loop),
+ callback_(NULL),
+ ipc_(ipc.Pass()),
+ state_(IDLE),
+ play_on_start_(true),
+ session_id_(-1),
+ stopping_hack_(false) {
+ CHECK(ipc_);
+
+ // The correctness of the code depends on the relative values assigned in the
+ // State enum.
+ COMPILE_ASSERT(IPC_CLOSED < IDLE, invalid_enum_value_assignment_0);
+ COMPILE_ASSERT(IDLE < CREATING_STREAM, invalid_enum_value_assignment_1);
+ COMPILE_ASSERT(CREATING_STREAM < PAUSED, invalid_enum_value_assignment_2);
+ COMPILE_ASSERT(PAUSED < PLAYING, invalid_enum_value_assignment_3);
+}
+
+void AudioOutputDevice::InitializeUnifiedStream(const AudioParameters& params,
+ RenderCallback* callback,
+ int session_id) {
+ DCHECK(!callback_) << "Calling InitializeUnifiedStream() twice?";
+ DCHECK(params.IsValid());
+ audio_parameters_ = params;
+ callback_ = callback;
+ session_id_ = session_id;
+}
+
+void AudioOutputDevice::Initialize(const AudioParameters& params,
+ RenderCallback* callback) {
+ InitializeUnifiedStream(params, callback, 0);
+}
+
+AudioOutputDevice::~AudioOutputDevice() {
+ // The current design requires that the user calls Stop() before deleting
+ // this class.
+ DCHECK(audio_thread_.IsStopped());
+}
+
+void AudioOutputDevice::Start() {
+ DCHECK(callback_) << "Initialize hasn't been called";
+ message_loop()->PostTask(FROM_HERE,
+ base::Bind(&AudioOutputDevice::CreateStreamOnIOThread, this,
+ audio_parameters_));
+}
+
+void AudioOutputDevice::Stop() {
+ {
+ base::AutoLock auto_lock(audio_thread_lock_);
+ audio_thread_.Stop(base::MessageLoop::current());
+ stopping_hack_ = true;
+ }
+
+ message_loop()->PostTask(FROM_HERE,
+ base::Bind(&AudioOutputDevice::ShutDownOnIOThread, this));
+}
+
+void AudioOutputDevice::Play() {
+ message_loop()->PostTask(FROM_HERE,
+ base::Bind(&AudioOutputDevice::PlayOnIOThread, this));
+}
+
+void AudioOutputDevice::Pause() {
+ message_loop()->PostTask(FROM_HERE,
+ base::Bind(&AudioOutputDevice::PauseOnIOThread, this));
+}
+
+bool AudioOutputDevice::SetVolume(double volume) {
+ if (volume < 0 || volume > 1.0)
+ return false;
+
+ if (!message_loop()->PostTask(FROM_HERE,
+ base::Bind(&AudioOutputDevice::SetVolumeOnIOThread, this, volume))) {
+ return false;
+ }
+
+ return true;
+}
+
+void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params) {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+ if (state_ == IDLE) {
+ state_ = CREATING_STREAM;
+ ipc_->CreateStream(this, params, session_id_);
+ }
+}
+
+void AudioOutputDevice::PlayOnIOThread() {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+ if (state_ == PAUSED) {
+ ipc_->PlayStream();
+ state_ = PLAYING;
+ play_on_start_ = false;
+ } else {
+ play_on_start_ = true;
+ }
+}
+
+void AudioOutputDevice::PauseOnIOThread() {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+ if (state_ == PLAYING) {
+ ipc_->PauseStream();
+ state_ = PAUSED;
+ }
+ play_on_start_ = false;
+}
+
+void AudioOutputDevice::ShutDownOnIOThread() {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+
+ // Close the stream, if we haven't already.
+ if (state_ >= CREATING_STREAM) {
+ ipc_->CloseStream();
+ state_ = IDLE;
+ }
+
+ // We can run into an issue where ShutDownOnIOThread is called right after
+ // OnStreamCreated is called in cases where Start/Stop are called before we
+ // get the OnStreamCreated callback. To handle that corner case, we call
+ // Stop(). In most cases, the thread will already be stopped.
+ //
+ // Another situation is when the IO thread goes away before Stop() is called
+ // in which case, we cannot use the message loop to close the thread handle
+ // and can't rely on the main thread existing either.
+ base::AutoLock auto_lock_(audio_thread_lock_);
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+ audio_thread_.Stop(NULL);
+ audio_callback_.reset();
+ stopping_hack_ = false;
+}
+
+void AudioOutputDevice::SetVolumeOnIOThread(double volume) {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+ if (state_ >= CREATING_STREAM)
+ ipc_->SetVolume(volume);
+}
+
+void AudioOutputDevice::OnStateChanged(AudioOutputIPCDelegate::State state) {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+
+ // Do nothing if the stream has been closed.
+ if (state_ < CREATING_STREAM)
+ return;
+
+ // TODO(miu): Clean-up inconsistent and incomplete handling here.
+ // http://crbug.com/180640
+ switch (state) {
+ case AudioOutputIPCDelegate::kPlaying:
+ case AudioOutputIPCDelegate::kPaused:
+ break;
+ case AudioOutputIPCDelegate::kError:
+ DLOG(WARNING) << "AudioOutputDevice::OnStateChanged(kError)";
+ // Don't dereference the callback object if the audio thread
+ // is stopped or stopping. That could mean that the callback
+ // object has been deleted.
+ // TODO(tommi): Add an explicit contract for clearing the callback
+ // object. Possibly require calling Initialize again or provide
+ // a callback object via Start() and clear it in Stop().
+ if (!audio_thread_.IsStopped())
+ callback_->OnRenderError();
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+}
+
+void AudioOutputDevice::OnStreamCreated(
+ base::SharedMemoryHandle handle,
+ base::SyncSocket::Handle socket_handle,
+ int length) {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+#if defined(OS_WIN)
+ DCHECK(handle);
+ DCHECK(socket_handle);
+#else
+ DCHECK_GE(handle.fd, 0);
+ DCHECK_GE(socket_handle, 0);
+#endif
+ DCHECK_GT(length, 0);
+
+ if (state_ != CREATING_STREAM)
+ return;
+
+ // We can receive OnStreamCreated() on the IO thread after the client has
+ // called Stop() but before ShutDownOnIOThread() is processed. In such a
+ // situation |callback_| might point to freed memory. Instead of starting
+ // |audio_thread_| do nothing and wait for ShutDownOnIOThread() to get called.
+ //
+ // TODO(scherkus): The real fix is to have sane ownership semantics. The fact
+ // that |callback_| (which should own and outlive this object!) can point to
+ // freed memory is a mess. AudioRendererSink should be non-refcounted so that
+ // owners (WebRtcAudioDeviceImpl, AudioRendererImpl, etc...) can Stop() and
+ // delete as they see fit. AudioOutputDevice should internally use WeakPtr
+ // to handle teardown and thread hopping. See http://crbug.com/151051 for
+ // details.
+ base::AutoLock auto_lock(audio_thread_lock_);
+ if (stopping_hack_)
+ return;
+
+ DCHECK(audio_thread_.IsStopped());
+ audio_callback_.reset(new AudioOutputDevice::AudioThreadCallback(
+ audio_parameters_, handle, length, callback_));
+ audio_thread_.Start(audio_callback_.get(), socket_handle,
+ "AudioOutputDevice");
+ state_ = PAUSED;
+
+ // We handle the case where Play() and/or Pause() may have been called
+ // multiple times before OnStreamCreated() gets called.
+ if (play_on_start_)
+ PlayOnIOThread();
+}
+
+void AudioOutputDevice::OnIPCClosed() {
+ DCHECK(message_loop()->BelongsToCurrentThread());
+ state_ = IPC_CLOSED;
+ ipc_.reset();
+}
+
+void AudioOutputDevice::WillDestroyCurrentMessageLoop() {
+ LOG(ERROR) << "IO loop going away before the audio device has been stopped";
+ ShutDownOnIOThread();
+}
+
+// AudioOutputDevice::AudioThreadCallback
+
+AudioOutputDevice::AudioThreadCallback::AudioThreadCallback(
+ const AudioParameters& audio_parameters,
+ base::SharedMemoryHandle memory,
+ int memory_length,
+ AudioRendererSink::RenderCallback* render_callback)
+ : AudioDeviceThread::Callback(audio_parameters,
+ memory,
+ memory_length,
+ 1),
+ render_callback_(render_callback) {
+}
+
+AudioOutputDevice::AudioThreadCallback::~AudioThreadCallback() {
+}
+
+void AudioOutputDevice::AudioThreadCallback::MapSharedMemory() {
+ CHECK_EQ(total_segments_, 1);
+ CHECK(shared_memory_.Map(TotalSharedMemorySizeInBytes(memory_length_)));
+
+ // Calculate output and input memory size.
+ int output_memory_size = AudioBus::CalculateMemorySize(audio_parameters_);
+ int input_channels = audio_parameters_.input_channels();
+ int frames = audio_parameters_.frames_per_buffer();
+ int input_memory_size =
+ AudioBus::CalculateMemorySize(input_channels, frames);
+
+ int io_size = output_memory_size + input_memory_size;
+
+ DCHECK_EQ(memory_length_, io_size);
+
+ output_bus_ =
+ AudioBus::WrapMemory(audio_parameters_, shared_memory_.memory());
+
+ if (input_channels > 0) {
+ // The input data is after the output data.
+ char* input_data =
+ static_cast<char*>(shared_memory_.memory()) + output_memory_size;
+ input_bus_ =
+ AudioBus::WrapMemory(input_channels, frames, input_data);
+ }
+}
+
+// Called whenever we receive notifications about pending data.
+void AudioOutputDevice::AudioThreadCallback::Process(int pending_data) {
+ if (pending_data == kPauseMark) {
+ memset(shared_memory_.memory(), 0, memory_length_);
+ SetActualDataSizeInBytes(&shared_memory_, memory_length_, 0);
+ return;
+ }
+
+ // Convert the number of pending bytes in the render buffer
+ // into milliseconds.
+ int audio_delay_milliseconds = pending_data / bytes_per_ms_;
+
+ TRACE_EVENT0("audio", "AudioOutputDevice::FireRenderCallback");
+
+ // Update the audio-delay measurement then ask client to render audio. Since
+ // |output_bus_| is wrapping the shared memory the Render() call is writing
+ // directly into the shared memory.
+ int input_channels = audio_parameters_.input_channels();
+ size_t num_frames = audio_parameters_.frames_per_buffer();
+
+ if (input_bus_.get() && input_channels > 0) {
+ render_callback_->RenderIO(input_bus_.get(),
+ output_bus_.get(),
+ audio_delay_milliseconds);
+ } else {
+ num_frames = render_callback_->Render(output_bus_.get(),
+ audio_delay_milliseconds);
+ }
+
+ // Let the host know we are done.
+ // TODO(dalecurtis): Technically this is not always correct. Due to channel
+ // padding for alignment, there may be more data available than this. We're
+ // relying on AudioSyncReader::Read() to parse this with that in mind. Rename
+ // these methods to Set/GetActualFrameCount().
+ SetActualDataSizeInBytes(
+ &shared_memory_, memory_length_,
+ num_frames * sizeof(*output_bus_->channel(0)) * output_bus_->channels());
+}
+
+}  // namespace media
diff --git a/chromium/media/audio/audio_output_device.h b/chromium/media/audio/audio_output_device.h
new file mode 100644
index 00000000000..66f78972f46
--- /dev/null
+++ b/chromium/media/audio/audio_output_device.h
@@ -0,0 +1,185 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Audio rendering unit utilizing audio output stream provided by browser
+// process through IPC.
+//
+// Relationship of classes.
+//
+// AudioOutputController AudioOutputDevice
+// ^ ^
+// | |
+// v IPC v
+// AudioRendererHost <---------> AudioOutputIPC (AudioMessageFilter)
+//
+// Transportation of audio samples from the render to the browser process
+// is done by using shared memory in combination with a sync socket pair
+// to generate a low latency transport. The AudioOutputDevice user registers an
+// AudioOutputDevice::RenderCallback at construction and will be polled by the
+// AudioOutputDevice for audio to be played out by the underlying audio layers.
+//
+// State sequences.
+//
+// Task [IO thread] IPC [IO thread]
+//
+// Start -> CreateStreamOnIOThread -----> CreateStream ------>
+// <- OnStreamCreated <- AudioMsg_NotifyStreamCreated <-
+// ---> PlayOnIOThread -----------> PlayStream -------->
+//
+// Optionally Play() / Pause() sequences may occur:
+// Play -> PlayOnIOThread --------------> PlayStream --------->
+// Pause -> PauseOnIOThread ------------> PauseStream -------->
+// (note that Play() / Pause() sequences before OnStreamCreated are
+// deferred until OnStreamCreated, with the last valid state being used)
+//
+// AudioOutputDevice::Render => audio transport on audio thread =>
+// |
+// Stop --> ShutDownOnIOThread --------> CloseStream -> Close
+//
+// This class utilizes several threads during its lifetime, namely:
+// 1. Creating thread.
+// Must be the main render thread.
+// 2. Control thread (may be the main render thread or another thread).
+// The methods: Start(), Stop(), Play(), Pause(), SetVolume()
+// must be called on the same thread.
+// 3. IO thread (internal implementation detail - not exposed to public API)
+// The thread within which this class receives all the IPC messages and
+// IPC communications can only happen in this thread.
+// 4. Audio transport thread (See AudioDeviceThread).
+// Responsible for calling the AudioThreadCallback implementation that in
+// turn calls AudioRendererSink::RenderCallback which feeds audio samples to
+// the audio layer in the browser process using sync sockets and shared
+// memory.
+//
+// Implementation notes:
+// - The user must call Stop() before deleting the class instance.
+
+#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_DEVICE_H_
+#define MEDIA_AUDIO_AUDIO_OUTPUT_DEVICE_H_
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "media/audio/audio_device_thread.h"
+#include "media/audio/audio_output_ipc.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/scoped_loop_observer.h"
+#include "media/base/audio_renderer_sink.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT AudioOutputDevice
+ : NON_EXPORTED_BASE(public AudioRendererSink),
+ NON_EXPORTED_BASE(public AudioOutputIPCDelegate),
+ NON_EXPORTED_BASE(public ScopedLoopObserver) {
+ public:
+ // NOTE: Clients must call Initialize() before using.
+ AudioOutputDevice(scoped_ptr<AudioOutputIPC> ipc,
+ const scoped_refptr<base::MessageLoopProxy>& io_loop);
+
+ // Initialize function for clients wishing to have unified input and
+ // output, |params| may specify |input_channels| > 0, representing a
+ // number of input channels which will be at the same sample-rate
+ // and buffer-size as the output as specified in |params|. |session_id| is
+ // used for the browser to select the correct input device.
+ // In this case, the callback's RenderIO() method will be called instead
+ // of Render(), providing the synchronized input data at the same time as
+ // when new output data is to be rendered.
+ void InitializeUnifiedStream(const AudioParameters& params,
+ RenderCallback* callback,
+ int session_id);
+
+ // AudioRendererSink implementation.
+ virtual void Initialize(const AudioParameters& params,
+ RenderCallback* callback) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Play() OVERRIDE;
+ virtual void Pause() OVERRIDE;
+ virtual bool SetVolume(double volume) OVERRIDE;
+
+ // Methods called on IO thread ----------------------------------------------
+ // AudioOutputIPCDelegate methods.
+ virtual void OnStateChanged(AudioOutputIPCDelegate::State state) OVERRIDE;
+ virtual void OnStreamCreated(base::SharedMemoryHandle handle,
+ base::SyncSocket::Handle socket_handle,
+ int length) OVERRIDE;
+ virtual void OnIPCClosed() OVERRIDE;
+
+ protected:
+ // Magic required by ref_counted.h to avoid any code deleting the object
+ // accidentally while there are references to it.
+ friend class base::RefCountedThreadSafe<AudioOutputDevice>;
+ virtual ~AudioOutputDevice();
+
+ private:
+ // Note: The ordering of members in this enum is critical to correct behavior!
+ enum State {
+ IPC_CLOSED, // No more IPCs can take place.
+ IDLE, // Not started.
+ CREATING_STREAM, // Waiting for OnStreamCreated() to be called back.
+ PAUSED, // Paused. OnStreamCreated() has been called. Can Play()/Stop().
+ PLAYING, // Playing back. Can Pause()/Stop().
+ };
+
+ // Methods called on IO thread ----------------------------------------------
+ // The following methods are tasks posted on the IO thread that need to
+ // be executed on that thread. They use AudioOutputIPC to send IPC messages
+ // upon state changes.
+ void CreateStreamOnIOThread(const AudioParameters& params);
+ void PlayOnIOThread();
+ void PauseOnIOThread();
+ void ShutDownOnIOThread();
+ void SetVolumeOnIOThread(double volume);
+
+ // base::MessageLoop::DestructionObserver implementation for the IO loop.
+ // If the IO loop dies before we do, we shut down the audio thread from here.
+ virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
+
+ AudioParameters audio_parameters_;
+
+ RenderCallback* callback_;
+
+ // A pointer to the IPC layer that takes care of sending requests over to
+ // the AudioRendererHost. Only valid when state_ != IPC_CLOSED and must only
+ // be accessed on the IO thread.
+ scoped_ptr<AudioOutputIPC> ipc_;
+
+ // Current state (must only be accessed from the IO thread). See comments for
+ // State enum above.
+ State state_;
+
+ // State of Play() / Pause() calls before OnStreamCreated() is called.
+ bool play_on_start_;
+
+ // The media session ID used to identify which input device to be started.
+ // Only used by Unified IO.
+ int session_id_;
+
+ // Our audio thread callback class. See source file for details.
+ class AudioThreadCallback;
+
+ // In order to avoid a race between OnStreamCreated and Stop(), we use this
+ // guard to control stopping and starting the audio thread.
+ base::Lock audio_thread_lock_;
+ AudioDeviceThread audio_thread_;
+ scoped_ptr<AudioOutputDevice::AudioThreadCallback> audio_callback_;
+
+ // Temporary hack to ignore OnStreamCreated() due to the user calling Stop()
+ // so we don't start the audio thread pointing to a potentially freed
+ // |callback_|.
+ //
+  // TODO(scherkus): Replace this by changing AudioRendererSink to accept
+ // the callback via Start(). See http://crbug.com/151051 for details.
+ bool stopping_hack_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioOutputDevice);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_OUTPUT_DEVICE_H_
diff --git a/chromium/media/audio/audio_output_device_unittest.cc b/chromium/media/audio/audio_output_device_unittest.cc
new file mode 100644
index 00000000000..96da77d7404
--- /dev/null
+++ b/chromium/media/audio/audio_output_device_unittest.cc
@@ -0,0 +1,294 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/at_exit.h"
+#include "base/memory/shared_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "base/process/process_handle.h"
+#include "base/sync_socket.h"
+#include "base/test/test_timeouts.h"
+#include "media/audio/audio_output_device.h"
+#include "media/audio/sample_rates.h"
+#include "media/audio/shared_memory_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gmock_mutant.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::CancelableSyncSocket;
+using base::SharedMemory;
+using base::SyncSocket;
+using testing::_;
+using testing::DoAll;
+using testing::Invoke;
+using testing::Return;
+using testing::WithArgs;
+using testing::StrictMock;
+using testing::Values;
+
+namespace media {
+
+namespace {
+
+class MockRenderCallback : public AudioRendererSink::RenderCallback {
+ public:
+ MockRenderCallback() {}
+ virtual ~MockRenderCallback() {}
+
+ MOCK_METHOD2(Render, int(AudioBus* dest, int audio_delay_milliseconds));
+ MOCK_METHOD3(RenderIO, void(AudioBus* source,
+ AudioBus* dest,
+ int audio_delay_milliseconds));
+ MOCK_METHOD0(OnRenderError, void());
+};
+
+class MockAudioOutputIPC : public AudioOutputIPC {
+ public:
+ MockAudioOutputIPC() {}
+ virtual ~MockAudioOutputIPC() {}
+
+ MOCK_METHOD3(CreateStream, void(AudioOutputIPCDelegate* delegate,
+ const AudioParameters& params,
+ int session_id));
+ MOCK_METHOD0(PlayStream, void());
+ MOCK_METHOD0(PauseStream, void());
+ MOCK_METHOD0(CloseStream, void());
+ MOCK_METHOD1(SetVolume, void(double volume));
+};
+
+// Creates a copy of a SyncSocket handle that we can give to AudioOutputDevice.
+// On Windows this means duplicating the pipe handle so that AudioOutputDevice
+// can call CloseHandle() (since ownership has been transferred), but on other
+// platforms, we just copy the same socket handle since AudioOutputDevice on
+// those platforms won't actually own the socket (FileDescriptor.auto_close is
+// false).
+bool DuplicateSocketHandle(SyncSocket::Handle socket_handle,
+ SyncSocket::Handle* copy) {
+#if defined(OS_WIN)
+ HANDLE process = GetCurrentProcess();
+ ::DuplicateHandle(process, socket_handle, process, copy,
+ 0, FALSE, DUPLICATE_SAME_ACCESS);
+ return *copy != NULL;
+#else
+ *copy = socket_handle;
+ return *copy != -1;
+#endif
+}
+
+ACTION_P2(SendPendingBytes, socket, pending_bytes) {
+ socket->Send(&pending_bytes, sizeof(pending_bytes));
+}
+
+// Used to terminate a loop from a different thread than the loop belongs to.
+// |loop| should be a MessageLoopProxy.
+ACTION_P(QuitLoop, loop) {
+ loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+}
+
+} // namespace.
+
+class AudioOutputDeviceTest
+ : public testing::Test,
+ public testing::WithParamInterface<bool> {
+ public:
+ AudioOutputDeviceTest();
+ ~AudioOutputDeviceTest();
+
+ void StartAudioDevice();
+ void CreateStream();
+ void ExpectRenderCallback();
+ void WaitUntilRenderCallback();
+ void StopAudioDevice();
+
+ protected:
+ // Used to clean up TLS pointers that the test(s) will initialize.
+ // Must remain the first member of this class.
+ base::ShadowingAtExitManager at_exit_manager_;
+ base::MessageLoopForIO io_loop_;
+ AudioParameters default_audio_parameters_;
+ StrictMock<MockRenderCallback> callback_;
+ MockAudioOutputIPC* audio_output_ipc_; // owned by audio_device_
+ scoped_refptr<AudioOutputDevice> audio_device_;
+
+ private:
+ int CalculateMemorySize();
+
+ const bool synchronized_io_;
+ const int input_channels_;
+ SharedMemory shared_memory_;
+ CancelableSyncSocket browser_socket_;
+ CancelableSyncSocket renderer_socket_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioOutputDeviceTest);
+};
+
+static const int kStreamId = 123;
+
+int AudioOutputDeviceTest::CalculateMemorySize() {
+ // Calculate output and input memory size.
+ int output_memory_size =
+ AudioBus::CalculateMemorySize(default_audio_parameters_);
+
+ int frames = default_audio_parameters_.frames_per_buffer();
+ int input_memory_size =
+ AudioBus::CalculateMemorySize(input_channels_, frames);
+
+ int io_buffer_size = output_memory_size + input_memory_size;
+
+ // This is where it gets a bit hacky. The shared memory contract between
+ // AudioOutputDevice and its browser side counter part includes a bit more
+ // than just the audio data, so we must call TotalSharedMemorySizeInBytes()
+ // to get the actual size needed to fit the audio data plus the extra data.
+ return TotalSharedMemorySizeInBytes(io_buffer_size);
+}
+
+AudioOutputDeviceTest::AudioOutputDeviceTest()
+ : synchronized_io_(GetParam()),
+ input_channels_(synchronized_io_ ? 2 : 0) {
+ default_audio_parameters_.Reset(
+ AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO, 2, input_channels_,
+ 48000, 16, 1024);
+
+ audio_output_ipc_ = new MockAudioOutputIPC();
+ audio_device_ = new AudioOutputDevice(
+ scoped_ptr<AudioOutputIPC>(audio_output_ipc_),
+ io_loop_.message_loop_proxy());
+
+ audio_device_->Initialize(default_audio_parameters_,
+ &callback_);
+
+ io_loop_.RunUntilIdle();
+}
+
+AudioOutputDeviceTest::~AudioOutputDeviceTest() {
+ audio_device_ = NULL;
+}
+
+void AudioOutputDeviceTest::StartAudioDevice() {
+ audio_device_->Start();
+
+ EXPECT_CALL(*audio_output_ipc_, CreateStream(audio_device_.get(), _, 0));
+
+ io_loop_.RunUntilIdle();
+}
+
+void AudioOutputDeviceTest::CreateStream() {
+ const int kMemorySize = CalculateMemorySize();
+
+ ASSERT_TRUE(shared_memory_.CreateAndMapAnonymous(kMemorySize));
+ memset(shared_memory_.memory(), 0xff, kMemorySize);
+
+ ASSERT_TRUE(CancelableSyncSocket::CreatePair(&browser_socket_,
+ &renderer_socket_));
+
+ // Create duplicates of the handles we pass to AudioOutputDevice since
+ // ownership will be transferred and AudioOutputDevice is responsible for
+ // freeing.
+ SyncSocket::Handle audio_device_socket = SyncSocket::kInvalidHandle;
+ ASSERT_TRUE(DuplicateSocketHandle(renderer_socket_.handle(),
+ &audio_device_socket));
+ base::SharedMemoryHandle duplicated_memory_handle;
+ ASSERT_TRUE(shared_memory_.ShareToProcess(base::GetCurrentProcessHandle(),
+ &duplicated_memory_handle));
+
+ audio_device_->OnStreamCreated(duplicated_memory_handle, audio_device_socket,
+ PacketSizeInBytes(kMemorySize));
+ io_loop_.RunUntilIdle();
+}
+
+void AudioOutputDeviceTest::ExpectRenderCallback() {
+ // We should get a 'play' notification when we call OnStreamCreated().
+ // Respond by asking for some audio data. This should ask our callback
+ // to provide some audio data that AudioOutputDevice then writes into the
+ // shared memory section.
+ const int kMemorySize = CalculateMemorySize();
+
+ EXPECT_CALL(*audio_output_ipc_, PlayStream())
+ .WillOnce(SendPendingBytes(&browser_socket_, kMemorySize));
+
+ // We expect calls to our audio renderer callback, which returns the number
+ // of frames written to the memory section.
+ // Here's the second place where it gets hacky: There's no way for us to
+ // know (without using a sleep loop!) when the AudioOutputDevice has finished
+ // writing the interleaved audio data into the shared memory section.
+ // So, for the sake of this test, we consider the call to Render a sign
+ // of success and quit the loop.
+ if (synchronized_io_) {
+ // For synchronized I/O, we expect RenderIO().
+ EXPECT_CALL(callback_, RenderIO(_, _, _))
+ .WillOnce(QuitLoop(io_loop_.message_loop_proxy()));
+ } else {
+ // For output only we expect Render().
+ const int kNumberOfFramesToProcess = 0;
+ EXPECT_CALL(callback_, Render(_, _))
+ .WillOnce(DoAll(
+ QuitLoop(io_loop_.message_loop_proxy()),
+ Return(kNumberOfFramesToProcess)));
+ }
+}
+
+void AudioOutputDeviceTest::WaitUntilRenderCallback() {
+ // Don't hang the test if we never get the Render() callback.
+ io_loop_.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
+ TestTimeouts::action_timeout());
+ io_loop_.Run();
+}
+
+void AudioOutputDeviceTest::StopAudioDevice() {
+ audio_device_->Stop();
+
+ EXPECT_CALL(*audio_output_ipc_, CloseStream());
+
+ io_loop_.RunUntilIdle();
+}
+
+TEST_P(AudioOutputDeviceTest, Initialize) {
+ // Tests that the object can be constructed, initialized and destructed
+ // without having ever been started/stopped.
+}
+
+// Calls Start() followed by an immediate Stop() and check for the basic message
+// filter messages being sent in that case.
+TEST_P(AudioOutputDeviceTest, StartStop) {
+ StartAudioDevice();
+ StopAudioDevice();
+}
+
+// AudioOutputDevice supports multiple start/stop sequences.
+TEST_P(AudioOutputDeviceTest, StartStopStartStop) {
+ StartAudioDevice();
+ StopAudioDevice();
+ StartAudioDevice();
+ StopAudioDevice();
+}
+
+// Simulate receiving OnStreamCreated() prior to processing ShutDownOnIOThread()
+// on the IO loop.
+TEST_P(AudioOutputDeviceTest, StopBeforeRender) {
+ StartAudioDevice();
+
+ // Call Stop() but don't run the IO loop yet.
+ audio_device_->Stop();
+
+ // Expect us to shutdown IPC but not to render anything despite the stream
+ // getting created.
+ EXPECT_CALL(*audio_output_ipc_, CloseStream());
+ CreateStream();
+}
+
+// Full test with output only.
+TEST_P(AudioOutputDeviceTest, CreateStream) {
+ StartAudioDevice();
+ ExpectRenderCallback();
+ CreateStream();
+ WaitUntilRenderCallback();
+ StopAudioDevice();
+}
+
+INSTANTIATE_TEST_CASE_P(Render, AudioOutputDeviceTest, Values(false));
+INSTANTIATE_TEST_CASE_P(RenderIO, AudioOutputDeviceTest, Values(true));
+
+} // namespace media.
diff --git a/chromium/media/audio/audio_output_dispatcher.cc b/chromium/media/audio/audio_output_dispatcher.cc
new file mode 100644
index 00000000000..06206d7be7f
--- /dev/null
+++ b/chromium/media/audio/audio_output_dispatcher.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_output_dispatcher.h"
+
+#include "base/message_loop/message_loop.h"
+
+namespace media {
+
+AudioOutputDispatcher::AudioOutputDispatcher(
+ AudioManager* audio_manager,
+ const AudioParameters& params,
+ const std::string& input_device_id)
+ : audio_manager_(audio_manager),
+ message_loop_(base::MessageLoop::current()),
+ params_(params),
+ input_device_id_(input_device_id) {
+ // We expect to be instantiated on the audio thread. Otherwise the
+ // message_loop_ member will point to the wrong message loop!
+ DCHECK(audio_manager->GetMessageLoop()->BelongsToCurrentThread());
+}
+
+AudioOutputDispatcher::~AudioOutputDispatcher() {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_output_dispatcher.h b/chromium/media/audio/audio_output_dispatcher.h
new file mode 100644
index 00000000000..a79fd94477f
--- /dev/null
+++ b/chromium/media/audio/audio_output_dispatcher.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AudioOutputDispatcher is a single-threaded base class that dispatches
+// creation and deletion of audio output streams. AudioOutputProxy objects use
+// this class to allocate and recycle actual audio output streams. When playback
+// is started, the proxy calls StartStream() to get an output stream that it
+// uses to play audio. When playback is stopped, the proxy returns the stream
+// back to the dispatcher by calling StopStream().
+//
+// AudioManagerBase creates one specialization of AudioOutputDispatcher on the
+// audio thread for each possible set of audio parameters. I.e. streams with
+// different parameters are managed independently. The AudioOutputDispatcher
+// instance is then deleted on the audio thread when the AudioManager shuts
+// down.
+
+#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_H_
+#define MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/timer/timer.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_parameters.h"
+
+namespace base {
+class MessageLoop;
+}
+
+namespace media {
+
+class AudioOutputProxy;
+
+class MEDIA_EXPORT AudioOutputDispatcher
+ : public base::RefCountedThreadSafe<AudioOutputDispatcher> {
+ public:
+ AudioOutputDispatcher(AudioManager* audio_manager,
+ const AudioParameters& params,
+ const std::string& input_device_id);
+
+ // Called by AudioOutputProxy to open the stream.
+  // Returns false if it fails to open it.
+ virtual bool OpenStream() = 0;
+
+ // Called by AudioOutputProxy when the stream is started.
+ // Uses |callback| to get source data and report errors, if any.
+ // Does *not* take ownership of this callback.
+ // Returns true if started successfully, false otherwise.
+ virtual bool StartStream(AudioOutputStream::AudioSourceCallback* callback,
+ AudioOutputProxy* stream_proxy) = 0;
+
+ // Called by AudioOutputProxy when the stream is stopped.
+ // Ownership of the |stream_proxy| is passed to the dispatcher.
+ virtual void StopStream(AudioOutputProxy* stream_proxy) = 0;
+
+ // Called by AudioOutputProxy when the volume is set.
+ virtual void StreamVolumeSet(AudioOutputProxy* stream_proxy,
+ double volume) = 0;
+
+ // Called by AudioOutputProxy when the stream is closed.
+ virtual void CloseStream(AudioOutputProxy* stream_proxy) = 0;
+
+ // Called on the audio thread when the AudioManager is shutting down.
+ virtual void Shutdown() = 0;
+
+ // Accessor to the input device id used by unified IO.
+ const std::string& input_device_id() const { return input_device_id_; }
+
+ protected:
+ friend class base::RefCountedThreadSafe<AudioOutputDispatcher>;
+ friend class AudioOutputProxyTest;
+
+ virtual ~AudioOutputDispatcher();
+
+ // A no-reference-held pointer (we don't want circular references) back to the
+ // AudioManager that owns this object.
+ AudioManager* audio_manager_;
+ base::MessageLoop* message_loop_;
+ AudioParameters params_;
+ const std::string input_device_id_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioOutputDispatcher);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_H_
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.cc b/chromium/media/audio/audio_output_dispatcher_impl.cc
new file mode 100644
index 00000000000..1df8e7ddd5b
--- /dev/null
+++ b/chromium/media/audio/audio_output_dispatcher_impl.cc
@@ -0,0 +1,204 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_output_dispatcher_impl.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop/message_loop.h"
+#include "base/time/time.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_output_proxy.h"
+#include "media/audio/audio_util.h"
+
+namespace media {
+
+AudioOutputDispatcherImpl::AudioOutputDispatcherImpl(
+ AudioManager* audio_manager,
+ const AudioParameters& params,
+ const std::string& input_device_id,
+ const base::TimeDelta& close_delay)
+ : AudioOutputDispatcher(audio_manager, params, input_device_id),
+ pause_delay_(base::TimeDelta::FromMicroseconds(
+ 2 * params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
+ static_cast<float>(params.sample_rate()))),
+ paused_proxies_(0),
+ weak_this_(this),
+ close_timer_(FROM_HERE,
+ close_delay,
+ this,
+ &AudioOutputDispatcherImpl::ClosePendingStreams) {
+}
+
+AudioOutputDispatcherImpl::~AudioOutputDispatcherImpl() {
+ DCHECK(proxy_to_physical_map_.empty());
+ DCHECK(idle_streams_.empty());
+ DCHECK(pausing_streams_.empty());
+}
+
+bool AudioOutputDispatcherImpl::OpenStream() {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+
+ paused_proxies_++;
+
+ // Ensure that there is at least one open stream.
+ if (idle_streams_.empty() && !CreateAndOpenStream()) {
+ paused_proxies_--;
+ return false;
+ }
+
+ close_timer_.Reset();
+ return true;
+}
+
+bool AudioOutputDispatcherImpl::StartStream(
+ AudioOutputStream::AudioSourceCallback* callback,
+ AudioOutputProxy* stream_proxy) {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+
+ if (idle_streams_.empty() && !CreateAndOpenStream())
+ return false;
+
+ AudioOutputStream* physical_stream = idle_streams_.back();
+ DCHECK(physical_stream);
+ idle_streams_.pop_back();
+
+ DCHECK_GT(paused_proxies_, 0u);
+ --paused_proxies_;
+
+ close_timer_.Reset();
+
+ // Schedule task to allocate streams for other proxies if we need to.
+ message_loop_->PostTask(FROM_HERE, base::Bind(
+ &AudioOutputDispatcherImpl::OpenTask, weak_this_.GetWeakPtr()));
+
+ double volume = 0;
+ stream_proxy->GetVolume(&volume);
+ physical_stream->SetVolume(volume);
+ physical_stream->Start(callback);
+ proxy_to_physical_map_[stream_proxy] = physical_stream;
+ return true;
+}
+
+void AudioOutputDispatcherImpl::StopStream(AudioOutputProxy* stream_proxy) {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+
+ AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
+ DCHECK(it != proxy_to_physical_map_.end());
+ AudioOutputStream* physical_stream = it->second;
+ proxy_to_physical_map_.erase(it);
+
+ physical_stream->Stop();
+
+ ++paused_proxies_;
+
+ pausing_streams_.push_front(physical_stream);
+
+ // Don't recycle stream until two buffers worth of time has elapsed.
+ message_loop_->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&AudioOutputDispatcherImpl::StopStreamTask,
+ weak_this_.GetWeakPtr()),
+ pause_delay_);
+}
+
+void AudioOutputDispatcherImpl::StreamVolumeSet(AudioOutputProxy* stream_proxy,
+ double volume) {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
+ if (it != proxy_to_physical_map_.end()) {
+ AudioOutputStream* physical_stream = it->second;
+ physical_stream->SetVolume(volume);
+ }
+}
+
+void AudioOutputDispatcherImpl::StopStreamTask() {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+
+ if (pausing_streams_.empty())
+ return;
+
+ AudioOutputStream* stream = pausing_streams_.back();
+ pausing_streams_.pop_back();
+ idle_streams_.push_back(stream);
+ close_timer_.Reset();
+}
+
+void AudioOutputDispatcherImpl::CloseStream(AudioOutputProxy* stream_proxy) {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+
+ while (!pausing_streams_.empty()) {
+ idle_streams_.push_back(pausing_streams_.back());
+ pausing_streams_.pop_back();
+ }
+
+ DCHECK_GT(paused_proxies_, 0u);
+ paused_proxies_--;
+
+ while (idle_streams_.size() > paused_proxies_) {
+ idle_streams_.back()->Close();
+ idle_streams_.pop_back();
+ }
+}
+
+void AudioOutputDispatcherImpl::Shutdown() {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+
+ // Cancel any pending tasks to close paused streams or create new ones.
+ weak_this_.InvalidateWeakPtrs();
+
+ // No AudioOutputProxy objects should hold a reference to us when we get
+ // to this stage.
+ DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference";
+
+ AudioOutputStreamList::iterator it = idle_streams_.begin();
+ for (; it != idle_streams_.end(); ++it)
+ (*it)->Close();
+ idle_streams_.clear();
+
+ it = pausing_streams_.begin();
+ for (; it != pausing_streams_.end(); ++it)
+ (*it)->Close();
+ pausing_streams_.clear();
+}
+
+bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(
+ params_, input_device_id_);
+ if (!stream)
+ return false;
+
+ if (!stream->Open()) {
+ stream->Close();
+ return false;
+ }
+ idle_streams_.push_back(stream);
+ return true;
+}
+
+void AudioOutputDispatcherImpl::OpenTask() {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ // Make sure that we have at least one stream allocated if there
+ // are paused streams.
+ if (paused_proxies_ > 0 && idle_streams_.empty() &&
+ pausing_streams_.empty()) {
+ CreateAndOpenStream();
+ }
+
+ close_timer_.Reset();
+}
+
+// This method is called by |close_timer_|.
+void AudioOutputDispatcherImpl::ClosePendingStreams() {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ while (!idle_streams_.empty()) {
+ idle_streams_.back()->Close();
+ idle_streams_.pop_back();
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.h b/chromium/media/audio/audio_output_dispatcher_impl.h
new file mode 100644
index 00000000000..06fe3ebeaf1
--- /dev/null
+++ b/chromium/media/audio/audio_output_dispatcher_impl.h
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AudioOutputDispatcherImpl is an implementation of AudioOutputDispatcher.
+//
+// To avoid opening and closing audio devices more frequently than necessary,
+// each dispatcher has a pool of inactive physical streams. A stream is closed
+// only if it hasn't been used for a certain period of time (specified via the
+// constructor).
+//
+
+#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_IMPL_H_
+#define MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_IMPL_H_
+
+#include <list>
+#include <map>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/timer/timer.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_output_dispatcher.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioOutputProxy;
+
+class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
+ public:
+  // |close_delay| specifies the delay after the stream is paused until
+ // the audio device is closed.
+ AudioOutputDispatcherImpl(AudioManager* audio_manager,
+ const AudioParameters& params,
+ const std::string& input_device_id,
+ const base::TimeDelta& close_delay);
+
+ // Opens a new physical stream if there are no pending streams in
+ // |idle_streams_|. Do not call Close() or Stop() if this method fails.
+ virtual bool OpenStream() OVERRIDE;
+
+ // If there are pending streams in |idle_streams_| then it reuses one of
+ // them, otherwise creates a new one.
+ virtual bool StartStream(AudioOutputStream::AudioSourceCallback* callback,
+ AudioOutputProxy* stream_proxy) OVERRIDE;
+
+ // Holds the physical stream temporarily in |pausing_streams_| and then
+ // |stream| is added to the pool of pending streams (i.e. |idle_streams_|).
+ virtual void StopStream(AudioOutputProxy* stream_proxy) OVERRIDE;
+
+ virtual void StreamVolumeSet(AudioOutputProxy* stream_proxy,
+ double volume) OVERRIDE;
+
+ virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
+
+ virtual void Shutdown() OVERRIDE;
+
+ private:
+ typedef std::map<AudioOutputProxy*, AudioOutputStream*> AudioStreamMap;
+ friend class base::RefCountedThreadSafe<AudioOutputDispatcherImpl>;
+ virtual ~AudioOutputDispatcherImpl();
+
+ friend class AudioOutputProxyTest;
+
+ // Creates a new physical output stream, opens it and pushes to
+ // |idle_streams_|. Returns false if the stream couldn't be created or
+ // opened.
+ bool CreateAndOpenStream();
+
+ // A task scheduled by StartStream(). Opens a new stream and puts
+ // it in |idle_streams_|.
+ void OpenTask();
+
+ // Before a stream is reused, it should sit idle for a bit. This task is
+ // called once that time has elapsed.
+ void StopStreamTask();
+
+ // Called by |close_timer_|. Closes all pending streams.
+ void ClosePendingStreams();
+
+ base::TimeDelta pause_delay_;
+ size_t paused_proxies_;
+ typedef std::list<AudioOutputStream*> AudioOutputStreamList;
+ AudioOutputStreamList idle_streams_;
+ AudioOutputStreamList pausing_streams_;
+
+ // Used to post delayed tasks to ourselves that we cancel inside Shutdown().
+ base::WeakPtrFactory<AudioOutputDispatcherImpl> weak_this_;
+ base::DelayTimer<AudioOutputDispatcherImpl> close_timer_;
+
+ AudioStreamMap proxy_to_physical_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioOutputDispatcherImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_IMPL_H_
diff --git a/chromium/media/audio/audio_output_ipc.cc b/chromium/media/audio/audio_output_ipc.cc
new file mode 100644
index 00000000000..233a3b8ec7f
--- /dev/null
+++ b/chromium/media/audio/audio_output_ipc.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_output_ipc.h"
+
+namespace media {
+
+AudioOutputIPCDelegate::~AudioOutputIPCDelegate() {}
+
+AudioOutputIPC::~AudioOutputIPC() {}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_output_ipc.h b/chromium/media/audio/audio_output_ipc.h
new file mode 100644
index 00000000000..3353735b085
--- /dev/null
+++ b/chromium/media/audio/audio_output_ipc.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_IPC_H_
+#define MEDIA_AUDIO_AUDIO_OUTPUT_IPC_H_
+
+#include "base/memory/shared_memory.h"
+#include "base/sync_socket.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Contains IPC notifications for the state of the server side
+// (AudioOutputController) audio state changes and when an AudioOutputController
+// has been created. Implemented by AudioOutputDevice.
+class MEDIA_EXPORT AudioOutputIPCDelegate {
+ public:
+ // Current status of the audio output stream in the browser process. Browser
+ // sends information about the current playback state and error to the
+ // renderer process using this type.
+ enum State {
+ kPlaying,
+ kPaused,
+ kError
+ };
+
+ // Called when state of an audio stream has changed.
+ virtual void OnStateChanged(State state) = 0;
+
+ // Called when an audio stream has been created.
+ // The shared memory |handle| points to a memory section that's used to
+ // transfer audio buffers from the AudioOutputIPCDelegate back to the
+ // AudioRendererHost. The implementation of OnStreamCreated takes ownership.
+ // The |socket_handle| is used by AudioRendererHost to signal requests for
+ // audio data to be written into the shared memory. The AudioOutputIPCDelegate
+ // must read from this socket and provide audio whenever data (search for
+ // "pending_bytes") is received.
+ virtual void OnStreamCreated(base::SharedMemoryHandle handle,
+ base::SyncSocket::Handle socket_handle,
+ int length) = 0;
+
+ // Called when the AudioOutputIPC object is going away and/or when the IPC
+ // channel has been closed and no more ipc requests can be made.
+ // Implementations should delete their owned AudioOutputIPC instance
+ // immediately.
+ virtual void OnIPCClosed() = 0;
+
+ protected:
+ virtual ~AudioOutputIPCDelegate();
+};
+
+// Provides the IPC functionality for an AudioOutputIPCDelegate (e.g., an
+// AudioOutputDevice). The implementation should asynchronously deliver the
+// messages to an AudioOutputController object (or create one in the case of
+// CreateStream()), that may live in a separate process.
+class MEDIA_EXPORT AudioOutputIPC {
+ public:
+ virtual ~AudioOutputIPC();
+
+ // Sends a request to create an AudioOutputController object in the peer
+ // process and configures it to use the specified audio |params| including
+  // number of synchronized input channels. |session_id| is used by the browser
+ // to select the correct input device if the input channel in |params| is
+ // valid, otherwise it will be ignored. Once the stream has been created,
+ // the implementation will notify |delegate| by calling OnStreamCreated().
+ virtual void CreateStream(AudioOutputIPCDelegate* delegate,
+ const AudioParameters& params,
+ int session_id) = 0;
+
+ // Starts playing the stream. This should generate a call to
+ // AudioOutputController::Play().
+ virtual void PlayStream() = 0;
+
+ // Pauses an audio stream. This should generate a call to
+ // AudioOutputController::Pause().
+ virtual void PauseStream() = 0;
+
+ // Closes the audio stream which should shut down the corresponding
+ // AudioOutputController in the peer process.
+ virtual void CloseStream() = 0;
+
+ // Sets the volume of the audio stream.
+ virtual void SetVolume(double volume) = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_OUTPUT_IPC_H_
diff --git a/chromium/media/audio/audio_output_proxy.cc b/chromium/media/audio/audio_output_proxy.cc
new file mode 100644
index 00000000000..a69cbc9522e
--- /dev/null
+++ b/chromium/media/audio/audio_output_proxy.cc
@@ -0,0 +1,93 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_output_proxy.h"
+
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_output_dispatcher.h"
+
+namespace media {
+
+AudioOutputProxy::AudioOutputProxy(AudioOutputDispatcher* dispatcher)
+ : dispatcher_(dispatcher),
+ state_(kCreated),
+ volume_(1.0) {
+}
+
+AudioOutputProxy::~AudioOutputProxy() {
+ DCHECK(CalledOnValidThread());
+ DCHECK(state_ == kCreated || state_ == kClosed) << "State is: " << state_;
+}
+
+bool AudioOutputProxy::Open() {
+ DCHECK(CalledOnValidThread());
+ DCHECK_EQ(state_, kCreated);
+
+ if (!dispatcher_->OpenStream()) {
+ state_ = kOpenError;
+ return false;
+ }
+
+ state_ = kOpened;
+ return true;
+}
+
+void AudioOutputProxy::Start(AudioSourceCallback* callback) {
+ DCHECK(CalledOnValidThread());
+
+ // We need to support both states since the callback may not handle OnError()
+ // immediately (or at all). It's also possible for subsequent StartStream()
+ // calls to succeed after failing, so we allow it to be called again.
+ DCHECK(state_ == kOpened || state_ == kStartError);
+
+ if (!dispatcher_->StartStream(callback, this)) {
+ state_ = kStartError;
+ callback->OnError(this);
+ return;
+ }
+ state_ = kPlaying;
+}
+
+void AudioOutputProxy::Stop() {
+ DCHECK(CalledOnValidThread());
+ if (state_ != kPlaying)
+ return;
+
+ dispatcher_->StopStream(this);
+ state_ = kOpened;
+}
+
+void AudioOutputProxy::SetVolume(double volume) {
+ DCHECK(CalledOnValidThread());
+ volume_ = volume;
+ dispatcher_->StreamVolumeSet(this, volume);
+}
+
+void AudioOutputProxy::GetVolume(double* volume) {
+ DCHECK(CalledOnValidThread());
+ *volume = volume_;
+}
+
+void AudioOutputProxy::Close() {
+ DCHECK(CalledOnValidThread());
+ DCHECK(state_ == kCreated || state_ == kOpenError || state_ == kOpened ||
+ state_ == kStartError);
+
+ // kStartError means OpenStream() succeeded and the stream must be closed
+ // before destruction.
+ if (state_ != kCreated && state_ != kOpenError)
+ dispatcher_->CloseStream(this);
+
+ state_ = kClosed;
+
+ // Delete the object now like is done in the Close() implementation of
+ // physical stream objects. If we delete the object via DeleteSoon, we
+ // unnecessarily complicate the Shutdown procedure of the
+ // dispatcher+audio manager.
+ delete this;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_output_proxy.h b/chromium/media/audio/audio_output_proxy.h
new file mode 100644
index 00000000000..86dab513aa5
--- /dev/null
+++ b/chromium/media/audio/audio_output_proxy.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_PROXY_H_
+#define MEDIA_AUDIO_AUDIO_OUTPUT_PROXY_H_
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/non_thread_safe.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioOutputDispatcher;
+
+// AudioOutputProxy is an audio output stream that uses resources more
+// efficiently than a regular audio output stream: it opens audio
+// device only when sound is playing, i.e. between Start() and Stop()
+// (there is still one physical stream per each audio output proxy in
+// playing state).
+//
+// AudioOutputProxy uses AudioOutputDispatcher to open and close
+// physical output streams.
+class MEDIA_EXPORT AudioOutputProxy
+ : public AudioOutputStream,
+ public NON_EXPORTED_BASE(base::NonThreadSafe) {
+ public:
+ // Caller keeps ownership of |dispatcher|.
+ explicit AudioOutputProxy(AudioOutputDispatcher* dispatcher);
+
+ // AudioOutputStream interface.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+ virtual void Close() OVERRIDE;
+
+ private:
+ enum State {
+ kCreated,
+ kOpened,
+ kPlaying,
+ kClosed,
+ kOpenError,
+ kStartError,
+ };
+
+ virtual ~AudioOutputProxy();
+
+ scoped_refptr<AudioOutputDispatcher> dispatcher_;
+ State state_;
+
+ // Need to save volume here, so that we can restore it in case the stream
+ // is stopped, and then started again.
+ double volume_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioOutputProxy);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_OUTPUT_PROXY_H_
diff --git a/chromium/media/audio/audio_output_proxy_unittest.cc b/chromium/media/audio/audio_output_proxy_unittest.cc
new file mode 100644
index 00000000000..de95b0661ec
--- /dev/null
+++ b/chromium/media/audio/audio_output_proxy_unittest.cc
@@ -0,0 +1,741 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_output_dispatcher_impl.h"
+#include "media/audio/audio_output_proxy.h"
+#include "media/audio/audio_output_resampler.h"
+#include "media/audio/fake_audio_output_stream.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::DoAll;
+using ::testing::Field;
+using ::testing::Mock;
+using ::testing::NotNull;
+using ::testing::Return;
+using ::testing::SetArrayArgument;
+using media::AudioBus;
+using media::AudioBuffersState;
+using media::AudioInputStream;
+using media::AudioManager;
+using media::AudioManagerBase;
+using media::AudioOutputDispatcher;
+using media::AudioOutputProxy;
+using media::AudioOutputStream;
+using media::AudioParameters;
+using media::FakeAudioOutputStream;
+
+namespace {
+
+static const int kTestCloseDelayMs = 100;
+
+// Used in the test where we don't want a stream to be closed unexpectedly.
+static const int kTestBigCloseDelaySeconds = 1000;
+
+// Delay between callbacks to AudioSourceCallback::OnMoreData.
+static const int kOnMoreDataCallbackDelayMs = 10;
+
+// Let start run long enough for many OnMoreData callbacks to occur.
+static const int kStartRunTimeMs = kOnMoreDataCallbackDelayMs * 10;
+
+class MockAudioOutputStream : public AudioOutputStream {
+ public:
+ MockAudioOutputStream(AudioManagerBase* manager,
+ const AudioParameters& params)
+ : start_called_(false),
+ stop_called_(false),
+ params_(params),
+ fake_output_stream_(
+ FakeAudioOutputStream::MakeFakeStream(manager, params_)) {
+ }
+
+ void Start(AudioSourceCallback* callback) {
+ start_called_ = true;
+ fake_output_stream_->Start(callback);
+ }
+
+ void Stop() {
+ stop_called_ = true;
+ fake_output_stream_->Stop();
+ }
+
+ ~MockAudioOutputStream() {}
+
+ bool start_called() { return start_called_; }
+ bool stop_called() { return stop_called_; }
+
+ MOCK_METHOD0(Open, bool());
+ MOCK_METHOD1(SetVolume, void(double volume));
+ MOCK_METHOD1(GetVolume, void(double* volume));
+ MOCK_METHOD0(Close, void());
+
+ private:
+ bool start_called_;
+ bool stop_called_;
+ AudioParameters params_;
+ scoped_ptr<AudioOutputStream> fake_output_stream_;
+};
+
+class MockAudioManager : public AudioManagerBase {
+ public:
+ MockAudioManager() {}
+ virtual ~MockAudioManager() {
+ Shutdown();
+ }
+
+ MOCK_METHOD0(HasAudioOutputDevices, bool());
+ MOCK_METHOD0(HasAudioInputDevices, bool());
+ MOCK_METHOD0(GetAudioInputDeviceModel, string16());
+ MOCK_METHOD2(MakeAudioOutputStream, AudioOutputStream*(
+ const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD2(MakeAudioOutputStreamProxy, AudioOutputStream*(
+ const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD2(MakeAudioInputStream, AudioInputStream*(
+ const AudioParameters& params, const std::string& device_id));
+ MOCK_METHOD0(ShowAudioInputSettings, void());
+ MOCK_METHOD0(GetMessageLoop, scoped_refptr<base::MessageLoopProxy>());
+ MOCK_METHOD1(GetAudioInputDeviceNames, void(
+ media::AudioDeviceNames* device_name));
+
+ MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
+ const AudioParameters& params));
+ MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
+ const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD2(MakeLinearInputStream, AudioInputStream*(
+ const AudioParameters& params, const std::string& device_id));
+ MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
+ const AudioParameters& params, const std::string& device_id));
+ MOCK_METHOD1(GetPreferredOutputStreamParameters, AudioParameters(
+ const AudioParameters& params));
+};
+
+class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+ int OnMoreData(AudioBus* audio_bus, AudioBuffersState buffers_state) {
+ audio_bus->Zero();
+ return audio_bus->frames();
+ }
+ int OnMoreIOData(AudioBus* source, AudioBus* dest,
+ AudioBuffersState buffers_state) {
+ return OnMoreData(dest, buffers_state);
+ }
+ MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+};
+
+} // namespace
+
+namespace media {
+
+class AudioOutputProxyTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ EXPECT_CALL(manager_, GetMessageLoop())
+ .WillRepeatedly(Return(message_loop_.message_loop_proxy()));
+ InitDispatcher(base::TimeDelta::FromMilliseconds(kTestCloseDelayMs));
+ }
+
+ virtual void TearDown() {
+ // All paused proxies should have been closed at this point.
+ EXPECT_EQ(0u, dispatcher_impl_->paused_proxies_);
+
+ // This is necessary to free all proxy objects that have been
+ // closed by the test.
+ message_loop_.RunUntilIdle();
+ }
+
+ virtual void InitDispatcher(base::TimeDelta close_delay) {
+ // Use a low sample rate and large buffer size when testing otherwise the
+ // FakeAudioOutputStream will keep the message loop busy indefinitely; i.e.,
+ // RunUntilIdle() will never terminate.
+ params_ = AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO, 8000, 16, 2048);
+ dispatcher_impl_ = new AudioOutputDispatcherImpl(&manager(),
+ params_,
+ std::string(),
+ close_delay);
+
+ // Necessary to know how long the dispatcher will wait before posting
+ // StopStreamTask.
+ pause_delay_ = dispatcher_impl_->pause_delay_;
+ }
+
+ virtual void OnStart() {}
+
+ MockAudioManager& manager() {
+ return manager_;
+ }
+
+ // Wait for the close timer to fire.
+ void WaitForCloseTimer(const int timer_delay_ms) {
+ message_loop_.RunUntilIdle(); // OpenTask() may reset the timer.
+ base::PlatformThread::Sleep(
+ base::TimeDelta::FromMilliseconds(timer_delay_ms) * 2);
+ message_loop_.RunUntilIdle();
+ }
+
+ // Methods that do actual tests.
+ void OpenAndClose(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream(&manager_, params_);
+
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream));
+ EXPECT_CALL(stream, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
+ EXPECT_TRUE(proxy->Open());
+ proxy->Close();
+ WaitForCloseTimer(kTestCloseDelayMs);
+ }
+
+ // Create a stream, and then calls Start() and Stop().
+ void StartAndStop(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream(&manager_, params_);
+
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream));
+ EXPECT_CALL(stream, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream, SetVolume(_))
+ .Times(1);
+ EXPECT_CALL(stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
+ EXPECT_TRUE(proxy->Open());
+
+ proxy->Start(&callback_);
+ OnStart();
+ proxy->Stop();
+
+ proxy->Close();
+ WaitForCloseTimer(kTestCloseDelayMs);
+ EXPECT_TRUE(stream.stop_called());
+ EXPECT_TRUE(stream.start_called());
+ }
+
+ // Verify that the stream is closed after Stop is called.
+ void CloseAfterStop(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream(&manager_, params_);
+
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream));
+ EXPECT_CALL(stream, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream, SetVolume(_))
+ .Times(1);
+ EXPECT_CALL(stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
+ EXPECT_TRUE(proxy->Open());
+
+ proxy->Start(&callback_);
+ OnStart();
+ proxy->Stop();
+
+ // Wait for StopStream() to post StopStreamTask().
+ base::PlatformThread::Sleep(pause_delay_ * 2);
+ WaitForCloseTimer(kTestCloseDelayMs);
+
+ // Verify expectation before calling Close().
+ Mock::VerifyAndClear(&stream);
+
+ proxy->Close();
+ EXPECT_TRUE(stream.stop_called());
+ EXPECT_TRUE(stream.start_called());
+ }
+
+ // Create two streams, but don't start them. Only one device must be open.
+ void TwoStreams(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream(&manager_, params_);
+
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream));
+ EXPECT_CALL(stream, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy1 = new AudioOutputProxy(dispatcher);
+ AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
+ EXPECT_TRUE(proxy1->Open());
+ EXPECT_TRUE(proxy2->Open());
+ proxy1->Close();
+ proxy2->Close();
+ WaitForCloseTimer(kTestCloseDelayMs);
+ EXPECT_FALSE(stream.stop_called());
+ EXPECT_FALSE(stream.start_called());
+ }
+
+ // Open() method failed.
+ void OpenFailed(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream(&manager_, params_);
+
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream));
+ EXPECT_CALL(stream, Open())
+ .WillOnce(Return(false));
+ EXPECT_CALL(stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
+ EXPECT_FALSE(proxy->Open());
+ proxy->Close();
+ WaitForCloseTimer(kTestCloseDelayMs);
+ EXPECT_FALSE(stream.stop_called());
+ EXPECT_FALSE(stream.start_called());
+ }
+
+ void CreateAndWait(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream(&manager_, params_);
+
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream));
+ EXPECT_CALL(stream, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
+ EXPECT_TRUE(proxy->Open());
+
+ // Simulate a delay.
+ base::PlatformThread::Sleep(
+ base::TimeDelta::FromMilliseconds(kTestCloseDelayMs) * 2);
+ message_loop_.RunUntilIdle();
+
+ // Verify expectation before calling Close().
+ Mock::VerifyAndClear(&stream);
+
+ proxy->Close();
+ EXPECT_FALSE(stream.stop_called());
+ EXPECT_FALSE(stream.start_called());
+ }
+
+ void TwoStreams_OnePlaying(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream1(&manager_, params_);
+ MockAudioOutputStream stream2(&manager_, params_);
+
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream1))
+ .WillOnce(Return(&stream2));
+
+ EXPECT_CALL(stream1, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream1, SetVolume(_))
+ .Times(1);
+ EXPECT_CALL(stream1, Close())
+ .Times(1);
+
+ EXPECT_CALL(stream2, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream2, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy1 = new AudioOutputProxy(dispatcher);
+ AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
+ EXPECT_TRUE(proxy1->Open());
+ EXPECT_TRUE(proxy2->Open());
+
+ proxy1->Start(&callback_);
+ message_loop_.RunUntilIdle();
+ OnStart();
+ proxy1->Stop();
+
+ proxy1->Close();
+ proxy2->Close();
+ EXPECT_TRUE(stream1.stop_called());
+ EXPECT_TRUE(stream1.start_called());
+ EXPECT_FALSE(stream2.stop_called());
+ EXPECT_FALSE(stream2.start_called());
+ }
+
+ void TwoStreams_BothPlaying(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream1(&manager_, params_);
+ MockAudioOutputStream stream2(&manager_, params_);
+
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream1))
+ .WillOnce(Return(&stream2));
+
+ EXPECT_CALL(stream1, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream1, SetVolume(_))
+ .Times(1);
+ EXPECT_CALL(stream1, Close())
+ .Times(1);
+
+ EXPECT_CALL(stream2, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream2, SetVolume(_))
+ .Times(1);
+ EXPECT_CALL(stream2, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy1 = new AudioOutputProxy(dispatcher);
+ AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
+ EXPECT_TRUE(proxy1->Open());
+ EXPECT_TRUE(proxy2->Open());
+
+ proxy1->Start(&callback_);
+ proxy2->Start(&callback_);
+ OnStart();
+ proxy1->Stop();
+ proxy2->Stop();
+
+ proxy1->Close();
+ proxy2->Close();
+ EXPECT_TRUE(stream1.stop_called());
+ EXPECT_TRUE(stream1.start_called());
+ EXPECT_TRUE(stream2.stop_called());
+ EXPECT_TRUE(stream2.start_called());
+ }
+
+ void StartFailed(AudioOutputDispatcher* dispatcher) {
+ MockAudioOutputStream stream(&manager_, params_);
+
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream));
+ EXPECT_CALL(stream, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
+ EXPECT_TRUE(proxy->Open());
+
+ // Simulate a delay.
+ base::PlatformThread::Sleep(
+ base::TimeDelta::FromMilliseconds(kTestCloseDelayMs) * 2);
+ message_loop_.RunUntilIdle();
+
+ // Verify expectation before calling Close().
+ Mock::VerifyAndClear(&stream);
+
+ // |stream| is closed at this point. Start() should reopen it again.
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .Times(2)
+ .WillRepeatedly(Return(reinterpret_cast<AudioOutputStream*>(NULL)));
+
+ EXPECT_CALL(callback_, OnError(_))
+ .Times(2);
+
+ proxy->Start(&callback_);
+
+ // Double Start() in the error case should be allowed since it's possible a
+ // callback may not have had time to process the OnError() in between.
+ proxy->Stop();
+ proxy->Start(&callback_);
+
+ Mock::VerifyAndClear(&callback_);
+
+ proxy->Close();
+ }
+
+ base::MessageLoop message_loop_;
+ scoped_refptr<AudioOutputDispatcherImpl> dispatcher_impl_;
+ base::TimeDelta pause_delay_;
+ MockAudioManager manager_;
+ MockAudioSourceCallback callback_;
+ AudioParameters params_;
+};
+
+class AudioOutputResamplerTest : public AudioOutputProxyTest {
+ public:
+ virtual void TearDown() {
+ AudioOutputProxyTest::TearDown();
+ }
+
+ virtual void InitDispatcher(base::TimeDelta close_delay) OVERRIDE {
+ AudioOutputProxyTest::InitDispatcher(close_delay);
+ // Use a low sample rate and large buffer size when testing otherwise the
+ // FakeAudioOutputStream will keep the message loop busy indefinitely; i.e.,
+ // RunUntilIdle() will never terminate.
+ resampler_params_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ 16000, 16, 1024);
+ resampler_ = new AudioOutputResampler(
+ &manager(), params_, resampler_params_, std::string(), close_delay);
+ }
+
+ virtual void OnStart() OVERRIDE {
+ // Let start run for a bit.
+ message_loop_.RunUntilIdle();
+ base::PlatformThread::Sleep(
+ base::TimeDelta::FromMilliseconds(kStartRunTimeMs));
+ }
+
+ protected:
+ AudioParameters resampler_params_;
+ scoped_refptr<AudioOutputResampler> resampler_;
+};
+
+TEST_F(AudioOutputProxyTest, CreateAndClose) {
+ AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher_impl_.get());
+ proxy->Close();
+}
+
+TEST_F(AudioOutputResamplerTest, CreateAndClose) {
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ proxy->Close();
+}
+
+TEST_F(AudioOutputProxyTest, OpenAndClose) {
+ OpenAndClose(dispatcher_impl_.get());
+}
+
+TEST_F(AudioOutputResamplerTest, OpenAndClose) {
+ OpenAndClose(resampler_.get());
+}
+
+// Create a stream, and verify that it is closed after kTestCloseDelayMs.
+// if it doesn't start playing.
+TEST_F(AudioOutputProxyTest, CreateAndWait) {
+ CreateAndWait(dispatcher_impl_.get());
+}
+
+// Create a stream, and verify that it is closed after kTestCloseDelayMs.
+// if it doesn't start playing.
+TEST_F(AudioOutputResamplerTest, CreateAndWait) {
+ CreateAndWait(resampler_.get());
+}
+
+TEST_F(AudioOutputProxyTest, StartAndStop) {
+ StartAndStop(dispatcher_impl_.get());
+}
+
+TEST_F(AudioOutputResamplerTest, StartAndStop) {
+ StartAndStop(resampler_.get());
+}
+
+TEST_F(AudioOutputProxyTest, CloseAfterStop) {
+ CloseAfterStop(dispatcher_impl_.get());
+}
+
+TEST_F(AudioOutputResamplerTest, CloseAfterStop) {
+ CloseAfterStop(resampler_.get());
+}
+
+TEST_F(AudioOutputProxyTest, TwoStreams) { TwoStreams(dispatcher_impl_.get()); }
+
+TEST_F(AudioOutputResamplerTest, TwoStreams) { TwoStreams(resampler_.get()); }
+
+// Two streams: verify that second stream is allocated when the first
+// starts playing.
+TEST_F(AudioOutputProxyTest, TwoStreams_OnePlaying) {
+ InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
+ TwoStreams_OnePlaying(dispatcher_impl_.get());
+}
+
+TEST_F(AudioOutputResamplerTest, TwoStreams_OnePlaying) {
+ InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
+ TwoStreams_OnePlaying(resampler_.get());
+}
+
+// Two streams, both are playing. Dispatcher should not open a third stream.
+TEST_F(AudioOutputProxyTest, TwoStreams_BothPlaying) {
+ InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
+ TwoStreams_BothPlaying(dispatcher_impl_.get());
+}
+
+TEST_F(AudioOutputResamplerTest, TwoStreams_BothPlaying) {
+ InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
+ TwoStreams_BothPlaying(resampler_.get());
+}
+
+TEST_F(AudioOutputProxyTest, OpenFailed) { OpenFailed(dispatcher_impl_.get()); }
+
+// Start() method failed.
+TEST_F(AudioOutputProxyTest, StartFailed) {
+ StartFailed(dispatcher_impl_.get());
+}
+
+TEST_F(AudioOutputResamplerTest, StartFailed) { StartFailed(resampler_.get()); }
+
+// Simulate AudioOutputStream::Create() failure with a low latency stream and
+// ensure AudioOutputResampler falls back to the high latency path.
+TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
+ MockAudioOutputStream stream(&manager_, params_);
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .Times(2)
+ .WillOnce(Return(static_cast<AudioOutputStream*>(NULL)))
+ .WillRepeatedly(Return(&stream));
+ EXPECT_CALL(stream, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy->Open());
+ proxy->Close();
+ WaitForCloseTimer(kTestCloseDelayMs);
+}
+
+// Simulate AudioOutputStream::Open() failure with a low latency stream and
+// ensure AudioOutputResampler falls back to the high latency path.
+TEST_F(AudioOutputResamplerTest, LowLatencyOpenFailedFallback) {
+ MockAudioOutputStream failed_stream(&manager_, params_);
+ MockAudioOutputStream okay_stream(&manager_, params_);
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .Times(2)
+ .WillOnce(Return(&failed_stream))
+ .WillRepeatedly(Return(&okay_stream));
+ EXPECT_CALL(failed_stream, Open())
+ .WillOnce(Return(false));
+ EXPECT_CALL(failed_stream, Close())
+ .Times(1);
+ EXPECT_CALL(okay_stream, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(okay_stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy->Open());
+ proxy->Close();
+ WaitForCloseTimer(kTestCloseDelayMs);
+}
+
+// Simulate failures to open both the low latency and the fallback high latency
+// stream and ensure AudioOutputResampler falls back to a fake stream.
+TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
+ MockAudioOutputStream okay_stream(&manager_, params_);
+
+// Only Windows has a high latency output driver that is not the same as the low
+// latency path.
+#if defined(OS_WIN)
+ static const int kFallbackCount = 2;
+#else
+ static const int kFallbackCount = 1;
+#endif
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .Times(kFallbackCount)
+ .WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
+
+ // To prevent shared memory issues the sample rate and buffer size should
+ // match the input stream parameters.
+ EXPECT_CALL(manager(), MakeAudioOutputStream(AllOf(
+ testing::Property(&AudioParameters::format, AudioParameters::AUDIO_FAKE),
+ testing::Property(&AudioParameters::sample_rate, params_.sample_rate()),
+ testing::Property(
+ &AudioParameters::frames_per_buffer, params_.frames_per_buffer())),
+ _))
+ .Times(1)
+ .WillOnce(Return(&okay_stream));
+ EXPECT_CALL(okay_stream, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(okay_stream, Close())
+ .Times(1);
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy->Open());
+ proxy->Close();
+ WaitForCloseTimer(kTestCloseDelayMs);
+}
+
+// Simulate failures to open both the low latency, the fallback high latency
+// stream, and the fake audio output stream and ensure AudioOutputResampler
+// terminates normally.
+TEST_F(AudioOutputResamplerTest, AllFallbackFailed) {
+// Only Windows has a high latency output driver that is not the same as the low
+// latency path.
+#if defined(OS_WIN)
+ static const int kFallbackCount = 3;
+#else
+ static const int kFallbackCount = 2;
+#endif
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .Times(kFallbackCount)
+ .WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
+
+ AudioOutputProxy* proxy = new AudioOutputProxy(resampler_.get());
+ EXPECT_FALSE(proxy->Open());
+ proxy->Close();
+ WaitForCloseTimer(kTestCloseDelayMs);
+}
+
+// Simulate an eventual OpenStream() failure; i.e. successful OpenStream() calls
+// eventually followed by one which fails; root cause of http://crbug.com/150619
+TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
+ MockAudioOutputStream stream1(&manager_, params_);
+ MockAudioOutputStream stream2(&manager_, params_);
+ MockAudioOutputStream stream3(&manager_, params_);
+
+ // Setup the mock such that all three streams are successfully created.
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
+ .WillOnce(Return(&stream1))
+ .WillOnce(Return(&stream2))
+ .WillOnce(Return(&stream3))
+ .WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
+
+ // Stream1 should be able to successfully open and start.
+ EXPECT_CALL(stream1, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream1, Close())
+ .Times(1);
+ EXPECT_CALL(stream1, SetVolume(_))
+ .Times(1);
+
+ // Stream2 should also be able to successfully open and start.
+ EXPECT_CALL(stream2, Open())
+ .WillOnce(Return(true));
+ EXPECT_CALL(stream2, Close())
+ .Times(1);
+ EXPECT_CALL(stream2, SetVolume(_))
+ .Times(1);
+
+ // Stream3 should fail on Open() (yet still be closed since
+ // MakeAudioOutputStream returned a valid AudioOutputStream object).
+ EXPECT_CALL(stream3, Open())
+ .WillOnce(Return(false));
+ EXPECT_CALL(stream3, Close())
+ .Times(1);
+
+ // Open and start the first proxy and stream.
+ AudioOutputProxy* proxy1 = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy1->Open());
+ proxy1->Start(&callback_);
+ OnStart();
+
+ // Open and start the second proxy and stream.
+ AudioOutputProxy* proxy2 = new AudioOutputProxy(resampler_.get());
+ EXPECT_TRUE(proxy2->Open());
+ proxy2->Start(&callback_);
+ OnStart();
+
+ // Attempt to open the third stream which should fail.
+ AudioOutputProxy* proxy3 = new AudioOutputProxy(resampler_.get());
+ EXPECT_FALSE(proxy3->Open());
+
+ // Perform the required Stop()/Close() shutdown dance for each proxy. Under
+ // the hood each proxy should correctly call CloseStream() if OpenStream()
+ // succeeded or not.
+ proxy3->Stop();
+ proxy3->Close();
+ proxy2->Stop();
+ proxy2->Close();
+ proxy1->Stop();
+ proxy1->Close();
+
+ // Wait for all of the messages to fly and then verify stream behavior.
+ WaitForCloseTimer(kTestCloseDelayMs);
+ EXPECT_TRUE(stream1.stop_called());
+ EXPECT_TRUE(stream1.start_called());
+ EXPECT_TRUE(stream2.stop_called());
+ EXPECT_TRUE(stream2.start_called());
+ EXPECT_FALSE(stream3.stop_called());
+ EXPECT_FALSE(stream3.start_called());
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_output_resampler.cc b/chromium/media/audio/audio_output_resampler.cc
new file mode 100644
index 00000000000..6db0e2fb2fe
--- /dev/null
+++ b/chromium/media/audio/audio_output_resampler.cc
@@ -0,0 +1,395 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_output_resampler.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop/message_loop.h"
+#include "base/metrics/histogram.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_output_dispatcher_impl.h"
+#include "media/audio/audio_output_proxy.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/sample_rates.h"
+#include "media/base/audio_converter.h"
+#include "media/base/limits.h"
+
+namespace media {
+
+class OnMoreDataConverter
+ : public AudioOutputStream::AudioSourceCallback,
+ public AudioConverter::InputCallback {
+ public:
+ OnMoreDataConverter(const AudioParameters& input_params,
+ const AudioParameters& output_params);
+ virtual ~OnMoreDataConverter();
+
+ // AudioSourceCallback interface.
+ virtual int OnMoreData(AudioBus* dest,
+ AudioBuffersState buffers_state) OVERRIDE;
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) OVERRIDE;
+ virtual void OnError(AudioOutputStream* stream) OVERRIDE;
+
+ // Sets |source_callback_|. If this is not a new object, then Stop() must be
+ // called before Start().
+ void Start(AudioOutputStream::AudioSourceCallback* callback);
+
+ // Clears |source_callback_| and flushes the resampler.
+ void Stop();
+
+ private:
+ // AudioConverter::InputCallback implementation.
+ virtual double ProvideInput(AudioBus* audio_bus,
+ base::TimeDelta buffer_delay) OVERRIDE;
+
+ // Ratio of input bytes to output bytes used to correct playback delay with
+ // regard to buffering and resampling.
+ double io_ratio_;
+
+ // Source callback and associated lock.
+ base::Lock source_lock_;
+ AudioOutputStream::AudioSourceCallback* source_callback_;
+
+ // |source| passed to OnMoreIOData() which should be passed downstream.
+ AudioBus* source_bus_;
+
+ // Last AudioBuffersState object received via OnMoreData(), used to correct
+ // playback delay by ProvideInput() and passed on to |source_callback_|.
+ AudioBuffersState current_buffers_state_;
+
+ const int input_bytes_per_second_;
+
+ // Handles resampling, buffering, and channel mixing between input and output
+ // parameters.
+ AudioConverter audio_converter_;
+
+ DISALLOW_COPY_AND_ASSIGN(OnMoreDataConverter);
+};
+
+// Record UMA statistics for hardware output configuration.
+static void RecordStats(const AudioParameters& output_params) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.HardwareAudioBitsPerChannel", output_params.bits_per_sample(),
+ limits::kMaxBitsPerSample);
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.HardwareAudioChannelLayout", output_params.channel_layout(),
+ CHANNEL_LAYOUT_MAX);
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.HardwareAudioChannelCount", output_params.channels(),
+ limits::kMaxChannels);
+
+ AudioSampleRate asr = media::AsAudioSampleRate(output_params.sample_rate());
+ if (asr != kUnexpectedAudioSampleRate) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.HardwareAudioSamplesPerSecond", asr, kUnexpectedAudioSampleRate);
+ } else {
+ UMA_HISTOGRAM_COUNTS(
+ "Media.HardwareAudioSamplesPerSecondUnexpected",
+ output_params.sample_rate());
+ }
+}
+
+// Record UMA statistics for hardware output configuration after fallback.
+static void RecordFallbackStats(const AudioParameters& output_params) {
+ UMA_HISTOGRAM_BOOLEAN("Media.FallbackToHighLatencyAudioPath", true);
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.FallbackHardwareAudioBitsPerChannel",
+ output_params.bits_per_sample(), limits::kMaxBitsPerSample);
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.FallbackHardwareAudioChannelLayout",
+ output_params.channel_layout(), CHANNEL_LAYOUT_MAX);
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.FallbackHardwareAudioChannelCount",
+ output_params.channels(), limits::kMaxChannels);
+
+ AudioSampleRate asr = media::AsAudioSampleRate(output_params.sample_rate());
+ if (asr != kUnexpectedAudioSampleRate) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.FallbackHardwareAudioSamplesPerSecond",
+ asr, kUnexpectedAudioSampleRate);
+ } else {
+ UMA_HISTOGRAM_COUNTS(
+ "Media.FallbackHardwareAudioSamplesPerSecondUnexpected",
+ output_params.sample_rate());
+ }
+}
+
+// Only Windows has a high latency output driver that is not the same as the low
+// latency path.
+#if defined(OS_WIN)
+// Converts low latency based |output_params| into high latency appropriate
+// output parameters in error situations.
+static AudioParameters SetupFallbackParams(
+ const AudioParameters& input_params, const AudioParameters& output_params) {
+ // Choose AudioParameters appropriate for opening the device in high latency
+ // mode. |kMinLowLatencyFrameSize| is arbitrarily based on Pepper Flash's
+ // MAXIMUM frame size for low latency.
+ static const int kMinLowLatencyFrameSize = 2048;
+ int frames_per_buffer = std::min(
+ std::max(input_params.frames_per_buffer(), kMinLowLatencyFrameSize),
+ static_cast<int>(
+ GetHighLatencyOutputBufferSize(input_params.sample_rate())));
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, input_params.channel_layout(),
+ input_params.sample_rate(), input_params.bits_per_sample(),
+ frames_per_buffer);
+}
+#endif
+
+AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager,
+ const AudioParameters& input_params,
+ const AudioParameters& output_params,
+ const std::string& input_device_id,
+ const base::TimeDelta& close_delay)
+ : AudioOutputDispatcher(audio_manager, input_params, input_device_id),
+ close_delay_(close_delay),
+ output_params_(output_params),
+ input_device_id_(input_device_id),
+ streams_opened_(false) {
+ DCHECK(input_params.IsValid());
+ DCHECK(output_params.IsValid());
+ DCHECK_EQ(output_params_.format(), AudioParameters::AUDIO_PCM_LOW_LATENCY);
+
+ // Record UMA statistics for the hardware configuration.
+ RecordStats(output_params);
+
+ Initialize();
+}
+
+AudioOutputResampler::~AudioOutputResampler() {
+ DCHECK(callbacks_.empty());
+}
+
+void AudioOutputResampler::Initialize() {
+ DCHECK(!streams_opened_);
+ DCHECK(callbacks_.empty());
+ dispatcher_ = new AudioOutputDispatcherImpl(
+ audio_manager_, output_params_, input_device_id_, close_delay_);
+}
+
+bool AudioOutputResampler::OpenStream() {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+
+ if (dispatcher_->OpenStream()) {
+ // Only record the UMA statistic if we didn't fallback during construction
+ // and only for the first stream we open.
+ if (!streams_opened_ &&
+ output_params_.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
+ UMA_HISTOGRAM_BOOLEAN("Media.FallbackToHighLatencyAudioPath", false);
+ }
+ streams_opened_ = true;
+ return true;
+ }
+
+ // If we've already tried to open the stream in high latency mode or we've
+ // successfully opened a stream previously, there's nothing more to be done.
+ if (output_params_.format() != AudioParameters::AUDIO_PCM_LOW_LATENCY ||
+ streams_opened_ || !callbacks_.empty()) {
+ return false;
+ }
+
+ DCHECK_EQ(output_params_.format(), AudioParameters::AUDIO_PCM_LOW_LATENCY);
+
+ // Record UMA statistics about the hardware which triggered the failure so
+ // we can debug and triage later.
+ RecordFallbackStats(output_params_);
+
+ // Only Windows has a high latency output driver that is not the same as the
+ // low latency path.
+#if defined(OS_WIN)
+ DLOG(ERROR) << "Unable to open audio device in low latency mode. Falling "
+ << "back to high latency audio output.";
+
+ output_params_ = SetupFallbackParams(params_, output_params_);
+ Initialize();
+ if (dispatcher_->OpenStream()) {
+ streams_opened_ = true;
+ return true;
+ }
+#endif
+
+ DLOG(ERROR) << "Unable to open audio device in high latency mode. Falling "
+ << "back to fake audio output.";
+
+ // Finally fall back to a fake audio output device.
+ output_params_.Reset(
+ AudioParameters::AUDIO_FAKE, params_.channel_layout(),
+ params_.channels(), params_.input_channels(), params_.sample_rate(),
+ params_.bits_per_sample(), params_.frames_per_buffer());
+ Initialize();
+ if (dispatcher_->OpenStream()) {
+ streams_opened_ = true;
+ return true;
+ }
+
+ return false;
+}
+
+bool AudioOutputResampler::StartStream(
+ AudioOutputStream::AudioSourceCallback* callback,
+ AudioOutputProxy* stream_proxy) {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+
+ OnMoreDataConverter* resampler_callback = NULL;
+ CallbackMap::iterator it = callbacks_.find(stream_proxy);
+ if (it == callbacks_.end()) {
+ resampler_callback = new OnMoreDataConverter(params_, output_params_);
+ callbacks_[stream_proxy] = resampler_callback;
+ } else {
+ resampler_callback = it->second;
+ }
+
+ resampler_callback->Start(callback);
+ bool result = dispatcher_->StartStream(resampler_callback, stream_proxy);
+ if (!result)
+ resampler_callback->Stop();
+ return result;
+}
+
+void AudioOutputResampler::StreamVolumeSet(AudioOutputProxy* stream_proxy,
+ double volume) {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ dispatcher_->StreamVolumeSet(stream_proxy, volume);
+}
+
+void AudioOutputResampler::StopStream(AudioOutputProxy* stream_proxy) {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ dispatcher_->StopStream(stream_proxy);
+
+ // Now that StopStream() has completed the underlying physical stream should
+ // be stopped and no longer calling OnMoreData(), making it safe to Stop() the
+ // OnMoreDataConverter.
+ CallbackMap::iterator it = callbacks_.find(stream_proxy);
+ if (it != callbacks_.end())
+ it->second->Stop();
+}
+
+void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+ dispatcher_->CloseStream(stream_proxy);
+
+ // We assume that StopStream() is always called prior to CloseStream(), so
+ // that it is safe to delete the OnMoreDataConverter here.
+ CallbackMap::iterator it = callbacks_.find(stream_proxy);
+ if (it != callbacks_.end()) {
+ delete it->second;
+ callbacks_.erase(it);
+ }
+}
+
+void AudioOutputResampler::Shutdown() {
+ DCHECK_EQ(base::MessageLoop::current(), message_loop_);
+
+ // No AudioOutputProxy objects should hold a reference to us when we get
+ // to this stage.
+ DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference";
+
+ dispatcher_->Shutdown();
+ DCHECK(callbacks_.empty());
+}
+
+OnMoreDataConverter::OnMoreDataConverter(const AudioParameters& input_params,
+ const AudioParameters& output_params)
+ : source_callback_(NULL),
+ source_bus_(NULL),
+ input_bytes_per_second_(input_params.GetBytesPerSecond()),
+ audio_converter_(input_params, output_params, false) {
+ io_ratio_ =
+ static_cast<double>(input_params.GetBytesPerSecond()) /
+ output_params.GetBytesPerSecond();
+}
+
+OnMoreDataConverter::~OnMoreDataConverter() {
+ // Ensure Stop() has been called so we don't end up with an AudioOutputStream
+ // calling back into OnMoreData() after destruction.
+ CHECK(!source_callback_);
+}
+
+void OnMoreDataConverter::Start(
+ AudioOutputStream::AudioSourceCallback* callback) {
+ base::AutoLock auto_lock(source_lock_);
+ CHECK(!source_callback_);
+ source_callback_ = callback;
+
+ // While AudioConverter can handle multiple inputs, we're using it only with
+ // a single input currently. Eventually this may be the basis for a browser
+ // side mixer.
+ audio_converter_.AddInput(this);
+}
+
+void OnMoreDataConverter::Stop() {
+ base::AutoLock auto_lock(source_lock_);
+ CHECK(source_callback_);
+ source_callback_ = NULL;
+ audio_converter_.RemoveInput(this);
+}
+
+int OnMoreDataConverter::OnMoreData(AudioBus* dest,
+ AudioBuffersState buffers_state) {
+ return OnMoreIOData(NULL, dest, buffers_state);
+}
+
+int OnMoreDataConverter::OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) {
+ base::AutoLock auto_lock(source_lock_);
+ // While we waited for |source_lock_| the callback might have been cleared.
+ if (!source_callback_) {
+ dest->Zero();
+ return dest->frames();
+ }
+
+ source_bus_ = source;
+ current_buffers_state_ = buffers_state;
+ audio_converter_.Convert(dest);
+
+  // Always return the full number of frames requested, ProvideInput() will
+  // pad with silence if it wasn't able to acquire enough data.
+ return dest->frames();
+}
+
+double OnMoreDataConverter::ProvideInput(AudioBus* dest,
+ base::TimeDelta buffer_delay) {
+ source_lock_.AssertAcquired();
+
+ // Adjust playback delay to include |buffer_delay|.
+ // TODO(dalecurtis): Stop passing bytes around, it doesn't make sense since
+ // AudioBus is just float data. Use TimeDelta instead.
+ AudioBuffersState new_buffers_state;
+ new_buffers_state.pending_bytes =
+ io_ratio_ * (current_buffers_state_.total_bytes() +
+ buffer_delay.InSecondsF() * input_bytes_per_second_);
+
+ // Retrieve data from the original callback.
+ int frames = source_callback_->OnMoreIOData(
+ source_bus_, dest, new_buffers_state);
+
+ // |source_bus_| should only be provided once.
+ // TODO(dalecurtis, crogers): This is not a complete fix. If ProvideInput()
+ // is called multiple times, we need to do something more clever here.
+ source_bus_ = NULL;
+
+ // Zero any unfilled frames if anything was filled, otherwise we'll just
+ // return a volume of zero and let AudioConverter drop the output.
+ if (frames > 0 && frames < dest->frames())
+ dest->ZeroFramesPartial(frames, dest->frames() - frames);
+
+ // TODO(dalecurtis): Return the correct volume here.
+ return frames > 0 ? 1 : 0;
+}
+
+void OnMoreDataConverter::OnError(AudioOutputStream* stream) {
+ base::AutoLock auto_lock(source_lock_);
+ if (source_callback_)
+ source_callback_->OnError(stream);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_output_resampler.h b/chromium/media/audio/audio_output_resampler.h
new file mode 100644
index 00000000000..df9e4320b55
--- /dev/null
+++ b/chromium/media/audio/audio_output_resampler.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_RESAMPLER_H_
+#define MEDIA_AUDIO_AUDIO_OUTPUT_RESAMPLER_H_
+
+#include <map>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/time/time.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_output_dispatcher.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class OnMoreDataConverter;
+
+// AudioOutputResampler is a browser-side resampling and buffering solution
+// which ensures audio data is always output at given parameters. See the
+// AudioConverter class for details on the conversion process.
+//
+// AOR works by intercepting the AudioSourceCallback provided to StartStream()
+// and redirecting it through an AudioConverter instance. AudioBuffersState is
+// adjusted for buffer delay caused by the conversion process.
+//
+// AOR will automatically fall back from AUDIO_PCM_LOW_LATENCY to
+// AUDIO_PCM_LINEAR if the output device fails to open at the requested output
+// parameters.
+//
+// TODO(dalecurtis): Ideally the low latency path will be as reliable as the
+// high latency path once we have channel mixing and support querying for the
+// hardware's configured bit depth. Monitor the UMA stats for fallback and
+// remove fallback support once it's stable. http://crbug.com/148418
+class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
+ public:
+ AudioOutputResampler(AudioManager* audio_manager,
+ const AudioParameters& input_params,
+ const AudioParameters& output_params,
+ const std::string& input_device_id,
+ const base::TimeDelta& close_delay);
+
+ // AudioOutputDispatcher interface.
+ virtual bool OpenStream() OVERRIDE;
+ virtual bool StartStream(AudioOutputStream::AudioSourceCallback* callback,
+ AudioOutputProxy* stream_proxy) OVERRIDE;
+ virtual void StopStream(AudioOutputProxy* stream_proxy) OVERRIDE;
+ virtual void StreamVolumeSet(AudioOutputProxy* stream_proxy,
+ double volume) OVERRIDE;
+ virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
+ virtual void Shutdown() OVERRIDE;
+
+ private:
+ friend class base::RefCountedThreadSafe<AudioOutputResampler>;
+ virtual ~AudioOutputResampler();
+
+ // Used to initialize and reinitialize |dispatcher_|.
+ void Initialize();
+
+  // Dispatcher to proxy all AudioOutputDispatcher calls to.
+ scoped_refptr<AudioOutputDispatcher> dispatcher_;
+
+ // Map of outstanding OnMoreDataConverter objects. A new object is created
+ // on every StartStream() call and destroyed on CloseStream().
+ typedef std::map<AudioOutputProxy*, OnMoreDataConverter*> CallbackMap;
+ CallbackMap callbacks_;
+
+ // Used by AudioOutputDispatcherImpl; kept so we can reinitialize on the fly.
+ base::TimeDelta close_delay_;
+
+ // AudioParameters used to setup the output stream.
+ AudioParameters output_params_;
+
+ // Device ID to be used by the unified IO to open the correct input device.
+ const std::string input_device_id_;
+
+ // Whether any streams have been opened through |dispatcher_|, if so we can't
+ // fallback on future OpenStream() failures.
+ bool streams_opened_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioOutputResampler);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_OUTPUT_RESAMPLER_H_
diff --git a/chromium/media/audio/audio_parameters.cc b/chromium/media/audio/audio_parameters.cc
new file mode 100644
index 00000000000..5e77c60cb94
--- /dev/null
+++ b/chromium/media/audio/audio_parameters.cc
@@ -0,0 +1,97 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_parameters.h"
+
+#include "base/logging.h"
+#include "media/base/limits.h"
+
+namespace media {
+
+AudioParameters::AudioParameters()
+ : format_(AUDIO_PCM_LINEAR),
+ channel_layout_(CHANNEL_LAYOUT_NONE),
+ sample_rate_(0),
+ bits_per_sample_(0),
+ frames_per_buffer_(0),
+ channels_(0),
+ input_channels_(0) {
+}
+
+AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer)
+ : format_(format),
+ channel_layout_(channel_layout),
+ sample_rate_(sample_rate),
+ bits_per_sample_(bits_per_sample),
+ frames_per_buffer_(frames_per_buffer),
+ channels_(ChannelLayoutToChannelCount(channel_layout)),
+ input_channels_(0) {
+}
+
+AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
+ int input_channels,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer)
+ : format_(format),
+ channel_layout_(channel_layout),
+ sample_rate_(sample_rate),
+ bits_per_sample_(bits_per_sample),
+ frames_per_buffer_(frames_per_buffer),
+ channels_(ChannelLayoutToChannelCount(channel_layout)),
+ input_channels_(input_channels) {
+}
+
+void AudioParameters::Reset(Format format, ChannelLayout channel_layout,
+ int channels, int input_channels,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer) {
+ if (channel_layout != CHANNEL_LAYOUT_DISCRETE)
+ DCHECK_EQ(channels, ChannelLayoutToChannelCount(channel_layout));
+
+ format_ = format;
+ channel_layout_ = channel_layout;
+ channels_ = channels;
+ input_channels_ = input_channels;
+ sample_rate_ = sample_rate;
+ bits_per_sample_ = bits_per_sample;
+ frames_per_buffer_ = frames_per_buffer;
+}
+
+bool AudioParameters::IsValid() const {
+ return (format_ >= AUDIO_PCM_LINEAR) &&
+ (format_ < AUDIO_LAST_FORMAT) &&
+ (channels_ > 0) &&
+ (channels_ <= media::limits::kMaxChannels) &&
+ (channel_layout_ > CHANNEL_LAYOUT_UNSUPPORTED) &&
+ (channel_layout_ < CHANNEL_LAYOUT_MAX) &&
+ (input_channels_ >= 0) &&
+ (input_channels_ <= media::limits::kMaxChannels) &&
+ (sample_rate_ >= media::limits::kMinSampleRate) &&
+ (sample_rate_ <= media::limits::kMaxSampleRate) &&
+ (bits_per_sample_ > 0) &&
+ (bits_per_sample_ <= media::limits::kMaxBitsPerSample) &&
+ (frames_per_buffer_ > 0) &&
+ (frames_per_buffer_ <= media::limits::kMaxSamplesPerPacket);
+}
+
+int AudioParameters::GetBytesPerBuffer() const {
+ return frames_per_buffer_ * GetBytesPerFrame();
+}
+
+int AudioParameters::GetBytesPerSecond() const {
+ return sample_rate_ * GetBytesPerFrame();
+}
+
+int AudioParameters::GetBytesPerFrame() const {
+ return channels_ * bits_per_sample_ / 8;
+}
+
+void AudioParameters::SetDiscreteChannels(int channels) {
+ channel_layout_ = CHANNEL_LAYOUT_DISCRETE;
+ channels_ = channels;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_parameters.h b/chromium/media/audio/audio_parameters.h
new file mode 100644
index 00000000000..2817cd2c5a6
--- /dev/null
+++ b/chromium/media/audio/audio_parameters.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_PARAMETERS_H_
+#define MEDIA_AUDIO_AUDIO_PARAMETERS_H_
+
+#include "base/basictypes.h"
+#include "media/base/channel_layout.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+struct MEDIA_EXPORT AudioInputBufferParameters {
+ double volume;
+ uint32 size;
+};
+
+// Use a struct-in-struct approach to ensure that we can calculate the required
+// size as sizeof(AudioInputBufferParameters) + #(bytes in audio buffer) without
+// using packing.
+struct MEDIA_EXPORT AudioInputBuffer {
+ AudioInputBufferParameters params;
+ int8 audio[1];
+};
+
+class MEDIA_EXPORT AudioParameters {
+ public:
+ // TODO(miu): Rename this enum to something that correctly reflects its
+ // semantics, such as "TransportScheme."
+ enum Format {
+ AUDIO_PCM_LINEAR = 0, // PCM is 'raw' amplitude samples.
+ AUDIO_PCM_LOW_LATENCY, // Linear PCM, low latency requested.
+ AUDIO_FAKE, // Creates a fake AudioOutputStream object.
+ AUDIO_LAST_FORMAT // Only used for validation of format.
+ };
+
+ enum {
+ // Telephone quality sample rate, mostly for speech-only audio.
+ kTelephoneSampleRate = 8000,
+ // CD sampling rate is 44.1 KHz or conveniently 2x2x3x3x5x5x7x7.
+ kAudioCDSampleRate = 44100,
+ };
+
+ AudioParameters();
+ AudioParameters(Format format, ChannelLayout channel_layout,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer);
+ AudioParameters(Format format, ChannelLayout channel_layout,
+ int input_channels,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer);
+ void Reset(Format format, ChannelLayout channel_layout,
+ int channels, int input_channels,
+ int sample_rate, int bits_per_sample,
+ int frames_per_buffer);
+
+ // Checks that all values are in the expected range. All limits are specified
+ // in media::Limits.
+ bool IsValid() const;
+
+ // Returns size of audio buffer in bytes.
+ int GetBytesPerBuffer() const;
+
+ // Returns the number of bytes representing one second of audio.
+ int GetBytesPerSecond() const;
+
+ // Returns the number of bytes representing a frame of audio.
+ int GetBytesPerFrame() const;
+
+ Format format() const { return format_; }
+ ChannelLayout channel_layout() const { return channel_layout_; }
+ int sample_rate() const { return sample_rate_; }
+ int bits_per_sample() const { return bits_per_sample_; }
+ int frames_per_buffer() const { return frames_per_buffer_; }
+ int channels() const { return channels_; }
+ int input_channels() const { return input_channels_; }
+
+ // Set to CHANNEL_LAYOUT_DISCRETE with given number of channels.
+ void SetDiscreteChannels(int channels);
+
+ // Comparison with other AudioParams.
+ bool operator==(const AudioParameters& other) const {
+ return format_ == other.format() &&
+ sample_rate_ == other.sample_rate() &&
+ channel_layout_ == other.channel_layout() &&
+ channels_ == other.channels() &&
+ input_channels_ == other.input_channels() &&
+ bits_per_sample_ == other.bits_per_sample() &&
+ frames_per_buffer_ == other.frames_per_buffer();
+ }
+
+ private:
+ Format format_; // Format of the stream.
+ ChannelLayout channel_layout_; // Order of surround sound channels.
+ int sample_rate_; // Sampling frequency/rate.
+ int bits_per_sample_; // Number of bits per sample.
+ int frames_per_buffer_; // Number of frames in a buffer.
+
+ int channels_; // Number of channels. Value set based on
+ // |channel_layout|.
+ int input_channels_; // Optional number of input channels.
+ // Normally 0, but can be set to specify
+ // synchronized I/O.
+};
+
+// Comparison is useful when AudioParameters is used with std structures.
+inline bool operator<(const AudioParameters& a, const AudioParameters& b) {
+ if (a.format() != b.format())
+ return a.format() < b.format();
+ if (a.channels() != b.channels())
+ return a.channels() < b.channels();
+ if (a.input_channels() != b.input_channels())
+ return a.input_channels() < b.input_channels();
+ if (a.sample_rate() != b.sample_rate())
+ return a.sample_rate() < b.sample_rate();
+ if (a.bits_per_sample() != b.bits_per_sample())
+ return a.bits_per_sample() < b.bits_per_sample();
+ return a.frames_per_buffer() < b.frames_per_buffer();
+}
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_PARAMETERS_H_
diff --git a/chromium/media/audio/audio_parameters_unittest.cc b/chromium/media/audio/audio_parameters_unittest.cc
new file mode 100644
index 00000000000..f0d37129eb9
--- /dev/null
+++ b/chromium/media/audio/audio_parameters_unittest.cc
@@ -0,0 +1,168 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/strings/string_number_conversions.h"
+#include "media/audio/audio_parameters.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(AudioParameters, Constructor_Default) {
+ AudioParameters::Format expected_format = AudioParameters::AUDIO_PCM_LINEAR;
+ int expected_bits = 0;
+ int expected_channels = 0;
+ ChannelLayout expected_channel_layout = CHANNEL_LAYOUT_NONE;
+ int expected_rate = 0;
+ int expected_samples = 0;
+
+ AudioParameters params;
+
+ EXPECT_EQ(expected_format, params.format());
+ EXPECT_EQ(expected_bits, params.bits_per_sample());
+ EXPECT_EQ(expected_channels, params.channels());
+ EXPECT_EQ(expected_channel_layout, params.channel_layout());
+ EXPECT_EQ(expected_rate, params.sample_rate());
+ EXPECT_EQ(expected_samples, params.frames_per_buffer());
+}
+
+TEST(AudioParameters, Constructor_ParameterValues) {
+ AudioParameters::Format expected_format =
+ AudioParameters::AUDIO_PCM_LOW_LATENCY;
+ int expected_bits = 16;
+ int expected_channels = 6;
+ ChannelLayout expected_channel_layout = CHANNEL_LAYOUT_5_1;
+ int expected_rate = 44100;
+ int expected_samples = 880;
+
+ AudioParameters params(expected_format, expected_channel_layout,
+ expected_rate, expected_bits, expected_samples);
+
+ EXPECT_EQ(expected_format, params.format());
+ EXPECT_EQ(expected_bits, params.bits_per_sample());
+ EXPECT_EQ(expected_channels, params.channels());
+ EXPECT_EQ(expected_channel_layout, params.channel_layout());
+ EXPECT_EQ(expected_rate, params.sample_rate());
+ EXPECT_EQ(expected_samples, params.frames_per_buffer());
+}
+
+TEST(AudioParameters, GetBytesPerBuffer) {
+ EXPECT_EQ(100, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_MONO, 1000, 8, 100)
+ .GetBytesPerBuffer());
+ EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_MONO, 1000, 16, 100)
+ .GetBytesPerBuffer());
+ EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO, 1000, 8, 100)
+ .GetBytesPerBuffer());
+ EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_MONO, 1000, 8, 200)
+ .GetBytesPerBuffer());
+ EXPECT_EQ(800, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO, 1000, 16, 200)
+ .GetBytesPerBuffer());
+}
+
+TEST(AudioParameters, GetBytesPerSecond) {
+ EXPECT_EQ(0, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_NONE, 0, 0, 0)
+ .GetBytesPerSecond());
+ EXPECT_EQ(0, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO, 0, 0, 0)
+ .GetBytesPerSecond());
+ EXPECT_EQ(0, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_NONE, 100, 0, 0)
+ .GetBytesPerSecond());
+ EXPECT_EQ(0, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_NONE, 0, 8, 0)
+ .GetBytesPerSecond());
+ EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO, 100, 8, 0)
+ .GetBytesPerSecond());
+}
+
+TEST(AudioParameters, Compare) {
+ AudioParameters values[] = {
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 1000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 1000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 1000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 1000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 2000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 2000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 2000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 2000, 16, 200),
+
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 1000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 1000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 1000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 1000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 2000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 2000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 2000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 2000, 16, 200),
+
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
+ 1000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
+ 1000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
+ 1000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
+ 1000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
+ 2000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
+ 2000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
+ 2000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
+ 2000, 16, 200),
+
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, 1000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, 1000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, 1000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, 1000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, 2000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, 2000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, 2000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, 2000, 16, 200),
+ };
+
+ for (size_t i = 0; i < arraysize(values); ++i) {
+ for (size_t j = 0; j < arraysize(values); ++j) {
+ SCOPED_TRACE("i=" + base::IntToString(i) + " j=" + base::IntToString(j));
+ EXPECT_EQ(i < j, values[i] < values[j]);
+ }
+
+ // Verify that a value is never less than itself.
+ EXPECT_FALSE(values[i] < values[i]);
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_power_monitor.cc b/chromium/media/audio/audio_power_monitor.cc
new file mode 100644
index 00000000000..d8b9436060e
--- /dev/null
+++ b/chromium/media/audio/audio_power_monitor.cc
@@ -0,0 +1,94 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_power_monitor.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "base/float_util.h"
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "media/base/audio_bus.h"
+
+namespace media {
+
+// Precomputes the first-order low-pass filter coefficient:
+//   sample_weight = 1 - e^(-1 / (sample_rate * time_constant_in_seconds))
+// so that a step input reaches ~63.2% of its final value after |time_constant|.
+AudioPowerMonitor::AudioPowerMonitor(
+ int sample_rate, const base::TimeDelta& time_constant)
+ : sample_weight_(
+ 1.0f - expf(-1.0f / (sample_rate * time_constant.InSecondsF()))) {
+ Reset();
+}
+
+AudioPowerMonitor::~AudioPowerMonitor() {
+}
+
+// Zeroes both the scan-side accumulators (average_power_, has_clipped_) and
+// the reader-side copies (power_reading_, clipped_reading_). Per the header,
+// this must not be called while another thread is in Scan().
+void AudioPowerMonitor::Reset() {
+ power_reading_ = average_power_ = 0.0f;
+ clipped_reading_ = has_clipped_ = false;
+}
+
+// Folds |num_frames| samples from each channel of |buffer| into the running
+// power average (mean across channels of the per-channel filtered power), and
+// latches clip status. Real-time safe: no blocking operations.
+void AudioPowerMonitor::Scan(const AudioBus& buffer, int num_frames) {
+ DCHECK_LE(num_frames, buffer.frames());
+ const int num_channels = buffer.channels();
+ if (num_frames <= 0 || num_channels <= 0)
+ return;
+
+ // Calculate a new average power by applying a first-order low-pass filter
+ // over the audio samples in |buffer|.
+ //
+ // TODO(miu): Implement optimized SSE/NEON to more efficiently compute the
+ // results (in media/base/vector_math) in soon-upcoming change.
+ float sum_power = 0.0f;
+ for (int i = 0; i < num_channels; ++i) {
+ float average_power_this_channel = average_power_;
+ bool clipped = false;
+ const float* p = buffer.channel(i);
+ const float* const end_of_samples = p + num_frames;
+ for (; p < end_of_samples; ++p) {
+ const float sample = *p;
+ const float sample_squared = sample * sample;
+ clipped |= (sample_squared > 1.0f);
+ average_power_this_channel +=
+ (sample_squared - average_power_this_channel) * sample_weight_;
+ }
+ // If data in audio buffer is garbage (NaN, or +/-inf whose square is
+ // inf -> NaN after filtering), ignore its effect on the result.
+ if (base::IsNaN(average_power_this_channel)) {
+ average_power_this_channel = average_power_;
+ clipped = false;
+ }
+ sum_power += average_power_this_channel;
+ has_clipped_ |= clipped;
+ }
+
+ // Update accumulated results, with clamping for sanity.
+ average_power_ = std::max(0.0f, std::min(1.0f, sum_power / num_channels));
+
+ // Push results for reading by other threads, non-blocking. Try() never
+ // blocks: if a reader currently holds the lock, results simply continue to
+ // accumulate and are published on a later Scan() call.
+ if (reading_lock_.Try()) {
+ power_reading_ = average_power_;
+ if (has_clipped_) {
+ clipped_reading_ = true;
+ has_clipped_ = false;
+ }
+ reading_lock_.Release();
+ }
+}
+
+// Returns {power in dBFS, clipped-since-last-read}. May block briefly on
+// |reading_lock_|, so callers should not be real-time threads (Scan() only
+// ever Try()s this lock and thus never blocks on a reader).
+std::pair<float, bool> AudioPowerMonitor::ReadCurrentPowerAndClip() {
+ base::AutoLock for_reading(reading_lock_);
+
+ // Convert power level to dBFS units, and pin it down to zero if it is
+ // insignificantly small.
+ const float kInsignificantPower = 1.0e-10f; // -100 dBFS
+ const float power_dbfs = power_reading_ < kInsignificantPower ? zero_power() :
+ 10.0f * log10f(power_reading_);
+
+ // Clip status is sticky across Scan() calls, but is consumed (cleared) by
+ // each read here.
+ const bool clipped = clipped_reading_;
+ clipped_reading_ = false;
+
+ return std::make_pair(power_dbfs, clipped);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/audio_power_monitor.h b/chromium/media/audio/audio_power_monitor.h
new file mode 100644
index 00000000000..f840bbf6b9f
--- /dev/null
+++ b/chromium/media/audio/audio_power_monitor.h
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_POWER_MONITOR_H_
+#define MEDIA_AUDIO_AUDIO_POWER_MONITOR_H_
+
+#include <limits>
+#include <utility>
+
+#include "base/callback.h"
+#include "base/synchronization/lock.h"
+#include "media/base/media_export.h"
+
+// An audio signal power monitor. It is periodically provided an AudioBus by
+// the native audio thread, and the audio samples in each channel are analyzed
+// to determine the average power of the signal over a time period. Here
+// "average power" is a running average calculated by using a first-order
+// low-pass filter over the square of the samples scanned. Whenever reporting
+// the power level, this running average is converted to dBFS (decibels relative
+// to full-scale) units.
+//
+// Note that extreme care has been taken to make the AudioPowerMonitor::Scan()
+// method safe to be called on the native audio thread. The code acquires no
+// locks, nor engages in any operation that could result in an
+// undetermined/unbounded amount of run-time.
+
+namespace base {
+class TimeDelta;
+}
+
+namespace media {
+
+class AudioBus;
+
+class MEDIA_EXPORT AudioPowerMonitor {
+ public:
+ // |sample_rate| is the audio signal sample rate (Hz). |time_constant|
+ // characterizes how samples are averaged over time to determine the power
+ // level; and is the amount of time it takes a zero power level to increase to
+ // ~63.2% of maximum given a step input signal.
+ AudioPowerMonitor(int sample_rate, const base::TimeDelta& time_constant);
+
+ ~AudioPowerMonitor();
+
+ // Reset power monitor to initial state (zero power level). This should not
+ // be called while another thread is scanning.
+ void Reset();
+
+ // Scan more |frames| of audio data from |buffer|. It is safe to call this
+ // from a real-time priority thread. The running average is the mean, across
+ // channels, of each channel's low-pass-filtered squared samples.
+ void Scan(const AudioBus& buffer, int frames);
+
+ // Returns the current power level in dBFS and clip status. Clip status is
+ // true whenever any *one* sample scanned exceeded maximum amplitude since
+ // this method's last invocation. It is safe to call this method from any
+ // thread.
+ std::pair<float, bool> ReadCurrentPowerAndClip();
+
+ // dBFS value corresponding to zero power in the audio signal.
+ static float zero_power() { return -std::numeric_limits<float>::infinity(); }
+
+ // dBFS value corresponding to maximum power in the audio signal.
+ static float max_power() { return 0.0f; }
+
+ private:
+ // The weight applied when averaging-in each sample. Computed from the
+ // |sample_rate| and |time_constant|.
+ const float sample_weight_;
+
+ // Accumulated results over one or more calls to Scan(). These should only be
+ // touched by the thread invoking Scan().
+ float average_power_;
+ bool has_clipped_;
+
+ // Copies of power and clip status, used to deliver results synchronously
+ // across threads. Scan() only ever Try()s |reading_lock_|, so the scanning
+ // thread never blocks on a reader.
+ base::Lock reading_lock_;
+ float power_reading_;
+ bool clipped_reading_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioPowerMonitor);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_POWER_MONITOR_H_
diff --git a/chromium/media/audio/audio_power_monitor_unittest.cc b/chromium/media/audio/audio_power_monitor_unittest.cc
new file mode 100644
index 00000000000..1289de0ab47
--- /dev/null
+++ b/chromium/media/audio/audio_power_monitor_unittest.cc
@@ -0,0 +1,304 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_power_monitor.h"
+
+#include <limits>
+
+#include "base/time/time.h"
+#include "media/base/audio_bus.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const int kSampleRate = 48000;
+static const int kFramesPerBuffer = 128;
+
+static const int kTimeConstantMillis = 5;
+
+namespace {
+
+// Container for each parameterized test's data (input and expected results).
+class TestScenario {
+ public:
+ // |data| is a per-channel sample pattern (num_channels * num_frames floats,
+ // channel-major) that will be tiled to fill a kFramesPerBuffer-long bus.
+ TestScenario(const float* data, int num_channels, int num_frames,
+ float expected_power, bool expected_clipped)
+ : expected_power_(expected_power), expected_clipped_(expected_clipped) {
+ CreatePopulatedBuffer(data, num_channels, num_frames);
+ }
+
+ // Copy constructor and assignment operator for ::testing::Values(...).
+ // Deep-copies the AudioBus since scoped_ptr is not copyable.
+ TestScenario(const TestScenario& other) { *this = other; }
+ TestScenario& operator=(const TestScenario& other) {
+ this->expected_power_ = other.expected_power_;
+ this->expected_clipped_ = other.expected_clipped_;
+ this->bus_ = AudioBus::Create(other.bus_->channels(), other.bus_->frames());
+ other.bus_->CopyTo(this->bus_.get());
+ return *this;
+ }
+
+ // Returns this TestScenario, but with a bad sample value placed in the middle
+ // of channel 0.
+ TestScenario WithABadSample(float bad_value) const {
+ TestScenario result(*this);
+ result.bus_->channel(0)[result.bus_->frames() / 2] = bad_value;
+ return result;
+ }
+
+ const AudioBus& data() const {
+ return *bus_;
+ }
+
+ float expected_power() const {
+ return expected_power_;
+ }
+
+ bool expected_clipped() const {
+ return expected_clipped_;
+ }
+
+ private:
+ // Creates an AudioBus, sized and populated with kFramesPerBuffer frames of
+ // data. The given test |data| is repeated to fill the buffer.
+ void CreatePopulatedBuffer(
+ const float* data, int num_channels, int num_frames) {
+ bus_ = AudioBus::Create(num_channels, kFramesPerBuffer);
+ for (int ch = 0; ch < num_channels; ++ch) {
+ for (int frames = 0; frames < kFramesPerBuffer; frames += num_frames) {
+ // Final copy may be partial when kFramesPerBuffer is not a multiple of
+ // |num_frames|.
+ const int num_to_copy = std::min(num_frames, kFramesPerBuffer - frames);
+ memcpy(bus_->channel(ch) + frames, data + num_frames * ch,
+ sizeof(float) * num_to_copy);
+ }
+ }
+ }
+
+ // Expected steady-state power in dBFS for this signal.
+ float expected_power_;
+ // Whether this signal is expected to trip the clip detector.
+ bool expected_clipped_;
+ scoped_ptr<AudioBus> bus_;
+};
+
+// Value printer for TestScenario. Required to prevent Valgrind "access to
+// uninitialized memory" errors (http://crbug.com/263315).
+::std::ostream& operator<<(::std::ostream& os, const TestScenario& ts) {
+ return os << "{" << ts.data().channels() << "-channel signal} --> {"
+ << ts.expected_power() << " dBFS, "
+ << (ts.expected_clipped() ? "clipped" : "not clipped")
+ << "}";
+}
+
+// An observer that receives power measurements. Each power measurement
+// should make progress towards the goal value.
+class MeasurementObserver {
+ public:
+ MeasurementObserver(float goal_power_measurement, bool goal_clipped)
+ : goal_power_measurement_(goal_power_measurement),
+ goal_clipped_(goal_clipped), measurement_count_(0),
+ last_power_measurement_(AudioPowerMonitor::zero_power()),
+ last_clipped_(false) {}
+
+ int measurement_count() const {
+ return measurement_count_;
+ }
+
+ float last_power_measurement() const {
+ return last_power_measurement_;
+ }
+
+ bool last_clipped() const {
+ return last_clipped_;
+ }
+
+ // Records one measurement and checks monotonic convergence: the first call
+ // decides the expected direction, later calls assert progress toward (or
+ // stability at) the goal.
+ void OnPowerMeasured(float cur_power_measurement, bool clipped) {
+ if (measurement_count_ == 0) {
+ measurements_should_increase_ =
+ (cur_power_measurement < goal_power_measurement_);
+ } else {
+ SCOPED_TRACE(::testing::Message()
+ << "Power: goal=" << goal_power_measurement_
+ << "; last=" << last_power_measurement_
+ << "; cur=" << cur_power_measurement);
+
+ if (last_power_measurement_ != goal_power_measurement_) {
+ if (measurements_should_increase_) {
+ EXPECT_LE(last_power_measurement_, cur_power_measurement)
+ << "Measurements should be monotonically increasing.";
+ } else {
+ EXPECT_GE(last_power_measurement_, cur_power_measurement)
+ << "Measurements should be monotonically decreasing.";
+ }
+ } else {
+ EXPECT_EQ(last_power_measurement_, cur_power_measurement)
+ << "Measurements are numerically unstable at goal value.";
+ }
+ }
+
+ last_power_measurement_ = cur_power_measurement;
+ last_clipped_ = clipped;
+ ++measurement_count_;
+ }
+
+ private:
+ const float goal_power_measurement_;
+ // NOTE(review): |goal_clipped_| is stored but never read; the clip
+ // expectation is asserted by the caller via last_clipped() instead.
+ const bool goal_clipped_;
+ int measurement_count_;
+ // Direction of expected convergence; set on the first measurement.
+ bool measurements_should_increase_;
+ float last_power_measurement_;
+ bool last_clipped_;
+
+ DISALLOW_COPY_AND_ASSIGN(MeasurementObserver);
+};
+
+} // namespace
+
+// Fixture parameterized over TestScenario; owns the AudioPowerMonitor under
+// test, configured with the file-level sample rate and time constant.
+class AudioPowerMonitorTest : public ::testing::TestWithParam<TestScenario> {
+ public:
+ AudioPowerMonitorTest()
+ : power_monitor_(kSampleRate,
+ base::TimeDelta::FromMilliseconds(kTimeConstantMillis)) {
+ }
+
+ // Repeatedly feeds |bus| to the monitor and asserts the readings converge
+ // monotonically to |power|/|clipped| (compared at whole-dBFS precision).
+ void FeedAndCheckExpectedPowerIsMeasured(
+ const AudioBus& bus, float power, bool clipped) {
+ // Feed the AudioPowerMonitor, read measurements from it, and record them in
+ // MeasurementObserver.
+ static const int kNumFeedIters = 100;
+ MeasurementObserver observer(power, clipped);
+ for (int i = 0; i < kNumFeedIters; ++i) {
+ power_monitor_.Scan(bus, bus.frames());
+ const std::pair<float, bool>& reading =
+ power_monitor_.ReadCurrentPowerAndClip();
+ observer.OnPowerMeasured(reading.first, reading.second);
+ }
+
+ // Check that the results recorded by the observer are the same whole-number
+ // dBFS.
+ EXPECT_EQ(static_cast<int>(power),
+ static_cast<int>(observer.last_power_measurement()));
+ EXPECT_EQ(clipped, observer.last_clipped());
+ }
+
+ private:
+ AudioPowerMonitor power_monitor_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioPowerMonitorTest);
+};
+
+// Each TestScenario (see INSTANTIATE_TEST_CASE_P below) supplies the input
+// signal and its expected power/clip results.
+TEST_P(AudioPowerMonitorTest, MeasuresPowerOfSignal) {
+ const TestScenario& scenario = GetParam();
+
+ scoped_ptr<AudioBus> zeroed_bus =
+ AudioBus::Create(scenario.data().channels(), scenario.data().frames());
+ zeroed_bus->Zero();
+
+ // Send a "zero power" audio signal, then this scenario's audio signal, then
+ // the "zero power" audio signal again; testing that the power monitor
+ // measurements match expected values.
+ FeedAndCheckExpectedPowerIsMeasured(
+ *zeroed_bus, AudioPowerMonitor::zero_power(), false);
+ FeedAndCheckExpectedPowerIsMeasured(
+ scenario.data(), scenario.expected_power(), scenario.expected_clipped());
+ FeedAndCheckExpectedPowerIsMeasured(
+ *zeroed_bus, AudioPowerMonitor::zero_power(), false);
+}
+
+static const float kMonoSilentNoise[] = {
+ 0.01f, -0.01f
+};
+
+static const float kMonoMaxAmplitude[] = {
+ 1.0f
+};
+
+static const float kMonoMaxAmplitude2[] = {
+ -1.0f, 1.0f
+};
+
+static const float kMonoHalfMaxAmplitude[] = {
+ 0.5f, -0.5f, 0.5f, -0.5f
+};
+
+static const float kMonoAmplitudeClipped[] = {
+ 2.0f, -2.0f
+};
+
+static const float kMonoMaxAmplitudeWithClip[] = {
+ 2.0f, 0.0, 0.0f, 0.0f
+};
+
+static const float kMonoMaxAmplitudeWithClip2[] = {
+ 4.0f, 0.0, 0.0f, 0.0f
+};
+
+static const float kStereoSilentNoise[] = {
+ // left channel
+ 0.005f, -0.005f,
+ // right channel
+ 0.005f, -0.005f
+};
+
+static const float kStereoMaxAmplitude[] = {
+ // left channel
+ 1.0f, -1.0f,
+ // right channel
+ -1.0f, 1.0f
+};
+
+static const float kRightChannelMaxAmplitude[] = {
+ // left channel
+ 0.0f, 0.0f, 0.0f, 0.0f,
+ // right channel
+ -1.0f, 1.0f, -1.0f, 1.0f
+};
+
+static const float kLeftChannelHalfMaxAmplitude[] = {
+ // left channel
+ 0.5f, -0.5f, 0.5f, -0.5f,
+ // right channel
+ 0.0f, 0.0f, 0.0f, 0.0f,
+};
+
+static const float kStereoMixed[] = {
+ // left channel
+ 0.5f, -0.5f, 0.5f, -0.5f,
+ // right channel
+ -1.0f, 1.0f, -1.0f, 1.0f
+};
+
+static const float kStereoMixed2[] = {
+ // left channel
+ 1.0f, -1.0f, 0.75f, -0.75f, 0.5f, -0.5f, 0.25f, -0.25f,
+ // right channel
+ 0.25f, -0.25f, 0.5f, -0.5f, 0.75f, -0.75f, 1.0f, -1.0f
+};
+
+INSTANTIATE_TEST_CASE_P(
+ Scenarios, AudioPowerMonitorTest,
+ ::testing::Values(
+ TestScenario(kMonoSilentNoise, 1, 2, -40, false),
+ TestScenario(kMonoMaxAmplitude, 1, 1,
+ AudioPowerMonitor::max_power(), false),
+ TestScenario(kMonoMaxAmplitude2, 1, 2,
+ AudioPowerMonitor::max_power(), false),
+ TestScenario(kMonoHalfMaxAmplitude, 1, 4, -6, false),
+ TestScenario(kMonoAmplitudeClipped, 1, 2,
+ AudioPowerMonitor::max_power(), true),
+ TestScenario(kMonoMaxAmplitudeWithClip, 1, 4,
+ AudioPowerMonitor::max_power(), true),
+ TestScenario(kMonoMaxAmplitudeWithClip2, 1, 4,
+ AudioPowerMonitor::max_power(), true),
+ TestScenario(kMonoSilentNoise, 1, 2,
+ AudioPowerMonitor::zero_power(), false).
+ WithABadSample(std::numeric_limits<float>::infinity()),
+ TestScenario(kMonoHalfMaxAmplitude, 1, 4,
+ AudioPowerMonitor::zero_power(), false).
+ WithABadSample(std::numeric_limits<float>::quiet_NaN()),
+ TestScenario(kStereoSilentNoise, 2, 2, -46, false),
+ TestScenario(kStereoMaxAmplitude, 2, 2,
+ AudioPowerMonitor::max_power(), false),
+ TestScenario(kRightChannelMaxAmplitude, 2, 4, -3, false),
+ TestScenario(kLeftChannelHalfMaxAmplitude, 2, 4, -9, false),
+ TestScenario(kStereoMixed, 2, 4, -2, false),
+ TestScenario(kStereoMixed2, 2, 8, -3, false)));
+
+} // namespace media
diff --git a/chromium/media/audio/audio_source_diverter.h b/chromium/media/audio/audio_source_diverter.h
new file mode 100644
index 00000000000..787ddec7867
--- /dev/null
+++ b/chromium/media/audio/audio_source_diverter.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_SOURCE_DIVERTER_H_
+#define MEDIA_AUDIO_AUDIO_SOURCE_DIVERTER_H_
+
+#include "media/base/media_export.h"
+
+// Audio sources may optionally implement AudioSourceDiverter to temporarily
+// divert audio data to an alternate AudioOutputStream. This allows the audio
+// data to be plumbed to an alternate consumer; for example, a loopback
+// mechanism for audio mirroring.
+
+namespace media {
+
+class AudioOutputStream;
+class AudioParameters;
+
+class MEDIA_EXPORT AudioSourceDiverter {
+public:
+ // Returns the audio parameters of the divertable audio data.
+ virtual const AudioParameters& GetAudioParameters() = 0;
+
+ // Start providing audio data to the given |to_stream|, which is in an
+ // unopened state. |to_stream| remains under the control of the
+ // AudioSourceDiverter.
+ virtual void StartDiverting(AudioOutputStream* to_stream) = 0;
+
+ // Stops diverting audio data to the stream. The AudioSourceDiverter is
+ // responsible for making sure the stream is closed, perhaps asynchronously.
+ virtual void StopDiverting() = 0;
+
+protected:
+ // Protected, non-public destructor: clients cannot delete an implementation
+ // through this interface pointer.
+ virtual ~AudioSourceDiverter() {}
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_SOURCE_DIVERTER_H_
diff --git a/chromium/media/audio/audio_util.cc b/chromium/media/audio/audio_util.cc
new file mode 100644
index 00000000000..42c6c9109fd
--- /dev/null
+++ b/chromium/media/audio/audio_util.cc
@@ -0,0 +1,99 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Software adjust volume of samples, allows each audio stream its own
+// volume without impacting master volume for chrome and other applications.
+
+// Implemented as templates to allow 8, 16 and 32 bit implementations.
+// 8 bit is unsigned and biased by 128.
+
+// TODO(vrk): This file has been running pretty wild and free, and it's likely
+// that a lot of the functions can be simplified and made more elegant. Revisit
+// after other audio cleanup is done. (crbug.com/120319)
+
+#include "media/audio/audio_util.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/time/time.h"
+#include "media/base/media_switches.h"
+
+#if defined(OS_WIN)
+#include "base/win/windows_version.h"
+#endif
+
+namespace media {
+
+// Returns user buffer size as specified on the command line or 0 if no buffer
+// size has been specified. Non-numeric, zero, or negative switch values are
+// treated as "not specified".
+int GetUserBufferSize() {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ int buffer_size = 0;
+ std::string buffer_size_str(cmd_line->GetSwitchValueASCII(
+ switches::kAudioBufferSize));
+ if (base::StringToInt(buffer_size_str, &buffer_size) && buffer_size > 0)
+ return buffer_size;
+
+ return 0;
+}
+
+// Computes a buffer size based on the given |sample_rate|. Must be used in
+// conjunction with AUDIO_PCM_LINEAR. The command-line buffer-size override,
+// when present, takes precedence over the computed value.
+size_t GetHighLatencyOutputBufferSize(int sample_rate) {
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ return user_buffer_size;
+
+ // TODO(vrk/crogers): The buffer sizes that this function computes are
+ // probably overly conservative. However, reducing the buffer size to
+ // 2048-8192 bytes caused crbug.com/108396. This computation should be
+ // revisited while making sure crbug.com/108396 doesn't happen again.
+
+ // The minimum number of samples in a hardware packet.
+ // This value is selected so that we can handle down to 5khz sample rate.
+ static const size_t kMinSamplesPerHardwarePacket = 1024;
+
+ // The maximum number of samples in a hardware packet.
+ // This value is selected so that we can handle up to 192khz sample rate.
+ static const size_t kMaxSamplesPerHardwarePacket = 64 * 1024;
+
+ // This constant governs the hardware audio buffer size, this value should be
+ // chosen carefully.
+ // This value is selected so that we have 8192 samples for 48khz streams.
+ static const size_t kMillisecondsPerHardwarePacket = 170;
+
+ // Select the number of samples that can provide at least
+ // |kMillisecondsPerHardwarePacket| worth of audio data, doubling from the
+ // minimum until the duration threshold (or the maximum) is reached.
+ size_t samples = kMinSamplesPerHardwarePacket;
+ while (samples <= kMaxSamplesPerHardwarePacket &&
+ samples * base::Time::kMillisecondsPerSecond <
+ sample_rate * kMillisecondsPerHardwarePacket) {
+ samples *= 2;
+ }
+ return samples;
+}
+
+#if defined(OS_WIN)
+
+// Returns the number of wave-out buffers to use, preferring a positive value
+// from the kWaveOutBuffers command-line switch over the per-OS defaults.
+int NumberOfWaveOutBuffers() {
+ // Use the user provided buffer count if provided.
+ int buffers = 0;
+ std::string buffers_str(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kWaveOutBuffers));
+ if (base::StringToInt(buffers_str, &buffers) && buffers > 0) {
+ return buffers;
+ }
+
+ // Use 4 buffers for Vista, 3 for everyone else:
+ // - The entire Windows audio stack was rewritten for Windows Vista and wave
+ // out performance was degraded compared to XP.
+ // - The regression was fixed in Windows 7 and most configurations will work
+ // with 2, but some (e.g., some Sound Blasters) still need 3.
+ // - Some XP configurations (even multi-processor ones) also need 3.
+ return (base::win::GetVersion() == base::win::VERSION_VISTA) ? 4 : 3;
+}
+
+#endif
+
+} // namespace media
diff --git a/chromium/media/audio/audio_util.h b/chromium/media/audio/audio_util.h
new file mode 100644
index 00000000000..a11c327aa47
--- /dev/null
+++ b/chromium/media/audio/audio_util.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_UTIL_H_
+#define MEDIA_AUDIO_AUDIO_UTIL_H_
+
+#include "base/basictypes.h"
+#include "build/build_config.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Returns user buffer size as specified on the command line or 0 if no buffer
+// size has been specified.
+MEDIA_EXPORT int GetUserBufferSize();
+
+// Computes a buffer size based on the given |sample_rate|. Must be used in
+// conjunction with AUDIO_PCM_LINEAR.
+MEDIA_EXPORT size_t GetHighLatencyOutputBufferSize(int sample_rate);
+
+#if defined(OS_WIN)
+
+// Returns number of buffers to be used by wave out.
+MEDIA_EXPORT int NumberOfWaveOutBuffers();
+
+#endif // defined(OS_WIN)
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_UTIL_H_
diff --git a/chromium/media/audio/clockless_audio_sink.cc b/chromium/media/audio/clockless_audio_sink.cc
new file mode 100644
index 00000000000..ff809d0541d
--- /dev/null
+++ b/chromium/media/audio/clockless_audio_sink.cc
@@ -0,0 +1,107 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/clockless_audio_sink.h"
+
+#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace media {
+
+// Internal to ClocklessAudioSink. Class is used to call Render() on a separate
+// thread, running as fast as it can read the data.
+class ClocklessAudioSinkThread : public base::DelegateSimpleThread::Delegate {
+ public:
+ explicit ClocklessAudioSinkThread(const AudioParameters& params,
+ AudioRendererSink::RenderCallback* callback)
+ : callback_(callback),
+ audio_bus_(AudioBus::Create(params)),
+ stop_event_(new base::WaitableEvent(false, false)) {}
+
+ // Spawns the render thread. May be called again after Stop() since the
+ // stop event is reset first.
+ void Start() {
+ stop_event_->Reset();
+ thread_.reset(new base::DelegateSimpleThread(this, "ClocklessAudioSink"));
+ thread_->Start();
+ }
+
+ // Generate a signal to stop calling Render(). Blocks until the thread has
+ // joined, then returns the measured playback duration.
+ base::TimeDelta Stop() {
+ stop_event_->Signal();
+ thread_->Join();
+ return playback_time_;
+ }
+
+ private:
+ // Call Render() repeatedly, keeping track of the rendering time.
+ // |playback_time_| spans from the first successful Render() to the most
+ // recent one; iterations that produce no frames just yield the CPU.
+ virtual void Run() OVERRIDE {
+ base::TimeTicks start;
+ while (!stop_event_->IsSignaled()) {
+ int frames_received = callback_->Render(audio_bus_.get(), 0);
+ if (frames_received <= 0) {
+ // No data received, so let other threads run to provide data.
+ base::PlatformThread::YieldCurrentThread();
+ } else if (start.is_null()) {
+ // First time we processed some audio, so record the starting time.
+ start = base::TimeTicks::HighResNow();
+ } else {
+ // Keep track of the last time data was rendered.
+ playback_time_ = base::TimeTicks::HighResNow() - start;
+ }
+ }
+ }
+
+ AudioRendererSink::RenderCallback* callback_;
+ scoped_ptr<AudioBus> audio_bus_;
+ scoped_ptr<base::WaitableEvent> stop_event_;
+ scoped_ptr<base::DelegateSimpleThread> thread_;
+ base::TimeDelta playback_time_;
+};
+
+ClocklessAudioSink::ClocklessAudioSink()
+ : initialized_(false),
+ playing_(false) {}
+
+ClocklessAudioSink::~ClocklessAudioSink() {}
+
+void ClocklessAudioSink::Initialize(const AudioParameters& params,
+ RenderCallback* callback) {
+ DCHECK(!initialized_);
+ thread_.reset(new ClocklessAudioSinkThread(params, callback));
+ initialized_ = true;
+}
+
+// Intentionally a no-op beyond the sanity check: the render thread is
+// actually started by Play().
+void ClocklessAudioSink::Start() {
+ DCHECK(!playing_);
+}
+
+void ClocklessAudioSink::Stop() {
+ DCHECK(initialized_);
+
+ if (!playing_)
+ return;
+
+ // NOTE(review): |playing_| is never cleared here, so a later Play() is a
+ // no-op. This matches the header's "does not support multiple Play()/Pause()
+ // events" contract — confirm intended.
+ playback_time_ = thread_->Stop();
+}
+
+void ClocklessAudioSink::Play() {
+ DCHECK(initialized_);
+
+ if (playing_)
+ return;
+
+ playing_ = true;
+ thread_->Start();
+}
+
+void ClocklessAudioSink::Pause() {
+ Stop();
+}
+
+bool ClocklessAudioSink::SetVolume(double volume) {
+ // Audio is always muted. Only a request for zero volume "succeeds".
+ return volume == 0.0;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/clockless_audio_sink.h b/chromium/media/audio/clockless_audio_sink.h
new file mode 100644
index 00000000000..9e73b1a8817
--- /dev/null
+++ b/chromium/media/audio/clockless_audio_sink.h
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_CLOCKLESS_AUDIO_SINK_H_
+#define MEDIA_AUDIO_CLOCKLESS_AUDIO_SINK_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "base/time/time.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+class AudioBus;
+class ClocklessAudioSinkThread;
+
+// Implementation of an AudioRendererSink that consumes the audio as fast as
+// possible. This class does not support multiple Play()/Pause() events.
+class MEDIA_EXPORT ClocklessAudioSink
+ : NON_EXPORTED_BASE(public AudioRendererSink) {
+ public:
+ ClocklessAudioSink();
+
+ // AudioRendererSink implementation.
+ virtual void Initialize(const AudioParameters& params,
+ RenderCallback* callback) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Pause() OVERRIDE;
+ virtual void Play() OVERRIDE;
+ virtual bool SetVolume(double volume) OVERRIDE;
+
+ // Returns the time taken to consume all the audio. Only meaningful after
+ // Stop()/Pause() has joined the render thread.
+ base::TimeDelta render_time() { return playback_time_; }
+
+ protected:
+ // Ref-counted via AudioRendererSink; destroyed when the last ref drops.
+ virtual ~ClocklessAudioSink();
+
+ private:
+ scoped_ptr<ClocklessAudioSinkThread> thread_;
+ bool initialized_;
+ bool playing_;
+
+ // Time taken in last set of Render() calls.
+ base::TimeDelta playback_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(ClocklessAudioSink);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_CLOCKLESS_AUDIO_SINK_H_
diff --git a/chromium/media/audio/cras/audio_manager_cras.cc b/chromium/media/audio/cras/audio_manager_cras.cc
new file mode 100644
index 00000000000..165d642922c
--- /dev/null
+++ b/chromium/media/audio/cras/audio_manager_cras.cc
@@ -0,0 +1,133 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/cras/audio_manager_cras.h"
+
+#include "base/command_line.h"
+#include "base/environment.h"
+#include "base/logging.h"
+#include "base/nix/xdg_util.h"
+#include "base/stl_util.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/cras/cras_input.h"
+#include "media/audio/cras/cras_unified.h"
+#include "media/base/channel_layout.h"
+
+namespace media {
+
+// Maximum number of output streams that can be open simultaneously.
+static const int kMaxOutputStreams = 50;
+
+// Default sample rate for input and output streams.
+static const int kDefaultSampleRate = 48000;
+
+const char AudioManagerCras::kLoopbackDeviceId[] = "loopback";
+
+bool AudioManagerCras::HasAudioOutputDevices() {
+ return true;
+}
+
+bool AudioManagerCras::HasAudioInputDevices() {
+ return true;
+}
+
+AudioManagerCras::AudioManagerCras() {
+ SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+}
+
+AudioManagerCras::~AudioManagerCras() {
+ Shutdown();
+}
+
+void AudioManagerCras::ShowAudioInputSettings() {
+ NOTIMPLEMENTED();
+}
+
+// Appends the available CRAS input devices to |device_names|, which must
+// start out empty.
+void AudioManagerCras::GetAudioInputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ GetCrasAudioInputDevices(device_names);
+ // NOTE(review): trailing |return| is redundant in a void function.
+ return;
+}
+
+// Returns fixed input parameters; |device_id| is currently ignored.
+AudioParameters AudioManagerCras::GetInputStreamParameters(
+ const std::string& device_id) {
+ static const int kDefaultInputBufferSize = 1024;
+ // TODO(hshi): Fine-tune audio parameters based on |device_id|. The optimal
+ // parameters for the loopback stream may differ from the default.
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kDefaultSampleRate, 16, kDefaultInputBufferSize);
+}
+
+void AudioManagerCras::GetCrasAudioInputDevices(
+ media::AudioDeviceNames* device_names) {
+ // Cras will route audio from a proper physical device automatically.
+ device_names->push_back(
+ AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
+ AudioManagerBase::kDefaultDeviceId));
+}
+
+AudioOutputStream* AudioManagerCras::MakeLinearOutputStream(
+ const AudioParameters& params) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return MakeOutputStream(params);
+}
+
+AudioOutputStream* AudioManagerCras::MakeLowLatencyOutputStream(
+ const AudioParameters& params, const std::string& input_device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ // TODO(dgreid): Open the correct input device for unified IO.
+ return MakeOutputStream(params);
+}
+
+AudioInputStream* AudioManagerCras::MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return MakeInputStream(params, device_id);
+}
+
+AudioInputStream* AudioManagerCras::MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ return MakeInputStream(params, device_id);
+}
+
+// Builds output parameters, starting from stereo/48kHz/16-bit defaults,
+// mirroring |input_params| when valid, and letting the user's command-line
+// buffer-size override win last.
+AudioParameters AudioManagerCras::GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) {
+ static const int kDefaultOutputBufferSize = 512;
+
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ int sample_rate = kDefaultSampleRate;
+ int buffer_size = kDefaultOutputBufferSize;
+ int bits_per_sample = 16;
+ int input_channels = 0;
+ if (input_params.IsValid()) {
+ sample_rate = input_params.sample_rate();
+ bits_per_sample = input_params.bits_per_sample();
+ channel_layout = input_params.channel_layout();
+ input_channels = input_params.input_channels();
+ buffer_size = input_params.frames_per_buffer();
+ }
+
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ buffer_size = user_buffer_size;
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
+ sample_rate, bits_per_sample, buffer_size);
+}
+
+AudioOutputStream* AudioManagerCras::MakeOutputStream(
+ const AudioParameters& params) {
+ return new CrasUnifiedStream(params, this);
+}
+
+AudioInputStream* AudioManagerCras::MakeInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ return new CrasInputStream(params, this, device_id);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/cras/audio_manager_cras.h b/chromium/media/audio/cras/audio_manager_cras.h
new file mode 100644
index 00000000000..fdc5b02688a
--- /dev/null
+++ b/chromium/media/audio/cras/audio_manager_cras.h
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_CRAS_AUDIO_MANAGER_CRAS_H_
+#define MEDIA_AUDIO_CRAS_AUDIO_MANAGER_CRAS_H_
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "media/audio/audio_manager_base.h"
+
+namespace media {
+
+class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
+ public:
+ // Unique ID of the "loopback" input device. This device captures post-mix,
+ // pre-DSP system audio.
+ static const char kLoopbackDeviceId[];
+
+ AudioManagerCras();
+
+ // AudioManager implementation.
+ virtual bool HasAudioOutputDevices() OVERRIDE;
+ virtual bool HasAudioInputDevices() OVERRIDE;
+ virtual void ShowAudioInputSettings() OVERRIDE;
+ virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
+ OVERRIDE;
+ virtual AudioParameters GetInputStreamParameters(
+ const std::string& device_id) OVERRIDE;
+
+ // AudioManagerBase implementation.
+ virtual AudioOutputStream* MakeLinearOutputStream(
+ const AudioParameters& params) OVERRIDE;
+ virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& input_device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+
+ protected:
+ virtual ~AudioManagerCras();
+
+ virtual AudioParameters GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) OVERRIDE;
+
+ private:
+ // Gets a list of available cras input devices.
+ void GetCrasAudioInputDevices(media::AudioDeviceNames* device_names);
+
+ // Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
+ AudioOutputStream* MakeOutputStream(const AudioParameters& params);
+
+ // Called by MakeLinearInputStream and MakeLowLatencyInputStream.
+ AudioInputStream* MakeInputStream(const AudioParameters& params,
+ const std::string& device_id);
+
+ DISALLOW_COPY_AND_ASSIGN(AudioManagerCras);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_CRAS_AUDIO_MANAGER_CRAS_H_
diff --git a/chromium/media/audio/cras/cras_input.cc b/chromium/media/audio/cras/cras_input.cc
new file mode 100644
index 00000000000..a82fe283f7a
--- /dev/null
+++ b/chromium/media/audio/cras/cras_input.cc
@@ -0,0 +1,283 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/cras/cras_input.h"
+
+#include <math.h>
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/cras/audio_manager_cras.h"
+#include "media/audio/linux/alsa_util.h"
+
+namespace media {
+
+CrasInputStream::CrasInputStream(const AudioParameters& params,
+ AudioManagerCras* manager,
+ const std::string& device_id)
+ : audio_manager_(manager),
+ bytes_per_frame_(0),
+ callback_(NULL),
+ client_(NULL),
+ params_(params),
+ started_(false),
+ stream_id_(0),
+ stream_direction_(device_id == AudioManagerCras::kLoopbackDeviceId
+ ? CRAS_STREAM_POST_MIX_PRE_DSP
+ : CRAS_STREAM_INPUT) {
+ DCHECK(audio_manager_);
+}
+
+CrasInputStream::~CrasInputStream() {
+ DCHECK(!client_);
+}
+
+bool CrasInputStream::Open() {
+ if (client_) {
+ NOTREACHED() << "CrasInputStream already open";
+ return false; // Already open.
+ }
+
+ // Sanity check input values.
+ if (params_.sample_rate() <= 0) {
+ DLOG(WARNING) << "Unsupported audio frequency.";
+ return false;
+ }
+
+ if (AudioParameters::AUDIO_PCM_LINEAR != params_.format() &&
+ AudioParameters::AUDIO_PCM_LOW_LATENCY != params_.format()) {
+ DLOG(WARNING) << "Unsupported audio format.";
+ return false;
+ }
+
+ snd_pcm_format_t pcm_format =
+ alsa_util::BitsToFormat(params_.bits_per_sample());
+ if (pcm_format == SND_PCM_FORMAT_UNKNOWN) {
+ DLOG(WARNING) << "Unsupported bits/sample: " << params_.bits_per_sample();
+ return false;
+ }
+
+ // Create the client and connect to the CRAS server.
+ if (cras_client_create(&client_) < 0) {
+ DLOG(WARNING) << "Couldn't create CRAS client.\n";
+ client_ = NULL;
+ return false;
+ }
+
+ if (cras_client_connect(client_)) {
+ DLOG(WARNING) << "Couldn't connect CRAS client.\n";
+ cras_client_destroy(client_);
+ client_ = NULL;
+ return false;
+ }
+
+ // Then start running the client.
+ if (cras_client_run_thread(client_)) {
+ DLOG(WARNING) << "Couldn't run CRAS client.\n";
+ cras_client_destroy(client_);
+ client_ = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+void CrasInputStream::Close() {
+ if (client_) {
+ cras_client_stop(client_);
+ cras_client_destroy(client_);
+ client_ = NULL;
+ }
+
+ if (callback_) {
+ callback_->OnClose(this);
+ callback_ = NULL;
+ }
+
+ // Signal to the manager that we're closed and can be removed.
+ // Should be last call in the method as it deletes "this".
+ audio_manager_->ReleaseInputStream(this);
+}
+
+void CrasInputStream::Start(AudioInputCallback* callback) {
+ DCHECK(client_);
+ DCHECK(callback);
+
+ // If already playing, stop before re-starting.
+ if (started_)
+ return;
+
+ StartAgc();
+
+ callback_ = callback;
+ LOG(ERROR) << "Input Start";
+
+ // Prepare |audio_format| and |stream_params| for the stream we
+ // will create.
+ cras_audio_format* audio_format = cras_audio_format_create(
+ alsa_util::BitsToFormat(params_.bits_per_sample()),
+ params_.sample_rate(),
+ params_.channels());
+ if (!audio_format) {
+ DLOG(WARNING) << "Error setting up audio parameters.";
+ callback_->OnError(this);
+ callback_ = NULL;
+ return;
+ }
+
+ unsigned int frames_per_packet = params_.frames_per_buffer();
+ cras_stream_params* stream_params = cras_client_stream_params_create(
+ stream_direction_,
+ frames_per_packet, // Total latency.
+ frames_per_packet, // Call back when this many ready.
+ frames_per_packet, // Minimum Callback level ignored for capture streams.
+ CRAS_STREAM_TYPE_DEFAULT,
+ 0, // Unused flags.
+ this,
+ CrasInputStream::SamplesReady,
+ CrasInputStream::StreamError,
+ audio_format);
+ if (!stream_params) {
+ DLOG(WARNING) << "Error setting up stream parameters.";
+ callback_->OnError(this);
+ callback_ = NULL;
+ cras_audio_format_destroy(audio_format);
+ return;
+ }
+
+ // Before starting the stream, save the number of bytes in a frame for use in
+ // the callback.
+ bytes_per_frame_ = cras_client_format_bytes_per_frame(audio_format);
+
+ // Adding the stream will start the audio callbacks.
+ if (cras_client_add_stream(client_, &stream_id_, stream_params)) {
+ DLOG(WARNING) << "Failed to add the stream.";
+ callback_->OnError(this);
+ callback_ = NULL;
+ }
+
+ // Done with config params.
+ cras_audio_format_destroy(audio_format);
+ cras_client_stream_params_destroy(stream_params);
+
+ started_ = true;
+}
+
+void CrasInputStream::Stop() {
+ DCHECK(client_);
+
+ if (!callback_ || !started_)
+ return;
+
+ StopAgc();
+
+ // Removing the stream from the client stops audio.
+ cras_client_rm_stream(client_, stream_id_);
+
+ started_ = false;
+}
+
+// Static callback asking for samples. Run on high priority thread.
+int CrasInputStream::SamplesReady(cras_client* client,
+ cras_stream_id_t stream_id,
+ uint8* samples,
+ size_t frames,
+ const timespec* sample_ts,
+ void* arg) {
+ CrasInputStream* me = static_cast<CrasInputStream*>(arg);
+ me->ReadAudio(frames, samples, sample_ts);
+ return frames;
+}
+
+// Static callback for stream errors.
+int CrasInputStream::StreamError(cras_client* client,
+ cras_stream_id_t stream_id,
+ int err,
+ void* arg) {
+ CrasInputStream* me = static_cast<CrasInputStream*>(arg);
+ me->NotifyStreamError(err);
+ return 0;
+}
+
+void CrasInputStream::ReadAudio(size_t frames,
+ uint8* buffer,
+ const timespec* sample_ts) {
+ DCHECK(callback_);
+
+ timespec latency_ts = {0, 0};
+
+ // Determine latency and pass that on to the sink. sample_ts is the wall time
+ // indicating when the first sample in the buffer was captured. Convert that
+ // to latency in bytes.
+ cras_client_calc_capture_latency(sample_ts, &latency_ts);
+ double latency_usec =
+ latency_ts.tv_sec * base::Time::kMicrosecondsPerSecond +
+ latency_ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond;
+ double frames_latency =
+ latency_usec * params_.sample_rate() / base::Time::kMicrosecondsPerSecond;
+ unsigned int bytes_latency =
+ static_cast<unsigned int>(frames_latency * bytes_per_frame_);
+
+ // Update the AGC volume level once every second. Note that, |volume| is
+ // also updated each time SetVolume() is called through IPC by the
+ // render-side AGC.
+ double normalized_volume = 0.0;
+ GetAgcVolume(&normalized_volume);
+
+ callback_->OnData(this,
+ buffer,
+ frames * bytes_per_frame_,
+ bytes_latency,
+ normalized_volume);
+}
+
+void CrasInputStream::NotifyStreamError(int err) {
+ if (callback_)
+ callback_->OnError(this);
+}
+
+double CrasInputStream::GetMaxVolume() {
+ DCHECK(client_);
+
+ // Capture gain is returned as dB * 100 (150 => 1.5dBFS). Convert the dB
+ // value to a ratio before returning.
+ double dB = cras_client_get_system_max_capture_gain(client_) / 100.0;
+ return GetVolumeRatioFromDecibels(dB);
+}
+
+void CrasInputStream::SetVolume(double volume) {
+ DCHECK(client_);
+
+ // Convert from the passed volume ratio, to dB * 100.
+ double dB = GetDecibelsFromVolumeRatio(volume);
+ cras_client_set_system_capture_gain(client_, static_cast<long>(dB * 100.0));
+
+ // Update the AGC volume level based on the last setting above. Note that,
+ // the volume-level resolution is not infinite and it is therefore not
+ // possible to assume that the volume provided as input parameter can be
+ // used directly. Instead, a new query to the audio hardware is required.
+ // This method does nothing if AGC is disabled.
+ UpdateAgcVolume();
+}
+
+double CrasInputStream::GetVolume() {
+ if (!client_)
+ return 0.0;
+
+ long dB = cras_client_get_system_capture_gain(client_) / 100.0;
+ return GetVolumeRatioFromDecibels(dB);
+}
+
+double CrasInputStream::GetVolumeRatioFromDecibels(double dB) const {
+ return pow(10, dB / 20.0);
+}
+
+double CrasInputStream::GetDecibelsFromVolumeRatio(double volume_ratio) const {
+ return 20 * log10(volume_ratio);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/cras/cras_input.h b/chromium/media/audio/cras/cras_input.h
new file mode 100644
index 00000000000..dd2cb5474a4
--- /dev/null
+++ b/chromium/media/audio/cras/cras_input.h
@@ -0,0 +1,108 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_CRAS_CRAS_INPUT_H_
+#define MEDIA_AUDIO_CRAS_CRAS_INPUT_H_
+
+#include <cras_client.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "media/audio/agc_audio_stream.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerCras;
+
+// Provides an input stream for audio capture based on CRAS, the ChromeOS Audio
+// Server. This object is not thread safe and all methods should be invoked in
+// the thread that created the object.
+class CrasInputStream : public AgcAudioStream<AudioInputStream> {
+ public:
+ // The ctor takes all the usual parameters, plus |manager| which is the
+ // audio manager who is creating this object.
+ CrasInputStream(const AudioParameters& params, AudioManagerCras* manager,
+ const std::string& device_id);
+
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioOutputStream::Close().
+ virtual ~CrasInputStream();
+
+ // Implementation of AudioInputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioInputCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual double GetMaxVolume() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual double GetVolume() OVERRIDE;
+
+ private:
+ // Handles requests to get samples from the provided buffer. This will be
+ // called by the audio server when it has samples ready.
+ static int SamplesReady(cras_client* client,
+ cras_stream_id_t stream_id,
+ uint8* samples,
+ size_t frames,
+ const timespec* sample_ts,
+ void* arg);
+
+ // Handles notification that there was an error with the playback stream.
+ static int StreamError(cras_client* client,
+ cras_stream_id_t stream_id,
+ int err,
+ void* arg);
+
+ // Reads one or more buffers of audio from the device, passes on to the
+ // registered callback. Called from SamplesReady().
+ void ReadAudio(size_t frames, uint8* buffer, const timespec* sample_ts);
+
+ // Deals with an error that occurred in the stream. Called from StreamError().
+ void NotifyStreamError(int err);
+
+ // Convert from dB * 100 to a volume ratio.
+ double GetVolumeRatioFromDecibels(double dB) const;
+
+ // Convert from a volume ratio to dB.
+ double GetDecibelsFromVolumeRatio(double volume_ratio) const;
+
+ // Non-refcounted pointer back to the audio manager.
+ // The AudioManager indirectly holds on to stream objects, so we don't
+ // want circular references. Additionally, stream objects live on the audio
+ // thread, which is owned by the audio manager and we don't want to addref
+ // the manager from that thread.
+ AudioManagerCras* const audio_manager_;
+
+ // Size of frame in bytes.
+ uint32 bytes_per_frame_;
+
+ // Callback to pass audio samples to; valid while recording.
+ AudioInputCallback* callback_;
+
+ // The client used to communicate with the audio server.
+ cras_client* client_;
+
+ // PCM parameters for the stream.
+ const AudioParameters params_;
+
+ // True if the stream has been started.
+ bool started_;
+
+ // ID of the playing stream.
+ cras_stream_id_t stream_id_;
+
+ // Direction of the stream.
+ const CRAS_STREAM_DIRECTION stream_direction_;
+
+ DISALLOW_COPY_AND_ASSIGN(CrasInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_CRAS_CRAS_INPUT_H_
diff --git a/chromium/media/audio/cras/cras_input_unittest.cc b/chromium/media/audio/cras/cras_input_unittest.cc
new file mode 100644
index 00000000000..27ea9858ba1
--- /dev/null
+++ b/chromium/media/audio/cras/cras_input_unittest.cc
@@ -0,0 +1,214 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <unistd.h>
+
+#include <string>
+
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "media/audio/cras/audio_manager_cras.h"
+#include "media/audio/cras/cras_input.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+using testing::AtLeast;
+using testing::Ge;
+using testing::InvokeWithoutArgs;
+using testing::StrictMock;
+
+namespace media {
+
+class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
+ public:
+ MOCK_METHOD5(OnData, void(
+ AudioInputStream*, const uint8*, uint32, uint32, double));
+ MOCK_METHOD1(OnError, void(AudioInputStream*));
+ MOCK_METHOD1(OnClose, void(AudioInputStream*));
+};
+
+class MockAudioManagerCrasInput : public AudioManagerCras {
+ public:
+ // We need to override this function in order to skip checking the number
+ // of active output streams. It is because the number of active streams
+ // is managed inside MakeAudioInputStream, and we don't use
+ // MakeAudioInputStream to create the stream in the tests.
+ virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE {
+ DCHECK(stream);
+ delete stream;
+ }
+};
+
+class CrasInputStreamTest : public testing::Test {
+ protected:
+ CrasInputStreamTest() {
+ mock_manager_.reset(new StrictMock<MockAudioManagerCrasInput>());
+ }
+
+ virtual ~CrasInputStreamTest() {
+ }
+
+ CrasInputStream* CreateStream(ChannelLayout layout) {
+ return CreateStream(layout, kTestFramesPerPacket);
+ }
+
+ CrasInputStream* CreateStream(ChannelLayout layout,
+ int32 samples_per_packet) {
+ AudioParameters params(kTestFormat,
+ layout,
+ kTestSampleRate,
+ kTestBitsPerSample,
+ samples_per_packet);
+ return new CrasInputStream(params, mock_manager_.get(),
+ AudioManagerBase::kDefaultDeviceId);
+ }
+
+ void CaptureSomeFrames(const AudioParameters &params,
+ unsigned int duration_ms) {
+ CrasInputStream* test_stream = new CrasInputStream(
+ params, mock_manager_.get(), AudioManagerBase::kDefaultDeviceId);
+
+ ASSERT_TRUE(test_stream->Open());
+
+ // Allow 8 frames variance for SRC in the callback. Different numbers of
+ // samples can be provided when doing non-integer SRC. For example
+ // converting from 192k to 44.1k is a ratio of 4.35 to 1.
+ MockAudioInputCallback mock_callback;
+ unsigned int expected_size = (kTestFramesPerPacket - 8) *
+ params.channels() *
+ params.bits_per_sample() / 8;
+
+ base::WaitableEvent event(false, false);
+
+ EXPECT_CALL(mock_callback,
+ OnData(test_stream, _, Ge(expected_size), _, _))
+ .WillOnce(InvokeWithoutArgs(&event, &base::WaitableEvent::Signal));
+
+ test_stream->Start(&mock_callback);
+
+ // Wait for samples to be captured.
+ EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
+
+ test_stream->Stop();
+
+ EXPECT_CALL(mock_callback, OnClose(test_stream)).Times(1);
+ test_stream->Close();
+ }
+
+ static const unsigned int kTestBitsPerSample;
+ static const unsigned int kTestCaptureDurationMs;
+ static const ChannelLayout kTestChannelLayout;
+ static const AudioParameters::Format kTestFormat;
+ static const uint32 kTestFramesPerPacket;
+ static const int kTestSampleRate;
+
+ scoped_ptr<StrictMock<MockAudioManagerCrasInput> > mock_manager_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CrasInputStreamTest);
+};
+
+const unsigned int CrasInputStreamTest::kTestBitsPerSample = 16;
+const unsigned int CrasInputStreamTest::kTestCaptureDurationMs = 250;
+const ChannelLayout CrasInputStreamTest::kTestChannelLayout =
+ CHANNEL_LAYOUT_STEREO;
+const AudioParameters::Format CrasInputStreamTest::kTestFormat =
+ AudioParameters::AUDIO_PCM_LINEAR;
+const uint32 CrasInputStreamTest::kTestFramesPerPacket = 1000;
+const int CrasInputStreamTest::kTestSampleRate = 44100;
+
+TEST_F(CrasInputStreamTest, OpenMono) {
+ CrasInputStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
+ EXPECT_TRUE(test_stream->Open());
+ test_stream->Close();
+}
+
+TEST_F(CrasInputStreamTest, OpenStereo) {
+ CrasInputStream* test_stream = CreateStream(CHANNEL_LAYOUT_STEREO);
+ EXPECT_TRUE(test_stream->Open());
+ test_stream->Close();
+}
+
+TEST_F(CrasInputStreamTest, BadBitsPerSample) {
+ AudioParameters bad_bps_params(kTestFormat,
+ kTestChannelLayout,
+ kTestSampleRate,
+ kTestBitsPerSample - 1,
+ kTestFramesPerPacket);
+ CrasInputStream* test_stream = new CrasInputStream(
+ bad_bps_params, mock_manager_.get(), AudioManagerBase::kDefaultDeviceId);
+ EXPECT_FALSE(test_stream->Open());
+ test_stream->Close();
+}
+
+TEST_F(CrasInputStreamTest, BadFormat) {
+ AudioParameters bad_format_params(AudioParameters::AUDIO_LAST_FORMAT,
+ kTestChannelLayout,
+ kTestSampleRate,
+ kTestBitsPerSample,
+ kTestFramesPerPacket);
+ CrasInputStream* test_stream = new CrasInputStream(
+ bad_format_params, mock_manager_.get(),
+ AudioManagerBase::kDefaultDeviceId);
+ EXPECT_FALSE(test_stream->Open());
+ test_stream->Close();
+}
+
+TEST_F(CrasInputStreamTest, BadSampleRate) {
+ AudioParameters bad_rate_params(kTestFormat,
+ kTestChannelLayout,
+ 0,
+ kTestBitsPerSample,
+ kTestFramesPerPacket);
+ CrasInputStream* test_stream = new CrasInputStream(
+ bad_rate_params, mock_manager_.get(), AudioManagerBase::kDefaultDeviceId);
+ EXPECT_FALSE(test_stream->Open());
+ test_stream->Close();
+}
+
+TEST_F(CrasInputStreamTest, SetGetVolume) {
+ CrasInputStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
+ EXPECT_TRUE(test_stream->Open());
+
+ double max_volume = test_stream->GetMaxVolume();
+ EXPECT_GE(max_volume, 1.0);
+
+ test_stream->SetVolume(max_volume / 2);
+
+ double new_volume = test_stream->GetVolume();
+
+ EXPECT_GE(new_volume, 0.0);
+ EXPECT_LE(new_volume, max_volume);
+
+ test_stream->Close();
+}
+
+TEST_F(CrasInputStreamTest, CaptureFrames) {
+ const unsigned int rates[] =
+ {8000, 16000, 22050, 32000, 44100, 48000, 96000, 192000};
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(rates); i++) {
+ SCOPED_TRACE(testing::Message() << "Mono " << rates[i] << "Hz");
+ AudioParameters params_mono(kTestFormat,
+ CHANNEL_LAYOUT_MONO,
+ rates[i],
+ kTestBitsPerSample,
+ kTestFramesPerPacket);
+ CaptureSomeFrames(params_mono, kTestCaptureDurationMs);
+ }
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(rates); i++) {
+ SCOPED_TRACE(testing::Message() << "Stereo " << rates[i] << "Hz");
+ AudioParameters params_stereo(kTestFormat,
+ CHANNEL_LAYOUT_STEREO,
+ rates[i],
+ kTestBitsPerSample,
+ kTestFramesPerPacket);
+ CaptureSomeFrames(params_stereo, kTestCaptureDurationMs);
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/audio/cras/cras_unified.cc b/chromium/media/audio/cras/cras_unified.cc
new file mode 100644
index 00000000000..c1c3ee9228f
--- /dev/null
+++ b/chromium/media/audio/cras/cras_unified.cc
@@ -0,0 +1,369 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/cras/cras_unified.h"
+
+#include <cras_client.h>
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/cras/audio_manager_cras.h"
+#include "media/audio/linux/alsa_util.h"
+
+namespace media {
+
+// Overview of operation:
+// 1) An object of CrasUnifiedStream is created by the AudioManager
+// factory: audio_man->MakeAudioStream().
+// 2) Next some thread will call Open(), at that point a client is created and
+// configured for the correct format and sample rate.
+// 3) Then Start(source) is called and a stream is added to the CRAS client
+// which will create its own thread that periodically calls the source for more
+// data as buffers are being consumed.
+// 4) When finished Stop() is called, which is handled by stopping the stream.
+// 5) Finally Close() is called. It cleans up and notifies the audio manager,
+// which likely will destroy this object.
+//
+// For output-only streams, a unified stream is created with 0 input channels.
+//
+// Simplified data flow for unified streams:
+//
+// +-------------+ +------------------+
+// | CRAS Server | | Chrome Client |
+// +------+------+ Add Stream +---------+--------+
+// |<----------------------------------|
+// | |
+// | buffer_frames captured to shm |
+// |---------------------------------->|
+// | | UnifiedCallback()
+// | | ReadWriteAudio()
+// | |
+// | buffer_frames written to shm |
+// |<----------------------------------|
+// | |
+// ... Repeats for each block. ...
+// | |
+// | |
+// | Remove stream |
+// |<----------------------------------|
+// | |
+//
+// Simplified data flow for output only streams:
+//
+// +-------------+ +------------------+
+// | CRAS Server | | Chrome Client |
+// +------+------+ Add Stream +---------+--------+
+// |<----------------------------------|
+// | |
+// | Near out of samples, request more |
+// |---------------------------------->|
+// | | UnifiedCallback()
+// | | WriteAudio()
+// | |
+// | buffer_frames written to shm |
+// |<----------------------------------|
+// | |
+// ... Repeats for each block. ...
+// | |
+// | |
+// | Remove stream |
+// |<----------------------------------|
+// | |
+//
+// For Unified streams the Chrome client is notified whenever buffer_frames have
+// been captured. For Output streams the client is notified a few milliseconds
+// before the hardware buffer underruns and fills the buffer with another block
+// of audio.
+
+CrasUnifiedStream::CrasUnifiedStream(const AudioParameters& params,
+ AudioManagerCras* manager)
+ : client_(NULL),
+ stream_id_(0),
+ params_(params),
+ bytes_per_frame_(0),
+ is_playing_(false),
+ volume_(1.0),
+ manager_(manager),
+ source_callback_(NULL),
+ stream_direction_(CRAS_STREAM_OUTPUT) {
+ DCHECK(manager_);
+ DCHECK(params_.channels() > 0);
+
+ // Must have at least one input or output. If there are both they must be the
+ // same.
+ int input_channels = params_.input_channels();
+
+ if (input_channels) {
+ // A unified stream for input and output.
+ DCHECK(params_.channels() == input_channels);
+ stream_direction_ = CRAS_STREAM_UNIFIED;
+ input_bus_ = AudioBus::Create(input_channels,
+ params_.frames_per_buffer());
+ }
+
+ output_bus_ = AudioBus::Create(params);
+}
+
+CrasUnifiedStream::~CrasUnifiedStream() {
+ DCHECK(!is_playing_);
+}
+
+bool CrasUnifiedStream::Open() {
+ // Sanity check input values.
+ if (params_.sample_rate() <= 0) {
+ LOG(WARNING) << "Unsupported audio frequency.";
+ return false;
+ }
+
+ if (alsa_util::BitsToFormat(params_.bits_per_sample()) ==
+ SND_PCM_FORMAT_UNKNOWN) {
+ LOG(WARNING) << "Unsupported pcm format";
+ return false;
+ }
+
+ // Create the client and connect to the CRAS server.
+ if (cras_client_create(&client_)) {
+ LOG(WARNING) << "Couldn't create CRAS client.\n";
+ client_ = NULL;
+ return false;
+ }
+
+ if (cras_client_connect(client_)) {
+ LOG(WARNING) << "Couldn't connect CRAS client.\n";
+ cras_client_destroy(client_);
+ client_ = NULL;
+ return false;
+ }
+
+ // Then start running the client.
+ if (cras_client_run_thread(client_)) {
+ LOG(WARNING) << "Couldn't run CRAS client.\n";
+ cras_client_destroy(client_);
+ client_ = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+void CrasUnifiedStream::Close() {
+ if (client_) {
+ cras_client_stop(client_);
+ cras_client_destroy(client_);
+ client_ = NULL;
+ }
+
+ // Signal to the manager that we're closed and can be removed.
+ // Should be last call in the method as it deletes "this".
+ manager_->ReleaseOutputStream(this);
+}
+
+void CrasUnifiedStream::Start(AudioSourceCallback* callback) {
+ CHECK(callback);
+ source_callback_ = callback;
+
+ // Only start if we can enter the playing state.
+ if (is_playing_)
+ return;
+
+ LOG(ERROR) << "Unified Start";
+ // Prepare |audio_format| and |stream_params| for the stream we
+ // will create.
+ cras_audio_format* audio_format = cras_audio_format_create(
+ alsa_util::BitsToFormat(params_.bits_per_sample()),
+ params_.sample_rate(),
+ params_.channels());
+ if (!audio_format) {
+ LOG(WARNING) << "Error setting up audio parameters.";
+ callback->OnError(this);
+ return;
+ }
+
+ cras_stream_params* stream_params = cras_client_unified_params_create(
+ stream_direction_,
+ params_.frames_per_buffer(),
+ CRAS_STREAM_TYPE_DEFAULT,
+ 0,
+ this,
+ CrasUnifiedStream::UnifiedCallback,
+ CrasUnifiedStream::StreamError,
+ audio_format);
+ if (!stream_params) {
+ LOG(WARNING) << "Error setting up stream parameters.";
+ callback->OnError(this);
+ cras_audio_format_destroy(audio_format);
+ return;
+ }
+
+ // Before starting the stream, save the number of bytes in a frame for use in
+ // the callback.
+ bytes_per_frame_ = cras_client_format_bytes_per_frame(audio_format);
+
+ // Adding the stream will start the audio callbacks requesting data.
+ if (cras_client_add_stream(client_, &stream_id_, stream_params) < 0) {
+ LOG(WARNING) << "Failed to add the stream";
+ callback->OnError(this);
+ cras_audio_format_destroy(audio_format);
+ cras_client_stream_params_destroy(stream_params);
+ return;
+ }
+
+ // Set initial volume.
+ cras_client_set_stream_volume(client_, stream_id_, volume_);
+
+ // Done with config params.
+ cras_audio_format_destroy(audio_format);
+ cras_client_stream_params_destroy(stream_params);
+
+ is_playing_ = true;
+}
+
+void CrasUnifiedStream::Stop() {
+ if (!client_)
+ return;
+
+ // Removing the stream from the client stops audio.
+ cras_client_rm_stream(client_, stream_id_);
+
+ is_playing_ = false;
+}
+
+void CrasUnifiedStream::SetVolume(double volume) {
+ if (!client_)
+ return;
+ volume_ = static_cast<float>(volume);
+ cras_client_set_stream_volume(client_, stream_id_, volume_);
+}
+
+void CrasUnifiedStream::GetVolume(double* volume) {
+ *volume = volume_;
+}
+
+uint32 CrasUnifiedStream::GetBytesLatency(
+ const struct timespec& latency_ts) {
+ uint32 latency_usec;
+
+ // Treat negative latency (if we are too slow to render) as 0.
+ if (latency_ts.tv_sec < 0 || latency_ts.tv_nsec < 0) {
+ latency_usec = 0;
+ } else {
+ latency_usec = (latency_ts.tv_sec * base::Time::kMicrosecondsPerSecond) +
+ latency_ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond;
+ }
+
+ double frames_latency =
+ latency_usec * params_.sample_rate() / base::Time::kMicrosecondsPerSecond;
+
+ return static_cast<unsigned int>(frames_latency * bytes_per_frame_);
+}
+
+// Static callback asking for samples.
+int CrasUnifiedStream::UnifiedCallback(cras_client* client,
+ cras_stream_id_t stream_id,
+ uint8* input_samples,
+ uint8* output_samples,
+ unsigned int frames,
+ const timespec* input_ts,
+ const timespec* output_ts,
+ void* arg) {
+ CrasUnifiedStream* me = static_cast<CrasUnifiedStream*>(arg);
+ return me->DispatchCallback(frames,
+ input_samples,
+ output_samples,
+ input_ts,
+ output_ts);
+}
+
+// Static callback for stream errors.
+int CrasUnifiedStream::StreamError(cras_client* client,
+ cras_stream_id_t stream_id,
+ int err,
+ void* arg) {
+ CrasUnifiedStream* me = static_cast<CrasUnifiedStream*>(arg);
+ me->NotifyStreamError(err);
+ return 0;
+}
+
+// Calls the appropriate rendering function for this type of stream.
+uint32 CrasUnifiedStream::DispatchCallback(size_t frames,
+ uint8* input_samples,
+ uint8* output_samples,
+ const timespec* input_ts,
+ const timespec* output_ts) {
+ switch (stream_direction_) {
+ case CRAS_STREAM_OUTPUT:
+ return WriteAudio(frames, output_samples, output_ts);
+ case CRAS_STREAM_INPUT:
+ NOTREACHED() << "CrasUnifiedStream doesn't support input streams.";
+ return 0;
+ case CRAS_STREAM_UNIFIED:
+ return ReadWriteAudio(frames, input_samples, output_samples,
+ input_ts, output_ts);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+// Note these are run from a real time thread, so don't waste cycles here.
+uint32 CrasUnifiedStream::ReadWriteAudio(size_t frames,
+ uint8* input_samples,
+ uint8* output_samples,
+ const timespec* input_ts,
+ const timespec* output_ts) {
+ DCHECK_EQ(frames, static_cast<size_t>(output_bus_->frames()));
+ DCHECK(source_callback_);
+
+ uint32 bytes_per_sample = bytes_per_frame_ / params_.channels();
+ input_bus_->FromInterleaved(input_samples, frames, bytes_per_sample);
+
+ // Determine latency and pass that on to the source. We have the capture time
+ // of the first input sample and the playback time of the next audio sample
+ // passed from the audio server, add them together for total latency.
+ uint32 total_delay_bytes;
+ timespec latency_ts = {0, 0};
+ cras_client_calc_capture_latency(input_ts, &latency_ts);
+ total_delay_bytes = GetBytesLatency(latency_ts);
+ cras_client_calc_playback_latency(output_ts, &latency_ts);
+ total_delay_bytes += GetBytesLatency(latency_ts);
+
+ int frames_filled = source_callback_->OnMoreIOData(
+ input_bus_.get(),
+ output_bus_.get(),
+ AudioBuffersState(0, total_delay_bytes));
+
+ output_bus_->ToInterleaved(frames_filled, bytes_per_sample, output_samples);
+
+ return frames_filled;
+}
+
+uint32 CrasUnifiedStream::WriteAudio(size_t frames,
+ uint8* buffer,
+ const timespec* sample_ts) {
+ DCHECK_EQ(frames, static_cast<size_t>(output_bus_->frames()));
+
+ // Determine latency and pass that on to the source.
+ timespec latency_ts = {0, 0};
+ cras_client_calc_playback_latency(sample_ts, &latency_ts);
+
+ int frames_filled = source_callback_->OnMoreData(
+ output_bus_.get(), AudioBuffersState(0, GetBytesLatency(latency_ts)));
+
+ // Note: If this ever changes to output raw float the data must be clipped and
+ // sanitized since it may come from an untrusted source such as NaCl.
+ output_bus_->ToInterleaved(
+ frames_filled, bytes_per_frame_ / params_.channels(), buffer);
+
+ return frames_filled;
+}
+
+void CrasUnifiedStream::NotifyStreamError(int err) {
+ // This will remove the stream from the client.
+ if (source_callback_)
+ source_callback_->OnError(this);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/cras/cras_unified.h b/chromium/media/audio/cras/cras_unified.h
new file mode 100644
index 00000000000..818763efb49
--- /dev/null
+++ b/chromium/media/audio/cras/cras_unified.h
@@ -0,0 +1,122 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Creates a unified stream based on the cras (ChromeOS audio server) interface.
+//
+// CrasUnifiedStream object is *not* thread-safe and should only be used
+// from the audio thread.
+
+#ifndef MEDIA_AUDIO_LINUX_CRAS_UNIFIED_H_
+#define MEDIA_AUDIO_LINUX_CRAS_UNIFIED_H_
+
+#include <alsa/asoundlib.h>
+#include <cras_client.h>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerCras;
+class AudioParameters;
+
+// Implementation of AudioOutputStream for Chrome OS using the Chrome OS audio
+// server.
+class MEDIA_EXPORT CrasUnifiedStream : public AudioOutputStream {
+ public:
+  // The ctor takes all the usual parameters, plus |manager| which is the
+  // audio manager who is creating this object.
+  CrasUnifiedStream(const AudioParameters& params, AudioManagerCras* manager);
+
+  // The dtor is typically called by the AudioManager only and it is usually
+  // triggered by calling CrasUnifiedStream::Close().
+  virtual ~CrasUnifiedStream();
+
+  // Implementation of AudioOutputStream.
+  virtual bool Open() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual void GetVolume(double* volume) OVERRIDE;
+
+ private:
+  // Converts latency in time to bytes.
+  uint32 GetBytesLatency(const struct timespec& latency);
+
+  // Handles captured audio and fills the output with audio to be played.
+  static int UnifiedCallback(cras_client* client,
+                             cras_stream_id_t stream_id,
+                             uint8* input_samples,
+                             uint8* output_samples,
+                             unsigned int frames,
+                             const timespec* input_ts,
+                             const timespec* output_ts,
+                             void* arg);
+
+  // Handles notification that there was an error with the playback stream.
+  static int StreamError(cras_client* client,
+                         cras_stream_id_t stream_id,
+                         int err,
+                         void* arg);
+
+  // Chooses the correct audio callback based on stream direction.
+  uint32 DispatchCallback(size_t frames,
+                          uint8* input_samples,
+                          uint8* output_samples,
+                          const timespec* input_ts,
+                          const timespec* output_ts);
+
+  // Receives input samples and write output samples for a unified I/O stream.
+  uint32 ReadWriteAudio(size_t frames,
+                        uint8* input_samples,
+                        uint8* output_samples,
+                        const timespec* input_ts,
+                        const timespec* output_ts);
+
+  // Writes audio for a playback stream.
+  uint32 WriteAudio(size_t frames, uint8* buffer, const timespec* sample_ts);
+
+  // Deals with an error that occurred in the stream. Called from StreamError().
+  void NotifyStreamError(int err);
+
+  // The client used to communicate with the audio server.
+  cras_client* client_;
+
+  // ID of the playing stream.
+  cras_stream_id_t stream_id_;
+
+  // PCM parameters for the stream.
+  AudioParameters params_;
+
+  // Size of frame in bytes.
+  uint32 bytes_per_frame_;
+
+  // True if stream is playing.
+  bool is_playing_;
+
+  // Volume level from 0.0 to 1.0.
+  float volume_;
+
+  // Audio manager that created us. Used to report that we've been closed.
+  AudioManagerCras* manager_;
+
+  // Callback to get audio samples.
+  AudioSourceCallback* source_callback_;
+
+  // Container for exchanging data with AudioSourceCallback::OnMoreIOData().
+  scoped_ptr<AudioBus> input_bus_;
+  scoped_ptr<AudioBus> output_bus_;
+
+  // Direction of the stream.
+  CRAS_STREAM_DIRECTION stream_direction_;
+
+  DISALLOW_COPY_AND_ASSIGN(CrasUnifiedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_LINUX_CRAS_UNIFIED_H_
diff --git a/chromium/media/audio/cras/cras_unified_unittest.cc b/chromium/media/audio/cras/cras_unified_unittest.cc
new file mode 100644
index 00000000000..7083eca427e
--- /dev/null
+++ b/chromium/media/audio/cras/cras_unified_unittest.cc
@@ -0,0 +1,157 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/synchronization/waitable_event.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "media/audio/cras/audio_manager_cras.h"
+#include "media/audio/cras/cras_unified.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+using testing::DoAll;
+using testing::InvokeWithoutArgs;
+using testing::Return;
+using testing::SetArgumentPointee;
+using testing::StrictMock;
+
+namespace media {
+
+// Mock AudioSourceCallback used to verify the stream's rendering callbacks.
+class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+  MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
+                               AudioBuffersState buffers_state));
+  MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
+                                 AudioBus* dest,
+                                 AudioBuffersState buffers_state));
+  MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+};
+
+// Mock audio manager. All four pure-virtual stream factories from
+// AudioManagerBase must be mocked so that StrictMock<MockAudioManagerCras>
+// is a concrete, instantiable class.
+class MockAudioManagerCras : public AudioManagerCras {
+ public:
+  MOCK_METHOD0(Init, void());
+  MOCK_METHOD0(HasAudioOutputDevices, bool());
+  MOCK_METHOD0(HasAudioInputDevices, bool());
+  MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
+      const AudioParameters& params));
+  MOCK_METHOD1(MakeLowLatencyOutputStream, AudioOutputStream*(
+      const AudioParameters& params));
+  // This mocks the *input* stream factory; it was previously misnamed
+  // MakeLinearOutputStream, which left MakeLinearInputStream pure virtual
+  // (abstract class) and declared a bogus output/input overload pair.
+  MOCK_METHOD2(MakeLinearInputStream, AudioInputStream*(
+      const AudioParameters& params, const std::string& device_id));
+  MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
+      const AudioParameters& params, const std::string& device_id));
+
+  // We need to override this function in order to skip checking the number
+  // of active output streams. It is because the number of active streams
+  // is managed inside MakeAudioOutputStream, and we don't use
+  // MakeAudioOutputStream to create the stream in the tests.
+  virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE {
+    DCHECK(stream);
+    delete stream;
+  }
+};
+
+// Fixture providing helpers for constructing CrasUnifiedStream instances
+// against a strict mock audio manager.
+class CrasUnifiedStreamTest : public testing::Test {
+ protected:
+  CrasUnifiedStreamTest() {
+    mock_manager_.reset(new StrictMock<MockAudioManagerCras>());
+  }
+
+  virtual ~CrasUnifiedStreamTest() {
+  }
+
+  // Creates a stream with the default packet size.
+  CrasUnifiedStream* CreateStream(ChannelLayout layout) {
+    return CreateStream(layout, kTestFramesPerPacket);
+  }
+
+  // Creates a stream with the given layout and packet size. The caller is
+  // responsible for calling Close() on the returned stream.
+  CrasUnifiedStream* CreateStream(ChannelLayout layout,
+                                  int32 samples_per_packet) {
+    AudioParameters params(kTestFormat, layout, kTestSampleRate,
+                           kTestBitsPerSample, samples_per_packet);
+    return new CrasUnifiedStream(params, mock_manager_.get());
+  }
+
+  MockAudioManagerCras& mock_manager() {
+    return *(mock_manager_.get());
+  }
+
+  // Default stream parameters used by the tests (defined out of line below).
+  static const ChannelLayout kTestChannelLayout;
+  static const int kTestSampleRate;
+  static const int kTestBitsPerSample;
+  static const AudioParameters::Format kTestFormat;
+  static const uint32 kTestFramesPerPacket;
+
+  scoped_ptr<StrictMock<MockAudioManagerCras> > mock_manager_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CrasUnifiedStreamTest);
+};
+
+// Out-of-line definitions for the fixture's default test parameters.
+const ChannelLayout CrasUnifiedStreamTest::kTestChannelLayout =
+    CHANNEL_LAYOUT_STEREO;
+const int CrasUnifiedStreamTest::kTestSampleRate =
+    AudioParameters::kAudioCDSampleRate;
+const int CrasUnifiedStreamTest::kTestBitsPerSample = 16;
+const AudioParameters::Format CrasUnifiedStreamTest::kTestFormat =
+    AudioParameters::AUDIO_PCM_LINEAR;
+const uint32 CrasUnifiedStreamTest::kTestFramesPerPacket = 1000;
+
+TEST_F(CrasUnifiedStreamTest, ConstructedState) {
+  // Closing a stream that was never opened must be safe.
+  CrasUnifiedStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
+  test_stream->Close();
+
+  // Should support surround (the layout below is CHANNEL_LAYOUT_SURROUND).
+  test_stream = CreateStream(CHANNEL_LAYOUT_SURROUND);
+  EXPECT_TRUE(test_stream->Open());
+  test_stream->Close();
+
+  // Bad bits per sample: Open() must fail gracefully.
+  AudioParameters bad_bps_params(kTestFormat, kTestChannelLayout,
+                                 kTestSampleRate, kTestBitsPerSample - 1,
+                                 kTestFramesPerPacket);
+  test_stream = new CrasUnifiedStream(bad_bps_params, mock_manager_.get());
+  EXPECT_FALSE(test_stream->Open());
+  test_stream->Close();
+
+  // Bad sample rate: Open() must fail gracefully.
+  AudioParameters bad_rate_params(kTestFormat, kTestChannelLayout,
+                                  0, kTestBitsPerSample, kTestFramesPerPacket);
+  test_stream = new CrasUnifiedStream(bad_rate_params, mock_manager_.get());
+  EXPECT_FALSE(test_stream->Open());
+  test_stream->Close();
+
+  // Check that Mono works too.
+  test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
+  ASSERT_TRUE(test_stream->Open());
+  test_stream->Close();
+}
+
+// Starts a playback stream and verifies the source callback is invoked.
+TEST_F(CrasUnifiedStreamTest, RenderFrames) {
+  CrasUnifiedStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
+  MockAudioSourceCallback mock_callback;
+
+  ASSERT_TRUE(test_stream->Open());
+
+  base::WaitableEvent event(false, false);
+
+  EXPECT_CALL(mock_callback, OnMoreData(_, _))
+      .WillRepeatedly(DoAll(
+          InvokeWithoutArgs(&event, &base::WaitableEvent::Signal),
+          Return(kTestFramesPerPacket)));
+
+  test_stream->Start(&mock_callback);
+
+  // Wait for the stream to pull samples from the source (this is a playback
+  // stream, so OnMoreData — not capture — drives the event).
+  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
+
+  test_stream->Stop();
+
+  test_stream->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/cross_process_notification.cc b/chromium/media/audio/cross_process_notification.cc
new file mode 100644
index 00000000000..1806f777da3
--- /dev/null
+++ b/chromium/media/audio/cross_process_notification.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/cross_process_notification.h"
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+
+// Default instance is invalid until InitializePair() is called.
+CrossProcessNotification::CrossProcessNotification() {}
+
+CrossProcessNotification::WaitForMultiple::WaitForMultiple(
+    const Notifications* notifications) {
+  Reset(notifications);
+}
+
+int CrossProcessNotification::WaitForMultiple::Wait() {
+  DCHECK(CalledOnValidThread());
+  int ret = WaitMultiple(*notifications_, wait_offset_);
+  // Start the next wait just past the notification that was signaled so a
+  // busy notification cannot starve the others.
+  wait_offset_ = (ret + 1) % notifications_->size();
+  return ret;
+}
+
+void CrossProcessNotification::WaitForMultiple::Reset(
+    const Notifications* notifications) {
+  DCHECK(CalledOnValidThread());
+  wait_offset_ = 0;
+  notifications_ = notifications;
+  DCHECK(!notifications_->empty());
+}
diff --git a/chromium/media/audio/cross_process_notification.h b/chromium/media/audio/cross_process_notification.h
new file mode 100644
index 00000000000..16f2fc07fcf
--- /dev/null
+++ b/chromium/media/audio/cross_process_notification.h
@@ -0,0 +1,172 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_CROSS_PROCESS_NOTIFICATION_H_
+#define MEDIA_AUDIO_CROSS_PROCESS_NOTIFICATION_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/process/process.h"
+#include "base/threading/non_thread_safe.h"
+#include "media/base/media_export.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#else
+#include "base/file_descriptor_posix.h"
+#include "base/sync_socket.h"
+#endif
+
+// A mechanism to synchronize access to a shared resource between two parties
+// when the usage pattern resembles that of two players playing a game of chess.
+// Each end has an instance of CrossProcessNotification and calls Signal() when
+// it has finished using the shared resource.
+// Before accessing the resource, it must call Wait() in order to know when the
+// other end has called Signal().
+//
+// Here's some pseudo code for how this class can be used:
+//
+// This method is used by both processes as it's a general way to use the
+// shared resource and then grant the privilege to the other process:
+//
+// void WriteToSharedMemory(CrossProcessNotification* notification,
+// SharedMemory* mem,
+// const char my_char) {
+// notification->Wait(); // Wait for the other process to yield access.
+// reinterpret_cast<char*>(mem->memory())[0] = my_char;
+// notification->Signal(); // Grant the other process access.
+// }
+//
+// Process A:
+//
+// class A {
+// public:
+// void Initialize(base::ProcessHandle process_b) {
+// mem_.CreateNamed("foo", false, 1024);
+//
+// CrossProcessNotification other;
+// CHECK(CrossProcessNotification::InitializePair(&notification_, &other));
+// CrossProcessNotification::IPCHandle handle_1, handle_2;
+// CHECK(other.ShareToProcess(process_b, &handle_1, &handle_2));
+// // This could be implemented by using some IPC mechanism
+// // such as MessageLoop.
+// SendToProcessB(mem_, handle_1, handle_2);
+// // Allow process B the first chance to write to the memory:
+// notification_.Signal();
+// // Once B is done, we'll write 'A' to the shared memory.
+// WriteToSharedMemory(&notification_, &mem_, 'A');
+// }
+//
+// CrossProcessNotification notification_;
+// SharedMemory mem_;
+// };
+//
+// Process B:
+//
+// class B {
+// public:
+// // Called when we receive the IPC message from A.
+// void Initialize(SharedMemoryHandle mem,
+// CrossProcessNotification::IPCHandle handle_1,
+// CrossProcessNotification::IPCHandle handle_2) {
+// mem_.reset(new SharedMemory(mem, false));
+// notification_.reset(new CrossProcessNotification(handle_1, handle_2));
+// WriteToSharedMemory(&notification_, &mem_, 'B');
+// }
+//
+// CrossProcessNotification notification_;
+// scoped_ptr<SharedMemory> mem_;
+// };
+//
+class MEDIA_EXPORT CrossProcessNotification {
+ public:
+#if defined(OS_WIN)
+  typedef HANDLE IPCHandle;
+#else
+  typedef base::FileDescriptor IPCHandle;
+#endif
+
+  typedef std::vector<CrossProcessNotification*> Notifications;
+
+  // Default ctor. Initializes a NULL notification. User must call
+  // InitializePair() to initialize the instance along with a connected one.
+  CrossProcessNotification();
+
+  // Ctor for the user that does not call InitializePair but instead receives
+  // handles from the one that did. These handles come from a call to
+  // ShareToProcess.
+  CrossProcessNotification(IPCHandle handle_1, IPCHandle handle_2);
+  ~CrossProcessNotification();
+
+  // Raises a signal that the shared resource now can be accessed by the other
+  // party.
+  // NOTE: Calling Signal() more than once without calling Wait() in between
+  // is not a supported scenario and will result in undefined behavior (and
+  // different depending on platform).
+  void Signal();
+
+  // Waits (blocking) for the other party to finish using the shared resource.
+  // NOTE: As with Signal(), you must not call Wait() more than once without
+  // calling Signal() in between.
+  void Wait();
+
+  // Returns true once the instance holds a valid underlying handle.
+  bool IsValid() const;
+
+  // Copies the internal handles to the output parameters, |handle_1| and
+  // |handle_2|. The operation can fail, so the caller must be prepared to
+  // handle that case.
+  bool ShareToProcess(base::ProcessHandle process, IPCHandle* handle_1,
+                      IPCHandle* handle_2);
+
+  // Initializes a pair of CrossProcessNotification instances. Note that this
+  // can fail (e.g. due to EMFILE on Linux).
+  static bool InitializePair(CrossProcessNotification* a,
+                             CrossProcessNotification* b);
+
+  // Use an instance of this class when you have to repeatedly wait for multiple
+  // notifications on the same thread. The class will store information about
+  // which notification was last signaled and try to distribute the signals so
+  // that all notifications get a chance to be processed in times of high load
+  // and a busy one won't starve the others.
+  // TODO(tommi): Support a way to abort the wait.
+  class MEDIA_EXPORT WaitForMultiple :
+      public NON_EXPORTED_BASE(base::NonThreadSafe) {
+   public:
+    // Caller must make sure that the lifetime of the array is greater than
+    // that of the WaitForMultiple instance.
+    explicit WaitForMultiple(const Notifications* notifications);
+
+    // Waits for any of the notifications to be signaled. Returns the 0 based
+    // index of a signaled notification.
+    int Wait();
+
+    // Call when the array changes. This should be called on the same thread
+    // as Wait() is called on and the array must never change while a Wait()
+    // is in progress.
+    void Reset(const Notifications* notifications);
+
+   private:
+    const Notifications* notifications_;  // Not owned.
+    size_t wait_offset_;  // Index at which the next Wait() starts scanning.
+  };
+
+ private:
+  // Only called by the WaitForMultiple class. See documentation
+  // for WaitForMultiple and comments inside WaitMultiple for details.
+  static int WaitMultiple(const Notifications& notifications,
+                          size_t wait_offset);
+
+#if defined(OS_WIN)
+  // Windows uses a pair of event handles; POSIX uses one end of a
+  // cancelable socket pair (see cross_process_notification_posix.cc).
+  base::win::ScopedHandle mine_;
+  base::win::ScopedHandle other_;
+#else
+  typedef base::CancelableSyncSocket SocketClass;
+  SocketClass socket_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(CrossProcessNotification);
+};
+
+#endif // MEDIA_AUDIO_CROSS_PROCESS_NOTIFICATION_H_
diff --git a/chromium/media/audio/cross_process_notification_posix.cc b/chromium/media/audio/cross_process_notification_posix.cc
new file mode 100644
index 00000000000..d5683495ef9
--- /dev/null
+++ b/chromium/media/audio/cross_process_notification_posix.cc
@@ -0,0 +1,114 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/cross_process_notification.h"
+
+#include <errno.h>
+#include <sys/poll.h>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/file_descriptor_posix.h"
+
+CrossProcessNotification::~CrossProcessNotification() {}
+
+// Adopts an already-created socket handle. On POSIX only |handle_1| is used;
+// |handle_2| must be the invalid descriptor (-1).
+CrossProcessNotification::CrossProcessNotification(IPCHandle handle_1,
+                                                   IPCHandle handle_2)
+    : socket_(handle_1.fd) {
+  DCHECK_NE(handle_1.fd, -1);
+  DCHECK_EQ(handle_2.fd, -1);
+  DCHECK(IsValid());
+}
+
+// Signals the peer by writing a single byte to the socket pair.
+void CrossProcessNotification::Signal() {
+  DCHECK(IsValid());
+  char signal = 1;
+  size_t bytes = socket_.Send(&signal, sizeof(signal));
+  DCHECK_EQ(bytes, 1U) << "errno: " << errno;
+}
+
+// Blocks until the peer calls Signal(), consuming exactly one byte.
+void CrossProcessNotification::Wait() {
+  DCHECK(IsValid());
+  char signal = 0;
+  size_t bytes = socket_.Receive(&signal, sizeof(signal));
+  DCHECK_EQ(bytes, 1U) << "errno: " << errno;
+  DCHECK_EQ(signal, 1);
+}
+
+bool CrossProcessNotification::IsValid() const {
+  return socket_.handle() != SocketClass::kInvalidHandle;
+}
+
+// On POSIX the descriptor itself is shared; |process| is unused here and the
+// second handle is always set to the invalid descriptor.
+bool CrossProcessNotification::ShareToProcess(base::ProcessHandle process,
+                                              IPCHandle* handle_1,
+                                              IPCHandle* handle_2) {
+  DCHECK(IsValid());
+  handle_1->fd = socket_.handle();
+  handle_1->auto_close = false;
+  handle_2->fd = -1;
+  return true;
+}
+
+// static
+bool CrossProcessNotification::InitializePair(CrossProcessNotification* a,
+                                              CrossProcessNotification* b) {
+  DCHECK(!a->IsValid());
+  DCHECK(!b->IsValid());
+
+  // Can fail, e.g. with EMFILE when the process fd limit has been reached.
+  bool ok = SocketClass::CreatePair(&a->socket_, &b->socket_);
+
+  DLOG_IF(WARNING, !ok) << "failed to create socket: " << errno;
+  DCHECK(!ok || a->IsValid());
+  DCHECK(!ok || b->IsValid());
+  return ok;
+}
+
+// static
+// Blocks until one of |notifications| is signaled and returns its index into
+// the original array. |wait_offset| rotates the poll order (see comment
+// below) so that a busy socket cannot starve the others.
+int CrossProcessNotification::WaitMultiple(const Notifications& notifications,
+                                           size_t wait_offset) {
+  DCHECK_LT(wait_offset, notifications.size());
+
+  for (size_t i = 0; i < notifications.size(); ++i) {
+    DCHECK(notifications[i]->IsValid());
+  }
+
+  // Below, we always check the |revents| of the first socket in the array
+  // and return the index of that socket if set. This can cause sockets
+  // that come later in the array to starve when the first sockets are
+  // very busy. So to avoid the starving problem, we use the |wait_offset|
+  // variable to split up the array so that the last socket to be signaled
+  // becomes the last socket in the array and all the other sockets will have
+  // priority the next time WaitMultiple is called.
+  scoped_ptr<struct pollfd[]> sockets(new struct pollfd[notifications.size()]);
+  memset(&sockets[0], 0, notifications.size() * sizeof(sockets[0]));
+  size_t index = 0;
+  for (size_t i = wait_offset; i < notifications.size(); ++i) {
+    struct pollfd& fd = sockets[index++];
+    fd.events = POLLIN;
+    fd.fd = notifications[i]->socket_.handle();
+  }
+
+  for (size_t i = 0; i < wait_offset; ++i) {
+    struct pollfd& fd = sockets[index++];
+    fd.events = POLLIN;
+    fd.fd = notifications[i]->socket_.handle();
+  }
+  DCHECK_EQ(index, notifications.size());
+
+  // Infinite timeout (-1): block until at least one socket is readable.
+  int err = poll(&sockets[0], notifications.size(), -1);
+  if (err != -1) {
+    for (size_t i = 0; i < notifications.size(); ++i) {
+      if (sockets[i].revents) {
+        // Map the rotated poll index back to the caller's array index and
+        // consume the pending signal byte before returning.
+        size_t ret = (i + wait_offset) % notifications.size();
+        DCHECK_EQ(sockets[i].fd, notifications[ret]->socket_.handle());
+        notifications[ret]->Wait();
+        return ret;
+      }
+    }
+  }
+  // Either poll() failed or we failed to find a single socket that was
+  // signaled. Either way continuing will result in undefined behavior.
+  LOG(FATAL) << "poll() failed: " << errno;
+  return -1;
+}
diff --git a/chromium/media/audio/cross_process_notification_unittest.cc b/chromium/media/audio/cross_process_notification_unittest.cc
new file mode 100644
index 00000000000..a27219496cb
--- /dev/null
+++ b/chromium/media/audio/cross_process_notification_unittest.cc
@@ -0,0 +1,462 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/kill.h"
+#include "base/stl_util.h"
+#include "base/test/multiprocess_test.h"
+#include "base/threading/platform_thread.h"
+#include "media/audio/cross_process_notification.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#include <utility> // NOLINT
+
+namespace {
+
+// Initializes (ctor) and deletes (dtor) two vectors of pairs of
+// CrossProcessNotification instances.
+class NotificationsOwner {
+ public:
+  // Attempts to create up to |number_of_pairs| number of pairs. Call size()
+  // after construction to find out how many pairs were actually created.
+  explicit NotificationsOwner(size_t number_of_pairs) {
+    CreateMultiplePairs(number_of_pairs);
+  }
+  ~NotificationsOwner() {
+    STLDeleteElements(&a_);
+    STLDeleteElements(&b_);
+  }
+
+  size_t size() const {
+    DCHECK_EQ(a_.size(), b_.size());
+    return a_.size();
+  }
+
+  const CrossProcessNotification::Notifications& a() { return a_; }
+  const CrossProcessNotification::Notifications& b() { return b_; }
+
+ private:
+  void CreateMultiplePairs(size_t count) {
+    a_.resize(count);
+    b_.resize(count);
+    size_t i = 0;
+    for (; i < count; ++i) {
+      a_[i] = new CrossProcessNotification();
+      b_[i] = new CrossProcessNotification();
+      if (!CrossProcessNotification::InitializePair(a_[i], b_[i])) {
+        // Stop at the first failure (e.g. fd limit reached) and shrink the
+        // vectors below so only fully-initialized pairs remain.
+        LOG(WARNING) << "InitializePair failed at " << i;
+        delete a_[i];
+        delete b_[i];
+        break;
+      }
+    }
+    a_.resize(i);
+    b_.resize(i);
+  }
+
+  CrossProcessNotification::Notifications a_;
+  CrossProcessNotification::Notifications b_;
+};
+
+// A simple thread that we'll run two instances of. Both threads get a pointer
+// to the same |shared_data| and use a CrossProcessNotification to control when
+// each thread can read/write.
+class SingleNotifierWorker : public base::PlatformThread::Delegate {
+ public:
+  SingleNotifierWorker(size_t* shared_data, size_t repeats,
+                       CrossProcessNotification* notifier)
+      : shared_data_(shared_data), repeats_(repeats),
+        notifier_(notifier) {
+  }
+  virtual ~SingleNotifierWorker() {}
+
+  // base::PlatformThread::Delegate:
+  virtual void ThreadMain() OVERRIDE {
+    // Take turns with the peer thread: wait for access, mutate, hand back.
+    for (size_t i = 0; i < repeats_; ++i) {
+      notifier_->Wait();
+      ++(*shared_data_);
+      notifier_->Signal();
+    }
+  }
+
+ private:
+  size_t* shared_data_;  // Not owned.
+  size_t repeats_;
+  CrossProcessNotification* notifier_;  // Not owned.
+  DISALLOW_COPY_AND_ASSIGN(SingleNotifierWorker);
+};
+
+// Similar to SingleNotifierWorker, except each instance of this class will
+// have >1 instances of CrossProcessNotification to Wait/Signal and an equal
+// amount of |shared_data| that the notifiers control access to.
+class MultiNotifierWorker : public base::PlatformThread::Delegate {
+ public:
+  MultiNotifierWorker(size_t* shared_data, size_t repeats,
+                      const CrossProcessNotification::Notifications* notifiers)
+      : shared_data_(shared_data), repeats_(repeats),
+        notifiers_(notifiers) {
+  }
+  virtual ~MultiNotifierWorker() {}
+
+  // base::PlatformThread::Delegate:
+  virtual void ThreadMain() OVERRIDE {
+    CrossProcessNotification::WaitForMultiple waiter(notifiers_);
+    // Each iteration: wait for any notifier, bump its slot, hand it back.
+    for (size_t i = 0; i < repeats_; ++i) {
+      int signaled = waiter.Wait();
+      ++shared_data_[signaled];
+      (*notifiers_)[signaled]->Signal();
+    }
+  }
+
+ private:
+  size_t* shared_data_;  // Not owned; one slot per notifier.
+  size_t repeats_;
+  const CrossProcessNotification::Notifications* notifiers_;  // Not owned.
+  DISALLOW_COPY_AND_ASSIGN(MultiNotifierWorker);
+};
+
+// A fixed array of bool flags. Each flag uses 1 bit. Use sizeof(FlagArray)
+// to determine how much memory you need. The number of flags will therefore
+// be sizeof(FlagArray) * 8.
+// We use 'struct' to signify that this structure represents compiler
+// independent structured data. I.e. you must be able to map this class
+// to a piece of shared memory of size sizeof(FlagArray) and be able to
+// use the class. No vtables etc.
+// TODO(tommi): Move this to its own header when we start using it for signaling
+// audio devices. As is, it's just here for perf comparison against the
+// "multiple notifiers" approach.
+struct FlagArray {
+ public:
+  FlagArray() : flags_() {}
+
+  // Returns true if flag |index| is set. Uses an unsigned shift like set()
+  // and clear(): the previous signed "1 << (index & 31)" overflowed into the
+  // sign bit for (index & 31) == 31, which is undefined behavior.
+  bool is_set(size_t index) const {
+    return (flags_[index >> 5] &
+            (1U << (static_cast<uint32>(index) & 31))) != 0;
+  }
+
+  void set(size_t index) {
+    flags_[index >> 5] |= (1U << (static_cast<uint32>(index) & 31));
+  }
+
+  void clear(size_t index) {
+    flags_[index >> 5] &= ~(1U << (static_cast<uint32>(index) & 31));
+  }
+
+  // Returns the number of flags that can be set/checked.
+  size_t size() const { return sizeof(flags_) * 8; }
+
+ private:
+  // 256 * 32 = 8192 flags in 1KB.
+  uint32 flags_[256];
+  DISALLOW_COPY_AND_ASSIGN(FlagArray);
+};
+
+// Worker that uses a single notifier plus a FlagArray: on each turn it scans
+// all |count| flags and increments the shared slot for every set flag.
+class MultiNotifierWorkerFlagArray : public base::PlatformThread::Delegate {
+ public:
+  MultiNotifierWorkerFlagArray(size_t count, FlagArray* signals,
+                               size_t* shared_data, size_t repeats,
+                               CrossProcessNotification* notifier)
+      : count_(count), signals_(signals), shared_data_(shared_data),
+        repeats_(repeats), notifier_(notifier) {
+  }
+  virtual ~MultiNotifierWorkerFlagArray() {}
+
+  // base::PlatformThread::Delegate:
+  virtual void ThreadMain() OVERRIDE {
+    for (size_t i = 0; i < repeats_; ++i) {
+      notifier_->Wait();
+      for (size_t s = 0; s < count_; ++s) {
+        if (signals_->is_set(s)) {
+          ++shared_data_[s];
+          // We don't clear the flag here but simply leave it signaled because
+          // we want the other thread to also increment this variable.
+        }
+      }
+      notifier_->Signal();
+    }
+  }
+
+ private:
+  size_t count_;
+  FlagArray* signals_;  // Not owned.
+  size_t* shared_data_;  // Not owned; one slot per flag.
+  size_t repeats_;
+  CrossProcessNotification* notifier_;  // Not owned.
+  DISALLOW_COPY_AND_ASSIGN(MultiNotifierWorkerFlagArray);
+};
+
+} // end namespace
+
+// Exercises set/is_set/clear over every bit position of FlagArray.
+TEST(CrossProcessNotification, FlagArray) {
+  FlagArray flags;
+  EXPECT_GT(flags.size(), 1000U);
+  for (size_t i = 0; i < flags.size(); ++i) {
+    EXPECT_FALSE(flags.is_set(i));
+    flags.set(i);
+    EXPECT_TRUE(flags.is_set(i));
+    flags.clear(i);
+    EXPECT_FALSE(flags.is_set(i));
+  }
+}
+
+// Initializes two notifiers, signals each one and makes sure the other's
+// Wait() is satisfied.
+TEST(CrossProcessNotification, Basic) {
+  CrossProcessNotification a, b;
+  ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
+  EXPECT_TRUE(a.IsValid());
+  EXPECT_TRUE(b.IsValid());
+
+  a.Signal();
+  b.Wait();
+
+  b.Signal();
+  a.Wait();
+}
+
+// Spins two worker threads, each with their own CrossProcessNotification
+// that they use to read and write from a shared memory buffer.
+// Disabled as it trips up the TSAN bot (false positive since TSAN doesn't
+// recognize sockets as being a synchronization primitive).
+TEST(CrossProcessNotification, DISABLED_TwoThreads) {
+  CrossProcessNotification a, b;
+  ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
+
+  size_t data = 0;
+  const size_t kRepeats = 10000;
+  SingleNotifierWorker worker1(&data, kRepeats, &a);
+  SingleNotifierWorker worker2(&data, kRepeats, &b);
+  base::PlatformThreadHandle thread1, thread2;
+  base::PlatformThread::Create(0, &worker1, &thread1);
+  base::PlatformThread::Create(0, &worker2, &thread2);
+
+  // Start the first thread. They should ping pong a few times and take turns
+  // incrementing the shared variable and never step on each other's toes.
+  a.Signal();
+
+  base::PlatformThread::Join(thread1);
+  base::PlatformThread::Join(thread2);
+
+  EXPECT_EQ(kRepeats * 2, data);
+}
+
+// Uses a pair of threads to access up to 1000 pieces of synchronized shared
+// data. On regular dev machines, the number of notifiers should be 1000, but on
+// mac and linux bots, the number will be smaller due to the RLIMIT_NOFILE
+// limit. Specifically, linux will have this limit at 1024 which means for this
+// test that the max number of notifiers will be in the range 500-512. On Mac
+// the limit is 256, so |count| will be ~120. Oh, and raising the limit via
+// setrlimit() won't work.
+// DISABLED since the distribution won't be accurate when run on valgrind.
+TEST(CrossProcessNotification, DISABLED_ThousandNotifiersTwoThreads) {
+  const size_t kCount = 1000;
+  NotificationsOwner pairs(kCount);
+  size_t data[kCount] = {0};
+  // We use a multiple of the count so that the division in the check below
+  // will be nice and round.
+  size_t repeats = pairs.size() * 1;
+
+  MultiNotifierWorker worker_1(&data[0], repeats, &pairs.a());
+  MultiNotifierWorker worker_2(&data[0], repeats, &pairs.b());
+  base::PlatformThreadHandle thread_1, thread_2;
+  base::PlatformThread::Create(0, &worker_1, &thread_1);
+  base::PlatformThread::Create(0, &worker_2, &thread_2);
+
+  // Kick things off by granting side A the first turn on every pair.
+  for (size_t i = 0; i < pairs.size(); ++i)
+    pairs.a()[i]->Signal();
+
+  base::PlatformThread::Join(thread_1);
+  base::PlatformThread::Join(thread_2);
+
+  size_t expected_total = pairs.size() * 2;
+  size_t total = 0;
+  for (size_t i = 0; i < pairs.size(); ++i) {
+    // The CrossProcessNotification::WaitForMultiple class should have ensured
+    // that all notifiers had the same quality of service.
+    EXPECT_EQ(expected_total / pairs.size(), data[i]);
+    total += data[i];
+  }
+  EXPECT_EQ(expected_total, total);
+}
+
+// Functionally equivalent (as far as the shared data goes) to the
+// ThousandNotifiersTwoThreads test but uses a single pair of notifiers +
+// FlagArray for the 1000 signals. This approach is significantly faster.
+// Disabled as it trips up the TSAN bot - "Possible data race during write of
+// size 4" (the flag array).
+TEST(CrossProcessNotification, DISABLED_TwoNotifiersTwoThreads1000Signals) {
+  CrossProcessNotification a, b;
+  ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
+
+  const size_t kCount = 1000;
+  FlagArray signals;
+  ASSERT_GE(signals.size(), kCount);
+  size_t data[kCount] = {0};
+
+  // Since this algorithm checks all events each time the notifier is
+  // signaled, |repeat| doesn't mean the same thing here as it does in
+  // ThousandNotifiersTwoThreads. 1 repeat here is the same as kCount
+  // repeats in ThousandNotifiersTwoThreads.
+  size_t repeats = 1;
+  MultiNotifierWorkerFlagArray worker1(kCount, &signals, &data[0], repeats, &a);
+  MultiNotifierWorkerFlagArray worker2(kCount, &signals, &data[0], repeats, &b);
+  base::PlatformThreadHandle thread1, thread2;
+  base::PlatformThread::Create(0, &worker1, &thread1);
+  base::PlatformThread::Create(0, &worker2, &thread2);
+
+  // Raise every flag, then grant the first turn.
+  for (size_t i = 0; i < kCount; ++i)
+    signals.set(i);
+  a.Signal();
+
+  base::PlatformThread::Join(thread1);
+  base::PlatformThread::Join(thread2);
+
+  size_t expected_total = kCount * 2;
+  size_t total = 0;
+  for (size_t i = 0; i < kCount; ++i) {
+    // Since for each signal, we process all signaled events, the shared data
+    // variables should all be equal.
+    EXPECT_EQ(expected_total / kCount, data[i]);
+    total += data[i];
+  }
+  EXPECT_EQ(expected_total, total);
+}
+
+// Test the maximum number of notifiers without spinning further wait
+// threads on Windows. This test assumes we can always create 64 pairs and
+// bails if we can't.
+TEST(CrossProcessNotification, MultipleWaits64) {
+ const size_t kCount = 64;
+ NotificationsOwner pairs(kCount);
+ ASSERT_TRUE(pairs.size() == kCount);
+
+ CrossProcessNotification::WaitForMultiple waiter(&pairs.b());
+ for (size_t i = 0; i < kCount; ++i) {
+ pairs.a()[i]->Signal();
+ int index = waiter.Wait();
+ EXPECT_EQ(i, static_cast<size_t>(index));
+ }
+}
+
+// Tests waiting for more notifiers than the OS supports on one thread.
+// The test will create at most 1000 pairs, but on mac/linux bots the actual
+// number will be lower. See comment about the RLIMIT_NOFILE limit above for
+// more details.
+// DISABLED since the distribution won't be accurate when run on valgrind.
+TEST(CrossProcessNotification, DISABLED_MultipleWaits1000) {
+ // A 1000 notifiers requires 16 threads on Windows, including the current
+ // one, to perform the wait operation.
+ const size_t kCount = 1000;
+ NotificationsOwner pairs(kCount);
+
+ for (size_t i = 0; i < pairs.size(); ++i) {
+ pairs.a()[i]->Signal();
+ // To disable the load distribution algorithm and force the extra worker
+ // thread(s) to catch the signaled event, we define the |waiter| inside
+ // the loop.
+ CrossProcessNotification::WaitForMultiple waiter(&pairs.b());
+ int index = waiter.Wait();
+ EXPECT_EQ(i, static_cast<size_t>(index));
+ }
+}
+
+class CrossProcessNotificationMultiProcessTest : public base::MultiProcessTest {
+};
+
+namespace {
+
+// A very crude IPC mechanism that we use to set up the spawned child process
+// and the parent process.
+struct CrudeIpc {
+ uint8 ready;
+ CrossProcessNotification::IPCHandle handle_1;
+ CrossProcessNotification::IPCHandle handle_2;
+};
+
+#if defined(OS_POSIX)
+const int kPosixChildSharedMem = 30;
+#else
+const char kSharedMemName[] = "CrossProcessNotificationMultiProcessTest";
+#endif
+
+const size_t kSharedMemSize = 1024;
+
+} // namespace
+
+// The main routine of the child process. Waits for the parent process
+// to copy handles over to the child and then uses a CrossProcessNotification to
+// wait and signal to the parent process.
+MULTIPROCESS_TEST_MAIN(CrossProcessNotificationChildMain) {
+#if defined(OS_POSIX)
+ base::SharedMemory mem(
+ base::SharedMemoryHandle(kPosixChildSharedMem, true /* auto close */),
+ false);
+#else
+ base::SharedMemory mem;
+ CHECK(mem.CreateNamed(kSharedMemName, true, kSharedMemSize));
+#endif
+
+ CHECK(mem.Map(kSharedMemSize));
+ CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory());
+
+ while (!ipc->ready)
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+
+ CrossProcessNotification notifier(ipc->handle_1, ipc->handle_2);
+ notifier.Wait();
+ notifier.Signal();
+
+ return 0;
+}
+
+// Spawns a new process and hands a CrossProcessNotification instance to the
+// new process. Once that's done, it waits for the child process to signal
+// it's end and quits.
+TEST_F(CrossProcessNotificationMultiProcessTest, Basic) {
+ CrossProcessNotification a, b;
+ ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
+ EXPECT_TRUE(a.IsValid());
+ EXPECT_TRUE(b.IsValid());
+
+ base::SharedMemory mem;
+
+#if defined(OS_POSIX)
+ ASSERT_TRUE(mem.CreateAndMapAnonymous(kSharedMemSize));
+#else
+ mem.Delete(kSharedMemName); // In case a previous run was unsuccessful.
+ ASSERT_TRUE(mem.CreateNamed(kSharedMemName, false, kSharedMemSize));
+ ASSERT_TRUE(mem.Map(kSharedMemSize));
+#endif
+
+ CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory());
+ ipc->ready = false;
+
+#if defined(OS_POSIX)
+ const int kPosixChildSocket = 20;
+ EXPECT_TRUE(b.ShareToProcess(
+ base::kNullProcessHandle, &ipc->handle_1, &ipc->handle_2));
+ base::FileHandleMappingVector fd_mapping_vec;
+ fd_mapping_vec.push_back(std::make_pair(ipc->handle_1.fd, kPosixChildSocket));
+ fd_mapping_vec.push_back(
+ std::make_pair(mem.handle().fd, kPosixChildSharedMem));
+ ipc->handle_1.fd = kPosixChildSocket;
+ base::ProcessHandle process = SpawnChild("CrossProcessNotificationChildMain",
+ fd_mapping_vec, false);
+#else
+ base::ProcessHandle process = SpawnChild("CrossProcessNotificationChildMain",
+ false);
+ EXPECT_TRUE(b.ShareToProcess(process, &ipc->handle_1, &ipc->handle_2));
+#endif
+
+ ipc->ready = true;
+
+ a.Signal();
+ a.Wait();
+
+ int exit_code = -1;
+ base::WaitForExitCode(process, &exit_code);
+ EXPECT_EQ(0, exit_code);
+}
diff --git a/chromium/media/audio/cross_process_notification_win.cc b/chromium/media/audio/cross_process_notification_win.cc
new file mode 100644
index 00000000000..53bf0f4525e
--- /dev/null
+++ b/chromium/media/audio/cross_process_notification_win.cc
@@ -0,0 +1,270 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/cross_process_notification.h"
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/platform_thread.h"
+#include "base/win/scoped_handle.h"
+
+CrossProcessNotification::~CrossProcessNotification() {}
+
+CrossProcessNotification::CrossProcessNotification(IPCHandle handle_1,
+ IPCHandle handle_2)
+ : mine_(handle_1), other_(handle_2) {
+ DCHECK(IsValid());
+}
+
+void CrossProcessNotification::Signal() {
+ DCHECK(IsValid());
+ DCHECK_EQ(::WaitForSingleObject(mine_, 0), static_cast<DWORD>(WAIT_TIMEOUT))
+ << "Are you calling Signal() without calling Wait() first?";
+ BOOL ok = ::SetEvent(mine_);
+ CHECK(ok);
+}
+
+void CrossProcessNotification::Wait() {
+ DCHECK(IsValid());
+ DWORD wait = ::WaitForSingleObject(other_, INFINITE);
+ DCHECK_EQ(wait, WAIT_OBJECT_0);
+ BOOL ok = ::ResetEvent(other_);
+ CHECK(ok);
+}
+
+bool CrossProcessNotification::IsValid() const {
+ return mine_.IsValid() && other_.IsValid();
+}
+
+bool CrossProcessNotification::ShareToProcess(base::ProcessHandle process,
+ IPCHandle* handle_1,
+ IPCHandle* handle_2) {
+ DCHECK(IsValid());
+ HANDLE our_process = ::GetCurrentProcess();
+ if (!::DuplicateHandle(our_process, mine_, process, handle_1, 0, FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ return false;
+ }
+
+ if (!::DuplicateHandle(our_process, other_, process, handle_2, 0, FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ // In case we're sharing to ourselves, we can close the handle, but
+ // if the target process is a different process, we do nothing.
+ if (process == our_process)
+ ::CloseHandle(*handle_1);
+ *handle_1 = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+// static
+bool CrossProcessNotification::InitializePair(CrossProcessNotification* a,
+ CrossProcessNotification* b) {
+ DCHECK(!a->IsValid());
+ DCHECK(!b->IsValid());
+
+ bool success = false;
+
+ // Create two manually resettable events and give each party a handle
+ // to both events.
+ HANDLE event_a = ::CreateEvent(NULL, TRUE, FALSE, NULL);
+ HANDLE event_b = ::CreateEvent(NULL, TRUE, FALSE, NULL);
+ if (event_a && event_b) {
+ a->mine_.Set(event_a);
+ a->other_.Set(event_b);
+ success = a->ShareToProcess(GetCurrentProcess(), &event_a, &event_b);
+ if (success) {
+ b->mine_.Set(event_b);
+ b->other_.Set(event_a);
+ } else {
+ a->mine_.Close();
+ a->other_.Close();
+ }
+ } else {
+ if (event_a)
+ ::CloseHandle(event_a);
+ if (event_b)
+ ::CloseHandle(event_b);
+ }
+
+ DCHECK(!success || a->IsValid());
+ DCHECK(!success || b->IsValid());
+
+ return success;
+}
+
+namespace {
+class ExtraWaitThread : public base::PlatformThread::Delegate {
+ public:
+ ExtraWaitThread(HANDLE stop, HANDLE* events, size_t count,
+ int* signaled_event)
+ : stop_(stop), events_(events), count_(count),
+ signaled_event_(signaled_event) {
+ *signaled_event_ = -1;
+ }
+ virtual ~ExtraWaitThread() {}
+
+ virtual void ThreadMain() OVERRIDE {
+ // Store the |stop_| event as the first event.
+ HANDLE events[MAXIMUM_WAIT_OBJECTS] = { stop_ };
+ HANDLE next_thread = NULL;
+ DWORD event_count = MAXIMUM_WAIT_OBJECTS;
+ int thread_signaled_event = -1;
+ scoped_ptr<ExtraWaitThread> extra_wait_thread;
+ if (count_ > (MAXIMUM_WAIT_OBJECTS - 1)) {
+ std::copy(&events_[0], &events_[MAXIMUM_WAIT_OBJECTS - 2], &events[1]);
+
+ extra_wait_thread.reset(new ExtraWaitThread(stop_,
+ &events_[MAXIMUM_WAIT_OBJECTS - 2],
+ count_ - (MAXIMUM_WAIT_OBJECTS - 2),
+ &thread_signaled_event));
+ base::PlatformThreadHandle handle;
+ base::PlatformThread::Create(0, extra_wait_thread.get(), &handle);
+ next_thread = handle.platform_handle();
+
+ event_count = MAXIMUM_WAIT_OBJECTS;
+ events[MAXIMUM_WAIT_OBJECTS - 1] = next_thread;
+ } else {
+ std::copy(&events_[0], &events_[count_], &events[1]);
+ event_count = count_ + 1;
+ }
+
+ DWORD wait = ::WaitForMultipleObjects(event_count, &events[0], FALSE,
+ INFINITE);
+ if (wait >= WAIT_OBJECT_0 && wait < (WAIT_OBJECT_0 + event_count)) {
+ wait -= WAIT_OBJECT_0;
+ if (wait == 0) {
+ // The stop event was signaled. Check if it was signaled by a
+ // sub thread. In case our sub thread had to spin another thread (and
+ // so on), we must wait for ours to exit before we can check the
+ // propagated event offset.
+ if (next_thread) {
+ base::PlatformThread::Join(base::PlatformThreadHandle(next_thread));
+ next_thread = NULL;
+ }
+ if (thread_signaled_event != -1)
+ *signaled_event_ = thread_signaled_event + (MAXIMUM_WAIT_OBJECTS - 2);
+ } else if (events[wait] == next_thread) {
+ NOTREACHED();
+ } else {
+ *signaled_event_ = static_cast<int>(wait);
+ SetEvent(stop_);
+ }
+ } else {
+ NOTREACHED();
+ }
+
+ if (next_thread)
+ base::PlatformThread::Join(base::PlatformThreadHandle(next_thread));
+ }
+
+ private:
+ HANDLE stop_;
+ HANDLE* events_;
+ size_t count_;
+ int* signaled_event_;
+ DISALLOW_COPY_AND_ASSIGN(ExtraWaitThread);
+};
+} // end namespace
+
+// static
+int CrossProcessNotification::WaitMultiple(const Notifications& notifications,
+ size_t wait_offset) {
+ DCHECK_LT(wait_offset, notifications.size());
+
+ for (size_t i = 0; i < notifications.size(); ++i) {
+ DCHECK(notifications[i]->IsValid());
+ }
+
+ // TODO(tommi): Should we wait in an alertable state so that we can be
+ // canceled via an APC?
+ scoped_ptr<HANDLE[]> handles(new HANDLE[notifications.size()]);
+
+ // Because of the way WaitForMultipleObjects works, we do a little trick here.
+ // When multiple events are signaled, WaitForMultipleObjects will return the
+ // index of the first signaled item (lowest). This means that if we always
+ // pass the array the same way to WaitForMultipleObjects, the objects that
+ // come first, have higher priority. In times of heavy load, this will cause
+ // elements at the back to become DOS-ed.
+ // So, we store the location of the item that was last signaled. Then we split
+ // up the array and move everything higher than the last signaled index to the
+ // front and the rest to the back (meaning that the last signaled item will
+ // become the last element in the list).
+ // Assuming equally busy events, this approach distributes the priority
+ // evenly.
+
+ size_t index = 0;
+ for (size_t i = wait_offset; i < notifications.size(); ++i)
+ handles[index++] = notifications[i]->other_;
+
+ for (size_t i = 0; i < wait_offset; ++i)
+ handles[index++] = notifications[i]->other_;
+ DCHECK_EQ(index, notifications.size());
+
+ DWORD wait = WAIT_FAILED;
+ bool wait_failed = false;
+ if (notifications.size() <= MAXIMUM_WAIT_OBJECTS) {
+ wait = ::WaitForMultipleObjects(notifications.size(), &handles[0], FALSE,
+ INFINITE);
+ wait_failed = wait < WAIT_OBJECT_0 ||
+ wait >= (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS);
+ } else {
+ // Used to stop the other wait threads when an event has been signaled.
+ base::win::ScopedHandle stop(::CreateEvent(NULL, TRUE, FALSE, NULL));
+
+ // Create the first thread and pass a pointer to all handles >63
+ // to the thread + 'stop'. Then implement the thread so that it checks
+ // if the number of handles is > 63. If so, spawns a new thread and
+ // passes >62 handles to that thread and waits for the 62 handles + stop +
+ // next thread. etc etc.
+
+ // Create a list of threads so that each thread waits on at most 62 events
+ // including one event for when a child thread signals completion and one
+ // event for when all of the threads must be stopped (due to some event
+ // being signaled).
+
+ int thread_signaled_event = -1;
+ ExtraWaitThread wait_thread(stop, &handles[MAXIMUM_WAIT_OBJECTS - 1],
+ notifications.size() - (MAXIMUM_WAIT_OBJECTS - 1),
+ &thread_signaled_event);
+ base::PlatformThreadHandle thread;
+ base::PlatformThread::Create(0, &wait_thread, &thread);
+ HANDLE events[MAXIMUM_WAIT_OBJECTS];
+ std::copy(&handles[0], &handles[MAXIMUM_WAIT_OBJECTS - 1], &events[0]);
+ events[MAXIMUM_WAIT_OBJECTS - 1] = thread.platform_handle();
+ wait = ::WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, &events[0], FALSE,
+ INFINITE);
+ wait_failed = wait < WAIT_OBJECT_0 ||
+ wait >= (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS);
+ if (wait == WAIT_OBJECT_0 + (MAXIMUM_WAIT_OBJECTS - 1)) {
+ if (thread_signaled_event < 0) {
+ wait_failed = true;
+ NOTREACHED();
+ } else {
+ wait = WAIT_OBJECT_0 + (MAXIMUM_WAIT_OBJECTS - 2) +
+ thread_signaled_event;
+ }
+ } else {
+ ::SetEvent(stop);
+ }
+ base::PlatformThread::Join(thread);
+ }
+
+ int ret = -1;
+ if (!wait_failed) {
+ // Subtract to be politically correct (WAIT_OBJECT_0 is actually 0).
+ wait -= WAIT_OBJECT_0;
+ BOOL ok = ::ResetEvent(handles[wait]);
+ CHECK(ok);
+ ret = (wait + wait_offset) % notifications.size();
+ DCHECK_EQ(handles[wait], notifications[ret]->other_.Get());
+ } else {
+ NOTREACHED();
+ }
+
+ CHECK_NE(ret, -1);
+ return ret;
+}
diff --git a/chromium/media/audio/fake_audio_consumer.cc b/chromium/media/audio/fake_audio_consumer.cc
new file mode 100644
index 00000000000..55c439ad9f3
--- /dev/null
+++ b/chromium/media/audio/fake_audio_consumer.cc
@@ -0,0 +1,162 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/fake_audio_consumer.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/cancelable_callback.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+
+namespace media {
+
+class FakeAudioConsumer::Worker
+ : public base::RefCountedThreadSafe<FakeAudioConsumer::Worker> {
+ public:
+ Worker(const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const AudioParameters& params);
+
+ bool IsStopped();
+ void Start(const ReadCB& read_cb);
+ void Stop();
+
+ private:
+ friend class base::RefCountedThreadSafe<Worker>;
+ ~Worker();
+
+ // Initialize and start regular calls to DoRead() on the worker thread.
+ void DoStart();
+
+ // Cancel any delayed callbacks to DoRead() in the worker loop's queue.
+ void DoCancel();
+
+ // Task that regularly calls |read_cb_| according to the playback rate as
+ // determined by the audio parameters given during construction. Runs on
+ // the worker loop.
+ void DoRead();
+
+ const scoped_refptr<base::MessageLoopProxy> worker_loop_;
+ const scoped_ptr<AudioBus> audio_bus_;
+ const base::TimeDelta buffer_duration_;
+
+ base::Lock read_cb_lock_; // Held while mutating or running |read_cb_|.
+ ReadCB read_cb_;
+ base::TimeTicks next_read_time_;
+
+ // Used to cancel any delayed tasks still inside the worker loop's queue.
+ base::CancelableClosure read_task_cb_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(Worker);
+};
+
+FakeAudioConsumer::FakeAudioConsumer(
+ const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const AudioParameters& params)
+ : worker_(new Worker(worker_loop, params)) {
+}
+
+FakeAudioConsumer::~FakeAudioConsumer() {
+ DCHECK(worker_->IsStopped());
+}
+
+void FakeAudioConsumer::Start(const ReadCB& read_cb) {
+ DCHECK(worker_->IsStopped());
+ worker_->Start(read_cb);
+}
+
+void FakeAudioConsumer::Stop() {
+ worker_->Stop();
+}
+
+FakeAudioConsumer::Worker::Worker(
+ const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const AudioParameters& params)
+ : worker_loop_(worker_loop),
+ audio_bus_(AudioBus::Create(params)),
+ buffer_duration_(base::TimeDelta::FromMicroseconds(
+ params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
+ static_cast<float>(params.sample_rate()))) {
+ audio_bus_->Zero();
+
+ // Worker can be constructed on any thread, but will DCHECK that its
+ // Start/Stop methods are called from the same thread.
+ thread_checker_.DetachFromThread();
+}
+
+FakeAudioConsumer::Worker::~Worker() {
+ DCHECK(read_cb_.is_null());
+}
+
+bool FakeAudioConsumer::Worker::IsStopped() {
+ base::AutoLock scoped_lock(read_cb_lock_);
+ return read_cb_.is_null();
+}
+
+void FakeAudioConsumer::Worker::Start(const ReadCB& read_cb) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!read_cb.is_null());
+ {
+ base::AutoLock scoped_lock(read_cb_lock_);
+ DCHECK(read_cb_.is_null());
+ read_cb_ = read_cb;
+ }
+ worker_loop_->PostTask(FROM_HERE, base::Bind(&Worker::DoStart, this));
+}
+
+void FakeAudioConsumer::Worker::DoStart() {
+ DCHECK(worker_loop_->BelongsToCurrentThread());
+ next_read_time_ = base::TimeTicks::Now();
+ read_task_cb_.Reset(base::Bind(&Worker::DoRead, this));
+ read_task_cb_.callback().Run();
+}
+
+void FakeAudioConsumer::Worker::Stop() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ {
+ base::AutoLock scoped_lock(read_cb_lock_);
+ if (read_cb_.is_null())
+ return;
+ read_cb_.Reset();
+ }
+ worker_loop_->PostTask(FROM_HERE, base::Bind(&Worker::DoCancel, this));
+}
+
+void FakeAudioConsumer::Worker::DoCancel() {
+ DCHECK(worker_loop_->BelongsToCurrentThread());
+ read_task_cb_.Cancel();
+}
+
+void FakeAudioConsumer::Worker::DoRead() {
+ DCHECK(worker_loop_->BelongsToCurrentThread());
+
+ {
+ base::AutoLock scoped_lock(read_cb_lock_);
+ if (!read_cb_.is_null())
+ read_cb_.Run(audio_bus_.get());
+ }
+
+ // Need to account for time spent here due to the cost of |read_cb_| as well
+ // as the imprecision of PostDelayedTask().
+ const base::TimeTicks now = base::TimeTicks::Now();
+ base::TimeDelta delay = next_read_time_ + buffer_duration_ - now;
+
+ // If we're behind, find the next nearest ontime interval.
+ if (delay < base::TimeDelta())
+ delay += buffer_duration_ * (-delay / buffer_duration_ + 1);
+ next_read_time_ = now + delay;
+
+ worker_loop_->PostDelayedTask(FROM_HERE, read_task_cb_.callback(), delay);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/fake_audio_consumer.h b/chromium/media/audio/fake_audio_consumer.h
new file mode 100644
index 00000000000..50373565d00
--- /dev/null
+++ b/chromium/media/audio/fake_audio_consumer.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_FAKE_AUDIO_CONSUMER_H_
+#define MEDIA_AUDIO_FAKE_AUDIO_CONSUMER_H_
+
+#include "base/callback_forward.h"
+#include "base/memory/ref_counted.h"
+#include "media/base/media_export.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+class AudioBus;
+class AudioParameters;
+
+// A fake audio consumer. Using a provided message loop, FakeAudioConsumer will
+// simulate a real time consumer of audio data.
+class MEDIA_EXPORT FakeAudioConsumer {
+ public:
+ // |worker_loop| is the loop on which the ReadCB provided to Start() will be
+ // executed on. This may or may not be the be for the same thread that
+ // invokes the Start/Stop methods.
+ // |params| is used to determine the frequency of callbacks.
+ FakeAudioConsumer(const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const AudioParameters& params);
+ ~FakeAudioConsumer();
+
+ // Start executing |read_cb| at a regular intervals. Stop() must be called by
+ // the same thread before destroying FakeAudioConsumer.
+ typedef base::Callback<void(AudioBus* audio_bus)> ReadCB;
+ void Start(const ReadCB& read_cb);
+
+ // Stop executing the ReadCB provided to Start(). Blocks until the worker
+ // loop is not inside a ReadCB invocation. Safe to call multiple times. Must
+ // be called on the same thread that called Start().
+ void Stop();
+
+ private:
+ // All state and implementation is kept within this ref-counted class because
+ // cancellation of posted tasks must happen on the worker thread some time
+ // after the call to Stop() (on the main thread) returns.
+ class Worker;
+ const scoped_refptr<Worker> worker_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioConsumer);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_FAKE_AUDIO_CONSUMER_H_
diff --git a/chromium/media/audio/fake_audio_consumer_unittest.cc b/chromium/media/audio/fake_audio_consumer_unittest.cc
new file mode 100644
index 00000000000..cb2f75c71da
--- /dev/null
+++ b/chromium/media/audio/fake_audio_consumer_unittest.cc
@@ -0,0 +1,143 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/time/time.h"
+#include "media/audio/audio_buffers_state.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/fake_audio_consumer.h"
+#include "media/audio/simple_sources.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+static const int kTestCallbacks = 5;
+
+class FakeAudioConsumerTest : public testing::Test {
+ public:
+ FakeAudioConsumerTest()
+ : params_(
+ AudioParameters::AUDIO_FAKE, CHANNEL_LAYOUT_STEREO, 44100, 8, 128),
+ fake_consumer_(message_loop_.message_loop_proxy(), params_),
+ source_(params_.channels(), 200.0, params_.sample_rate()) {
+ time_between_callbacks_ = base::TimeDelta::FromMicroseconds(
+ params_.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
+ static_cast<float>(params_.sample_rate()));
+ }
+
+ virtual ~FakeAudioConsumerTest() {}
+
+ void ConsumeData(AudioBus* audio_bus) {
+ source_.OnMoreData(audio_bus, AudioBuffersState());
+ }
+
+ void RunOnAudioThread() {
+ ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
+ fake_consumer_.Start(base::Bind(
+ &FakeAudioConsumerTest::ConsumeData, base::Unretained(this)));
+ }
+
+ void RunOnceOnAudioThread() {
+ ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
+ RunOnAudioThread();
+ // Start() should immediately post a task to run the source callback, so we
+ // should end up with only a single callback being run.
+ message_loop_.PostTask(FROM_HERE, base::Bind(
+ &FakeAudioConsumerTest::EndTest, base::Unretained(this), 1));
+ }
+
+ void StopStartOnAudioThread() {
+ ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
+ fake_consumer_.Stop();
+ RunOnAudioThread();
+ }
+
+ void TimeCallbacksOnAudioThread(int callbacks) {
+ ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
+
+ if (source_.callbacks() == 0) {
+ RunOnAudioThread();
+ start_time_ = base::TimeTicks::Now();
+ }
+
+ // Keep going until we've seen the requested number of callbacks.
+ if (source_.callbacks() < callbacks) {
+ message_loop_.PostDelayedTask(FROM_HERE, base::Bind(
+ &FakeAudioConsumerTest::TimeCallbacksOnAudioThread,
+ base::Unretained(this), callbacks), time_between_callbacks_ / 2);
+ } else {
+ end_time_ = base::TimeTicks::Now();
+ EndTest(callbacks);
+ }
+ }
+
+ void EndTest(int callbacks) {
+ ASSERT_TRUE(message_loop_.message_loop_proxy()->BelongsToCurrentThread());
+ fake_consumer_.Stop();
+ EXPECT_LE(callbacks, source_.callbacks());
+ message_loop_.PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+ }
+
+ protected:
+ base::MessageLoop message_loop_;
+ AudioParameters params_;
+ FakeAudioConsumer fake_consumer_;
+ SineWaveAudioSource source_;
+ base::TimeTicks start_time_;
+ base::TimeTicks end_time_;
+ base::TimeDelta time_between_callbacks_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioConsumerTest);
+};
+
+// Ensure the fake audio stream runs on the audio thread and handles fires
+// callbacks to the AudioSourceCallback.
+TEST_F(FakeAudioConsumerTest, FakeStreamBasicCallback) {
+ message_loop_.PostTask(FROM_HERE, base::Bind(
+ &FakeAudioConsumerTest::RunOnceOnAudioThread,
+ base::Unretained(this)));
+ message_loop_.Run();
+}
+
+// Ensure the time between callbacks is sane.
+TEST_F(FakeAudioConsumerTest, TimeBetweenCallbacks) {
+ message_loop_.PostTask(FROM_HERE, base::Bind(
+ &FakeAudioConsumerTest::TimeCallbacksOnAudioThread,
+ base::Unretained(this), kTestCallbacks));
+ message_loop_.Run();
+
+ // There are only (kTestCallbacks - 1) intervals between kTestCallbacks.
+ base::TimeDelta actual_time_between_callbacks =
+ (end_time_ - start_time_) / (source_.callbacks() - 1);
+
+ // Ensure callback time is no faster than the expected time between callbacks.
+ EXPECT_TRUE(actual_time_between_callbacks >= time_between_callbacks_);
+
+ // Softly check if the callback time is no slower than twice the expected time
+ // between callbacks. Since this test runs on the bots we can't be too strict
+ // with the bounds.
+ if (actual_time_between_callbacks > 2 * time_between_callbacks_)
+ LOG(ERROR) << "Time between fake audio callbacks is too large!";
+}
+
+// Ensure Start()/Stop() on the stream doesn't generate too many callbacks. See
+// http://crbug.com/159049
+TEST_F(FakeAudioConsumerTest, StartStopClearsCallbacks) {
+ message_loop_.PostTask(FROM_HERE, base::Bind(
+ &FakeAudioConsumerTest::TimeCallbacksOnAudioThread,
+ base::Unretained(this), kTestCallbacks));
+
+ // Issue a Stop() / Start() in between expected callbacks to maximize the
+ // chance of catching the FakeAudioOutputStream doing the wrong thing.
+ message_loop_.PostDelayedTask(FROM_HERE, base::Bind(
+ &FakeAudioConsumerTest::StopStartOnAudioThread,
+ base::Unretained(this)), time_between_callbacks_ / 2);
+
+ // EndTest() will ensure the proper number of callbacks have occurred.
+ message_loop_.Run();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/fake_audio_input_stream.cc b/chromium/media/audio/fake_audio_input_stream.cc
new file mode 100644
index 00000000000..a00a9b62001
--- /dev/null
+++ b/chromium/media/audio/fake_audio_input_stream.cc
@@ -0,0 +1,170 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/fake_audio_input_stream.h"
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "media/audio/audio_manager_base.h"
+
+using base::TimeTicks;
+using base::TimeDelta;
+
+namespace media {
+
+namespace {
+
+// These values are based on experiments for local-to-local
+// PeerConnection to demonstrate audio/video synchronization.
+const int kBeepDurationMilliseconds = 20;
+const int kBeepFrequency = 400;
+
+struct BeepContext {
+ BeepContext() : beep_once(false) {}
+ base::Lock beep_lock;
+ bool beep_once;
+};
+
+static base::LazyInstance<BeepContext> g_beep_context =
+ LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+AudioInputStream* FakeAudioInputStream::MakeFakeStream(
+ AudioManagerBase* manager,
+ const AudioParameters& params) {
+ return new FakeAudioInputStream(manager, params);
+}
+
+FakeAudioInputStream::FakeAudioInputStream(AudioManagerBase* manager,
+ const AudioParameters& params)
+ : audio_manager_(manager),
+ callback_(NULL),
+ buffer_size_((params.channels() * params.bits_per_sample() *
+ params.frames_per_buffer()) / 8),
+ params_(params),
+ thread_("FakeAudioRecordingThread"),
+ callback_interval_(base::TimeDelta::FromMilliseconds(
+ (params.frames_per_buffer() * 1000) / params.sample_rate())),
+ beep_duration_in_buffers_(
+ kBeepDurationMilliseconds * params.sample_rate() /
+ params.frames_per_buffer() / 1000),
+ beep_generated_in_buffers_(0),
+ beep_period_in_frames_(params.sample_rate() / kBeepFrequency),
+ frames_elapsed_(0) {
+}
+
+FakeAudioInputStream::~FakeAudioInputStream() {}
+
+bool FakeAudioInputStream::Open() {
+ buffer_.reset(new uint8[buffer_size_]);
+ memset(buffer_.get(), 0, buffer_size_);
+ return true;
+}
+
+void FakeAudioInputStream::Start(AudioInputCallback* callback) {
+ DCHECK(!thread_.IsRunning());
+ callback_ = callback;
+ last_callback_time_ = TimeTicks::Now();
+ thread_.Start();
+ thread_.message_loop()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&FakeAudioInputStream::DoCallback, base::Unretained(this)),
+ callback_interval_);
+}
+
+void FakeAudioInputStream::DoCallback() {
+ DCHECK(callback_);
+
+ memset(buffer_.get(), 0, buffer_size_);
+
+ bool should_beep = false;
+ {
+ BeepContext* beep_context = g_beep_context.Pointer();
+ base::AutoLock auto_lock(beep_context->beep_lock);
+ should_beep = beep_context->beep_once;
+ beep_context->beep_once = false;
+ }
+
+ // If this object was instructed to generate a beep or has started to
+ // generate a beep sound.
+ if (should_beep || beep_generated_in_buffers_) {
+ // Compute the number of frames to output high value. Then compute the
+ // number of bytes based on channels and bits per channel.
+ int high_frames = beep_period_in_frames_ / 2;
+ int high_bytes = high_frames * params_.bits_per_sample() *
+ params_.channels() / 8;
+
+ // Separate high and low with the same number of bytes to generate a
+ // square wave.
+ int position = 0;
+ while (position + high_bytes <= buffer_size_) {
+ // Write high values first.
+ memset(buffer_.get() + position, 128, high_bytes);
+
+ // Then leave low values in the buffer with |high_bytes|.
+ position += high_bytes * 2;
+ }
+
+ ++beep_generated_in_buffers_;
+ if (beep_generated_in_buffers_ >= beep_duration_in_buffers_)
+ beep_generated_in_buffers_ = 0;
+ }
+
+ callback_->OnData(this, buffer_.get(), buffer_size_, buffer_size_, 1.0);
+ frames_elapsed_ += params_.frames_per_buffer();
+
+ const TimeTicks now = TimeTicks::Now();
+ base::TimeDelta next_callback_time =
+ last_callback_time_ + callback_interval_ * 2 - now;
+
+ // If we are falling behind, try to catch up as much as we can in the next
+ // callback.
+ if (next_callback_time < base::TimeDelta())
+ next_callback_time = base::TimeDelta();
+
+ last_callback_time_ = now;
+ thread_.message_loop()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&FakeAudioInputStream::DoCallback, base::Unretained(this)),
+ next_callback_time);
+}
+
+void FakeAudioInputStream::Stop() {
+ thread_.Stop();
+}
+
+void FakeAudioInputStream::Close() {
+ if (callback_) {
+ callback_->OnClose(this);
+ callback_ = NULL;
+ }
+ audio_manager_->ReleaseInputStream(this);
+}
+
+double FakeAudioInputStream::GetMaxVolume() {
+ return 1.0;
+}
+
+void FakeAudioInputStream::SetVolume(double volume) {
+}
+
+double FakeAudioInputStream::GetVolume() {
+ return 1.0;
+}
+
+void FakeAudioInputStream::SetAutomaticGainControl(bool enabled) {}
+
+bool FakeAudioInputStream::GetAutomaticGainControl() {
+ return true;
+}
+
+// static
+void FakeAudioInputStream::BeepOnce() {
+ BeepContext* beep_context = g_beep_context.Pointer();
+ base::AutoLock auto_lock(beep_context->beep_lock);
+ beep_context->beep_once = true;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/fake_audio_input_stream.h b/chromium/media/audio/fake_audio_input_stream.h
new file mode 100644
index 00000000000..5879ab39763
--- /dev/null
+++ b/chromium/media/audio/fake_audio_input_stream.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A fake implementation of AudioInputStream, useful for testing purposes.
+
+#ifndef MEDIA_AUDIO_FAKE_AUDIO_INPUT_STREAM_H_
+#define MEDIA_AUDIO_FAKE_AUDIO_INPUT_STREAM_H_
+
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerBase;
+
+class MEDIA_EXPORT FakeAudioInputStream
+ : public AudioInputStream {
+ public:
+ static AudioInputStream* MakeFakeStream(AudioManagerBase* manager,
+ const AudioParameters& params);
+
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioInputCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual double GetMaxVolume() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual double GetVolume() OVERRIDE;
+ virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
+ virtual bool GetAutomaticGainControl() OVERRIDE;
+
+ // Generate one beep sound. This method is called by
+ // FakeVideoCaptureDevice to test audio/video synchronization.
+ // This is a static method because FakeVideoCaptureDevice is
+ // disconnected from an audio device. This means only one instance of
+ // this class gets to respond, which is okay because we assume there's
+ // only one stream for this testing purpose.
+ // TODO(hclam): Make this non-static. To do this we'll need to fix
+ // crbug.com/159053 such that video capture device is aware of audio
+ // input stream.
+ static void BeepOnce();
+
+ private:
+ FakeAudioInputStream(AudioManagerBase* manager,
+ const AudioParameters& params);
+
+ virtual ~FakeAudioInputStream();
+
+ void DoCallback();
+
+ AudioManagerBase* audio_manager_;
+ AudioInputCallback* callback_;
+ scoped_ptr<uint8[]> buffer_;
+ int buffer_size_;
+ AudioParameters params_;
+ base::Thread thread_;
+ base::TimeTicks last_callback_time_;
+ base::TimeDelta callback_interval_;
+ int beep_duration_in_buffers_;
+ int beep_generated_in_buffers_;
+ int beep_period_in_frames_;
+ int frames_elapsed_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_FAKE_AUDIO_INPUT_STREAM_H_
diff --git a/chromium/media/audio/fake_audio_output_stream.cc b/chromium/media/audio/fake_audio_output_stream.cc
new file mode 100644
index 00000000000..b21a054f13b
--- /dev/null
+++ b/chromium/media/audio/fake_audio_output_stream.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/fake_audio_output_stream.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/audio/audio_manager_base.h"
+
+namespace media {
+
+// static
+AudioOutputStream* FakeAudioOutputStream::MakeFakeStream(
+ AudioManagerBase* manager, const AudioParameters& params) {
+ return new FakeAudioOutputStream(manager, params);
+}
+
+FakeAudioOutputStream::FakeAudioOutputStream(AudioManagerBase* manager,
+ const AudioParameters& params)
+ : audio_manager_(manager),
+ callback_(NULL),
+ fake_consumer_(manager->GetMessageLoop(), params) {
+}
+
+FakeAudioOutputStream::~FakeAudioOutputStream() {
+ DCHECK(!callback_);
+}
+
+bool FakeAudioOutputStream::Open() {
+ DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ return true;
+}
+
+void FakeAudioOutputStream::Start(AudioSourceCallback* callback) {
+ DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ callback_ = callback;
+ fake_consumer_.Start(base::Bind(
+ &FakeAudioOutputStream::CallOnMoreData, base::Unretained(this)));
+}
+
+void FakeAudioOutputStream::Stop() {
+ DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ fake_consumer_.Stop();
+ callback_ = NULL;
+}
+
+void FakeAudioOutputStream::Close() {
+ DCHECK(!callback_);
+ DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ audio_manager_->ReleaseOutputStream(this);
+}
+
+void FakeAudioOutputStream::SetVolume(double volume) {};
+
+void FakeAudioOutputStream::GetVolume(double* volume) {
+ *volume = 0;
+};
+
+void FakeAudioOutputStream::CallOnMoreData(AudioBus* audio_bus) {
+ DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ callback_->OnMoreData(audio_bus, AudioBuffersState());
+}
+
+} // namespace media
diff --git a/chromium/media/audio/fake_audio_output_stream.h b/chromium/media/audio/fake_audio_output_stream.h
new file mode 100644
index 00000000000..f7971bfc64e
--- /dev/null
+++ b/chromium/media/audio/fake_audio_output_stream.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_FAKE_AUDIO_OUTPUT_STREAM_H_
+#define MEDIA_AUDIO_FAKE_AUDIO_OUTPUT_STREAM_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/fake_audio_consumer.h"
+
+namespace media {
+
+class AudioManagerBase;
+
+// A fake implementation of AudioOutputStream. Used for testing and when a real
+// audio output device is unavailable or refusing output (e.g. remote desktop).
+// Callbacks are driven on the AudioManager's message loop.
+class MEDIA_EXPORT FakeAudioOutputStream : public AudioOutputStream {
+ public:
+ static AudioOutputStream* MakeFakeStream(AudioManagerBase* manager,
+ const AudioParameters& params);
+
+ // AudioOutputStream implementation.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+ virtual void Close() OVERRIDE;
+
+ private:
+ FakeAudioOutputStream(AudioManagerBase* manager,
+ const AudioParameters& params);
+ virtual ~FakeAudioOutputStream();
+
+ // Task that periodically calls OnMoreData() to consume audio data.
+ void CallOnMoreData(AudioBus* audio_bus);
+
+ AudioManagerBase* audio_manager_;
+ AudioSourceCallback* callback_;
+ FakeAudioConsumer fake_consumer_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeAudioOutputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_FAKE_AUDIO_OUTPUT_STREAM_H_
diff --git a/chromium/media/audio/ios/audio_manager_ios.h b/chromium/media/audio/ios/audio_manager_ios.h
new file mode 100644
index 00000000000..19751502fd2
--- /dev/null
+++ b/chromium/media/audio/ios/audio_manager_ios.h
@@ -0,0 +1,56 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
+#define MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
+
+#include "base/basictypes.h"
+#include "media/audio/audio_manager_base.h"
+
+namespace media {
+
+class PCMQueueInAudioInputStream;
+
+// iOS implementation of the AudioManager singleton. Supports only audio input.
+class MEDIA_EXPORT AudioManagerIOS : public AudioManagerBase {
+ public:
+ AudioManagerIOS();
+
+ // Implementation of AudioManager.
+ virtual bool HasAudioOutputDevices() OVERRIDE;
+ virtual bool HasAudioInputDevices() OVERRIDE;
+ virtual AudioOutputStream* MakeAudioOutputStream(
+ const AudioParameters& params,
+ const std::string& input_device_id) OVERRIDE;
+ virtual AudioInputStream* MakeAudioInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual AudioParameters GetInputStreamParameters(
+ const std::string& device_id) OVERRIDE;
+
+ // Implementation of AudioManagerBase.
+ virtual AudioOutputStream* MakeLinearOutputStream(
+ const AudioParameters& params) OVERRIDE;
+ virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& input_device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE;
+ virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE;
+
+ protected:
+ virtual ~AudioManagerIOS();
+
+ virtual AudioParameters GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioManagerIOS);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
diff --git a/chromium/media/audio/ios/audio_manager_ios.mm b/chromium/media/audio/ios/audio_manager_ios.mm
new file mode 100644
index 00000000000..49479302efc
--- /dev/null
+++ b/chromium/media/audio/ios/audio_manager_ios.mm
@@ -0,0 +1,140 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/ios/audio_manager_ios.h"
+
+#import <AudioToolbox/AudioToolbox.h>
+#import <AVFoundation/AVFoundation.h>
+
+#include "base/sys_info.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/fake_audio_input_stream.h"
+#include "media/audio/ios/audio_session_util_ios.h"
+#include "media/audio/mac/audio_input_mac.h"
+#include "media/base/channel_layout.h"
+#include "media/base/limits.h"
+
+namespace media {
+
+enum { kMaxInputChannels = 2 };
+
+AudioManagerIOS::AudioManagerIOS() {
+}
+
+AudioManagerIOS::~AudioManagerIOS() {
+ Shutdown();
+}
+
+bool AudioManagerIOS::HasAudioOutputDevices() {
+ return false;
+}
+
+bool AudioManagerIOS::HasAudioInputDevices() {
+ if (!InitAudioSessionIOS())
+ return false;
+ // Note that the |kAudioSessionProperty_AudioInputAvailable| property is a
+ // 32-bit integer, not a boolean.
+ UInt32 property_size;
+ OSStatus error =
+ AudioSessionGetPropertySize(kAudioSessionProperty_AudioInputAvailable,
+ &property_size);
+ if (error != kAudioSessionNoError)
+ return false;
+ UInt32 audio_input_is_available = false;
+ DCHECK(property_size == sizeof(audio_input_is_available));
+ error = AudioSessionGetProperty(kAudioSessionProperty_AudioInputAvailable,
+ &property_size,
+ &audio_input_is_available);
+ return error == kAudioSessionNoError ? audio_input_is_available : false;
+}
+
+AudioParameters AudioManagerIOS::GetInputStreamParameters(
+ const std::string& device_id) {
+ // TODO(xians): figure out the right input sample rate and buffer size to
+ // achieve the best audio performance for iOS devices.
+ // TODO(xians): query the native channel layout for the specific device.
+ static const int kDefaultSampleRate = 48000;
+ static const int kDefaultBufferSize = 2048;
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kDefaultSampleRate, 16, kDefaultBufferSize);
+}
+
+AudioOutputStream* AudioManagerIOS::MakeAudioOutputStream(
+ const AudioParameters& params, const std::string& input_device_id) {
+ NOTIMPLEMENTED(); // Only input is supported on iOS.
+ return NULL;
+}
+
+AudioInputStream* AudioManagerIOS::MakeAudioInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ // Current line of iOS devices has only one audio input.
+ // Ignore the device_id (unittest uses a test value in it).
+ if (!params.IsValid() || (params.channels() > kMaxInputChannels))
+ return NULL;
+
+ if (params.format() == AudioParameters::AUDIO_FAKE)
+ return FakeAudioInputStream::MakeFakeStream(this, params);
+ else if (params.format() == AudioParameters::AUDIO_PCM_LINEAR)
+ return new PCMQueueInAudioInputStream(this, params);
+ return NULL;
+}
+
+AudioOutputStream* AudioManagerIOS::MakeLinearOutputStream(
+ const AudioParameters& params) {
+ NOTIMPLEMENTED(); // Only input is supported on iOS.
+ return NULL;
+}
+
+AudioOutputStream* AudioManagerIOS::MakeLowLatencyOutputStream(
+ const AudioParameters& params, const std::string& input_device_id) {
+ NOTIMPLEMENTED(); // Only input is supported on iOS.
+ return NULL;
+}
+
+AudioInputStream* AudioManagerIOS::MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ return MakeAudioInputStream(params, device_id);
+}
+
+AudioInputStream* AudioManagerIOS::MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ NOTIMPLEMENTED(); // Only linear audio input is supported on iOS.
+ return MakeAudioInputStream(params, device_id);
+}
+
+
+AudioParameters AudioManagerIOS::GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) {
+ // TODO(xians): handle the case when input_params is valid.
+// TODO(xians): figure out the right output sample rate and buffer size to
+ // achieve the best audio performance for iOS devices.
+ // TODO(xians): add support to --audio-buffer-size flag.
+ static const int kDefaultSampleRate = 48000;
+ static const int kDefaultBufferSize = 2048;
+ if (input_params.IsValid()) {
+ NOTREACHED();
+ }
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kDefaultSampleRate, 16, kDefaultBufferSize);
+}
+
+// Called by the stream when it has been released by calling Close().
+void AudioManagerIOS::ReleaseOutputStream(AudioOutputStream* stream) {
+ NOTIMPLEMENTED(); // Only input is supported on iOS.
+}
+
+// Called by the stream when it has been released by calling Close().
+void AudioManagerIOS::ReleaseInputStream(AudioInputStream* stream) {
+ delete stream;
+}
+
+// static
+AudioManager* CreateAudioManager() {
+ return new AudioManagerIOS();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/ios/audio_manager_ios_unittest.cc b/chromium/media/audio/ios/audio_manager_ios_unittest.cc
new file mode 100644
index 00000000000..30ebc04f204
--- /dev/null
+++ b/chromium/media/audio/ios/audio_manager_ios_unittest.cc
@@ -0,0 +1,34 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using namespace media;
+
+// Test that input is supported and output is not.
+TEST(IOSAudioTest, AudioSupport) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ ASSERT_TRUE(NULL != audio_manager.get());
+ ASSERT_FALSE(audio_manager->HasAudioOutputDevices());
+ ASSERT_TRUE(audio_manager->HasAudioInputDevices());
+}
+
+// Test that input stream can be opened and closed.
+TEST(IOSAudioTest, InputStreamOpenAndClose) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ ASSERT_TRUE(NULL != audio_manager.get());
+ if (!audio_manager->HasAudioInputDevices())
+ return;
+ AudioInputStream* ias = audio_manager->MakeAudioInputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 8000, 16, 1024),
+ std::string("test_device"));
+ ASSERT_TRUE(NULL != ias);
+ EXPECT_TRUE(ias->Open());
+ ias->Close();
+}
diff --git a/chromium/media/audio/ios/audio_session_util_ios.h b/chromium/media/audio/ios/audio_session_util_ios.h
new file mode 100644
index 00000000000..175db91fae0
--- /dev/null
+++ b/chromium/media/audio/ios/audio_session_util_ios.h
@@ -0,0 +1,17 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_IOS_AUDIO_SESSION_UTIL_IOS_H_
+#define MEDIA_AUDIO_IOS_AUDIO_SESSION_UTIL_IOS_H_
+
+namespace media {
+
+// Initializes and configures the audio session, returning a bool indicating
+// whether initialization was successful. Can be called multiple times.
+// Safe to call from any thread.
+bool InitAudioSessionIOS();
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_IOS_AUDIO_SESSION_UTIL_IOS_H_
diff --git a/chromium/media/audio/ios/audio_session_util_ios.mm b/chromium/media/audio/ios/audio_session_util_ios.mm
new file mode 100644
index 00000000000..a4071a04cc1
--- /dev/null
+++ b/chromium/media/audio/ios/audio_session_util_ios.mm
@@ -0,0 +1,40 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/ios/audio_session_util_ios.h"
+
+#include <AVFoundation/AVFoundation.h>
+
+#include "base/logging.h"
+
+namespace media {
+
+bool InitAudioSessionIOS() {
+ static bool kSessionInitialized = false;
+ static dispatch_once_t once = 0;
+ dispatch_once(&once, ^{
+ OSStatus error = AudioSessionInitialize(NULL, NULL, NULL, NULL);
+ if (error != kAudioSessionNoError)
+ DLOG(ERROR) << "AudioSessionInitialize OSStatus error: " << error;
+ BOOL result = [[AVAudioSession sharedInstance]
+ setCategory:AVAudioSessionCategoryPlayAndRecord
+ error:nil];
+ if (!result)
+ DLOG(ERROR) << "AVAudioSession setCategory failed";
+ UInt32 allowMixing = true;
+ AudioSessionSetProperty(
+ kAudioSessionProperty_OverrideCategoryMixWithOthers,
+ sizeof(allowMixing), &allowMixing);
+ UInt32 defaultToSpeaker = true;
+ AudioSessionSetProperty(
+ kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
+ sizeof(defaultToSpeaker),
+ &defaultToSpeaker);
+    // Speech input cannot be used if either of these two conditions fails.
+ kSessionInitialized = (error == kAudioSessionNoError) && result;
+ });
+ return kSessionInitialized;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/linux/alsa_input.cc b/chromium/media/audio/linux/alsa_input.cc
new file mode 100644
index 00000000000..929cbe79063
--- /dev/null
+++ b/chromium/media/audio/linux/alsa_input.cc
@@ -0,0 +1,340 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/linux/alsa_input.h"
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/time/time.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/linux/alsa_output.h"
+#include "media/audio/linux/alsa_util.h"
+#include "media/audio/linux/alsa_wrapper.h"
+#include "media/audio/linux/audio_manager_linux.h"
+
+namespace media {
+
+static const int kNumPacketsInRingBuffer = 3;
+
+static const char kDefaultDevice1[] = "default";
+static const char kDefaultDevice2[] = "plug:default";
+
+const char AlsaPcmInputStream::kAutoSelectDevice[] = "";
+
+AlsaPcmInputStream::AlsaPcmInputStream(AudioManagerLinux* audio_manager,
+ const std::string& device_name,
+ const AudioParameters& params,
+ AlsaWrapper* wrapper)
+ : audio_manager_(audio_manager),
+ device_name_(device_name),
+ params_(params),
+ bytes_per_buffer_(params.frames_per_buffer() *
+ (params.channels() * params.bits_per_sample()) / 8),
+ wrapper_(wrapper),
+ buffer_duration_(base::TimeDelta::FromMicroseconds(
+ params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
+ static_cast<float>(params.sample_rate()))),
+ callback_(NULL),
+ device_handle_(NULL),
+ mixer_handle_(NULL),
+ mixer_element_handle_(NULL),
+ weak_factory_(this),
+ read_callback_behind_schedule_(false) {
+}
+
+AlsaPcmInputStream::~AlsaPcmInputStream() {}
+
+bool AlsaPcmInputStream::Open() {
+ if (device_handle_)
+ return false; // Already open.
+
+ snd_pcm_format_t pcm_format = alsa_util::BitsToFormat(
+ params_.bits_per_sample());
+ if (pcm_format == SND_PCM_FORMAT_UNKNOWN) {
+ LOG(WARNING) << "Unsupported bits per sample: "
+ << params_.bits_per_sample();
+ return false;
+ }
+
+ uint32 latency_us =
+ buffer_duration_.InMicroseconds() * kNumPacketsInRingBuffer;
+
+ // Use the same minimum required latency as output.
+ latency_us = std::max(latency_us, AlsaPcmOutputStream::kMinLatencyMicros);
+
+ if (device_name_ == kAutoSelectDevice) {
+ const char* device_names[] = { kDefaultDevice1, kDefaultDevice2 };
+ for (size_t i = 0; i < arraysize(device_names); ++i) {
+ device_handle_ = alsa_util::OpenCaptureDevice(
+ wrapper_, device_names[i], params_.channels(),
+ params_.sample_rate(), pcm_format, latency_us);
+
+ if (device_handle_) {
+ device_name_ = device_names[i];
+ break;
+ }
+ }
+ } else {
+ device_handle_ = alsa_util::OpenCaptureDevice(wrapper_,
+ device_name_.c_str(),
+ params_.channels(),
+ params_.sample_rate(),
+ pcm_format, latency_us);
+ }
+
+ if (device_handle_) {
+ audio_buffer_.reset(new uint8[bytes_per_buffer_]);
+
+ // Open the microphone mixer.
+ mixer_handle_ = alsa_util::OpenMixer(wrapper_, device_name_);
+ if (mixer_handle_) {
+ mixer_element_handle_ = alsa_util::LoadCaptureMixerElement(
+ wrapper_, mixer_handle_);
+ }
+ }
+
+ return device_handle_ != NULL;
+}
+
+void AlsaPcmInputStream::Start(AudioInputCallback* callback) {
+ DCHECK(!callback_ && callback);
+ callback_ = callback;
+ StartAgc();
+ int error = wrapper_->PcmPrepare(device_handle_);
+ if (error < 0) {
+ HandleError("PcmPrepare", error);
+ } else {
+ error = wrapper_->PcmStart(device_handle_);
+ if (error < 0)
+ HandleError("PcmStart", error);
+ }
+
+ if (error < 0) {
+ callback_ = NULL;
+ } else {
+ // We start reading data half |buffer_duration_| later than when the
+ // buffer might have got filled, to accommodate some delays in the audio
+ // driver. This could also give us a smooth read sequence going forward.
+ base::TimeDelta delay = buffer_duration_ + buffer_duration_ / 2;
+ next_read_time_ = base::TimeTicks::Now() + delay;
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&AlsaPcmInputStream::ReadAudio, weak_factory_.GetWeakPtr()),
+ delay);
+ }
+}
+
+bool AlsaPcmInputStream::Recover(int original_error) {
+ int error = wrapper_->PcmRecover(device_handle_, original_error, 1);
+ if (error < 0) {
+ // Docs say snd_pcm_recover returns the original error if it is not one
+ // of the recoverable ones, so this log message will probably contain the
+ // same error twice.
+ LOG(WARNING) << "Unable to recover from \""
+ << wrapper_->StrError(original_error) << "\": "
+ << wrapper_->StrError(error);
+ return false;
+ }
+
+ if (original_error == -EPIPE) { // Buffer underrun/overrun.
+ // For capture streams we have to repeat the explicit start() to get
+ // data flowing again.
+ error = wrapper_->PcmStart(device_handle_);
+ if (error < 0) {
+ HandleError("PcmStart", error);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+snd_pcm_sframes_t AlsaPcmInputStream::GetCurrentDelay() {
+ snd_pcm_sframes_t delay = -1;
+
+ int error = wrapper_->PcmDelay(device_handle_, &delay);
+ if (error < 0)
+ Recover(error);
+
+  // snd_pcm_delay() may not work at the beginning of the stream. In that case
+  // return the delay of the data we know is currently in ALSA's buffer.
+ if (delay < 0)
+ delay = wrapper_->PcmAvailUpdate(device_handle_);
+
+ return delay;
+}
+
+void AlsaPcmInputStream::ReadAudio() {
+ DCHECK(callback_);
+
+ snd_pcm_sframes_t frames = wrapper_->PcmAvailUpdate(device_handle_);
+ if (frames < 0) { // Potentially recoverable error?
+ LOG(WARNING) << "PcmAvailUpdate(): " << wrapper_->StrError(frames);
+ Recover(frames);
+ }
+
+ if (frames < params_.frames_per_buffer()) {
+ // Not enough data yet or error happened. In both cases wait for a very
+ // small duration before checking again.
+    // Even though the read callback was behind schedule, there is no data, so
+ // reset the next_read_time_.
+ if (read_callback_behind_schedule_) {
+ next_read_time_ = base::TimeTicks::Now();
+ read_callback_behind_schedule_ = false;
+ }
+
+ base::TimeDelta next_check_time = buffer_duration_ / 2;
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&AlsaPcmInputStream::ReadAudio, weak_factory_.GetWeakPtr()),
+ next_check_time);
+ return;
+ }
+
+ int num_buffers = frames / params_.frames_per_buffer();
+ uint32 hardware_delay_bytes =
+ static_cast<uint32>(GetCurrentDelay() * params_.GetBytesPerFrame());
+ double normalized_volume = 0.0;
+
+ // Update the AGC volume level once every second. Note that, |volume| is
+ // also updated each time SetVolume() is called through IPC by the
+ // render-side AGC.
+ GetAgcVolume(&normalized_volume);
+
+ while (num_buffers--) {
+ int frames_read = wrapper_->PcmReadi(device_handle_, audio_buffer_.get(),
+ params_.frames_per_buffer());
+ if (frames_read == params_.frames_per_buffer()) {
+ callback_->OnData(this, audio_buffer_.get(), bytes_per_buffer_,
+ hardware_delay_bytes, normalized_volume);
+ } else {
+ LOG(WARNING) << "PcmReadi returning less than expected frames: "
+ << frames_read << " vs. " << params_.frames_per_buffer()
+ << ". Dropping this buffer.";
+ }
+ }
+
+ next_read_time_ += buffer_duration_;
+ base::TimeDelta delay = next_read_time_ - base::TimeTicks::Now();
+ if (delay < base::TimeDelta()) {
+ DVLOG(1) << "Audio read callback behind schedule by "
+ << (buffer_duration_ - delay).InMicroseconds()
+ << " (us).";
+ // Read callback is behind schedule. Assuming there is data pending in
+  // the soundcard, invoke the read callback immediately in order to catch up.
+ read_callback_behind_schedule_ = true;
+ delay = base::TimeDelta();
+ }
+
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&AlsaPcmInputStream::ReadAudio, weak_factory_.GetWeakPtr()),
+ delay);
+}
+
+void AlsaPcmInputStream::Stop() {
+ if (!device_handle_ || !callback_)
+ return;
+
+ StopAgc();
+
+ weak_factory_.InvalidateWeakPtrs(); // Cancel the next scheduled read.
+ int error = wrapper_->PcmDrop(device_handle_);
+ if (error < 0)
+ HandleError("PcmDrop", error);
+}
+
+void AlsaPcmInputStream::Close() {
+ if (device_handle_) {
+ weak_factory_.InvalidateWeakPtrs(); // Cancel the next scheduled read.
+ int error = alsa_util::CloseDevice(wrapper_, device_handle_);
+ if (error < 0)
+ HandleError("PcmClose", error);
+
+ if (mixer_handle_)
+ alsa_util::CloseMixer(wrapper_, mixer_handle_, device_name_);
+
+ audio_buffer_.reset();
+ device_handle_ = NULL;
+ mixer_handle_ = NULL;
+ mixer_element_handle_ = NULL;
+
+ if (callback_)
+ callback_->OnClose(this);
+ }
+
+ audio_manager_->ReleaseInputStream(this);
+}
+
+double AlsaPcmInputStream::GetMaxVolume() {
+ if (!mixer_handle_ || !mixer_element_handle_) {
+ DLOG(WARNING) << "GetMaxVolume is not supported for " << device_name_;
+ return 0.0;
+ }
+
+ if (!wrapper_->MixerSelemHasCaptureVolume(mixer_element_handle_)) {
+ DLOG(WARNING) << "Unsupported microphone volume for " << device_name_;
+ return 0.0;
+ }
+
+ long min = 0;
+ long max = 0;
+ if (wrapper_->MixerSelemGetCaptureVolumeRange(mixer_element_handle_,
+ &min,
+ &max)) {
+ DLOG(WARNING) << "Unsupported max microphone volume for " << device_name_;
+ return 0.0;
+ }
+ DCHECK(min == 0);
+ DCHECK(max > 0);
+
+ return static_cast<double>(max);
+}
+
+void AlsaPcmInputStream::SetVolume(double volume) {
+ if (!mixer_handle_ || !mixer_element_handle_) {
+ DLOG(WARNING) << "SetVolume is not supported for " << device_name_;
+ return;
+ }
+
+ int error = wrapper_->MixerSelemSetCaptureVolumeAll(
+ mixer_element_handle_, static_cast<long>(volume));
+ if (error < 0) {
+ DLOG(WARNING) << "Unable to set volume for " << device_name_;
+ }
+
+ // Update the AGC volume level based on the last setting above. Note that,
+ // the volume-level resolution is not infinite and it is therefore not
+ // possible to assume that the volume provided as input parameter can be
+ // used directly. Instead, a new query to the audio hardware is required.
+ // This method does nothing if AGC is disabled.
+ UpdateAgcVolume();
+}
+
+double AlsaPcmInputStream::GetVolume() {
+ if (!mixer_handle_ || !mixer_element_handle_) {
+ DLOG(WARNING) << "GetVolume is not supported for " << device_name_;
+ return 0.0;
+ }
+
+ long current_volume = 0;
+ int error = wrapper_->MixerSelemGetCaptureVolume(
+ mixer_element_handle_, static_cast<snd_mixer_selem_channel_id_t>(0),
+ &current_volume);
+ if (error < 0) {
+ DLOG(WARNING) << "Unable to get volume for " << device_name_;
+ return 0.0;
+ }
+
+ return static_cast<double>(current_volume);
+}
+
+void AlsaPcmInputStream::HandleError(const char* method, int error) {
+ LOG(WARNING) << method << ": " << wrapper_->StrError(error);
+ callback_->OnError(this);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/linux/alsa_input.h b/chromium/media/audio/linux/alsa_input.h
new file mode 100644
index 00000000000..888e4780ac0
--- /dev/null
+++ b/chromium/media/audio/linux/alsa_input.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_LINUX_ALSA_INPUT_H_
+#define MEDIA_AUDIO_LINUX_ALSA_INPUT_H_
+
+#include <alsa/asoundlib.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "media/audio/agc_audio_stream.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AlsaWrapper;
+class AudioManagerLinux;
+
+// Provides an input stream for audio capture based on the ALSA PCM interface.
+// This object is not thread safe and all methods should be invoked in the
+// thread that created the object.
+class AlsaPcmInputStream : public AgcAudioStream<AudioInputStream> {
+ public:
+ // Pass this to the constructor if you want to attempt auto-selection
+ // of the audio recording device.
+ static const char kAutoSelectDevice[];
+
+  // Create a PCM input stream for the ALSA device identified by
+ // |device_name|. If unsure of what to use for |device_name|, use
+ // |kAutoSelectDevice|.
+ AlsaPcmInputStream(AudioManagerLinux* audio_manager,
+ const std::string& device_name,
+ const AudioParameters& params,
+ AlsaWrapper* wrapper);
+
+ virtual ~AlsaPcmInputStream();
+
+ // Implementation of AudioInputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioInputCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual double GetMaxVolume() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual double GetVolume() OVERRIDE;
+
+ private:
+ // Logs the error and invokes any registered callbacks.
+ void HandleError(const char* method, int error);
+
+ // Reads one or more buffers of audio from the device, passes on to the
+ // registered callback and schedules the next read.
+ void ReadAudio();
+
+ // Recovers from any device errors if possible.
+ bool Recover(int error);
+
+ // Utility function for talking with the ALSA API.
+ snd_pcm_sframes_t GetCurrentDelay();
+
+ // Non-refcounted pointer back to the audio manager.
+ // The AudioManager indirectly holds on to stream objects, so we don't
+ // want circular references. Additionally, stream objects live on the audio
+ // thread, which is owned by the audio manager and we don't want to addref
+ // the manager from that thread.
+ AudioManagerLinux* audio_manager_;
+ std::string device_name_;
+ AudioParameters params_;
+ int bytes_per_buffer_;
+ AlsaWrapper* wrapper_;
+ base::TimeDelta buffer_duration_; // Length of each recorded buffer.
+ AudioInputCallback* callback_; // Valid during a recording session.
+ base::TimeTicks next_read_time_; // Scheduled time for next read callback.
+ snd_pcm_t* device_handle_; // Handle to the ALSA PCM recording device.
+ snd_mixer_t* mixer_handle_; // Handle to the ALSA microphone mixer.
+ snd_mixer_elem_t* mixer_element_handle_; // Handle to the capture element.
+ base::WeakPtrFactory<AlsaPcmInputStream> weak_factory_;
+ scoped_ptr<uint8[]> audio_buffer_; // Buffer used for reading audio data.
+ bool read_callback_behind_schedule_;
+
+ DISALLOW_COPY_AND_ASSIGN(AlsaPcmInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_LINUX_ALSA_INPUT_H_
diff --git a/chromium/media/audio/linux/alsa_output.cc b/chromium/media/audio/linux/alsa_output.cc
new file mode 100644
index 00000000000..fa838354b5a
--- /dev/null
+++ b/chromium/media/audio/linux/alsa_output.cc
@@ -0,0 +1,765 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// THREAD SAFETY
+//
+// AlsaPcmOutputStream object is *not* thread-safe and should only be used
+// from the audio thread. We DCHECK on this assumption whenever we can.
+//
+// SEMANTICS OF Close()
+//
+// Close() is responsible for cleaning up any resources that were acquired after
+// a successful Open(). Close() will nullify any scheduled outstanding runnable
+// methods.
+//
+//
+// SEMANTICS OF ERROR STATES
+//
+// The object has two distinct error states: |state_| == kInError
+// and |stop_stream_|. The |stop_stream_| variable is used to indicate
+// that the playback_handle should no longer be used because of a
+// hardware/low-level event.
+//
+// When |state_| == kInError, all public API functions will fail with an error
+// (Start() will call the OnError() function on the callback immediately), or
+// no-op themselves with the exception of Close(). Even if an error state has
+// been entered, if Open() has previously returned successfully, Close() must be
+// called to cleanup the ALSA devices and release resources.
+//
+// When |stop_stream_| is set, no more commands will be made against the
+// ALSA device, and playback will effectively stop. From the client's point of
+// view, it will seem that the device has just clogged and stopped requesting
+// data.
+
+#include "media/audio/linux/alsa_output.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "base/stl_util.h"
+#include "base/time/time.h"
+#include "media/audio/linux/alsa_util.h"
+#include "media/audio/linux/alsa_wrapper.h"
+#include "media/audio/linux/audio_manager_linux.h"
+#include "media/base/channel_mixer.h"
+#include "media/base/data_buffer.h"
+#include "media/base/seekable_buffer.h"
+
+namespace media {
+
+// Set to 0 during debugging if you want error messages due to underrun
+// events or other recoverable errors.
+#if defined(NDEBUG)
+static const int kPcmRecoverIsSilent = 1;
+#else
+static const int kPcmRecoverIsSilent = 0;
+#endif
+
+// While the "default" device may support multi-channel audio, in Alsa, only
+// the device names surround40, surround41, surround50, etc, have a defined
+// channel mapping according to Lennart:
+//
+// http://0pointer.de/blog/projects/guide-to-sound-apis.html
+//
+// This function makes a best guess at the specific > 2 channel device name
+// based on the number of channels requested. NULL is returned if no device
+// can be found to match the channel numbers. In this case, using
+// kDefaultDevice is probably the best bet.
+//
+// A five channel source is assumed to be surround50 instead of surround41
+// (which is also 5 channels).
+//
+// TODO(ajwong): The source data should have enough info to tell us if we want
+// surround41 versus surround51, etc., instead of needing us to guess based on
+// channel number. Fix API to pass that data down.
+static const char* GuessSpecificDeviceName(uint32 channels) {
+ switch (channels) {
+ case 8:
+ return "surround71";
+
+ case 7:
+ return "surround70";
+
+ case 6:
+ return "surround51";
+
+ case 5:
+ return "surround50";
+
+ case 4:
+ return "surround40";
+
+ default:
+ return NULL;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os,
+ AlsaPcmOutputStream::InternalState state) {
+ switch (state) {
+ case AlsaPcmOutputStream::kInError:
+ os << "kInError";
+ break;
+ case AlsaPcmOutputStream::kCreated:
+ os << "kCreated";
+ break;
+ case AlsaPcmOutputStream::kIsOpened:
+ os << "kIsOpened";
+ break;
+ case AlsaPcmOutputStream::kIsPlaying:
+ os << "kIsPlaying";
+ break;
+ case AlsaPcmOutputStream::kIsStopped:
+ os << "kIsStopped";
+ break;
+ case AlsaPcmOutputStream::kIsClosed:
+ os << "kIsClosed";
+ break;
+ };
+ return os;
+}
+
+const char AlsaPcmOutputStream::kDefaultDevice[] = "default";
+const char AlsaPcmOutputStream::kAutoSelectDevice[] = "";
+const char AlsaPcmOutputStream::kPlugPrefix[] = "plug:";
+
+// We use 40ms as our minimum required latency. If it is needed, we may be able
+// to get it down to 20ms.
+const uint32 AlsaPcmOutputStream::kMinLatencyMicros = 40 * 1000;
+
+AlsaPcmOutputStream::AlsaPcmOutputStream(const std::string& device_name,
+ const AudioParameters& params,
+ AlsaWrapper* wrapper,
+ AudioManagerLinux* manager)
+ : requested_device_name_(device_name),
+ pcm_format_(alsa_util::BitsToFormat(params.bits_per_sample())),
+ channels_(params.channels()),
+ channel_layout_(params.channel_layout()),
+ sample_rate_(params.sample_rate()),
+ bytes_per_sample_(params.bits_per_sample() / 8),
+ bytes_per_frame_(params.GetBytesPerFrame()),
+ packet_size_(params.GetBytesPerBuffer()),
+ latency_(std::max(
+ base::TimeDelta::FromMicroseconds(kMinLatencyMicros),
+ FramesToTimeDelta(params.frames_per_buffer() * 2, sample_rate_))),
+ bytes_per_output_frame_(bytes_per_frame_),
+ alsa_buffer_frames_(0),
+ stop_stream_(false),
+ wrapper_(wrapper),
+ manager_(manager),
+ message_loop_(base::MessageLoop::current()),
+ playback_handle_(NULL),
+ frames_per_packet_(packet_size_ / bytes_per_frame_),
+ weak_factory_(this),
+ state_(kCreated),
+ volume_(1.0f),
+ source_callback_(NULL),
+ audio_bus_(AudioBus::Create(params)) {
+ DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK_EQ(audio_bus_->frames() * bytes_per_frame_, packet_size_);
+
+ // Sanity check input values.
+ if (!params.IsValid()) {
+ LOG(WARNING) << "Unsupported audio parameters.";
+ TransitionTo(kInError);
+ }
+
+ if (pcm_format_ == SND_PCM_FORMAT_UNKNOWN) {
+ LOG(WARNING) << "Unsupported bits per sample: " << params.bits_per_sample();
+ TransitionTo(kInError);
+ }
+}
+
+AlsaPcmOutputStream::~AlsaPcmOutputStream() {
+ InternalState current_state = state();
+ DCHECK(current_state == kCreated ||
+ current_state == kIsClosed ||
+ current_state == kInError);
+ DCHECK(!playback_handle_);
+}
+
+bool AlsaPcmOutputStream::Open() {
+ DCHECK(IsOnAudioThread());
+
+ if (state() == kInError)
+ return false;
+
+ if (!CanTransitionTo(kIsOpened)) {
+ NOTREACHED() << "Invalid state: " << state();
+ return false;
+ }
+
+ // We do not need to check if the transition was successful because
+ // CanTransitionTo() was checked above, and it is assumed that this
+ // object's public API is only called on one thread so the state cannot
+ // transition out from under us.
+ TransitionTo(kIsOpened);
+
+ // Try to open the device.
+ if (requested_device_name_ == kAutoSelectDevice) {
+ playback_handle_ = AutoSelectDevice(latency_.InMicroseconds());
+ if (playback_handle_)
+ DVLOG(1) << "Auto-selected device: " << device_name_;
+ } else {
+ device_name_ = requested_device_name_;
+ playback_handle_ = alsa_util::OpenPlaybackDevice(
+ wrapper_, device_name_.c_str(), channels_, sample_rate_,
+ pcm_format_, latency_.InMicroseconds());
+ }
+
+ // Finish initializing the stream if the device was opened successfully.
+ if (playback_handle_ == NULL) {
+ stop_stream_ = true;
+ TransitionTo(kInError);
+ return false;
+ } else {
+ bytes_per_output_frame_ = channel_mixer_ ?
+ mixed_audio_bus_->channels() * bytes_per_sample_ : bytes_per_frame_;
+ uint32 output_packet_size = frames_per_packet_ * bytes_per_output_frame_;
+ buffer_.reset(new media::SeekableBuffer(0, output_packet_size));
+
+ // Get alsa buffer size.
+ snd_pcm_uframes_t buffer_size;
+ snd_pcm_uframes_t period_size;
+ int error = wrapper_->PcmGetParams(playback_handle_, &buffer_size,
+ &period_size);
+ if (error < 0) {
+ LOG(ERROR) << "Failed to get playback buffer size from ALSA: "
+ << wrapper_->StrError(error);
+ // Buffer size is at least twice of packet size.
+ alsa_buffer_frames_ = frames_per_packet_ * 2;
+ } else {
+ alsa_buffer_frames_ = buffer_size;
+ }
+ }
+
+ return true;
+}
+
+void AlsaPcmOutputStream::Close() {
+ DCHECK(IsOnAudioThread());
+
+ if (state() != kIsClosed)
+ TransitionTo(kIsClosed);
+
+ // Shutdown the audio device.
+ if (playback_handle_) {
+ if (alsa_util::CloseDevice(wrapper_, playback_handle_) < 0) {
+ LOG(WARNING) << "Unable to close audio device. Leaking handle.";
+ }
+ playback_handle_ = NULL;
+
+ // Release the buffer.
+ buffer_.reset();
+
+ // Signal anything that might already be scheduled to stop.
+ stop_stream_ = true; // Not necessary in production, but unit tests
+                          // use the flag to verify that stream was closed.
+ }
+
+ weak_factory_.InvalidateWeakPtrs();
+
+ // Signal to the manager that we're closed and can be removed.
+ // Should be last call in the method as it deletes "this".
+ manager_->ReleaseOutputStream(this);
+}
+
+void AlsaPcmOutputStream::Start(AudioSourceCallback* callback) {
+ DCHECK(IsOnAudioThread());
+
+ CHECK(callback);
+
+ if (stop_stream_)
+ return;
+
+ // Only post the task if we can enter the playing state.
+ if (TransitionTo(kIsPlaying) != kIsPlaying)
+ return;
+
+ // Before starting, the buffer might have audio from previous user of this
+ // device.
+ buffer_->Clear();
+
+ // When starting again, drop all packets in the device and prepare it again
+ // in case we are restarting from a pause state and need to flush old data.
+ int error = wrapper_->PcmDrop(playback_handle_);
+ if (error < 0 && error != -EAGAIN) {
+ LOG(ERROR) << "Failure clearing playback device ("
+ << wrapper_->PcmName(playback_handle_) << "): "
+ << wrapper_->StrError(error);
+ stop_stream_ = true;
+ return;
+ }
+
+ error = wrapper_->PcmPrepare(playback_handle_);
+ if (error < 0 && error != -EAGAIN) {
+ LOG(ERROR) << "Failure preparing stream ("
+ << wrapper_->PcmName(playback_handle_) << "): "
+ << wrapper_->StrError(error);
+ stop_stream_ = true;
+ return;
+ }
+
+ // Ensure the first buffer is silence to avoid startup glitches.
+ int buffer_size = GetAvailableFrames() * bytes_per_output_frame_;
+ scoped_refptr<DataBuffer> silent_packet = new DataBuffer(buffer_size);
+ silent_packet->set_data_size(buffer_size);
+ memset(silent_packet->writable_data(), 0, silent_packet->data_size());
+ buffer_->Append(silent_packet);
+ WritePacket();
+
+ // Start the callback chain.
+ set_source_callback(callback);
+ WriteTask();
+}
+
+void AlsaPcmOutputStream::Stop() {
+ DCHECK(IsOnAudioThread());
+
+ // Reset the callback, so that it is not called anymore.
+ set_source_callback(NULL);
+ weak_factory_.InvalidateWeakPtrs();
+
+ TransitionTo(kIsStopped);
+}
+
+void AlsaPcmOutputStream::SetVolume(double volume) {
+ DCHECK(IsOnAudioThread());
+
+ volume_ = static_cast<float>(volume);
+}
+
+void AlsaPcmOutputStream::GetVolume(double* volume) {
+ DCHECK(IsOnAudioThread());
+
+ *volume = volume_;
+}
+
+void AlsaPcmOutputStream::BufferPacket(bool* source_exhausted) {
+ DCHECK(IsOnAudioThread());
+
+ // If stopped, simulate a 0-length packet.
+ if (stop_stream_) {
+ buffer_->Clear();
+ *source_exhausted = true;
+ return;
+ }
+
+ *source_exhausted = false;
+
+ // Request more data only when we run out of data in the buffer, because
+  // WritePacket() consumes only the current chunk of data.
+ if (!buffer_->forward_bytes()) {
+ // Before making a request to source for data we need to determine the
+ // delay (in bytes) for the requested data to be played.
+ const uint32 hardware_delay = GetCurrentDelay() * bytes_per_frame_;
+
+ scoped_refptr<media::DataBuffer> packet =
+ new media::DataBuffer(packet_size_);
+ int frames_filled = RunDataCallback(
+ audio_bus_.get(), AudioBuffersState(0, hardware_delay));
+
+ size_t packet_size = frames_filled * bytes_per_frame_;
+ DCHECK_LE(packet_size, packet_size_);
+
+ // TODO(dalecurtis): Channel downmixing, upmixing, should be done in mixer;
+ // volume adjust should use SSE optimized vector_fmul() prior to interleave.
+ AudioBus* output_bus = audio_bus_.get();
+ if (channel_mixer_) {
+ output_bus = mixed_audio_bus_.get();
+ channel_mixer_->Transform(audio_bus_.get(), output_bus);
+ // Adjust packet size for downmix.
+ packet_size = packet_size / bytes_per_frame_ * bytes_per_output_frame_;
+ }
+
+ // Note: If this ever changes to output raw float the data must be clipped
+ // and sanitized since it may come from an untrusted source such as NaCl.
+ output_bus->Scale(volume_);
+ output_bus->ToInterleaved(
+ frames_filled, bytes_per_sample_, packet->writable_data());
+
+ if (packet_size > 0) {
+ packet->set_data_size(packet_size);
+ // Add the packet to the buffer.
+ buffer_->Append(packet);
+ } else {
+ *source_exhausted = true;
+ }
+ }
+}
+
+void AlsaPcmOutputStream::WritePacket() {
+ DCHECK(IsOnAudioThread());
+
+ // If the device is in error, just eat the bytes.
+ if (stop_stream_) {
+ buffer_->Clear();
+ return;
+ }
+
+ if (state() != kIsPlaying)
+ return;
+
+ CHECK_EQ(buffer_->forward_bytes() % bytes_per_output_frame_, 0u);
+
+ const uint8* buffer_data;
+ int buffer_size;
+ if (buffer_->GetCurrentChunk(&buffer_data, &buffer_size)) {
+ buffer_size = buffer_size - (buffer_size % bytes_per_output_frame_);
+ snd_pcm_sframes_t frames = std::min(
+ static_cast<snd_pcm_sframes_t>(buffer_size / bytes_per_output_frame_),
+ GetAvailableFrames());
+
+ if (!frames)
+ return;
+
+ snd_pcm_sframes_t frames_written =
+ wrapper_->PcmWritei(playback_handle_, buffer_data, frames);
+ if (frames_written < 0) {
+ // Attempt once to immediately recover from EINTR,
+ // EPIPE (overrun/underrun), ESTRPIPE (stream suspended). WritePacket
+ // will eventually be called again, so eventual recovery will happen if
+      // multiple retries are required.
+ frames_written = wrapper_->PcmRecover(playback_handle_,
+ frames_written,
+ kPcmRecoverIsSilent);
+ if (frames_written < 0) {
+ if (frames_written != -EAGAIN) {
+ LOG(ERROR) << "Failed to write to pcm device: "
+ << wrapper_->StrError(frames_written);
+ RunErrorCallback(frames_written);
+ stop_stream_ = true;
+ }
+ }
+ } else {
+ DCHECK_EQ(frames_written, frames);
+
+ // Seek forward in the buffer after we've written some data to ALSA.
+ buffer_->Seek(frames_written * bytes_per_output_frame_);
+ }
+ } else {
+ // If nothing left to write and playback hasn't started yet, start it now.
+ // This ensures that shorter sounds will still play.
+ if (playback_handle_ &&
+ (wrapper_->PcmState(playback_handle_) == SND_PCM_STATE_PREPARED) &&
+ GetCurrentDelay() > 0) {
+ wrapper_->PcmStart(playback_handle_);
+ }
+ }
+}
+
+void AlsaPcmOutputStream::WriteTask() {
+ DCHECK(IsOnAudioThread());
+
+ if (stop_stream_)
+ return;
+
+ if (state() == kIsStopped)
+ return;
+
+ bool source_exhausted;
+ BufferPacket(&source_exhausted);
+ WritePacket();
+
+ ScheduleNextWrite(source_exhausted);
+}
+
+void AlsaPcmOutputStream::ScheduleNextWrite(bool source_exhausted) {
+ DCHECK(IsOnAudioThread());
+
+ if (stop_stream_ || state() != kIsPlaying)
+ return;
+
+ const uint32 kTargetFramesAvailable = alsa_buffer_frames_ / 2;
+ uint32 available_frames = GetAvailableFrames();
+
+ base::TimeDelta next_fill_time;
+ if (buffer_->forward_bytes() && available_frames) {
+ // If we've got data available and ALSA has room, deliver it immediately.
+ next_fill_time = base::TimeDelta();
+ } else if (buffer_->forward_bytes()) {
+ // If we've got data available and no room, poll until room is available.
+ // Polling in this manner allows us to ensure a more consistent callback
+ // schedule. In testing this yields a variance of +/- 5ms versus the non-
+ // polling strategy which is around +/- 30ms and bimodal.
+ next_fill_time = base::TimeDelta::FromMilliseconds(5);
+ } else if (available_frames < kTargetFramesAvailable) {
+ // Schedule the next write for the moment when the available buffer of the
+ // sound card hits |kTargetFramesAvailable|.
+ next_fill_time = FramesToTimeDelta(
+ kTargetFramesAvailable - available_frames, sample_rate_);
+ } else if (!source_exhausted) {
+ // The sound card has |kTargetFramesAvailable| or more frames available.
+ // Invoke the next write immediately to avoid underrun.
+ next_fill_time = base::TimeDelta();
+ } else {
+ // The sound card has frames available, but our source is exhausted, so
+ // avoid busy looping by delaying a bit.
+ next_fill_time = base::TimeDelta::FromMilliseconds(10);
+ }
+
+ message_loop_->PostDelayedTask(FROM_HERE, base::Bind(
+ &AlsaPcmOutputStream::WriteTask, weak_factory_.GetWeakPtr()),
+ next_fill_time);
+}
+
+// static
+base::TimeDelta AlsaPcmOutputStream::FramesToTimeDelta(int frames,
+ double sample_rate) {
+ return base::TimeDelta::FromMicroseconds(
+ frames * base::Time::kMicrosecondsPerSecond / sample_rate);
+}
+
+std::string AlsaPcmOutputStream::FindDeviceForChannels(uint32 channels) {
+ // Constants specified by the ALSA API for device hints.
+ static const int kGetAllDevices = -1;
+ static const char kPcmInterfaceName[] = "pcm";
+ static const char kIoHintName[] = "IOID";
+ static const char kNameHintName[] = "NAME";
+
+ const char* wanted_device = GuessSpecificDeviceName(channels);
+ if (!wanted_device)
+ return std::string();
+
+ std::string guessed_device;
+ void** hints = NULL;
+ int error = wrapper_->DeviceNameHint(kGetAllDevices,
+ kPcmInterfaceName,
+ &hints);
+ if (error == 0) {
+ // NOTE: Do not early return from inside this if statement. The
+ // hints above need to be freed.
+ for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
+      // Only examine devices that are output capable. Valid values are
+ // "Input", "Output", and NULL which means both input and output.
+ scoped_ptr_malloc<char> io(
+ wrapper_->DeviceNameGetHint(*hint_iter, kIoHintName));
+ if (io != NULL && strcmp(io.get(), "Input") == 0)
+ continue;
+
+ // Attempt to select the closest device for number of channels.
+ scoped_ptr_malloc<char> name(
+ wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
+ if (strncmp(wanted_device, name.get(), strlen(wanted_device)) == 0) {
+ guessed_device = name.get();
+ break;
+ }
+ }
+
+ // Destroy the hint now that we're done with it.
+ wrapper_->DeviceNameFreeHint(hints);
+ hints = NULL;
+ } else {
+ LOG(ERROR) << "Unable to get hints for devices: "
+ << wrapper_->StrError(error);
+ }
+
+ return guessed_device;
+}
+
+snd_pcm_sframes_t AlsaPcmOutputStream::GetCurrentDelay() {
+ snd_pcm_sframes_t delay = -1;
+ // Don't query ALSA's delay if we have underrun since it'll be jammed at some
+ // non-zero value and potentially even negative!
+ //
+ // Also, if we're in the prepared state, don't query because that seems to
+ // cause an I/O error when we do query the delay.
+ snd_pcm_state_t pcm_state = wrapper_->PcmState(playback_handle_);
+ if (pcm_state != SND_PCM_STATE_XRUN &&
+ pcm_state != SND_PCM_STATE_PREPARED) {
+ int error = wrapper_->PcmDelay(playback_handle_, &delay);
+ if (error < 0) {
+ // Assume a delay of zero and attempt to recover the device.
+ delay = -1;
+ error = wrapper_->PcmRecover(playback_handle_,
+ error,
+ kPcmRecoverIsSilent);
+ if (error < 0) {
+ LOG(ERROR) << "Failed querying delay: " << wrapper_->StrError(error);
+ }
+ }
+ }
+
+ // snd_pcm_delay() sometimes returns crazy values. In this case return delay
+ // of data we know currently is in ALSA's buffer. Note: When the underlying
+ // driver is PulseAudio based, certain configuration settings (e.g., tsched=1)
+ // will generate much larger delay values than |alsa_buffer_frames_|, so only
+ // clip if delay is truly crazy (> 10x expected).
+ if (static_cast<snd_pcm_uframes_t>(delay) > alsa_buffer_frames_ * 10) {
+ delay = alsa_buffer_frames_ - GetAvailableFrames();
+ }
+
+ if (delay < 0) {
+ delay = 0;
+ }
+
+ return delay;
+}
+
+snd_pcm_sframes_t AlsaPcmOutputStream::GetAvailableFrames() {
+ DCHECK(IsOnAudioThread());
+
+ if (stop_stream_)
+ return 0;
+
+ // Find the number of frames queued in the sound device.
+ snd_pcm_sframes_t available_frames =
+ wrapper_->PcmAvailUpdate(playback_handle_);
+ if (available_frames < 0) {
+ available_frames = wrapper_->PcmRecover(playback_handle_,
+ available_frames,
+ kPcmRecoverIsSilent);
+ }
+ if (available_frames < 0) {
+ LOG(ERROR) << "Failed querying available frames. Assuming 0: "
+ << wrapper_->StrError(available_frames);
+ return 0;
+ }
+ if (static_cast<uint32>(available_frames) > alsa_buffer_frames_ * 2) {
+ LOG(ERROR) << "ALSA returned " << available_frames << " of "
+ << alsa_buffer_frames_ << " frames available.";
+ return alsa_buffer_frames_;
+ }
+
+ return available_frames;
+}
+
+snd_pcm_t* AlsaPcmOutputStream::AutoSelectDevice(unsigned int latency) {
+ // For auto-selection:
+ // 1) Attempt to open a device that best matches the number of channels
+ // requested.
+ // 2) If that fails, attempt the "plug:" version of it in case ALSA can
+  //    remap or do some software conversion to make it work.
+ // 3) Fallback to kDefaultDevice.
+ // 4) If that fails too, try the "plug:" version of kDefaultDevice.
+ // 5) Give up.
+ snd_pcm_t* handle = NULL;
+ device_name_ = FindDeviceForChannels(channels_);
+
+ // Step 1.
+ if (!device_name_.empty()) {
+ if ((handle = alsa_util::OpenPlaybackDevice(wrapper_, device_name_.c_str(),
+ channels_, sample_rate_,
+ pcm_format_,
+ latency)) != NULL) {
+ return handle;
+ }
+
+ // Step 2.
+ device_name_ = kPlugPrefix + device_name_;
+ if ((handle = alsa_util::OpenPlaybackDevice(wrapper_, device_name_.c_str(),
+ channels_, sample_rate_,
+ pcm_format_,
+ latency)) != NULL) {
+ return handle;
+ }
+ }
+
+ // For the kDefaultDevice device, we can only reliably depend on 2-channel
+ // output to have the correct ordering according to Lennart. For the channel
+ // formats that we know how to downmix from (3 channel to 8 channel), setup
+ // downmixing.
+ uint32 default_channels = channels_;
+ if (default_channels > 2) {
+ channel_mixer_.reset(new ChannelMixer(
+ channel_layout_, CHANNEL_LAYOUT_STEREO));
+ default_channels = 2;
+ mixed_audio_bus_ = AudioBus::Create(
+ default_channels, audio_bus_->frames());
+ }
+
+ // Step 3.
+ device_name_ = kDefaultDevice;
+ if ((handle = alsa_util::OpenPlaybackDevice(
+ wrapper_, device_name_.c_str(), default_channels, sample_rate_,
+ pcm_format_, latency)) != NULL) {
+ return handle;
+ }
+
+ // Step 4.
+ device_name_ = kPlugPrefix + device_name_;
+ if ((handle = alsa_util::OpenPlaybackDevice(
+ wrapper_, device_name_.c_str(), default_channels, sample_rate_,
+ pcm_format_, latency)) != NULL) {
+ return handle;
+ }
+
+ // Unable to open any device.
+ device_name_.clear();
+ return NULL;
+}
+
+bool AlsaPcmOutputStream::CanTransitionTo(InternalState to) {
+ switch (state_) {
+ case kCreated:
+ return to == kIsOpened || to == kIsClosed || to == kInError;
+
+ case kIsOpened:
+ return to == kIsPlaying || to == kIsStopped ||
+ to == kIsClosed || to == kInError;
+
+ case kIsPlaying:
+ return to == kIsPlaying || to == kIsStopped ||
+ to == kIsClosed || to == kInError;
+
+ case kIsStopped:
+ return to == kIsPlaying || to == kIsStopped ||
+ to == kIsClosed || to == kInError;
+
+ case kInError:
+ return to == kIsClosed || to == kInError;
+
+ case kIsClosed:
+ default:
+ return false;
+ }
+}
+
+AlsaPcmOutputStream::InternalState
+AlsaPcmOutputStream::TransitionTo(InternalState to) {
+ DCHECK(IsOnAudioThread());
+
+ if (!CanTransitionTo(to)) {
+ NOTREACHED() << "Cannot transition from: " << state_ << " to: " << to;
+ state_ = kInError;
+ } else {
+ state_ = to;
+ }
+ return state_;
+}
+
+AlsaPcmOutputStream::InternalState AlsaPcmOutputStream::state() {
+ return state_;
+}
+
+bool AlsaPcmOutputStream::IsOnAudioThread() const {
+ return message_loop_ && message_loop_ == base::MessageLoop::current();
+}
+
+int AlsaPcmOutputStream::RunDataCallback(AudioBus* audio_bus,
+ AudioBuffersState buffers_state) {
+ TRACE_EVENT0("audio", "AlsaPcmOutputStream::RunDataCallback");
+
+ if (source_callback_)
+ return source_callback_->OnMoreData(audio_bus, buffers_state);
+
+ return 0;
+}
+
+void AlsaPcmOutputStream::RunErrorCallback(int code) {
+ if (source_callback_)
+ source_callback_->OnError(this);
+}
+
+// Changes the AudioSourceCallback to proxy calls to. Pass in NULL to
+// release ownership of the currently registered callback.
+void AlsaPcmOutputStream::set_source_callback(AudioSourceCallback* callback) {
+ DCHECK(IsOnAudioThread());
+ source_callback_ = callback;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/linux/alsa_output.h b/chromium/media/audio/linux/alsa_output.h
new file mode 100644
index 00000000000..841615d9d3f
--- /dev/null
+++ b/chromium/media/audio/linux/alsa_output.h
@@ -0,0 +1,228 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Creates an output stream based on the ALSA PCM interface.
+//
+// On device write failure, the stream will move itself to an invalid state.
+// No more data will be pulled from the data source, or written to the device.
+// All calls to public API functions will either no-op themselves, or return an
+// error if possible. Specifically, If the stream is in an error state, Open()
+// will return false, and Start() will call OnError() immediately on the
+// provided callback.
+//
+// If the stream is successfully opened, Close() must be called. After Close
+// has been called, the object should be regarded as deleted and not touched.
+//
+// AlsaPcmOutputStream is a single threaded class that should only be used from
+// the audio thread. When modifying the code in this class, please read the
+// threading assumptions at the top of the implementation.
+
+#ifndef MEDIA_AUDIO_LINUX_ALSA_OUTPUT_H_
+#define MEDIA_AUDIO_LINUX_ALSA_OUTPUT_H_
+
+#include <alsa/asoundlib.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace base {
+class MessageLoop;
+}
+
+namespace media {
+
+class AlsaWrapper;
+class AudioManagerLinux;
+class ChannelMixer;
+class SeekableBuffer;
+
+class MEDIA_EXPORT AlsaPcmOutputStream : public AudioOutputStream {
+ public:
+ // String for the generic "default" ALSA device that has the highest
+ // compatibility and chance of working.
+ static const char kDefaultDevice[];
+
+ // Pass this to the AlsaPcmOutputStream if you want to attempt auto-selection
+ // of the audio device.
+ static const char kAutoSelectDevice[];
+
+ // Prefix for device names to enable ALSA library resampling.
+ static const char kPlugPrefix[];
+
+ // The minimum latency that is accepted by the device.
+ static const uint32 kMinLatencyMicros;
+
+ // Create a PCM Output stream for the ALSA device identified by
+ // |device_name|. The AlsaPcmOutputStream uses |wrapper| to communicate with
+ // the alsa libraries, allowing for dependency injection during testing. All
+ // requesting of data, and writing to the alsa device will be done on
+ // |message_loop|.
+ //
+ // If unsure of what to use for |device_name|, use |kAutoSelectDevice|.
+ AlsaPcmOutputStream(const std::string& device_name,
+ const AudioParameters& params,
+ AlsaWrapper* wrapper,
+ AudioManagerLinux* manager);
+
+ virtual ~AlsaPcmOutputStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ private:
+ friend class AlsaPcmOutputStreamTest;
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest,
+ AutoSelectDevice_DeviceSelect);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest,
+ AutoSelectDevice_FallbackDevices);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, AutoSelectDevice_HintFail);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket_Negative);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket_StopStream);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket_Underrun);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket_FullBuffer);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, ConstructedState);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, LatencyFloor);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, OpenClose);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, PcmOpenFailed);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, PcmSetParamsFailed);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, ScheduleNextWrite);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest,
+ ScheduleNextWrite_StopStream);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, StartStop);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, WritePacket_FinishedPacket);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, WritePacket_NormalPacket);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, WritePacket_StopStream);
+ FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, WritePacket_WriteFails);
+
+ // Flags indicating the state of the stream.
+ enum InternalState {
+ kInError = 0,
+ kCreated,
+ kIsOpened,
+ kIsPlaying,
+ kIsStopped,
+ kIsClosed
+ };
+ friend std::ostream& operator<<(std::ostream& os, InternalState);
+
+ // Functions to get another packet from the data source and write it into the
+ // ALSA device.
+ void BufferPacket(bool* source_exhausted);
+ void WritePacket();
+ void WriteTask();
+ void ScheduleNextWrite(bool source_exhausted);
+
+ // Utility functions for talking with the ALSA API.
+ static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate);
+ std::string FindDeviceForChannels(uint32 channels);
+ snd_pcm_sframes_t GetAvailableFrames();
+ snd_pcm_sframes_t GetCurrentDelay();
+
+ // Attempts to find the best matching linux audio device for the given number
+ // of channels. This function will set |device_name_| and |channel_mixer_|.
+ snd_pcm_t* AutoSelectDevice(uint32 latency);
+
+ // Functions to safeguard state transitions. All changes to the object state
+ // should go through these functions.
+ bool CanTransitionTo(InternalState to);
+ InternalState TransitionTo(InternalState to);
+ InternalState state();
+
+ // Returns true when we're on the audio thread or if the audio thread's
+ // message loop is NULL (which will happen during shutdown).
+ bool IsOnAudioThread() const;
+
+ // API for Proxying calls to the AudioSourceCallback provided during
+ // Start().
+ //
+ // TODO(ajwong): This is necessary because the ownership semantics for the
+ // |source_callback_| object are incorrect in AudioRenderHost. The callback
+  // is passed into the output stream, but ownership is not transferred which
+ // requires a synchronization on access of the |source_callback_| to avoid
+ // using a deleted callback.
+ int RunDataCallback(AudioBus* audio_bus, AudioBuffersState buffers_state);
+ void RunErrorCallback(int code);
+
+ // Changes the AudioSourceCallback to proxy calls to. Pass in NULL to
+ // release ownership of the currently registered callback.
+ void set_source_callback(AudioSourceCallback* callback);
+
+ // Configuration constants from the constructor. Referenceable by all threads
+ // since they are constants.
+ const std::string requested_device_name_;
+ const snd_pcm_format_t pcm_format_;
+ const uint32 channels_;
+ const ChannelLayout channel_layout_;
+ const uint32 sample_rate_;
+ const uint32 bytes_per_sample_;
+ const uint32 bytes_per_frame_;
+
+ // Device configuration data. Populated after OpenTask() completes.
+ std::string device_name_;
+ uint32 packet_size_;
+ base::TimeDelta latency_;
+ uint32 bytes_per_output_frame_;
+ uint32 alsa_buffer_frames_;
+
+ // Flag indicating the code should stop reading from the data source or
+ // writing to the ALSA device. This is set because the device has entered
+ // an unrecoverable error state, or the ClosedTask() has executed.
+ bool stop_stream_;
+
+ // Wrapper class to invoke all the ALSA functions.
+ AlsaWrapper* wrapper_;
+
+ // Audio manager that created us. Used to report that we've been closed.
+ AudioManagerLinux* manager_;
+
+ // Message loop to use for polling. The object is owned by the AudioManager.
+ // We hold a reference to the audio thread message loop since
+ // AudioManagerBase::ShutDown() can invalidate the message loop pointer
+ // before the stream gets deleted.
+ base::MessageLoop* message_loop_;
+
+ // Handle to the actual PCM playback device.
+ snd_pcm_t* playback_handle_;
+
+ scoped_ptr<media::SeekableBuffer> buffer_;
+ uint32 frames_per_packet_;
+
+ // Allows us to run tasks on the AlsaPcmOutputStream instance which are
+ // bound by its lifetime.
+ base::WeakPtrFactory<AlsaPcmOutputStream> weak_factory_;
+
+ InternalState state_;
+ float volume_; // Volume level from 0.0 to 1.0.
+
+ AudioSourceCallback* source_callback_;
+
+ // Container for retrieving data from AudioSourceCallback::OnMoreData().
+ scoped_ptr<AudioBus> audio_bus_;
+
+ // Channel mixer and temporary bus for the final mixed channel data.
+ scoped_ptr<ChannelMixer> channel_mixer_;
+ scoped_ptr<AudioBus> mixed_audio_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(AlsaPcmOutputStream);
+};
+
+MEDIA_EXPORT std::ostream& operator<<(std::ostream& os,
+ AlsaPcmOutputStream::InternalState);
+
+}; // namespace media
+
+#endif // MEDIA_AUDIO_LINUX_ALSA_OUTPUT_H_
diff --git a/chromium/media/audio/linux/alsa_output_unittest.cc b/chromium/media/audio/linux/alsa_output_unittest.cc
new file mode 100644
index 00000000000..32456360f47
--- /dev/null
+++ b/chromium/media/audio/linux/alsa_output_unittest.cc
@@ -0,0 +1,868 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+#include "base/strings/stringprintf.h"
+#include "media/audio/linux/alsa_output.h"
+#include "media/audio/linux/alsa_wrapper.h"
+#include "media/audio/linux/audio_manager_linux.h"
+#include "media/base/data_buffer.h"
+#include "media/base/seekable_buffer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using testing::_;
+using testing::AllOf;
+using testing::AtLeast;
+using testing::DoAll;
+using testing::Field;
+using testing::InSequence;
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
+using testing::MockFunction;
+using testing::Return;
+using testing::SetArgumentPointee;
+using testing::StrictMock;
+using testing::StrEq;
+using testing::Unused;
+
+namespace media {
+
+class MockAlsaWrapper : public AlsaWrapper {
+ public:
+ MOCK_METHOD3(DeviceNameHint, int(int card,
+ const char* iface,
+ void*** hints));
+ MOCK_METHOD2(DeviceNameGetHint, char*(const void* hint, const char* id));
+ MOCK_METHOD1(DeviceNameFreeHint, int(void** hints));
+
+ MOCK_METHOD4(PcmOpen, int(snd_pcm_t** handle, const char* name,
+ snd_pcm_stream_t stream, int mode));
+ MOCK_METHOD1(PcmClose, int(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmPrepare, int(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmDrop, int(snd_pcm_t* handle));
+ MOCK_METHOD2(PcmDelay, int(snd_pcm_t* handle, snd_pcm_sframes_t* delay));
+ MOCK_METHOD3(PcmWritei, snd_pcm_sframes_t(snd_pcm_t* handle,
+ const void* buffer,
+ snd_pcm_uframes_t size));
+ MOCK_METHOD3(PcmReadi, snd_pcm_sframes_t(snd_pcm_t* handle,
+ void* buffer,
+ snd_pcm_uframes_t size));
+ MOCK_METHOD3(PcmRecover, int(snd_pcm_t* handle, int err, int silent));
+ MOCK_METHOD7(PcmSetParams, int(snd_pcm_t* handle, snd_pcm_format_t format,
+ snd_pcm_access_t access, unsigned int channels,
+ unsigned int rate, int soft_resample,
+ unsigned int latency));
+ MOCK_METHOD3(PcmGetParams, int(snd_pcm_t* handle,
+ snd_pcm_uframes_t* buffer_size,
+ snd_pcm_uframes_t* period_size));
+ MOCK_METHOD1(PcmName, const char*(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmAvailUpdate, snd_pcm_sframes_t(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmState, snd_pcm_state_t(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmStart, int(snd_pcm_t* handle));
+
+ MOCK_METHOD1(StrError, const char*(int errnum));
+};
+
+class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+ MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+};
+
+class MockAudioManagerLinux : public AudioManagerLinux {
+ public:
+ MOCK_METHOD0(Init, void());
+ MOCK_METHOD0(HasAudioOutputDevices, bool());
+ MOCK_METHOD0(HasAudioInputDevices, bool());
+ MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
+ const AudioParameters& params));
+ MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
+ const AudioParameters& params, const std::string& input_device_id));
+ MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
+ const AudioParameters& params, const std::string& device_id));
+
+ // We need to override this function in order to skip the checking the number
+ // of active output streams. It is because the number of active streams
+ // is managed inside MakeAudioOutputStream, and we don't use
+ // MakeAudioOutputStream to create the stream in the tests.
+ virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE {
+ DCHECK(stream);
+ delete stream;
+ }
+
+ // We don't mock this method since all tests will do the same thing
+ // and use the current message loop.
+ virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
+ return base::MessageLoop::current()->message_loop_proxy();
+ }
+};
+
+class AlsaPcmOutputStreamTest : public testing::Test {
+ protected:
+ AlsaPcmOutputStreamTest() {
+ mock_manager_.reset(new StrictMock<MockAudioManagerLinux>());
+ }
+
+ virtual ~AlsaPcmOutputStreamTest() {
+ }
+
+ AlsaPcmOutputStream* CreateStream(ChannelLayout layout) {
+ return CreateStream(layout, kTestFramesPerPacket);
+ }
+
+ AlsaPcmOutputStream* CreateStream(ChannelLayout layout,
+ int32 samples_per_packet) {
+ AudioParameters params(kTestFormat, layout, kTestSampleRate,
+ kTestBitsPerSample, samples_per_packet);
+ return new AlsaPcmOutputStream(kTestDeviceName,
+ params,
+ &mock_alsa_wrapper_,
+ mock_manager_.get());
+ }
+
+ // Helper function to malloc the string returned by DeviceNameHint for NAME.
+ static char* EchoHint(const void* name, Unused) {
+ return strdup(static_cast<const char*>(name));
+ }
+
+ // Helper function to malloc the string returned by DeviceNameHint for IOID.
+ static char* OutputHint(Unused, Unused) {
+ return strdup("Output");
+ }
+
+ // Helper function to initialize |test_stream->buffer_|. Must be called
+ // in all tests that use buffer_ without opening the stream.
+ void InitBuffer(AlsaPcmOutputStream* test_stream) {
+ DCHECK(test_stream);
+ packet_ = new media::DataBuffer(kTestPacketSize);
+ packet_->set_data_size(kTestPacketSize);
+ test_stream->buffer_.reset(new media::SeekableBuffer(0, kTestPacketSize));
+ test_stream->buffer_->Append(packet_.get());
+ }
+
+ static const ChannelLayout kTestChannelLayout;
+ static const int kTestSampleRate;
+ static const int kTestBitsPerSample;
+ static const int kTestBytesPerFrame;
+ static const AudioParameters::Format kTestFormat;
+ static const char kTestDeviceName[];
+ static const char kDummyMessage[];
+ static const uint32 kTestFramesPerPacket;
+ static const int kTestPacketSize;
+ static const int kTestFailedErrno;
+ static snd_pcm_t* const kFakeHandle;
+
+ // Used to simulate DeviceNameHint.
+ static char kSurround40[];
+ static char kSurround41[];
+ static char kSurround50[];
+ static char kSurround51[];
+ static char kSurround70[];
+ static char kSurround71[];
+ static void* kFakeHints[];
+
+ StrictMock<MockAlsaWrapper> mock_alsa_wrapper_;
+ scoped_ptr<StrictMock<MockAudioManagerLinux> > mock_manager_;
+ base::MessageLoop message_loop_;
+ scoped_refptr<media::DataBuffer> packet_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AlsaPcmOutputStreamTest);
+};
+
+const ChannelLayout AlsaPcmOutputStreamTest::kTestChannelLayout =
+ CHANNEL_LAYOUT_STEREO;
+const int AlsaPcmOutputStreamTest::kTestSampleRate =
+ AudioParameters::kAudioCDSampleRate;
+const int AlsaPcmOutputStreamTest::kTestBitsPerSample = 8;
+const int AlsaPcmOutputStreamTest::kTestBytesPerFrame =
+ AlsaPcmOutputStreamTest::kTestBitsPerSample / 8 *
+ ChannelLayoutToChannelCount(AlsaPcmOutputStreamTest::kTestChannelLayout);
+const AudioParameters::Format AlsaPcmOutputStreamTest::kTestFormat =
+ AudioParameters::AUDIO_PCM_LINEAR;
+const char AlsaPcmOutputStreamTest::kTestDeviceName[] = "TestDevice";
+const char AlsaPcmOutputStreamTest::kDummyMessage[] = "dummy";
+const uint32 AlsaPcmOutputStreamTest::kTestFramesPerPacket = 1000;
+const int AlsaPcmOutputStreamTest::kTestPacketSize =
+ AlsaPcmOutputStreamTest::kTestFramesPerPacket *
+ AlsaPcmOutputStreamTest::kTestBytesPerFrame;
+const int AlsaPcmOutputStreamTest::kTestFailedErrno = -EACCES;
+snd_pcm_t* const AlsaPcmOutputStreamTest::kFakeHandle =
+ reinterpret_cast<snd_pcm_t*>(1);
+
+char AlsaPcmOutputStreamTest::kSurround40[] = "surround40:CARD=foo,DEV=0";
+char AlsaPcmOutputStreamTest::kSurround41[] = "surround41:CARD=foo,DEV=0";
+char AlsaPcmOutputStreamTest::kSurround50[] = "surround50:CARD=foo,DEV=0";
+char AlsaPcmOutputStreamTest::kSurround51[] = "surround51:CARD=foo,DEV=0";
+char AlsaPcmOutputStreamTest::kSurround70[] = "surround70:CARD=foo,DEV=0";
+char AlsaPcmOutputStreamTest::kSurround71[] = "surround71:CARD=foo,DEV=0";
+void* AlsaPcmOutputStreamTest::kFakeHints[] = {
+ kSurround40, kSurround41, kSurround50, kSurround51,
+ kSurround70, kSurround71, NULL };
+
+// Custom action to clear a memory buffer.
+ACTION(ClearBuffer) {
+ arg0->Zero();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, ConstructedState) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ EXPECT_EQ(AlsaPcmOutputStream::kCreated, test_stream->state());
+ test_stream->Close();
+
+ // Should support mono.
+ test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
+ EXPECT_EQ(AlsaPcmOutputStream::kCreated, test_stream->state());
+ test_stream->Close();
+
+ // Should support multi-channel.
+ test_stream = CreateStream(CHANNEL_LAYOUT_SURROUND);
+ EXPECT_EQ(AlsaPcmOutputStream::kCreated, test_stream->state());
+ test_stream->Close();
+
+ // Bad bits per sample.
+ AudioParameters bad_bps_params(kTestFormat, kTestChannelLayout,
+ kTestSampleRate, kTestBitsPerSample - 1,
+ kTestFramesPerPacket);
+ test_stream = new AlsaPcmOutputStream(kTestDeviceName,
+ bad_bps_params,
+ &mock_alsa_wrapper_,
+ mock_manager_.get());
+ EXPECT_EQ(AlsaPcmOutputStream::kInError, test_stream->state());
+ test_stream->Close();
+
+ // Bad format.
+ AudioParameters bad_format_params(
+ AudioParameters::AUDIO_LAST_FORMAT, kTestChannelLayout, kTestSampleRate,
+ kTestBitsPerSample, kTestFramesPerPacket);
+ test_stream = new AlsaPcmOutputStream(kTestDeviceName,
+ bad_format_params,
+ &mock_alsa_wrapper_,
+ mock_manager_.get());
+ EXPECT_EQ(AlsaPcmOutputStream::kInError, test_stream->state());
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, LatencyFloor) {
+ const double kMicrosPerFrame =
+ static_cast<double>(1000000) / kTestSampleRate;
+ const double kPacketFramesInMinLatency =
+ AlsaPcmOutputStream::kMinLatencyMicros / kMicrosPerFrame / 2.0;
+
+ // Test that packets which would cause a latency under less than
+ // AlsaPcmOutputStream::kMinLatencyMicros will get clipped to
+ // AlsaPcmOutputStream::kMinLatencyMicros,
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
+ Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_,
+ PcmSetParams(_, _, _, _, _, _,
+ AlsaPcmOutputStream::kMinLatencyMicros))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
+ SetArgumentPointee<2>(kTestFramesPerPacket / 2),
+ Return(0)));
+
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout,
+ kPacketFramesInMinLatency);
+ ASSERT_TRUE(test_stream->Open());
+
+ // Now close it and test that everything was released.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle)).WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
+ .WillOnce(Return(kTestDeviceName));
+ test_stream->Close();
+
+ Mock::VerifyAndClear(&mock_alsa_wrapper_);
+ Mock::VerifyAndClear(mock_manager_.get());
+
+ // Test that having more packets ends up with a latency based on packet size.
+ const int kOverMinLatencyPacketSize = kPacketFramesInMinLatency + 1;
+ int64 expected_micros = AlsaPcmOutputStream::FramesToTimeDelta(
+ kOverMinLatencyPacketSize * 2, kTestSampleRate).InMicroseconds();
+
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle), Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_,
+ PcmSetParams(_, _, _, _, _, _, expected_micros))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
+ SetArgumentPointee<2>(kTestFramesPerPacket / 2),
+ Return(0)));
+
+ test_stream = CreateStream(kTestChannelLayout,
+ kOverMinLatencyPacketSize);
+ ASSERT_TRUE(test_stream->Open());
+
+ // Now close it and test that everything was released.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
+ .WillOnce(Return(kTestDeviceName));
+ test_stream->Close();
+
+ Mock::VerifyAndClear(&mock_alsa_wrapper_);
+ Mock::VerifyAndClear(mock_manager_.get());
+}
+
+TEST_F(AlsaPcmOutputStreamTest, OpenClose) {
+ int64 expected_micros = AlsaPcmOutputStream::FramesToTimeDelta(
+ 2 * kTestFramesPerPacket, kTestSampleRate).InMicroseconds();
+
+ // Open() call opens the playback device, sets the parameters, posts a task
+ // with the resulting configuration data, and transitions the object state to
+ // kIsOpened.
+ EXPECT_CALL(mock_alsa_wrapper_,
+ PcmOpen(_, StrEq(kTestDeviceName),
+ SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK))
+ .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
+ Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_,
+ PcmSetParams(kFakeHandle,
+ SND_PCM_FORMAT_U8,
+ SND_PCM_ACCESS_RW_INTERLEAVED,
+ ChannelLayoutToChannelCount(kTestChannelLayout),
+ kTestSampleRate,
+ 1,
+ expected_micros))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(kFakeHandle, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
+ SetArgumentPointee<2>(kTestFramesPerPacket / 2),
+ Return(0)));
+
+ // Open the stream.
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ ASSERT_TRUE(test_stream->Open());
+
+ EXPECT_EQ(AlsaPcmOutputStream::kIsOpened, test_stream->state());
+ EXPECT_EQ(kFakeHandle, test_stream->playback_handle_);
+ EXPECT_EQ(kTestFramesPerPacket, test_stream->frames_per_packet_);
+ EXPECT_TRUE(test_stream->buffer_.get());
+ EXPECT_FALSE(test_stream->stop_stream_);
+
+ // Now close it and test that everything was released.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
+ .WillOnce(Return(kTestDeviceName));
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, PcmOpenFailed) {
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
+ .WillOnce(Return(kTestFailedErrno));
+ EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
+ .WillOnce(Return(kDummyMessage));
+
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ ASSERT_FALSE(test_stream->Open());
+ ASSERT_EQ(AlsaPcmOutputStream::kInError, test_stream->state());
+
+ // Ensure internal state is set for a no-op stream if PcmOpen() failes.
+ EXPECT_TRUE(test_stream->stop_stream_);
+ EXPECT_TRUE(test_stream->playback_handle_ == NULL);
+ EXPECT_FALSE(test_stream->buffer_.get());
+
+ // Close the stream since we opened it to make destruction happy.
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, PcmSetParamsFailed) {
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
+ Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmSetParams(_, _, _, _, _, _, _))
+ .WillOnce(Return(kTestFailedErrno));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
+ .WillOnce(Return(kTestDeviceName));
+ EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
+ .WillOnce(Return(kDummyMessage));
+
+ // If open fails, the stream stays in kCreated because it has effectively had
+ // no changes.
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ ASSERT_FALSE(test_stream->Open());
+ EXPECT_EQ(AlsaPcmOutputStream::kInError, test_stream->state());
+
+ // Ensure internal state is set for a no-op stream if PcmSetParams() failes.
+ EXPECT_TRUE(test_stream->stop_stream_);
+ EXPECT_TRUE(test_stream->playback_handle_ == NULL);
+ EXPECT_FALSE(test_stream->buffer_.get());
+
+ // Close the stream since we opened it to make destruction happy.
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, StartStop) {
+ // Open() call opens the playback device, sets the parameters, posts a task
+ // with the resulting configuration data, and transitions the object state to
+ // kIsOpened.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
+ Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmSetParams(_, _, _, _, _, _, _))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
+ SetArgumentPointee<2>(kTestFramesPerPacket / 2),
+ Return(0)));
+
+ // Open the stream.
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ ASSERT_TRUE(test_stream->Open());
+
+ // Expect Device setup.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmDrop(kFakeHandle))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmPrepare(kFakeHandle))
+ .WillOnce(Return(0));
+
+ // Expect the pre-roll.
+ MockAudioSourceCallback mock_callback;
+ EXPECT_CALL(mock_alsa_wrapper_, PcmState(kFakeHandle))
+ .WillRepeatedly(Return(SND_PCM_STATE_RUNNING));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmDelay(kFakeHandle, _))
+ .WillRepeatedly(DoAll(SetArgumentPointee<1>(0), Return(0)));
+ EXPECT_CALL(mock_callback, OnMoreData(_, _))
+ .WillRepeatedly(DoAll(ClearBuffer(), Return(kTestFramesPerPacket)));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmWritei(kFakeHandle, _, _))
+ .WillRepeatedly(Return(kTestFramesPerPacket));
+
+ // Expect scheduling.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
+ .Times(AtLeast(2))
+ .WillRepeatedly(Return(kTestFramesPerPacket));
+
+ test_stream->Start(&mock_callback);
+ // Start() will issue a WriteTask() directly and then schedule the next one,
+ // call Stop() immediately after to ensure we don't run the message loop
+ // forever.
+ test_stream->Stop();
+ message_loop_.RunUntilIdle();
+
+ EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
+ .WillOnce(Return(kTestDeviceName));
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, WritePacket_FinishedPacket) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ InitBuffer(test_stream);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsOpened);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
+
+ // Nothing should happen. Don't set any expectations and Our strict mocks
+ // should verify most of this.
+
+ // Test empty buffer.
+ test_stream->buffer_->Clear();
+ test_stream->WritePacket();
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, WritePacket_NormalPacket) {
+ // We need to open the stream before writing data to ALSA.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
+ Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmSetParams(_, _, _, _, _, _, _))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
+ SetArgumentPointee<2>(kTestFramesPerPacket / 2),
+ Return(0)));
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ ASSERT_TRUE(test_stream->Open());
+ InitBuffer(test_stream);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
+
+ // Write a little less than half the data.
+ int written = packet_->data_size() / kTestBytesPerFrame / 2 - 1;
+ EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
+ .WillOnce(Return(written));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmWritei(kFakeHandle, packet_->data(), _))
+ .WillOnce(Return(written));
+
+ test_stream->WritePacket();
+
+ ASSERT_EQ(test_stream->buffer_->forward_bytes(),
+ packet_->data_size() - written * kTestBytesPerFrame);
+
+ // Write the rest.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
+ .WillOnce(Return(kTestFramesPerPacket - written));
+ EXPECT_CALL(mock_alsa_wrapper_,
+ PcmWritei(kFakeHandle,
+ packet_->data() + written * kTestBytesPerFrame,
+ _))
+ .WillOnce(Return(packet_->data_size() / kTestBytesPerFrame - written));
+ test_stream->WritePacket();
+ EXPECT_EQ(0, test_stream->buffer_->forward_bytes());
+
+ // Now close it and test that everything was released.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
+ .WillOnce(Return(kTestDeviceName));
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, WritePacket_WriteFails) {
+ // We need to open the stream before writing data to ALSA.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
+ Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmSetParams(_, _, _, _, _, _, _))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
+ SetArgumentPointee<2>(kTestFramesPerPacket / 2),
+ Return(0)));
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ ASSERT_TRUE(test_stream->Open());
+ InitBuffer(test_stream);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
+
+ // Fail due to a recoverable error and see that PcmRecover code path
+ // continues normally.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
+ .WillOnce(Return(kTestFramesPerPacket));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmWritei(kFakeHandle, _, _))
+ .WillOnce(Return(-EINTR));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmRecover(kFakeHandle, _, _))
+ .WillOnce(Return(0));
+
+ test_stream->WritePacket();
+
+ ASSERT_EQ(test_stream->buffer_->forward_bytes(), packet_->data_size());
+
+ // Fail the next write, and see that stop_stream_ is set.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
+ .WillOnce(Return(kTestFramesPerPacket));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmWritei(kFakeHandle, _, _))
+ .WillOnce(Return(kTestFailedErrno));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmRecover(kFakeHandle, _, _))
+ .WillOnce(Return(kTestFailedErrno));
+ EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
+ .WillOnce(Return(kDummyMessage));
+ test_stream->WritePacket();
+ EXPECT_EQ(test_stream->buffer_->forward_bytes(), packet_->data_size());
+ EXPECT_TRUE(test_stream->stop_stream_);
+
+ // Now close it and test that everything was released.
+ EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
+ .WillOnce(Return(kTestDeviceName));
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, WritePacket_StopStream) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ InitBuffer(test_stream);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsOpened);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
+
+ // No expectations set on the strict mock because nothing should be called.
+ test_stream->stop_stream_ = true;
+ test_stream->WritePacket();
+ EXPECT_EQ(0, test_stream->buffer_->forward_bytes());
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, BufferPacket) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ InitBuffer(test_stream);
+ test_stream->buffer_->Clear();
+
+ MockAudioSourceCallback mock_callback;
+ EXPECT_CALL(mock_alsa_wrapper_, PcmState(_))
+ .WillOnce(Return(SND_PCM_STATE_RUNNING));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmDelay(_, _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(1), Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(_))
+ .WillRepeatedly(Return(0)); // Buffer is full.
+
+ // Return a partially filled packet.
+ EXPECT_CALL(mock_callback, OnMoreData(_, _))
+ .WillOnce(DoAll(ClearBuffer(), Return(kTestFramesPerPacket / 2)));
+
+ bool source_exhausted;
+ test_stream->set_source_callback(&mock_callback);
+ test_stream->packet_size_ = kTestPacketSize;
+ test_stream->BufferPacket(&source_exhausted);
+
+ EXPECT_EQ(kTestPacketSize / 2, test_stream->buffer_->forward_bytes());
+ EXPECT_FALSE(source_exhausted);
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, BufferPacket_Negative) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ InitBuffer(test_stream);
+ test_stream->buffer_->Clear();
+
+ // Simulate where the underrun has occurred right after checking the delay.
+ MockAudioSourceCallback mock_callback;
+ EXPECT_CALL(mock_alsa_wrapper_, PcmState(_))
+ .WillOnce(Return(SND_PCM_STATE_RUNNING));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmDelay(_, _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(-1), Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(_))
+ .WillRepeatedly(Return(0)); // Buffer is full.
+ EXPECT_CALL(mock_callback, OnMoreData(_, _))
+ .WillOnce(DoAll(ClearBuffer(), Return(kTestFramesPerPacket / 2)));
+
+ bool source_exhausted;
+ test_stream->set_source_callback(&mock_callback);
+ test_stream->packet_size_ = kTestPacketSize;
+ test_stream->BufferPacket(&source_exhausted);
+
+ EXPECT_EQ(kTestPacketSize / 2, test_stream->buffer_->forward_bytes());
+ EXPECT_FALSE(source_exhausted);
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, BufferPacket_Underrun) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ InitBuffer(test_stream);
+ test_stream->buffer_->Clear();
+
+ // If ALSA has underrun then we should assume a delay of zero.
+ MockAudioSourceCallback mock_callback;
+ EXPECT_CALL(mock_alsa_wrapper_, PcmState(_))
+ .WillOnce(Return(SND_PCM_STATE_XRUN));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(_))
+ .WillRepeatedly(Return(0)); // Buffer is full.
+ EXPECT_CALL(mock_callback,
+ OnMoreData(_, AllOf(
+ Field(&AudioBuffersState::pending_bytes, 0),
+ Field(&AudioBuffersState::hardware_delay_bytes, 0))))
+ .WillOnce(DoAll(ClearBuffer(), Return(kTestFramesPerPacket / 2)));
+
+ bool source_exhausted;
+ test_stream->set_source_callback(&mock_callback);
+ test_stream->packet_size_ = kTestPacketSize;
+ test_stream->BufferPacket(&source_exhausted);
+
+ EXPECT_EQ(kTestPacketSize / 2, test_stream->buffer_->forward_bytes());
+ EXPECT_FALSE(source_exhausted);
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, BufferPacket_FullBuffer) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ InitBuffer(test_stream);
+ // No expectations set on the strict mock because nothing should be called.
+ bool source_exhausted;
+ test_stream->packet_size_ = kTestPacketSize;
+ test_stream->BufferPacket(&source_exhausted);
+ EXPECT_EQ(kTestPacketSize, test_stream->buffer_->forward_bytes());
+ EXPECT_FALSE(source_exhausted);
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_DeviceSelect) {
+ // Try channels from 1 -> 9. and see that we get the more specific surroundXX
+ // device opened for channels 4-8. For all other channels, the device should
+ // default to |AlsaPcmOutputStream::kDefaultDevice|. We should also not
+ // downmix any channel in this case because downmixing is only defined for
+ // channels 4-8, which we are guaranteeing to work.
+ //
+ // Note that the loop starts at "1", so the first parameter is ignored in
+ // these arrays.
+ const char* kExpectedDeviceName[] = { NULL,
+ AlsaPcmOutputStream::kDefaultDevice,
+ AlsaPcmOutputStream::kDefaultDevice,
+ AlsaPcmOutputStream::kDefaultDevice,
+ kSurround40, kSurround50, kSurround51,
+ kSurround70, kSurround71,
+ AlsaPcmOutputStream::kDefaultDevice };
+ bool kExpectedDownmix[] = { false, false, false, false, false, true,
+ false, false, false, false };
+ ChannelLayout kExpectedLayouts[] = { CHANNEL_LAYOUT_NONE,
+ CHANNEL_LAYOUT_MONO,
+ CHANNEL_LAYOUT_STEREO,
+ CHANNEL_LAYOUT_SURROUND,
+ CHANNEL_LAYOUT_4_0,
+ CHANNEL_LAYOUT_5_0,
+ CHANNEL_LAYOUT_5_1,
+ CHANNEL_LAYOUT_7_0,
+ CHANNEL_LAYOUT_7_1 };
+
+
+ for (int i = 1; i < 9; ++i) {
+ if (i == 3 || i == 4 || i == 5) // invalid number of channels
+ continue;
+ SCOPED_TRACE(base::StringPrintf("Attempting %d Channel", i));
+
+ // Hints will only be grabbed for channel numbers that have non-default
+ // devices associated with them.
+ if (kExpectedDeviceName[i] != AlsaPcmOutputStream::kDefaultDevice) {
+ // The DeviceNameHint and DeviceNameFreeHint need to be paired to avoid a
+ // memory leak.
+ EXPECT_CALL(mock_alsa_wrapper_, DeviceNameHint(_, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<2>(&kFakeHints[0]), Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_, DeviceNameFreeHint(&kFakeHints[0]))
+ .Times(1);
+ }
+
+ EXPECT_CALL(mock_alsa_wrapper_,
+ PcmOpen(_, StrEq(kExpectedDeviceName[i]), _, _))
+ .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle), Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_,
+ PcmSetParams(kFakeHandle, _, _, i, _, _, _))
+ .WillOnce(Return(0));
+
+ // The parameters are specified by ALSA documentation, and are in constants
+ // in the implementation files.
+ EXPECT_CALL(mock_alsa_wrapper_, DeviceNameGetHint(_, StrEq("IOID")))
+ .WillRepeatedly(Invoke(OutputHint));
+ EXPECT_CALL(mock_alsa_wrapper_, DeviceNameGetHint(_, StrEq("NAME")))
+ .WillRepeatedly(Invoke(EchoHint));
+
+ AlsaPcmOutputStream* test_stream = CreateStream(kExpectedLayouts[i]);
+ EXPECT_TRUE(test_stream->AutoSelectDevice(i));
+ EXPECT_EQ(kExpectedDownmix[i],
+ static_cast<bool>(test_stream->channel_mixer_));
+
+ Mock::VerifyAndClearExpectations(&mock_alsa_wrapper_);
+ Mock::VerifyAndClearExpectations(mock_manager_.get());
+ test_stream->Close();
+ }
+}
+
+TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_FallbackDevices) {
+ using std::string;
+
+ // If there are problems opening a multi-channel device, it the fallbacks
+ // operations should be as follows. Assume the multi-channel device name is
+ // surround50:
+ //
+ // 1) Try open "surround50"
+ // 2) Try open "plug:surround50".
+ // 3) Try open "default".
+ // 4) Try open "plug:default".
+ // 5) Give up trying to open.
+ //
+ const string first_try = kSurround50;
+ const string second_try = string(AlsaPcmOutputStream::kPlugPrefix) +
+ kSurround50;
+ const string third_try = AlsaPcmOutputStream::kDefaultDevice;
+ const string fourth_try = string(AlsaPcmOutputStream::kPlugPrefix) +
+ AlsaPcmOutputStream::kDefaultDevice;
+
+ EXPECT_CALL(mock_alsa_wrapper_, DeviceNameHint(_, _, _))
+ .WillOnce(DoAll(SetArgumentPointee<2>(&kFakeHints[0]), Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_, DeviceNameFreeHint(&kFakeHints[0]))
+ .Times(1);
+ EXPECT_CALL(mock_alsa_wrapper_, DeviceNameGetHint(_, StrEq("IOID")))
+ .WillRepeatedly(Invoke(OutputHint));
+ EXPECT_CALL(mock_alsa_wrapper_, DeviceNameGetHint(_, StrEq("NAME")))
+ .WillRepeatedly(Invoke(EchoHint));
+ EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
+ .WillRepeatedly(Return(kDummyMessage));
+
+ InSequence s;
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(first_try.c_str()), _, _))
+ .WillOnce(Return(kTestFailedErrno));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(second_try.c_str()), _, _))
+ .WillOnce(Return(kTestFailedErrno));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(third_try.c_str()), _, _))
+ .WillOnce(Return(kTestFailedErrno));
+ EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(fourth_try.c_str()), _, _))
+ .WillOnce(Return(kTestFailedErrno));
+
+ AlsaPcmOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_5_0);
+ EXPECT_FALSE(test_stream->AutoSelectDevice(5));
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_HintFail) {
+ // Should get |kDefaultDevice|, and force a 2-channel downmix on a failure to
+ // enumerate devices.
+ EXPECT_CALL(mock_alsa_wrapper_, DeviceNameHint(_, _, _))
+ .WillRepeatedly(Return(kTestFailedErrno));
+ EXPECT_CALL(mock_alsa_wrapper_,
+ PcmOpen(_, StrEq(AlsaPcmOutputStream::kDefaultDevice), _, _))
+ .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle), Return(0)));
+ EXPECT_CALL(mock_alsa_wrapper_,
+ PcmSetParams(kFakeHandle, _, _, 2, _, _, _))
+ .WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
+ .WillOnce(Return(kDummyMessage));
+
+ AlsaPcmOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_5_0);
+ EXPECT_TRUE(test_stream->AutoSelectDevice(5));
+ EXPECT_TRUE(test_stream->channel_mixer_);
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, BufferPacket_StopStream) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ InitBuffer(test_stream);
+ test_stream->stop_stream_ = true;
+ bool source_exhausted;
+ test_stream->BufferPacket(&source_exhausted);
+ EXPECT_EQ(0, test_stream->buffer_->forward_bytes());
+ EXPECT_TRUE(source_exhausted);
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, ScheduleNextWrite) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsOpened);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
+ InitBuffer(test_stream);
+ DVLOG(1) << test_stream->state();
+ EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(_))
+ .WillOnce(Return(10));
+ test_stream->ScheduleNextWrite(false);
+ DVLOG(1) << test_stream->state();
+ // TODO(sergeyu): Figure out how to check that the task has been added to the
+ // message loop.
+
+ // Cleanup the message queue. Currently ~MessageQueue() doesn't free pending
+ // tasks unless running on valgrind. The code below is needed to keep
+ // heapcheck happy.
+
+ test_stream->stop_stream_ = true;
+ DVLOG(1) << test_stream->state();
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsClosed);
+ DVLOG(1) << test_stream->state();
+ test_stream->Close();
+}
+
+TEST_F(AlsaPcmOutputStreamTest, ScheduleNextWrite_StopStream) {
+ AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsOpened);
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
+
+ InitBuffer(test_stream);
+
+ test_stream->stop_stream_ = true;
+ test_stream->ScheduleNextWrite(true);
+
+ // TODO(ajwong): Find a way to test whether or not another task has been
+ // posted so we can verify that the Alsa code will indeed break the task
+ // posting loop.
+
+ test_stream->TransitionTo(AlsaPcmOutputStream::kIsClosed);
+ test_stream->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/linux/alsa_util.cc b/chromium/media/audio/linux/alsa_util.cc
new file mode 100644
index 00000000000..176ef697741
--- /dev/null
+++ b/chromium/media/audio/linux/alsa_util.cc
@@ -0,0 +1,200 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/linux/alsa_util.h"
+
+#include <string>
+
+#include "base/logging.h"
+#include "media/audio/linux/alsa_wrapper.h"
+
+namespace alsa_util {
+
+static snd_pcm_t* OpenDevice(media::AlsaWrapper* wrapper,
+ const char* device_name,
+ snd_pcm_stream_t type,
+ int channels,
+ int sample_rate,
+ snd_pcm_format_t pcm_format,
+ int latency_us) {
+ snd_pcm_t* handle = NULL;
+ int error = wrapper->PcmOpen(&handle, device_name, type, SND_PCM_NONBLOCK);
+ if (error < 0) {
+ LOG(WARNING) << "PcmOpen: " << device_name << ","
+ << wrapper->StrError(error);
+ return NULL;
+ }
+
+ error = wrapper->PcmSetParams(handle, pcm_format,
+ SND_PCM_ACCESS_RW_INTERLEAVED, channels,
+ sample_rate, 1, latency_us);
+ if (error < 0) {
+ LOG(WARNING) << "PcmSetParams: " << device_name << ", "
+ << wrapper->StrError(error) << " - Format: " << pcm_format
+ << " Channels: " << channels << " Latency: " << latency_us;
+ if (alsa_util::CloseDevice(wrapper, handle) < 0) {
+ // TODO(ajwong): Retry on certain errors?
+ LOG(WARNING) << "Unable to close audio device. Leaking handle.";
+ }
+ return NULL;
+ }
+
+ return handle;
+}
+
// Derives the mixer control name from a PCM device name. A name without a
// ':' maps to itself; otherwise everything from the ':' up to (but not
// including) the first ',' is kept and prefixed with "hw".
static std::string DeviceNameToControlName(const std::string& device_name) {
  const size_t colon = device_name.find(':');
  if (colon == std::string::npos)
    return device_name;

  // Examples:
  //   deviceName: "front:CARD=Intel,DEV=0", controlName: "hw:CARD=Intel".
  //   deviceName: "default:CARD=Intel", controlName: ":CARD=Intel" (no
  //   comma, so no "hw" prefix is applied).
  const size_t comma = device_name.find(',');
  if (comma == std::string::npos)
    return device_name.substr(colon);
  return std::string("hw") + device_name.substr(colon, comma - colon);
}
+
+snd_pcm_format_t BitsToFormat(int bits_per_sample) {
+ switch (bits_per_sample) {
+ case 8:
+ return SND_PCM_FORMAT_U8;
+
+ case 16:
+ return SND_PCM_FORMAT_S16;
+
+ case 24:
+ return SND_PCM_FORMAT_S24;
+
+ case 32:
+ return SND_PCM_FORMAT_S32;
+
+ default:
+ return SND_PCM_FORMAT_UNKNOWN;
+ }
+}
+
+int CloseDevice(media::AlsaWrapper* wrapper, snd_pcm_t* handle) {
+ std::string device_name = wrapper->PcmName(handle);
+ int error = wrapper->PcmClose(handle);
+ if (error < 0) {
+ LOG(ERROR) << "PcmClose: " << device_name << ", "
+ << wrapper->StrError(error);
+ }
+
+ return error;
+}
+
// Opens and configures |device_name| for recording with the given format.
// Returns the handle, or NULL on failure.
snd_pcm_t* OpenCaptureDevice(media::AlsaWrapper* wrapper,
                             const char* device_name,
                             int channels,
                             int sample_rate,
                             snd_pcm_format_t pcm_format,
                             int latency_us) {
  return OpenDevice(wrapper, device_name, SND_PCM_STREAM_CAPTURE, channels,
                    sample_rate, pcm_format, latency_us);
}
+
// Opens and configures |device_name| for playback with the given format.
// Returns the handle, or NULL on failure.
snd_pcm_t* OpenPlaybackDevice(media::AlsaWrapper* wrapper,
                              const char* device_name,
                              int channels,
                              int sample_rate,
                              snd_pcm_format_t pcm_format,
                              int latency_us) {
  return OpenDevice(wrapper, device_name, SND_PCM_STREAM_PLAYBACK, channels,
                    sample_rate, pcm_format, latency_us);
}
+
+snd_mixer_t* OpenMixer(media::AlsaWrapper* wrapper,
+ const std::string& device_name) {
+ snd_mixer_t* mixer = NULL;
+
+ int error = wrapper->MixerOpen(&mixer, 0);
+ if (error < 0) {
+ LOG(ERROR) << "MixerOpen: " << device_name << ", "
+ << wrapper->StrError(error);
+ return NULL;
+ }
+
+ std::string control_name = DeviceNameToControlName(device_name);
+ error = wrapper->MixerAttach(mixer, control_name.c_str());
+ if (error < 0) {
+ LOG(ERROR) << "MixerAttach, " << control_name << ", "
+ << wrapper->StrError(error);
+ alsa_util::CloseMixer(wrapper, mixer, device_name);
+ return NULL;
+ }
+
+ error = wrapper->MixerElementRegister(mixer, NULL, NULL);
+ if (error < 0) {
+ LOG(ERROR) << "MixerElementRegister: " << control_name << ", "
+ << wrapper->StrError(error);
+ alsa_util::CloseMixer(wrapper, mixer, device_name);
+ return NULL;
+ }
+
+ return mixer;
+}
+
+void CloseMixer(media::AlsaWrapper* wrapper, snd_mixer_t* mixer,
+ const std::string& device_name) {
+ if (!mixer)
+ return;
+
+ wrapper->MixerFree(mixer);
+
+ int error = 0;
+ if (!device_name.empty()) {
+ std::string control_name = DeviceNameToControlName(device_name);
+ error = wrapper->MixerDetach(mixer, control_name.c_str());
+ if (error < 0) {
+ LOG(WARNING) << "MixerDetach: " << control_name << ", "
+ << wrapper->StrError(error);
+ }
+ }
+
+ error = wrapper->MixerClose(mixer);
+ if (error < 0) {
+ LOG(WARNING) << "MixerClose: " << wrapper->StrError(error);
+ }
+}
+
+snd_mixer_elem_t* LoadCaptureMixerElement(media::AlsaWrapper* wrapper,
+ snd_mixer_t* mixer) {
+ if (!mixer)
+ return NULL;
+
+ int error = wrapper->MixerLoad(mixer);
+ if (error < 0) {
+ LOG(ERROR) << "MixerLoad: " << wrapper->StrError(error);
+ return NULL;
+ }
+
+ snd_mixer_elem_t* elem = NULL;
+ snd_mixer_elem_t* mic_elem = NULL;
+ const char kCaptureElemName[] = "Capture";
+ const char kMicElemName[] = "Mic";
+ for (elem = wrapper->MixerFirstElem(mixer);
+ elem;
+ elem = wrapper->MixerNextElem(elem)) {
+ if (wrapper->MixerSelemIsActive(elem)) {
+ const char* elem_name = wrapper->MixerSelemName(elem);
+ if (strcmp(elem_name, kCaptureElemName) == 0)
+ return elem;
+ else if (strcmp(elem_name, kMicElemName) == 0)
+ mic_elem = elem;
+ }
+ }
+
+ // Did not find any Capture handle, use the Mic handle.
+ return mic_elem;
+}
+
+} // namespace alsa_util
diff --git a/chromium/media/audio/linux/alsa_util.h b/chromium/media/audio/linux/alsa_util.h
new file mode 100644
index 00000000000..53cf80af84c
--- /dev/null
+++ b/chromium/media/audio/linux/alsa_util.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
#ifndef MEDIA_AUDIO_LINUX_ALSA_UTIL_H_
#define MEDIA_AUDIO_LINUX_ALSA_UTIL_H_

#include <alsa/asoundlib.h>
#include <string>

namespace media {
class AlsaWrapper;
}

// Helper functions shared by the ALSA input and output streams for opening,
// configuring and closing PCM devices and mixers. All ALSA calls go through
// the injected media::AlsaWrapper so they can be mocked in tests.
namespace alsa_util {

// Maps a sample width in bits (8/16/24/32) to the matching ALSA sample
// format; other widths yield SND_PCM_FORMAT_UNKNOWN.
snd_pcm_format_t BitsToFormat(int bits_per_sample);

// Opens and configures |device_name| for recording. Returns NULL on failure.
snd_pcm_t* OpenCaptureDevice(media::AlsaWrapper* wrapper,
                             const char* device_name,
                             int channels,
                             int sample_rate,
                             snd_pcm_format_t pcm_format,
                             int latency_us);

// Opens and configures |device_name| for playback. Returns NULL on failure.
snd_pcm_t* OpenPlaybackDevice(media::AlsaWrapper* wrapper,
                              const char* device_name,
                              int channels,
                              int sample_rate,
                              snd_pcm_format_t pcm_format,
                              int latency_us);

// Closes |handle|; returns the ALSA error code (< 0 on failure).
int CloseDevice(media::AlsaWrapper* wrapper, snd_pcm_t* handle);

// Opens a mixer attached to the control corresponding to |device_name|.
// Returns NULL on failure.
snd_mixer_t* OpenMixer(media::AlsaWrapper* wrapper,
                       const std::string& device_name);

// Frees, detaches and closes |mixer|. A NULL |mixer| is a no-op.
void CloseMixer(media::AlsaWrapper* wrapper,
                snd_mixer_t* mixer,
                const std::string& device_name);

// Returns the mixer's "Capture" element, falling back to "Mic", or NULL when
// neither exists.
snd_mixer_elem_t* LoadCaptureMixerElement(media::AlsaWrapper* wrapper,
                                          snd_mixer_t* mixer);

} // namespace alsa_util

#endif // MEDIA_AUDIO_LINUX_ALSA_UTIL_H_
diff --git a/chromium/media/audio/linux/alsa_wrapper.cc b/chromium/media/audio/linux/alsa_wrapper.cc
new file mode 100644
index 00000000000..c1ce359b303
--- /dev/null
+++ b/chromium/media/audio/linux/alsa_wrapper.cc
@@ -0,0 +1,173 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/linux/alsa_wrapper.h"
+
+#include <alsa/asoundlib.h>
+
namespace media {

// Every method below is a stateless 1:1 pass-through to the libasound
// function of the corresponding name; the indirection exists solely so tests
// can substitute a mock (see the class comment in alsa_wrapper.h).

AlsaWrapper::AlsaWrapper() {
}

AlsaWrapper::~AlsaWrapper() {
}

int AlsaWrapper::PcmOpen(snd_pcm_t** handle, const char* name,
                         snd_pcm_stream_t stream, int mode) {
  return snd_pcm_open(handle, name, stream, mode);
}

int AlsaWrapper::DeviceNameHint(int card, const char* iface, void*** hints) {
  return snd_device_name_hint(card, iface, hints);
}

char* AlsaWrapper::DeviceNameGetHint(const void* hint, const char* id) {
  return snd_device_name_get_hint(hint, id);
}

int AlsaWrapper::DeviceNameFreeHint(void** hints) {
  return snd_device_name_free_hint(hints);
}

int AlsaWrapper::CardNext(int* rcard) {
  return snd_card_next(rcard);
}

int AlsaWrapper::PcmClose(snd_pcm_t* handle) {
  return snd_pcm_close(handle);
}

int AlsaWrapper::PcmPrepare(snd_pcm_t* handle) {
  return snd_pcm_prepare(handle);
}

int AlsaWrapper::PcmDrop(snd_pcm_t* handle) {
  return snd_pcm_drop(handle);
}

int AlsaWrapper::PcmDelay(snd_pcm_t* handle, snd_pcm_sframes_t* delay) {
  return snd_pcm_delay(handle, delay);
}

snd_pcm_sframes_t AlsaWrapper::PcmWritei(snd_pcm_t* handle,
                                         const void* buffer,
                                         snd_pcm_uframes_t size) {
  return snd_pcm_writei(handle, buffer, size);
}

snd_pcm_sframes_t AlsaWrapper::PcmReadi(snd_pcm_t* handle,
                                        void* buffer,
                                        snd_pcm_uframes_t size) {
  return snd_pcm_readi(handle, buffer, size);
}

int AlsaWrapper::PcmRecover(snd_pcm_t* handle, int err, int silent) {
  return snd_pcm_recover(handle, err, silent);
}

const char* AlsaWrapper::PcmName(snd_pcm_t* handle) {
  return snd_pcm_name(handle);
}

int AlsaWrapper::PcmSetParams(snd_pcm_t* handle, snd_pcm_format_t format,
                              snd_pcm_access_t access, unsigned int channels,
                              unsigned int rate, int soft_resample,
                              unsigned int latency) {
  return snd_pcm_set_params(handle,
                            format,
                            access,
                            channels,
                            rate,
                            soft_resample,
                            latency);
}

int AlsaWrapper::PcmGetParams(snd_pcm_t* handle, snd_pcm_uframes_t* buffer_size,
                              snd_pcm_uframes_t* period_size) {
  return snd_pcm_get_params(handle, buffer_size, period_size);
}

snd_pcm_sframes_t AlsaWrapper::PcmAvailUpdate(snd_pcm_t* handle) {
  return snd_pcm_avail_update(handle);
}

snd_pcm_state_t AlsaWrapper::PcmState(snd_pcm_t* handle) {
  return snd_pcm_state(handle);
}

const char* AlsaWrapper::StrError(int errnum) {
  return snd_strerror(errnum);
}

int AlsaWrapper::PcmStart(snd_pcm_t* handle) {
  return snd_pcm_start(handle);
}

int AlsaWrapper::MixerOpen(snd_mixer_t** mixer, int mode) {
  return snd_mixer_open(mixer, mode);
}

int AlsaWrapper::MixerAttach(snd_mixer_t* mixer, const char* name) {
  return snd_mixer_attach(mixer, name);
}

int AlsaWrapper::MixerElementRegister(snd_mixer_t* mixer,
                                      struct snd_mixer_selem_regopt* options,
                                      snd_mixer_class_t** classp) {
  return snd_mixer_selem_register(mixer, options, classp);
}

void AlsaWrapper::MixerFree(snd_mixer_t* mixer) {
  snd_mixer_free(mixer);
}

int AlsaWrapper::MixerDetach(snd_mixer_t* mixer, const char* name) {
  return snd_mixer_detach(mixer, name);
}

int AlsaWrapper::MixerClose(snd_mixer_t* mixer) {
  return snd_mixer_close(mixer);
}

int AlsaWrapper::MixerLoad(snd_mixer_t* mixer) {
  return snd_mixer_load(mixer);
}

snd_mixer_elem_t* AlsaWrapper::MixerFirstElem(snd_mixer_t* mixer) {
  return snd_mixer_first_elem(mixer);
}

snd_mixer_elem_t* AlsaWrapper::MixerNextElem(snd_mixer_elem_t* elem) {
  return snd_mixer_elem_next(elem);
}

int AlsaWrapper::MixerSelemIsActive(snd_mixer_elem_t* elem) {
  return snd_mixer_selem_is_active(elem);
}

const char* AlsaWrapper::MixerSelemName(snd_mixer_elem_t* elem) {
  return snd_mixer_selem_get_name(elem);
}

int AlsaWrapper::MixerSelemSetCaptureVolumeAll(
    snd_mixer_elem_t* elem, long value) {
  return snd_mixer_selem_set_capture_volume_all(elem, value);
}

int AlsaWrapper::MixerSelemGetCaptureVolume(
    snd_mixer_elem_t* elem, snd_mixer_selem_channel_id_t channel, long* value) {
  return snd_mixer_selem_get_capture_volume(elem, channel, value);
}

int AlsaWrapper::MixerSelemHasCaptureVolume(snd_mixer_elem_t* elem) {
  return snd_mixer_selem_has_capture_volume(elem);
}

int AlsaWrapper::MixerSelemGetCaptureVolumeRange(snd_mixer_elem_t* elem,
                                                 long* min, long* max) {
  return snd_mixer_selem_get_capture_volume_range(elem, min, max);
}

} // namespace media
diff --git a/chromium/media/audio/linux/alsa_wrapper.h b/chromium/media/audio/linux/alsa_wrapper.h
new file mode 100644
index 00000000000..30d94635dd5
--- /dev/null
+++ b/chromium/media/audio/linux/alsa_wrapper.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// AlsaWrapper is a simple stateless class that wraps the alsa library commands
+// we want to use. It's purpose is to allow injection of a mock so that the
+// higher level code is testable.
+
+#include <alsa/asoundlib.h>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT AlsaWrapper {
+ public:
+ AlsaWrapper();
+ virtual ~AlsaWrapper();
+
+ virtual int DeviceNameHint(int card, const char* iface, void*** hints);
+ virtual char* DeviceNameGetHint(const void* hint, const char* id);
+ virtual int DeviceNameFreeHint(void** hints);
+ virtual int CardNext(int* rcard);
+
+ virtual int PcmOpen(snd_pcm_t** handle, const char* name,
+ snd_pcm_stream_t stream, int mode);
+ virtual int PcmClose(snd_pcm_t* handle);
+ virtual int PcmPrepare(snd_pcm_t* handle);
+ virtual int PcmDrop(snd_pcm_t* handle);
+ virtual int PcmDelay(snd_pcm_t* handle, snd_pcm_sframes_t* delay);
+ virtual snd_pcm_sframes_t PcmWritei(snd_pcm_t* handle,
+ const void* buffer,
+ snd_pcm_uframes_t size);
+ virtual snd_pcm_sframes_t PcmReadi(snd_pcm_t* handle,
+ void* buffer,
+ snd_pcm_uframes_t size);
+ virtual int PcmRecover(snd_pcm_t* handle, int err, int silent);
+ virtual int PcmSetParams(snd_pcm_t* handle, snd_pcm_format_t format,
+ snd_pcm_access_t access, unsigned int channels,
+ unsigned int rate, int soft_resample,
+ unsigned int latency);
+ virtual int PcmGetParams(snd_pcm_t* handle, snd_pcm_uframes_t* buffer_size,
+ snd_pcm_uframes_t* period_size);
+ virtual const char* PcmName(snd_pcm_t* handle);
+ virtual snd_pcm_sframes_t PcmAvailUpdate(snd_pcm_t* handle);
+ virtual snd_pcm_state_t PcmState(snd_pcm_t* handle);
+ virtual int PcmStart(snd_pcm_t* handle);
+
+ virtual int MixerOpen(snd_mixer_t** mixer, int mode);
+ virtual int MixerAttach(snd_mixer_t* mixer, const char* name);
+ virtual int MixerElementRegister(snd_mixer_t* mixer,
+ struct snd_mixer_selem_regopt* options,
+ snd_mixer_class_t** classp);
+ virtual void MixerFree(snd_mixer_t* mixer);
+ virtual int MixerDetach(snd_mixer_t* mixer, const char* name);
+ virtual int MixerClose(snd_mixer_t* mixer);
+ virtual int MixerLoad(snd_mixer_t* mixer);
+ virtual snd_mixer_elem_t* MixerFirstElem(snd_mixer_t* mixer);
+ virtual snd_mixer_elem_t* MixerNextElem(snd_mixer_elem_t* elem);
+ virtual int MixerSelemIsActive(snd_mixer_elem_t* elem);
+ virtual const char* MixerSelemName(snd_mixer_elem_t* elem);
+ virtual int MixerSelemSetCaptureVolumeAll(snd_mixer_elem_t* elem, long value);
+ virtual int MixerSelemGetCaptureVolume(snd_mixer_elem_t* elem,
+ snd_mixer_selem_channel_id_t channel,
+ long* value);
+ virtual int MixerSelemHasCaptureVolume(snd_mixer_elem_t* elem);
+ virtual int MixerSelemGetCaptureVolumeRange(snd_mixer_elem_t* elem,
+ long* min, long* max);
+
+ virtual const char* StrError(int errnum);
+
+ private:
+ int ConfigureHwParams(snd_pcm_t* handle, snd_pcm_hw_params_t* hw_params,
+ snd_pcm_format_t format, snd_pcm_access_t access,
+ unsigned int channels, unsigned int rate,
+ int soft_resample, unsigned int latency);
+ DISALLOW_COPY_AND_ASSIGN(AlsaWrapper);
+};
+
+} // namespace media
diff --git a/chromium/media/audio/linux/audio_manager_linux.cc b/chromium/media/audio/linux/audio_manager_linux.cc
new file mode 100644
index 00000000000..38253e2e651
--- /dev/null
+++ b/chromium/media/audio/linux/audio_manager_linux.cc
@@ -0,0 +1,352 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
#include "media/audio/linux/audio_manager_linux.h"

#include <algorithm>
#include <string>

#include "base/command_line.h"
#include "base/environment.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/nix/xdg_util.h"
#include "base/process/launch.h"
#include "base/stl_util.h"
#include "media/audio/audio_output_dispatcher.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/audio_util.h"
#if defined(USE_CRAS)
#include "media/audio/cras/audio_manager_cras.h"
#endif
#include "media/audio/linux/alsa_input.h"
#include "media/audio/linux/alsa_output.h"
#include "media/audio/linux/alsa_wrapper.h"
#if defined(USE_PULSEAUDIO)
#include "media/audio/pulse/audio_manager_pulse.h"
#endif
#include "media/base/channel_layout.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
+
+namespace media {
+
// Maximum number of output streams that can be open simultaneously.
static const int kMaxOutputStreams = 50;

// Default sample rate for input and output streams.
static const int kDefaultSampleRate = 48000;

// Since "default", "pulse" and "dmix" devices are virtual devices mapped to
// real devices, we remove them from the list to avoid duplicate counting.
// In addition, note that we support no more than 2 channels for recording,
// hence surround devices are not stored in the list.
// Entries are matched as name prefixes (see IsAlsaDeviceAvailable()), so
// "surround" also rejects e.g. "surround40", "surround51", etc.
static const char* kInvalidAudioInputDevices[] = {
  "default",
  "null",
  "pulse",
  "dmix",
  "surround",
};

// Audio backend selected at runtime; recorded in the Media.LinuxAudioIO
// histogram by CreateAudioManager().
enum LinuxAudioIO {
  kPulse,
  kAlsa,
  kCras,
  kAudioIOMax  // Must always be last!
};
+
+// static
+void AudioManagerLinux::ShowLinuxAudioInputSettings() {
+ scoped_ptr<base::Environment> env(base::Environment::Create());
+ CommandLine command_line(CommandLine::NO_PROGRAM);
+ switch (base::nix::GetDesktopEnvironment(env.get())) {
+ case base::nix::DESKTOP_ENVIRONMENT_GNOME:
+ command_line.SetProgram(base::FilePath("gnome-volume-control"));
+ break;
+ case base::nix::DESKTOP_ENVIRONMENT_KDE3:
+ case base::nix::DESKTOP_ENVIRONMENT_KDE4:
+ command_line.SetProgram(base::FilePath("kmix"));
+ break;
+ case base::nix::DESKTOP_ENVIRONMENT_UNITY:
+ command_line.SetProgram(base::FilePath("gnome-control-center"));
+ command_line.AppendArg("sound");
+ command_line.AppendArg("input");
+ break;
+ default:
+ LOG(ERROR) << "Failed to show audio input settings: we don't know "
+ << "what command to use for your desktop environment.";
+ return;
+ }
+ base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
+}
+
// Implementation of AudioManager.
// Returns true if at least one ALSA playback-capable device exists.
bool AudioManagerLinux::HasAudioOutputDevices() {
  return HasAnyAlsaAudioDevice(kStreamPlayback);
}
+
// Returns true if at least one ALSA capture-capable device exists.
bool AudioManagerLinux::HasAudioInputDevices() {
  return HasAnyAlsaAudioDevice(kStreamCapture);
}
+
// A single AlsaWrapper instance is created here and shared by every stream
// this manager creates.
AudioManagerLinux::AudioManagerLinux()
    : wrapper_(new AlsaWrapper()) {
  SetMaxOutputStreamsAllowed(kMaxOutputStreams);
}
+
// Runs the base-class Shutdown() before members (notably |wrapper_|) are
// destroyed.
AudioManagerLinux::~AudioManagerLinux() {
  Shutdown();
}
+
// Delegates to the static ShowLinuxAudioInputSettings() helper.
void AudioManagerLinux::ShowAudioInputSettings() {
  ShowLinuxAudioInputSettings();
}
+
// Fills |device_names| with the available ALSA capture devices; the list
// must be empty on entry.
void AudioManagerLinux::GetAudioInputDeviceNames(
    media::AudioDeviceNames* device_names) {
  DCHECK(device_names->empty());
  GetAlsaAudioInputDevices(device_names);
}
+
// Returns fixed low-latency input parameters: stereo, 48 kHz
// (kDefaultSampleRate), 16 bits and a 1024-frame buffer. |device_id| is
// currently ignored.
AudioParameters AudioManagerLinux::GetInputStreamParameters(
    const std::string& device_id) {
  static const int kDefaultInputBufferSize = 1024;

  return AudioParameters(
      AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
      kDefaultSampleRate, 16, kDefaultInputBufferSize);
}
+
+void AudioManagerLinux::GetAlsaAudioInputDevices(
+ media::AudioDeviceNames* device_names) {
+ // Constants specified by the ALSA API for device hints.
+ static const char kPcmInterfaceName[] = "pcm";
+ int card = -1;
+
+ // Loop through the sound cards to get ALSA device hints.
+ while (!wrapper_->CardNext(&card) && card >= 0) {
+ void** hints = NULL;
+ int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
+ if (!error) {
+ GetAlsaDevicesInfo(hints, device_names);
+
+ // Destroy the hints now that we're done with it.
+ wrapper_->DeviceNameFreeHint(hints);
+ } else {
+ DLOG(WARNING) << "GetAudioInputDevices: unable to get device hints: "
+ << wrapper_->StrError(error);
+ }
+ }
+}
+
// Appends the input-capable entries of the NULL-terminated |hints| array
// (as returned by DeviceNameHint()) to |device_names|, prepending the
// "default" pseudo-device entry the first time any input device is seen.
// Hint strings are malloc'd by ALSA and freed via scoped_ptr_malloc.
void AudioManagerLinux::GetAlsaDevicesInfo(
    void** hints, media::AudioDeviceNames* device_names) {
  static const char kIoHintName[] = "IOID";
  static const char kNameHintName[] = "NAME";
  static const char kDescriptionHintName[] = "DESC";
  static const char kOutputDevice[] = "Output";

  for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
    // Only examine devices that are input capable. Valid values are
    // "Input", "Output", and NULL which means both input and output.
    scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
                                                           kIoHintName));
    if (io != NULL && strcmp(kOutputDevice, io.get()) == 0)
      continue;

    // Found an input device, prepend the default device since we always want
    // it to be on the top of the list for all platforms. And there is no
    // duplicate counting here since it is only done if the list is still empty.
    // Note, pulse has exclusively opened the default device, so we must open
    // the device via the "default" moniker.
    if (device_names->empty()) {
      device_names->push_front(media::AudioDeviceName(
          AudioManagerBase::kDefaultDeviceName,
          AudioManagerBase::kDefaultDeviceId));
    }

    // Get the unique device name for the device.
    scoped_ptr_malloc<char> unique_device_name(
        wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));

    // Find out if the device is available (i.e. not a blacklisted virtual
    // device; see IsAlsaDeviceAvailable()).
    if (IsAlsaDeviceAvailable(unique_device_name.get())) {
      // Get the description for the device.
      scoped_ptr_malloc<char> desc(wrapper_->DeviceNameGetHint(
          *hint_iter, kDescriptionHintName));

      media::AudioDeviceName name;
      name.unique_id = unique_device_name.get();
      if (desc) {
        // Use the more user friendly description as name.
        // Replace '\n' with '-'.
        char* pret = strchr(desc.get(), '\n');
        if (pret)
          *pret = '-';
        name.device_name = desc.get();
      } else {
        // Virtual devices don't necessarily have descriptions.
        // Use their names instead.
        name.device_name = unique_device_name.get();
      }

      // Store the device information.
      device_names->push_back(name);
    }
  }
}
+
+bool AudioManagerLinux::IsAlsaDeviceAvailable(const char* device_name) {
+ if (!device_name)
+ return false;
+
+ // Check if the device is in the list of invalid devices.
+ for (size_t i = 0; i < arraysize(kInvalidAudioInputDevices); ++i) {
+ if (strncmp(kInvalidAudioInputDevices[i], device_name,
+ strlen(kInvalidAudioInputDevices[i])) == 0)
+ return false;
+ }
+
+ return true;
+}
+
// Returns true when any sound card exposes an ALSA device capable of
// |stream| (playback or capture). Devices whose IOID hint names the opposite
// direction are skipped; a NULL hint means the device supports both.
bool AudioManagerLinux::HasAnyAlsaAudioDevice(StreamType stream) {
  static const char kPcmInterfaceName[] = "pcm";
  static const char kIoHintName[] = "IOID";
  const char* kNotWantedDevice =
      (stream == kStreamPlayback ? "Input" : "Output");
  void** hints = NULL;
  bool has_device = false;
  int card = -1;

  // Loop through the sound cards.
  // Don't use snd_device_name_hint(-1,..) since there is a access violation
  // inside this ALSA API with libasound.so.2.0.0.
  while (!wrapper_->CardNext(&card) && (card >= 0) && !has_device) {
    int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
    if (!error) {
      for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
        // Only examine devices that are |stream| capable. Valid values are
        // "Input", "Output", and NULL which means both input and output.
        scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
                                                               kIoHintName));
        if (io != NULL && strcmp(kNotWantedDevice, io.get()) == 0)
          continue;  // Wrong type, skip the device.

        // Found an input device.
        has_device = true;
        break;
      }

      // Destroy the hints now that we're done with it.
      wrapper_->DeviceNameFreeHint(hints);
      hints = NULL;
    } else {
      DLOG(WARNING) << "HasAnyAudioDevice: unable to get device hints: "
                    << wrapper_->StrError(error);
    }
  }

  return has_device;
}
+
// Creates a linear (non-low-latency) PCM output stream.
AudioOutputStream* AudioManagerLinux::MakeLinearOutputStream(
    const AudioParameters& params) {
  DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
  return MakeOutputStream(params);
}
+
// Creates a low-latency PCM output stream. |input_device_id| is currently
// unused (see TODO).
AudioOutputStream* AudioManagerLinux::MakeLowLatencyOutputStream(
    const AudioParameters& params,
    const std::string& input_device_id) {
  DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
  // TODO(xians): Use input_device_id for unified IO.
  return MakeOutputStream(params);
}
+
// Creates a linear (non-low-latency) PCM input stream for |device_id|.
AudioInputStream* AudioManagerLinux::MakeLinearInputStream(
    const AudioParameters& params, const std::string& device_id) {
  DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
  return MakeInputStream(params, device_id);
}
+
// Creates a low-latency PCM input stream for |device_id|.
AudioInputStream* AudioManagerLinux::MakeLowLatencyInputStream(
    const AudioParameters& params, const std::string& device_id) {
  DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
  return MakeInputStream(params, device_id);
}
+
+AudioParameters AudioManagerLinux::GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) {
+ static const int kDefaultOutputBufferSize = 2048;
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ int sample_rate = kDefaultSampleRate;
+ int buffer_size = kDefaultOutputBufferSize;
+ int bits_per_sample = 16;
+ int input_channels = 0;
+ if (input_params.IsValid()) {
+ // Some clients, such as WebRTC, have a more limited use case and work
+ // acceptably with a smaller buffer size. The check below allows clients
+ // which want to try a smaller buffer size on Linux to do so.
+ // TODO(dalecurtis): This should include bits per channel and channel layout
+ // eventually.
+ sample_rate = input_params.sample_rate();
+ bits_per_sample = input_params.bits_per_sample();
+ channel_layout = input_params.channel_layout();
+ input_channels = input_params.input_channels();
+ buffer_size = std::min(input_params.frames_per_buffer(), buffer_size);
+ }
+
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ buffer_size = user_buffer_size;
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
+ sample_rate, bits_per_sample, buffer_size);
+}
+
// Creates an AlsaPcmOutputStream. The device auto-selects unless the
// kAlsaOutputDevice command-line switch names one explicitly.
AudioOutputStream* AudioManagerLinux::MakeOutputStream(
    const AudioParameters& params) {
  std::string device_name = AlsaPcmOutputStream::kAutoSelectDevice;
  if (CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kAlsaOutputDevice)) {
    device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kAlsaOutputDevice);
  }
  return new AlsaPcmOutputStream(device_name, params, wrapper_.get(), this);
}
+
// Creates an AlsaPcmInputStream. The default device id maps to ALSA
// auto-selection, and the kAlsaInputDevice switch overrides either choice.
AudioInputStream* AudioManagerLinux::MakeInputStream(
    const AudioParameters& params, const std::string& device_id) {
  std::string device_name = (device_id == AudioManagerBase::kDefaultDeviceId) ?
      AlsaPcmInputStream::kAutoSelectDevice : device_id;
  if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kAlsaInputDevice)) {
    device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kAlsaInputDevice);
  }

  return new AlsaPcmInputStream(this, device_name, params, wrapper_.get());
}
+
// Picks the audio backend for Linux: CRAS when the kUseCras switch is set
// (and compiled in), then PulseAudio if it initializes, with plain ALSA as
// the fallback. The selection is recorded in the Media.LinuxAudioIO
// histogram.
AudioManager* CreateAudioManager() {
#if defined(USE_CRAS)
  if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kUseCras)) {
    UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kCras, kAudioIOMax);
    return new AudioManagerCras();
  }
#endif

#if defined(USE_PULSEAUDIO)
  // AudioManagerPulse::Create() returns NULL when Pulse is unavailable.
  AudioManager* manager = AudioManagerPulse::Create();
  if (manager) {
    UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kPulse, kAudioIOMax);
    return manager;
  }
#endif

  UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kAlsa, kAudioIOMax);
  return new AudioManagerLinux();
}
+
+} // namespace media
diff --git a/chromium/media/audio/linux/audio_manager_linux.h b/chromium/media/audio/linux/audio_manager_linux.h
new file mode 100644
index 00000000000..28abaa116e7
--- /dev/null
+++ b/chromium/media/audio/linux/audio_manager_linux.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_LINUX_AUDIO_MANAGER_LINUX_H_
+#define MEDIA_AUDIO_LINUX_AUDIO_MANAGER_LINUX_H_
+
+#include <string>
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/thread.h"
+#include "media/audio/audio_manager_base.h"
+
+namespace media {
+
+class AlsaWrapper;
+
// AudioManager implementation that talks to ALSA directly (through an
// injectable AlsaWrapper). Selected by CreateAudioManager() when neither
// CRAS nor PulseAudio is in use.
class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
 public:
  AudioManagerLinux();

  // Launches the desktop environment's sound-settings application.
  static void ShowLinuxAudioInputSettings();

  // Implementation of AudioManager.
  virtual bool HasAudioOutputDevices() OVERRIDE;
  virtual bool HasAudioInputDevices() OVERRIDE;
  virtual void ShowAudioInputSettings() OVERRIDE;
  virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
      OVERRIDE;
  virtual AudioParameters GetInputStreamParameters(
      const std::string& device_id) OVERRIDE;

  // Implementation of AudioManagerBase.
  virtual AudioOutputStream* MakeLinearOutputStream(
      const AudioParameters& params) OVERRIDE;
  virtual AudioOutputStream* MakeLowLatencyOutputStream(
      const AudioParameters& params,
      const std::string& input_device_id) OVERRIDE;
  virtual AudioInputStream* MakeLinearInputStream(
      const AudioParameters& params, const std::string& device_id) OVERRIDE;
  virtual AudioInputStream* MakeLowLatencyInputStream(
      const AudioParameters& params, const std::string& device_id) OVERRIDE;

 protected:
  virtual ~AudioManagerLinux();

  virtual AudioParameters GetPreferredOutputStreamParameters(
      const AudioParameters& input_params) OVERRIDE;

 private:
  enum StreamType {
    kStreamPlayback = 0,
    kStreamCapture,
  };

  // Gets a list of available ALSA input devices.
  void GetAlsaAudioInputDevices(media::AudioDeviceNames* device_names);

  // Gets the ALSA devices' names and ids.
  void GetAlsaDevicesInfo(void** hint, media::AudioDeviceNames* device_names);

  // Checks if the specific ALSA device is available.
  bool IsAlsaDeviceAvailable(const char* device_name);

  // Returns true if a device is present for the given stream type.
  bool HasAnyAlsaAudioDevice(StreamType stream);

  // Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
  AudioOutputStream* MakeOutputStream(const AudioParameters& params);

  // Called by MakeLinearInputStream and MakeLowLatencyInputStream.
  AudioInputStream* MakeInputStream(const AudioParameters& params,
                                    const std::string& device_id);

  // Shared ALSA entry point, injected so tests can mock the library.
  scoped_ptr<AlsaWrapper> wrapper_;

  DISALLOW_COPY_AND_ASSIGN(AudioManagerLinux);
};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_LINUX_AUDIO_MANAGER_LINUX_H_
diff --git a/chromium/media/audio/mac/aggregate_device_manager.cc b/chromium/media/audio/mac/aggregate_device_manager.cc
new file mode 100644
index 00000000000..c7f323322e7
--- /dev/null
+++ b/chromium/media/audio/mac/aggregate_device_manager.cc
@@ -0,0 +1,371 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/aggregate_device_manager.h"
+
+#include <CoreAudio/AudioHardware.h>
+#include <string>
+
+#include "base/mac/mac_logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/mac/audio_manager_mac.h"
+
+using base::ScopedCFTypeRef;
+
+namespace media {
+
+AggregateDeviceManager::AggregateDeviceManager()
+ : plugin_id_(kAudioObjectUnknown),
+ input_device_(kAudioDeviceUnknown),
+ output_device_(kAudioDeviceUnknown),
+ aggregate_device_(kAudioObjectUnknown) {
+}
+
+AggregateDeviceManager::~AggregateDeviceManager() {
+ DestroyAggregateDevice();
+}
+
+AudioDeviceID AggregateDeviceManager::GetDefaultAggregateDevice() {
+ AudioDeviceID current_input_device;
+ AudioDeviceID current_output_device;
+ AudioManagerMac::GetDefaultInputDevice(&current_input_device);
+ AudioManagerMac::GetDefaultOutputDevice(&current_output_device);
+
+ if (AudioManagerMac::HardwareSampleRateForDevice(current_input_device) !=
+ AudioManagerMac::HardwareSampleRateForDevice(current_output_device)) {
+ // TODO(crogers): with some extra work we can make aggregate devices work
+ // if the clock domains are the same but the sample rates differ.
+ // For now we fall back to the synchronized path.
+ return kAudioDeviceUnknown;
+ }
+
+ // Use a lazily created aggregate device if it's already available
+ // and still appropriate.
+ if (aggregate_device_ != kAudioObjectUnknown) {
+ // TODO(crogers): handle default device changes for synchronized I/O.
+ // For now, we check to make sure the default devices haven't changed
+ // since we lazily created the aggregate device.
+ if (current_input_device == input_device_ &&
+ current_output_device == output_device_)
+ return aggregate_device_;
+
+ // For now, once lazily created don't attempt to create another
+ // aggregate device.
+ return kAudioDeviceUnknown;
+ }
+
+ input_device_ = current_input_device;
+ output_device_ = current_output_device;
+
+ // Only create an aggregate device if the clock domains match.
+ UInt32 input_clockdomain = GetClockDomain(input_device_);
+ UInt32 output_clockdomain = GetClockDomain(output_device_);
+ DVLOG(1) << "input_clockdomain: " << input_clockdomain;
+ DVLOG(1) << "output_clockdomain: " << output_clockdomain;
+
+ if (input_clockdomain == 0 || input_clockdomain != output_clockdomain)
+ return kAudioDeviceUnknown;
+
+ OSStatus result = CreateAggregateDevice(
+ input_device_,
+ output_device_,
+ &aggregate_device_);
+ if (result != noErr)
+ DestroyAggregateDevice();
+
+ return aggregate_device_;
+}
+
+CFStringRef AggregateDeviceManager::GetDeviceUID(AudioDeviceID id) {
+ static const AudioObjectPropertyAddress kDeviceUIDAddress = {
+ kAudioDevicePropertyDeviceUID,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ // As stated in the CoreAudio header (AudioHardwareBase.h),
+ // the caller is responsible for releasing the device_UID.
+ CFStringRef device_UID;
+ UInt32 size = sizeof(device_UID);
+ OSStatus result = AudioObjectGetPropertyData(
+ id,
+ &kDeviceUIDAddress,
+ 0,
+ 0,
+ &size,
+ &device_UID);
+
+ return (result == noErr) ? device_UID : NULL;
+}
+
+void AggregateDeviceManager::GetDeviceName(
+ AudioDeviceID id, char* name, UInt32 size) {
+ static const AudioObjectPropertyAddress kDeviceNameAddress = {
+ kAudioDevicePropertyDeviceName,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ OSStatus result = AudioObjectGetPropertyData(
+ id,
+ &kDeviceNameAddress,
+ 0,
+ 0,
+ &size,
+ name);
+
+ if (result != noErr && size > 0)
+ name[0] = 0;
+}
+
+UInt32 AggregateDeviceManager::GetClockDomain(AudioDeviceID device_id) {
+ static const AudioObjectPropertyAddress kClockDomainAddress = {
+ kAudioDevicePropertyClockDomain,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 clockdomain = 0;
+ UInt32 size = sizeof(UInt32);
+ OSStatus result = AudioObjectGetPropertyData(
+ device_id,
+ &kClockDomainAddress,
+ 0,
+ 0,
+ &size,
+ &clockdomain);
+
+ return (result == noErr) ? clockdomain : 0;
+}
+
+OSStatus AggregateDeviceManager::GetPluginID(AudioObjectID* id) {
+ DCHECK(id);
+
+ // Get the audio hardware plugin.
+ CFStringRef bundle_name = CFSTR("com.apple.audio.CoreAudio");
+
+ AudioValueTranslation plugin_translation;
+ plugin_translation.mInputData = &bundle_name;
+ plugin_translation.mInputDataSize = sizeof(bundle_name);
+ plugin_translation.mOutputData = id;
+ plugin_translation.mOutputDataSize = sizeof(*id);
+
+ static const AudioObjectPropertyAddress kPlugInAddress = {
+ kAudioHardwarePropertyPlugInForBundleID,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 size = sizeof(plugin_translation);
+ OSStatus result = AudioObjectGetPropertyData(
+ kAudioObjectSystemObject,
+ &kPlugInAddress,
+ 0,
+ 0,
+ &size,
+ &plugin_translation);
+
+ DVLOG(1) << "CoreAudio plugin ID: " << *id;
+
+ return result;
+}
+
+CFMutableDictionaryRef
+AggregateDeviceManager::CreateAggregateDeviceDictionary(
+ AudioDeviceID input_id,
+ AudioDeviceID output_id) {
+ CFMutableDictionaryRef aggregate_device_dict = CFDictionaryCreateMutable(
+ NULL,
+ 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+ if (!aggregate_device_dict)
+ return NULL;
+
+ const CFStringRef kAggregateDeviceName =
+ CFSTR("ChromeAggregateAudioDevice");
+ const CFStringRef kAggregateDeviceUID =
+ CFSTR("com.google.chrome.AggregateAudioDevice");
+
+ // Add name and UID of the device to the dictionary.
+ CFDictionaryAddValue(
+ aggregate_device_dict,
+ CFSTR(kAudioAggregateDeviceNameKey),
+ kAggregateDeviceName);
+ CFDictionaryAddValue(
+ aggregate_device_dict,
+ CFSTR(kAudioAggregateDeviceUIDKey),
+ kAggregateDeviceUID);
+
+ // Add a "private aggregate key" to the dictionary.
+ // The 1 value means that the created aggregate device will
+ // only be accessible from the process that created it, and
+ // won't be visible to outside processes.
+ int value = 1;
+ ScopedCFTypeRef<CFNumberRef> aggregate_device_number(CFNumberCreate(
+ NULL,
+ kCFNumberIntType,
+ &value));
+ CFDictionaryAddValue(
+ aggregate_device_dict,
+ CFSTR(kAudioAggregateDeviceIsPrivateKey),
+ aggregate_device_number);
+
+ return aggregate_device_dict;
+}
+
+CFMutableArrayRef
+AggregateDeviceManager::CreateSubDeviceArray(
+ CFStringRef input_device_UID, CFStringRef output_device_UID) {
+ CFMutableArrayRef sub_devices_array = CFArrayCreateMutable(
+ NULL,
+ 0,
+ &kCFTypeArrayCallBacks);
+
+ CFArrayAppendValue(sub_devices_array, input_device_UID);
+ CFArrayAppendValue(sub_devices_array, output_device_UID);
+
+ return sub_devices_array;
+}
+
+OSStatus AggregateDeviceManager::CreateAggregateDevice(
+ AudioDeviceID input_id,
+ AudioDeviceID output_id,
+ AudioDeviceID* aggregate_device) {
+ DCHECK(aggregate_device);
+
+ const size_t kMaxDeviceNameLength = 256;
+
+ scoped_ptr<char[]> input_device_name(new char[kMaxDeviceNameLength]);
+ GetDeviceName(
+ input_id,
+ input_device_name.get(),
+ sizeof(input_device_name));
+ DVLOG(1) << "Input device: \n" << input_device_name;
+
+ scoped_ptr<char[]> output_device_name(new char[kMaxDeviceNameLength]);
+ GetDeviceName(
+ output_id,
+ output_device_name.get(),
+ sizeof(output_device_name));
+ DVLOG(1) << "Output device: \n" << output_device_name;
+
+ OSStatus result = GetPluginID(&plugin_id_);
+ if (result != noErr)
+ return result;
+
+ // Create a dictionary for the aggregate device.
+ ScopedCFTypeRef<CFMutableDictionaryRef> aggregate_device_dict(
+ CreateAggregateDeviceDictionary(input_id, output_id));
+ if (!aggregate_device_dict)
+ return -1;
+
+ // Create the aggregate device.
+ static const AudioObjectPropertyAddress kCreateAggregateDeviceAddress = {
+ kAudioPlugInCreateAggregateDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 size = sizeof(*aggregate_device);
+ result = AudioObjectGetPropertyData(
+ plugin_id_,
+ &kCreateAggregateDeviceAddress,
+ sizeof(aggregate_device_dict),
+ &aggregate_device_dict,
+ &size,
+ aggregate_device);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error creating aggregate audio device!";
+ return result;
+ }
+
+ // Set the sub-devices for the aggregate device.
+ // In this case we use two: the input and output devices.
+
+ ScopedCFTypeRef<CFStringRef> input_device_UID(GetDeviceUID(input_id));
+ ScopedCFTypeRef<CFStringRef> output_device_UID(GetDeviceUID(output_id));
+ if (!input_device_UID || !output_device_UID) {
+ DLOG(ERROR) << "Error getting audio device UID strings.";
+ return -1;
+ }
+
+ ScopedCFTypeRef<CFMutableArrayRef> sub_devices_array(
+ CreateSubDeviceArray(input_device_UID, output_device_UID));
+ if (sub_devices_array == NULL) {
+ DLOG(ERROR) << "Error creating sub-devices array.";
+ return -1;
+ }
+
+ static const AudioObjectPropertyAddress kSetSubDevicesAddress = {
+ kAudioAggregateDevicePropertyFullSubDeviceList,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ size = sizeof(CFMutableArrayRef);
+ result = AudioObjectSetPropertyData(
+ *aggregate_device,
+ &kSetSubDevicesAddress,
+ 0,
+ NULL,
+ size,
+ &sub_devices_array);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error setting aggregate audio device sub-devices!";
+ return result;
+ }
+
+ // Use the input device as the master device.
+ static const AudioObjectPropertyAddress kSetMasterDeviceAddress = {
+ kAudioAggregateDevicePropertyMasterSubDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ size = sizeof(CFStringRef);
+ result = AudioObjectSetPropertyData(
+ *aggregate_device,
+ &kSetMasterDeviceAddress,
+ 0,
+ NULL,
+ size,
+ &input_device_UID);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error setting aggregate audio device master device!";
+ return result;
+ }
+
+ DVLOG(1) << "New aggregate device: " << *aggregate_device;
+ return noErr;
+}
+
+void AggregateDeviceManager::DestroyAggregateDevice() {
+ if (aggregate_device_ == kAudioObjectUnknown)
+ return;
+
+ static const AudioObjectPropertyAddress kDestroyAddress = {
+ kAudioPlugInDestroyAggregateDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 size = sizeof(aggregate_device_);
+ OSStatus result = AudioObjectGetPropertyData(
+ plugin_id_,
+ &kDestroyAddress,
+ 0,
+ NULL,
+ &size,
+ &aggregate_device_);
+ if (result != noErr) {
+ DLOG(ERROR) << "Error destroying aggregate audio device!";
+ return;
+ }
+
+ aggregate_device_ = kAudioObjectUnknown;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/aggregate_device_manager.h b/chromium/media/audio/mac/aggregate_device_manager.h
new file mode 100644
index 00000000000..7b8b71ff655
--- /dev/null
+++ b/chromium/media/audio/mac/aggregate_device_manager.h
@@ -0,0 +1,58 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
+#define MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
+
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class MEDIA_EXPORT AggregateDeviceManager {
+ public:
+ AggregateDeviceManager();
+ ~AggregateDeviceManager();
+
+ // Lazily creates an aggregate device based on the default
+ // input and output devices.
+ // It will either return a valid device or kAudioDeviceUnknown
+ // if the default devices are not suitable for aggregate devices.
+ AudioDeviceID GetDefaultAggregateDevice();
+
+ private:
+ // The caller is responsible for releasing the CFStringRef.
+ static CFStringRef GetDeviceUID(AudioDeviceID id);
+
+ static void GetDeviceName(AudioDeviceID id, char* name, UInt32 size);
+ static UInt32 GetClockDomain(AudioDeviceID device_id);
+ static OSStatus GetPluginID(AudioObjectID* id);
+
+ CFMutableDictionaryRef CreateAggregateDeviceDictionary(
+ AudioDeviceID input_id,
+ AudioDeviceID output_id);
+
+ CFMutableArrayRef CreateSubDeviceArray(CFStringRef input_device_UID,
+ CFStringRef output_device_UID);
+
+ OSStatus CreateAggregateDevice(AudioDeviceID input_id,
+ AudioDeviceID output_id,
+ AudioDeviceID* aggregate_device);
+ void DestroyAggregateDevice();
+
+ AudioObjectID plugin_id_;
+ AudioDeviceID input_device_;
+ AudioDeviceID output_device_;
+
+ AudioDeviceID aggregate_device_;
+
+ DISALLOW_COPY_AND_ASSIGN(AggregateDeviceManager);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
diff --git a/chromium/media/audio/mac/audio_auhal_mac.cc b/chromium/media/audio/mac/audio_auhal_mac.cc
new file mode 100644
index 00000000000..051b709c31d
--- /dev/null
+++ b/chromium/media/audio/mac/audio_auhal_mac.cc
@@ -0,0 +1,542 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_auhal_mac.h"
+
+#include <CoreServices/CoreServices.h>
+
+#include "base/basictypes.h"
+#include "base/command_line.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/media_switches.h"
+
+namespace media {
+
+static std::ostream& operator<<(std::ostream& os,
+ const AudioStreamBasicDescription& format) {
+ os << "sample rate : " << format.mSampleRate << std::endl
+ << "format ID : " << format.mFormatID << std::endl
+ << "format flags : " << format.mFormatFlags << std::endl
+ << "bytes per packet : " << format.mBytesPerPacket << std::endl
+ << "frames per packet : " << format.mFramesPerPacket << std::endl
+ << "bytes per frame : " << format.mBytesPerFrame << std::endl
+ << "channels per frame: " << format.mChannelsPerFrame << std::endl
+ << "bits per channel : " << format.mBitsPerChannel;
+ return os;
+}
+
+static void ZeroBufferList(AudioBufferList* buffer_list) {
+ for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i) {
+ memset(buffer_list->mBuffers[i].mData,
+ 0,
+ buffer_list->mBuffers[i].mDataByteSize);
+ }
+}
+
+static void WrapBufferList(AudioBufferList* buffer_list,
+ AudioBus* bus,
+ int frames) {
+ DCHECK(buffer_list);
+ DCHECK(bus);
+ const int channels = bus->channels();
+ const int buffer_list_channels = buffer_list->mNumberBuffers;
+ DCHECK_EQ(channels, buffer_list_channels);
+
+ // Copy pointers from AudioBufferList.
+ for (int i = 0; i < channels; ++i) {
+ bus->SetChannelData(
+ i, static_cast<float*>(buffer_list->mBuffers[i].mData));
+ }
+
+ // Finally set the actual length.
+ bus->set_frames(frames);
+}
+
+AUHALStream::AUHALStream(
+ AudioManagerMac* manager,
+ const AudioParameters& params,
+ AudioDeviceID device)
+ : manager_(manager),
+ params_(params),
+ input_channels_(params_.input_channels()),
+ output_channels_(params_.channels()),
+ number_of_frames_(params_.frames_per_buffer()),
+ source_(NULL),
+ device_(device),
+ audio_unit_(0),
+ volume_(1),
+ hardware_latency_frames_(0),
+ stopped_(false),
+ notified_for_possible_device_change_(false),
+ input_buffer_list_(NULL) {
+ // We must have a manager.
+ DCHECK(manager_);
+
+ VLOG(1) << "AUHALStream::AUHALStream()";
+ VLOG(1) << "Device: " << device;
+ VLOG(1) << "Input channels: " << input_channels_;
+ VLOG(1) << "Output channels: " << output_channels_;
+ VLOG(1) << "Sample rate: " << params_.sample_rate();
+ VLOG(1) << "Buffer size: " << number_of_frames_;
+}
+
+AUHALStream::~AUHALStream() {
+}
+
+bool AUHALStream::Open() {
+ // Get the total number of input and output channels that the
+ // hardware supports.
+ int device_input_channels;
+ bool got_input_channels = AudioManagerMac::GetDeviceChannels(
+ device_,
+ kAudioDevicePropertyScopeInput,
+ &device_input_channels);
+
+ int device_output_channels;
+ bool got_output_channels = AudioManagerMac::GetDeviceChannels(
+ device_,
+ kAudioDevicePropertyScopeOutput,
+ &device_output_channels);
+
+ // Sanity check the requested I/O channels.
+ if (!got_input_channels ||
+ input_channels_ < 0 || input_channels_ > device_input_channels) {
+ LOG(ERROR) << "AudioDevice does not support requested input channels.";
+ return false;
+ }
+
+ if (!got_output_channels ||
+ output_channels_ <= 0 || output_channels_ > device_output_channels) {
+ LOG(ERROR) << "AudioDevice does not support requested output channels.";
+ return false;
+ }
+
+ // The requested sample-rate must match the hardware sample-rate.
+ int sample_rate = AudioManagerMac::HardwareSampleRateForDevice(device_);
+
+ if (sample_rate != params_.sample_rate()) {
+ LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
+ << " must match the hardware sample-rate: " << sample_rate;
+ return false;
+ }
+
+ CreateIOBusses();
+
+ bool configured = ConfigureAUHAL();
+ if (configured)
+ hardware_latency_frames_ = GetHardwareLatency();
+
+ return configured;
+}
+
+void AUHALStream::Close() {
+ if (input_buffer_list_) {
+ input_buffer_list_storage_.reset();
+ input_buffer_list_ = NULL;
+ input_bus_.reset(NULL);
+ output_bus_.reset(NULL);
+ }
+
+ if (audio_unit_) {
+ AudioUnitUninitialize(audio_unit_);
+ AudioComponentInstanceDispose(audio_unit_);
+ }
+
+ // Inform the audio manager that we have been closed. This will cause our
+ // destruction.
+ manager_->ReleaseOutputStream(this);
+}
+
+void AUHALStream::Start(AudioSourceCallback* callback) {
+ DCHECK(callback);
+ if (!audio_unit_) {
+ DLOG(ERROR) << "Open() has not been called successfully";
+ return;
+ }
+
+ stopped_ = false;
+ notified_for_possible_device_change_ = false;
+ {
+ base::AutoLock auto_lock(source_lock_);
+ source_ = callback;
+ }
+
+ AudioOutputUnitStart(audio_unit_);
+}
+
+void AUHALStream::Stop() {
+ if (stopped_)
+ return;
+
+ AudioOutputUnitStop(audio_unit_);
+
+ base::AutoLock auto_lock(source_lock_);
+ source_ = NULL;
+ stopped_ = true;
+}
+
+void AUHALStream::SetVolume(double volume) {
+ volume_ = static_cast<float>(volume);
+}
+
+void AUHALStream::GetVolume(double* volume) {
+ *volume = volume_;
+}
+
+// Pulls on our provider to get rendered audio stream.
+// Note to future hackers of this function: Do not add locks which can
+// be contended in the middle of stream processing here (starting and stopping
+// the stream are ok) because this is running on a real-time thread.
+OSStatus AUHALStream::Render(
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* output_time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ TRACE_EVENT0("audio", "AUHALStream::Render");
+
+ if (number_of_frames != number_of_frames_) {
+ // This can happen if we've suddenly changed sample-rates.
+ // The stream should be stopping very soon.
+ //
+ // Unfortunately AUAudioInputStream and AUHALStream share the frame
+ // size set by kAudioDevicePropertyBufferFrameSize above on a per process
+ // basis. What this means is that the |number_of_frames| value may be
+ // larger or smaller than the value set during ConfigureAUHAL().
+ // In this case either audio input or audio output will be broken,
+ // so just output silence.
+ ZeroBufferList(io_data);
+
+ // In case we missed a device notification, notify the AudioManager that the
+ // device has changed. HandleDeviceChanges() will check to make sure the
+ // device has actually changed before taking any action.
+ if (!notified_for_possible_device_change_) {
+ notified_for_possible_device_change_ = true;
+ manager_->GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerMac::HandleDeviceChanges, base::Unretained(manager_)));
+ }
+
+ return noErr;
+ }
+
+ if (input_channels_ > 0 && input_buffer_list_) {
+ // Get the input data. |input_buffer_list_| is wrapped
+ // to point to the data allocated in |input_bus_|.
+ OSStatus result = AudioUnitRender(
+ audio_unit_,
+ flags,
+ output_time_stamp,
+ 1,
+ number_of_frames,
+ input_buffer_list_);
+ if (result != noErr)
+ ZeroBufferList(input_buffer_list_);
+ }
+
+ // Make |output_bus_| wrap the output AudioBufferList.
+ WrapBufferList(io_data, output_bus_.get(), number_of_frames);
+
+ // Update the playout latency.
+ double playout_latency_frames = GetPlayoutLatency(output_time_stamp);
+
+ uint32 hardware_pending_bytes = static_cast<uint32>
+ ((playout_latency_frames + 0.5) * output_format_.mBytesPerFrame);
+
+ {
+ // Render() shouldn't be called except between AudioOutputUnitStart() and
+ // AudioOutputUnitStop() calls, but crash reports have shown otherwise:
+ // http://crbug.com/178765. We use |source_lock_| to prevent races and
+ // crashes in Render() when |source_| is cleared.
+ base::AutoLock auto_lock(source_lock_);
+ if (!source_) {
+ ZeroBufferList(io_data);
+ return noErr;
+ }
+
+ // Supply the input data and render the output data.
+ source_->OnMoreIOData(
+ input_bus_.get(),
+ output_bus_.get(),
+ AudioBuffersState(0, hardware_pending_bytes));
+ output_bus_->Scale(volume_);
+ }
+
+ return noErr;
+}
+
+// AUHAL callback.
+OSStatus AUHALStream::InputProc(
+ void* user_data,
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* output_time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ // Dispatch to our class method.
+ AUHALStream* audio_output =
+ static_cast<AUHALStream*>(user_data);
+ if (!audio_output)
+ return -1;
+
+ return audio_output->Render(
+ flags,
+ output_time_stamp,
+ bus_number,
+ number_of_frames,
+ io_data);
+}
+
+double AUHALStream::GetHardwareLatency() {
+ if (!audio_unit_ || device_ == kAudioObjectUnknown) {
+ DLOG(WARNING) << "AudioUnit is NULL or device ID is unknown";
+ return 0.0;
+ }
+
+ // Get audio unit latency.
+ Float64 audio_unit_latency_sec = 0.0;
+ UInt32 size = sizeof(audio_unit_latency_sec);
+ OSStatus result = AudioUnitGetProperty(
+ audio_unit_,
+ kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global,
+ 0,
+ &audio_unit_latency_sec,
+ &size);
+ if (result != noErr) {
+ OSSTATUS_DLOG(WARNING, result) << "Could not get AudioUnit latency";
+ return 0.0;
+ }
+
+ // Get output audio device latency.
+ static const AudioObjectPropertyAddress property_address = {
+ kAudioDevicePropertyLatency,
+ kAudioDevicePropertyScopeOutput,
+ kAudioObjectPropertyElementMaster
+ };
+
+ UInt32 device_latency_frames = 0;
+ size = sizeof(device_latency_frames);
+ result = AudioObjectGetPropertyData(
+ device_,
+ &property_address,
+ 0,
+ NULL,
+ &size,
+ &device_latency_frames);
+ if (result != noErr) {
+ OSSTATUS_DLOG(WARNING, result) << "Could not get audio device latency";
+ return 0.0;
+ }
+
+ return static_cast<double>((audio_unit_latency_sec *
+ output_format_.mSampleRate) + device_latency_frames);
+}
+
+double AUHALStream::GetPlayoutLatency(
+ const AudioTimeStamp* output_time_stamp) {
+ // Ensure mHostTime is valid.
+ if ((output_time_stamp->mFlags & kAudioTimeStampHostTimeValid) == 0)
+ return 0;
+
+ // Get the delay between the moment getting the callback and the scheduled
+ // time stamp that tells when the data is going to be played out.
+ UInt64 output_time_ns = AudioConvertHostTimeToNanos(
+ output_time_stamp->mHostTime);
+ UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+
+ // Prevent overflow leading to huge delay information; occurs regularly on
+ // the bots, probably less so in the wild.
+ if (now_ns > output_time_ns)
+ return 0;
+
+ double delay_frames = static_cast<double>
+ (1e-9 * (output_time_ns - now_ns) * output_format_.mSampleRate);
+
+ return (delay_frames + hardware_latency_frames_);
+}
+
+void AUHALStream::CreateIOBusses() {
+ if (input_channels_ > 0) {
+ // Allocate storage for the AudioBufferList used for the
+ // input data from the input AudioUnit.
+ // We allocate enough space with one AudioBuffer per channel.
+ size_t buffer_list_size = offsetof(AudioBufferList, mBuffers[0]) +
+ (sizeof(AudioBuffer) * input_channels_);
+ input_buffer_list_storage_.reset(new uint8[buffer_list_size]);
+
+ input_buffer_list_ =
+ reinterpret_cast<AudioBufferList*>(input_buffer_list_storage_.get());
+ input_buffer_list_->mNumberBuffers = input_channels_;
+
+ // |input_bus_| allocates the storage for the PCM input data.
+ input_bus_ = AudioBus::Create(input_channels_, number_of_frames_);
+
+ // Make the AudioBufferList point to the memory in |input_bus_|.
+ UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
+ for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
+ input_buffer_list_->mBuffers[i].mNumberChannels = 1;
+ input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
+ input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
+ }
+ }
+
+ // The output bus will wrap the AudioBufferList given to us in
+ // the Render() callback.
+ DCHECK_GT(output_channels_, 0);
+ output_bus_ = AudioBus::CreateWrapper(output_channels_);
+}
+
+bool AUHALStream::EnableIO(bool enable, UInt32 scope) {
+ // See Apple technote for details about the EnableIO property.
+ // Note that we use bus 1 for input and bus 0 for output:
+ // http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
+ UInt32 enable_IO = enable ? 1 : 0;
+ OSStatus result = AudioUnitSetProperty(
+ audio_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ scope,
+ (scope == kAudioUnitScope_Input) ? 1 : 0,
+ &enable_IO,
+ sizeof(enable_IO));
+ return (result == noErr);
+}
+
+bool AUHALStream::SetStreamFormat(
+ AudioStreamBasicDescription* desc,
+ int channels,
+ UInt32 scope,
+ UInt32 element) {
+ DCHECK(desc);
+ AudioStreamBasicDescription& format = *desc;
+
+ format.mSampleRate = params_.sample_rate();
+ format.mFormatID = kAudioFormatLinearPCM;
+ format.mFormatFlags = kAudioFormatFlagsNativeFloatPacked |
+ kLinearPCMFormatFlagIsNonInterleaved;
+ format.mBytesPerPacket = sizeof(Float32);
+ format.mFramesPerPacket = 1;
+ format.mBytesPerFrame = sizeof(Float32);
+ format.mChannelsPerFrame = channels;
+ format.mBitsPerChannel = 32;
+ format.mReserved = 0;
+
+ OSStatus result = AudioUnitSetProperty(
+ audio_unit_,
+ kAudioUnitProperty_StreamFormat,
+ scope,
+ element,
+ &format,
+ sizeof(format));
+ return (result == noErr);
+}
+
+bool AUHALStream::ConfigureAUHAL() {
+ if (device_ == kAudioObjectUnknown ||
+ (input_channels_ == 0 && output_channels_ == 0))
+ return false;
+
+ AudioComponentDescription desc = {
+ kAudioUnitType_Output,
+ kAudioUnitSubType_HALOutput,
+ kAudioUnitManufacturer_Apple,
+ 0,
+ 0
+ };
+ AudioComponent comp = AudioComponentFindNext(0, &desc);
+ if (!comp)
+ return false;
+
+ OSStatus result = AudioComponentInstanceNew(comp, &audio_unit_);
+ if (result != noErr) {
+ OSSTATUS_DLOG(WARNING, result) << "AudioComponentInstanceNew() failed.";
+ return false;
+ }
+
+ // Enable input and output as appropriate.
+ if (!EnableIO(input_channels_ > 0, kAudioUnitScope_Input))
+ return false;
+ if (!EnableIO(output_channels_ > 0, kAudioUnitScope_Output))
+ return false;
+
+ // Set the device to be used with the AUHAL AudioUnit.
+ result = AudioUnitSetProperty(
+ audio_unit_,
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &device_,
+ sizeof(AudioDeviceID));
+ if (result != noErr)
+ return false;
+
+ // Set stream formats.
+ // See Apple's tech note for details on the peculiar way that
+ // inputs and outputs are handled in the AUHAL concerning scope and bus
+ // (element) numbers:
+ // http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
+
+ if (input_channels_ > 0) {
+ if (!SetStreamFormat(&input_format_,
+ input_channels_,
+ kAudioUnitScope_Output,
+ 1))
+ return false;
+ }
+
+ if (output_channels_ > 0) {
+ if (!SetStreamFormat(&output_format_,
+ output_channels_,
+ kAudioUnitScope_Input,
+ 0))
+ return false;
+ }
+
+ // Set the buffer frame size.
+ // WARNING: Setting this value changes the frame size for all audio units in
+ // the current process. It's imperative that the input and output frame sizes
+ // be the same as the frames_per_buffer() returned by
+ // GetDefaultOutputStreamParameters().
+ // See http://crbug.com/154352 for details.
+ UInt32 buffer_size = number_of_frames_;
+ result = AudioUnitSetProperty(
+ audio_unit_,
+ kAudioDevicePropertyBufferFrameSize,
+ kAudioUnitScope_Output,
+ 0,
+ &buffer_size,
+ sizeof(buffer_size));
+ if (result != noErr) {
+ OSSTATUS_DLOG(WARNING, result)
+ << "AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
+ return false;
+ }
+
+ // Setup callback.
+ AURenderCallbackStruct callback;
+ callback.inputProc = InputProc;
+ callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ audio_unit_,
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input,
+ 0,
+ &callback,
+ sizeof(callback));
+ if (result != noErr)
+ return false;
+
+ result = AudioUnitInitialize(audio_unit_);
+ if (result != noErr) {
+ OSSTATUS_DLOG(WARNING, result) << "AudioUnitInitialize() failed.";
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_auhal_mac.h b/chromium/media/audio/mac/audio_auhal_mac.h
new file mode 100644
index 00000000000..66feb8d0d11
--- /dev/null
+++ b/chromium/media/audio/mac/audio_auhal_mac.h
@@ -0,0 +1,167 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Implementation notes:
+//
+// - It is recommended to first acquire the native sample rate of the default
+// output device and then use the same rate when creating this object.
+// Use AudioManagerMac::HardwareSampleRate() to retrieve the sample rate.
+// - Calling Close() also leads to self destruction.
+// - The latency consists of two parts:
+// 1) Hardware latency, which includes Audio Unit latency, audio device
+// latency;
+// 2) The delay between the moment getting the callback and the scheduled time
+// stamp that tells when the data is going to be played out.
+//
+#ifndef MEDIA_AUDIO_MAC_AUDIO_AUHAL_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_AUHAL_MAC_H_
+
+#include <AudioUnit/AudioUnit.h>
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerMac;
+
+// Implementation of AudioOutputStream for Mac OS X using the
+// AUHAL Audio Unit present in OS 10.4 and later.
+// It is useful for low-latency output with optional synchronized
+// input.
+//
+// Overview of operation:
+// 1) An object of AUHALStream is created by the AudioManager
+// factory: audio_man->MakeAudioStream().
+// 2) Next some thread will call Open(), at that point the underlying
+// AUHAL Audio Unit is created and configured to use the |device|.
+// 3) Then some thread will call Start(source).
+// Then the AUHAL is started which creates its own thread which
+// periodically will call the source for more data as buffers are being
+// consumed.
+// 4) At some point some thread will call Stop(), which we handle by directly
+// stopping the default output Audio Unit.
+// 5) The same thread that called stop will call Close() where we clean up
+// and notify the audio manager, which likely will destroy this object.
+
+class AUHALStream : public AudioOutputStream {
+ public:
+ // |manager| creates this object.
+ // |device| is the CoreAudio device to use for the stream.
+ // It will often be the default output device.
+ AUHALStream(AudioManagerMac* manager,
+ const AudioParameters& params,
+ AudioDeviceID device);
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioOutputStream::Close().
+ virtual ~AUHALStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ private:
+ // AUHAL callback.
+ static OSStatus InputProc(void* user_data,
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ OSStatus Render(AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* output_time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ // Helper method to enable input and output.
+ bool EnableIO(bool enable, UInt32 scope);
+
+ // Sets the stream format on the AUHAL to PCM Float32 non-interleaved
+ // for the given number of channels on the given scope and element.
+ // The created stream description will be stored in |desc|.
+ bool SetStreamFormat(AudioStreamBasicDescription* desc,
+ int channels,
+ UInt32 scope,
+ UInt32 element);
+
+ // Creates the AUHAL, sets its stream format, buffer-size, etc.
+ bool ConfigureAUHAL();
+
+ // Creates the input and output busses.
+ void CreateIOBusses();
+
+ // Gets the fixed playout device hardware latency and stores it. Returns 0
+ // if not available.
+ double GetHardwareLatency();
+
+ // Gets the current playout latency value.
+ double GetPlayoutLatency(const AudioTimeStamp* output_time_stamp);
+
+ // Our creator, the audio manager needs to be notified when we close.
+ AudioManagerMac* manager_;
+
+ AudioParameters params_;
+ // For convenience - same as in params_.
+ int input_channels_;
+ int output_channels_;
+
+ // Buffer-size.
+ size_t number_of_frames_;
+
+ // Pointer to the object that will provide the audio samples.
+ AudioSourceCallback* source_;
+
+ // Protects |source_|. Necessary since Render() calls seem to be in flight
+ // when |audio_unit_| is supposedly stopped. See http://crbug.com/178765.
+ base::Lock source_lock_;
+
+ // Holds the stream format details such as bitrate.
+ AudioStreamBasicDescription input_format_;
+ AudioStreamBasicDescription output_format_;
+
+ // The audio device to use with the AUHAL.
+ // We can potentially handle both input and output with this device.
+ AudioDeviceID device_;
+
+ // The AUHAL Audio Unit which talks to |device_|.
+ AudioUnit audio_unit_;
+
+ // Volume level from 0 to 1.
+ float volume_;
+
+ // Fixed playout hardware latency in frames.
+ double hardware_latency_frames_;
+
+ // The flag used to stop the streaming.
+ bool stopped_;
+
+ // The flag used to indicate if the AudioManager has been notified of a
+ // potential device change. Reset to false during Start().
+ bool notified_for_possible_device_change_;
+
+ // The input AudioUnit renders its data here.
+ scoped_ptr<uint8[]> input_buffer_list_storage_;
+ AudioBufferList* input_buffer_list_;
+
+ // Holds the actual data for |input_buffer_list_|.
+ scoped_ptr<AudioBus> input_bus_;
+
+ // Container for retrieving data from AudioSourceCallback::OnMoreIOData().
+ scoped_ptr<AudioBus> output_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(AUHALStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_AUHAL_MAC_H_
diff --git a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
new file mode 100644
index 00000000000..b4cf8c64cc6
--- /dev/null
+++ b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
@@ -0,0 +1,219 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/simple_sources.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::DoAll;
+using ::testing::Field;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::NotNull;
+using ::testing::Return;
+
+static const int kBitsPerSample = 16;
+
+// TODO(crogers): Most of these tests can be made platform agnostic.
+// http://crbug.com/223242
+
+namespace media {
+
+class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+ MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+};
+
+// Convenience method which creates a default AudioOutputStream object but
+// also allows the user to modify the default settings.
+class AudioOutputStreamWrapper {
+ public:
+ explicit AudioOutputStreamWrapper()
+ : audio_man_(AudioManager::Create()),
+ format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
+ bits_per_sample_(kBitsPerSample) {
+ AudioParameters preferred_params =
+ audio_man_->GetDefaultOutputStreamParameters();
+ channel_layout_ = preferred_params.channel_layout();
+ channels_ = preferred_params.channels();
+ sample_rate_ = preferred_params.sample_rate();
+ samples_per_packet_ = preferred_params.frames_per_buffer();
+ }
+
+ ~AudioOutputStreamWrapper() {}
+
+ // Creates AudioOutputStream object using default parameters.
+ AudioOutputStream* Create() {
+ return CreateOutputStream();
+ }
+
+ // Creates AudioOutputStream object using non-default parameters where the
+ // frame size is modified.
+ AudioOutputStream* Create(int samples_per_packet) {
+ samples_per_packet_ = samples_per_packet;
+ return CreateOutputStream();
+ }
+
+ // Creates AudioOutputStream object using non-default parameters where the
+ // sample rate is modified.
+ AudioOutputStream* CreateWithSampleRate(int sample_rate) {
+ sample_rate_ = sample_rate;
+ return CreateOutputStream();
+ }
+
+ // Creates AudioOutputStream object using non-default parameters where the
+ // channel layout is modified.
+ AudioOutputStream* CreateWithLayout(ChannelLayout layout) {
+ channel_layout_ = layout;
+ channels_ = ChannelLayoutToChannelCount(layout);
+ return CreateOutputStream();
+ }
+
+ AudioParameters::Format format() const { return format_; }
+ int channels() const { return ChannelLayoutToChannelCount(channel_layout_); }
+ int bits_per_sample() const { return bits_per_sample_; }
+ int sample_rate() const { return sample_rate_; }
+ int samples_per_packet() const { return samples_per_packet_; }
+
+ bool CanRunAudioTests() {
+ return audio_man_->HasAudioOutputDevices();
+ }
+
+ private:
+ AudioOutputStream* CreateOutputStream() {
+ AudioParameters params;
+ params.Reset(format_, channel_layout_,
+ channels_, 0,
+ sample_rate_, bits_per_sample_,
+ samples_per_packet_);
+
+ AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params,
+ std::string());
+ EXPECT_TRUE(aos);
+ return aos;
+ }
+
+ scoped_ptr<AudioManager> audio_man_;
+
+ AudioParameters::Format format_;
+ ChannelLayout channel_layout_;
+ int channels_;
+ int bits_per_sample_;
+ int sample_rate_;
+ int samples_per_packet_;
+};
+
+// Test that we can get the hardware sample-rate.
+TEST(AUHALStreamTest, HardwareSampleRate) {
+ AudioOutputStreamWrapper aosw;
+ if (!aosw.CanRunAudioTests())
+ return;
+
+ int sample_rate = aosw.sample_rate();
+ EXPECT_GE(sample_rate, 16000);
+ EXPECT_LE(sample_rate, 192000);
+}
+
+// Test Create(), Close() calling sequence.
+TEST(AUHALStreamTest, CreateAndClose) {
+ AudioOutputStreamWrapper aosw;
+ if (!aosw.CanRunAudioTests())
+ return;
+
+ AudioOutputStream* aos = aosw.Create();
+ aos->Close();
+}
+
+// Test Open(), Close() calling sequence.
+TEST(AUHALStreamTest, OpenAndClose) {
+ AudioOutputStreamWrapper aosw;
+ if (!aosw.CanRunAudioTests())
+ return;
+
+ AudioOutputStream* aos = aosw.Create();
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+}
+
+// Test Open(), Start(), Close() calling sequence.
+TEST(AUHALStreamTest, OpenStartAndClose) {
+ AudioOutputStreamWrapper aosw;
+ if (!aosw.CanRunAudioTests())
+ return;
+
+ AudioOutputStream* aos = aosw.Create();
+ EXPECT_TRUE(aos->Open());
+ MockAudioSourceCallback source;
+ EXPECT_CALL(source, OnError(aos))
+ .Times(0);
+ aos->Start(&source);
+ aos->Close();
+}
+
+// Test Open(), Start(), Stop(), Close() calling sequence.
+TEST(AUHALStreamTest, OpenStartStopAndClose) {
+ AudioOutputStreamWrapper aosw;
+ if (!aosw.CanRunAudioTests())
+ return;
+
+ AudioOutputStream* aos = aosw.Create();
+ EXPECT_TRUE(aos->Open());
+ MockAudioSourceCallback source;
+ EXPECT_CALL(source, OnError(aos))
+ .Times(0);
+ aos->Start(&source);
+ aos->Stop();
+ aos->Close();
+}
+
+// This test produces actual audio for 0.5 seconds on the default audio device
+// at the hardware sample-rate (usually 44.1KHz).
+// Parameters have been chosen carefully so you should not hear
+// pops or noises while the sound is playing.
+TEST(AUHALStreamTest, AUHALStreamPlay200HzTone) {
+ AudioOutputStreamWrapper aosw;
+ if (!aosw.CanRunAudioTests())
+ return;
+
+ AudioOutputStream* aos = aosw.CreateWithLayout(CHANNEL_LAYOUT_MONO);
+
+ EXPECT_TRUE(aos->Open());
+
+ SineWaveAudioSource source(1, 200.0, aosw.sample_rate());
+ aos->Start(&source);
+ usleep(500000);
+
+ aos->Stop();
+ aos->Close();
+}
+
+// Test that Open() will fail with a sample-rate which isn't the hardware
+// sample-rate.
+TEST(AUHALStreamTest, AUHALStreamInvalidSampleRate) {
+ AudioOutputStreamWrapper aosw;
+ if (!aosw.CanRunAudioTests())
+ return;
+
+ int non_default_sample_rate = aosw.sample_rate() == 44100 ?
+ 48000 : 44100;
+ AudioOutputStream* aos = aosw.CreateWithSampleRate(non_default_sample_rate);
+
+ EXPECT_FALSE(aos->Open());
+
+ aos->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_device_listener_mac.cc b/chromium/media/audio/mac/audio_device_listener_mac.cc
new file mode 100644
index 00000000000..5c5ca355b9a
--- /dev/null
+++ b/chromium/media/audio/mac/audio_device_listener_mac.cc
@@ -0,0 +1,77 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_device_listener_mac.h"
+
+#include "base/bind.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "base/mac/mac_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pending_task.h"
+#include "media/audio/mac/audio_low_latency_output_mac.h"
+
+namespace media {
+
+// Property address to monitor for device changes.
+const AudioObjectPropertyAddress
+AudioDeviceListenerMac::kDeviceChangePropertyAddress = {
+ kAudioHardwarePropertyDefaultOutputDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+};
+
+// Callback from the system when the default device changes; this must be called
+// on the MessageLoop that created the AudioManager.
+// static
+OSStatus AudioDeviceListenerMac::OnDefaultDeviceChanged(
+ AudioObjectID object, UInt32 num_addresses,
+ const AudioObjectPropertyAddress addresses[], void* context) {
+ if (object != kAudioObjectSystemObject)
+ return noErr;
+
+ for (UInt32 i = 0; i < num_addresses; ++i) {
+ if (addresses[i].mSelector == kDeviceChangePropertyAddress.mSelector &&
+ addresses[i].mScope == kDeviceChangePropertyAddress.mScope &&
+ addresses[i].mElement == kDeviceChangePropertyAddress.mElement &&
+ context) {
+ static_cast<AudioDeviceListenerMac*>(context)->listener_cb_.Run();
+ break;
+ }
+ }
+
+ return noErr;
+}
+
+AudioDeviceListenerMac::AudioDeviceListenerMac(
+ const base::Closure& listener_cb) {
+ OSStatus result = AudioObjectAddPropertyListener(
+ kAudioObjectSystemObject, &kDeviceChangePropertyAddress,
+ &AudioDeviceListenerMac::OnDefaultDeviceChanged, this);
+
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result)
+ << "AudioObjectAddPropertyListener() failed!";
+ return;
+ }
+
+ listener_cb_ = listener_cb;
+}
+
+AudioDeviceListenerMac::~AudioDeviceListenerMac() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (listener_cb_.is_null())
+ return;
+
+ // Since we're running on the same CFRunLoop, there can be no outstanding
+ // callbacks in flight.
+ OSStatus result = AudioObjectRemovePropertyListener(
+ kAudioObjectSystemObject, &kDeviceChangePropertyAddress,
+ &AudioDeviceListenerMac::OnDefaultDeviceChanged, this);
+ OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
+ << "AudioObjectRemovePropertyListener() failed!";
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_device_listener_mac.h b/chromium/media/audio/mac/audio_device_listener_mac.h
new file mode 100644
index 00000000000..b513cd442f1
--- /dev/null
+++ b/chromium/media/audio/mac/audio_device_listener_mac.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_DEVICE_LISTENER_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_DEVICE_LISTENER_MAC_H_
+
+#include <CoreAudio/AudioHardware.h>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/threading/thread_checker.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// AudioDeviceListenerMac facilitates execution of device listener callbacks
+// issued via CoreAudio.
+class MEDIA_EXPORT AudioDeviceListenerMac {
+ public:
+ // |listener_cb| will be called when a device change occurs; it's a permanent
+ // callback and must outlive AudioDeviceListenerMac. Note that |listener_cb|
+ // might not be executed on the same thread as construction.
+ explicit AudioDeviceListenerMac(const base::Closure& listener_cb);
+ ~AudioDeviceListenerMac();
+
+ private:
+ friend class AudioDeviceListenerMacTest;
+ static const AudioObjectPropertyAddress kDeviceChangePropertyAddress;
+
+ static OSStatus OnDefaultDeviceChanged(
+ AudioObjectID object, UInt32 num_addresses,
+ const AudioObjectPropertyAddress addresses[], void* context);
+
+ base::Closure listener_cb_;
+
+ // AudioDeviceListenerMac must be constructed and destructed on the same
+ // thread.
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioDeviceListenerMac);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_DEVICE_LISTENER_MAC_H_
diff --git a/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc b/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc
new file mode 100644
index 00000000000..7efb3297172
--- /dev/null
+++ b/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc
@@ -0,0 +1,87 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <CoreAudio/AudioHardware.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "media/audio/mac/audio_device_listener_mac.h"
+#include "media/base/bind_to_loop.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class AudioDeviceListenerMacTest : public testing::Test {
+ public:
+ AudioDeviceListenerMacTest() {
+ // It's important to create the device listener from the message loop in
+ // order to ensure we don't end up with unbalanced TaskObserver calls.
+ message_loop_.PostTask(FROM_HERE, base::Bind(
+ &AudioDeviceListenerMacTest::CreateDeviceListener,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+ }
+
+ virtual ~AudioDeviceListenerMacTest() {
+ // It's important to destroy the device listener from the message loop in
+ // order to ensure we don't end up with unbalanced TaskObserver calls.
+ message_loop_.PostTask(FROM_HERE, base::Bind(
+ &AudioDeviceListenerMacTest::DestroyDeviceListener,
+ base::Unretained(this)));
+ message_loop_.RunUntilIdle();
+ }
+
+ void CreateDeviceListener() {
+ // Force a post task using BindToLoop to ensure device listener internals
+ // are working correctly.
+ output_device_listener_.reset(new AudioDeviceListenerMac(BindToLoop(
+ message_loop_.message_loop_proxy(), base::Bind(
+ &AudioDeviceListenerMacTest::OnDeviceChange,
+ base::Unretained(this)))));
+ }
+
+ void DestroyDeviceListener() {
+ output_device_listener_.reset();
+ }
+
+ bool ListenerIsValid() {
+ return !output_device_listener_->listener_cb_.is_null();
+ }
+
+ // Simulate a device change where no output devices are available.
+ bool SimulateDefaultOutputDeviceChange() {
+ // Include multiple addresses to ensure only a single device change event
+ // occurs.
+ const AudioObjectPropertyAddress addresses[] = {
+ AudioDeviceListenerMac::kDeviceChangePropertyAddress,
+ { kAudioHardwarePropertyDevices,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster }
+ };
+
+ return noErr == output_device_listener_->OnDefaultDeviceChanged(
+ kAudioObjectSystemObject, 1, addresses, output_device_listener_.get());
+ }
+
+ MOCK_METHOD0(OnDeviceChange, void());
+
+ protected:
+ base::MessageLoop message_loop_;
+ scoped_ptr<AudioDeviceListenerMac> output_device_listener_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioDeviceListenerMacTest);
+};
+
+// Simulate a device change events and ensure we get the right callbacks.
+TEST_F(AudioDeviceListenerMacTest, OutputDeviceChange) {
+ ASSERT_TRUE(ListenerIsValid());
+ EXPECT_CALL(*this, OnDeviceChange()).Times(1);
+ ASSERT_TRUE(SimulateDefaultOutputDeviceChange());
+ message_loop_.RunUntilIdle();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_input_mac.cc b/chromium/media/audio/mac/audio_input_mac.cc
new file mode 100644
index 00000000000..06af6d11c12
--- /dev/null
+++ b/chromium/media/audio/mac/audio_input_mac.cc
@@ -0,0 +1,231 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_input_mac.h"
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_util.h"
+
+#if !defined(OS_IOS)
+#include <CoreServices/CoreServices.h>
+#endif
+
+namespace media {
+
+PCMQueueInAudioInputStream::PCMQueueInAudioInputStream(
+ AudioManagerBase* manager, const AudioParameters& params)
+ : manager_(manager),
+ callback_(NULL),
+ audio_queue_(NULL),
+ buffer_size_bytes_(0),
+ started_(false) {
+ // We must have a manager.
+ DCHECK(manager_);
+ // A frame is one sample across all channels. In interleaved audio the per
+ // frame fields identify the set of n |channels|. In uncompressed audio, a
+ // packet is always one frame.
+ format_.mSampleRate = params.sample_rate();
+ format_.mFormatID = kAudioFormatLinearPCM;
+ format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
+ kLinearPCMFormatFlagIsSignedInteger;
+ format_.mBitsPerChannel = params.bits_per_sample();
+ format_.mChannelsPerFrame = params.channels();
+ format_.mFramesPerPacket = 1;
+ format_.mBytesPerPacket = (params.bits_per_sample() * params.channels()) / 8;
+ format_.mBytesPerFrame = format_.mBytesPerPacket;
+ format_.mReserved = 0;
+
+ buffer_size_bytes_ = params.GetBytesPerBuffer();
+}
+
+PCMQueueInAudioInputStream::~PCMQueueInAudioInputStream() {
+ DCHECK(!callback_);
+ DCHECK(!audio_queue_);
+}
+
+bool PCMQueueInAudioInputStream::Open() {
+ OSStatus err = AudioQueueNewInput(&format_,
+ &HandleInputBufferStatic,
+ this,
+ NULL, // Use OS CFRunLoop for |callback|
+ kCFRunLoopCommonModes,
+ 0, // Reserved
+ &audio_queue_);
+ if (err != noErr) {
+ HandleError(err);
+ return false;
+ }
+ return SetupBuffers();
+}
+
+void PCMQueueInAudioInputStream::Start(AudioInputCallback* callback) {
+ DCHECK(callback);
+ DLOG_IF(ERROR, !audio_queue_) << "Open() has not been called successfully";
+ if (callback_ || !audio_queue_)
+ return;
+ callback_ = callback;
+ OSStatus err = AudioQueueStart(audio_queue_, NULL);
+ if (err != noErr) {
+ HandleError(err);
+ } else {
+ started_ = true;
+ }
+}
+
+void PCMQueueInAudioInputStream::Stop() {
+ if (!audio_queue_ || !started_)
+ return;
+
+ // We request a synchronous stop, so the next call can take some time. In
+ // the Windows implementation we block here as well.
+ OSStatus err = AudioQueueStop(audio_queue_, true);
+ if (err != noErr)
+ HandleError(err);
+
+ started_ = false;
+}
+
+void PCMQueueInAudioInputStream::Close() {
+ // It is valid to call Close() before calling Open() or Start(), thus
+ // |audio_queue_| and |callback_| might be NULL.
+ if (audio_queue_) {
+ OSStatus err = AudioQueueDispose(audio_queue_, true);
+ audio_queue_ = NULL;
+ if (err != noErr)
+ HandleError(err);
+ }
+ if (callback_) {
+ callback_->OnClose(this);
+ callback_ = NULL;
+ }
+ manager_->ReleaseInputStream(this);
+ // CARE: This object may now be destroyed.
+}
+
+double PCMQueueInAudioInputStream::GetMaxVolume() {
+ NOTREACHED() << "Only supported for low-latency mode.";
+ return 0.0;
+}
+
+void PCMQueueInAudioInputStream::SetVolume(double volume) {
+ NOTREACHED() << "Only supported for low-latency mode.";
+}
+
+double PCMQueueInAudioInputStream::GetVolume() {
+ NOTREACHED() << "Only supported for low-latency mode.";
+ return 0.0;
+}
+
+void PCMQueueInAudioInputStream::SetAutomaticGainControl(bool enabled) {
+ NOTREACHED() << "Only supported for low-latency mode.";
+}
+
+bool PCMQueueInAudioInputStream::GetAutomaticGainControl() {
+ NOTREACHED() << "Only supported for low-latency mode.";
+ return false;
+}
+
+void PCMQueueInAudioInputStream::HandleError(OSStatus err) {
+ if (callback_)
+ callback_->OnError(this);
+ // This point should never be reached.
+ OSSTATUS_DCHECK(0, err);
+}
+
+bool PCMQueueInAudioInputStream::SetupBuffers() {
+ DCHECK(buffer_size_bytes_);
+ for (int i = 0; i < kNumberBuffers; ++i) {
+ AudioQueueBufferRef buffer;
+ OSStatus err = AudioQueueAllocateBuffer(audio_queue_,
+ buffer_size_bytes_,
+ &buffer);
+ if (err == noErr)
+ err = QueueNextBuffer(buffer);
+ if (err != noErr) {
+ HandleError(err);
+ return false;
+ }
+ // |buffer| will automatically be freed when |audio_queue_| is released.
+ }
+ return true;
+}
+
+OSStatus PCMQueueInAudioInputStream::QueueNextBuffer(
+ AudioQueueBufferRef audio_buffer) {
+ // Only the first 2 params are needed for recording.
+ return AudioQueueEnqueueBuffer(audio_queue_, audio_buffer, 0, NULL);
+}
+
+// static
+void PCMQueueInAudioInputStream::HandleInputBufferStatic(
+ void* data,
+ AudioQueueRef audio_queue,
+ AudioQueueBufferRef audio_buffer,
+ const AudioTimeStamp* start_time,
+ UInt32 num_packets,
+ const AudioStreamPacketDescription* desc) {
+ reinterpret_cast<PCMQueueInAudioInputStream*>(data)->
+ HandleInputBuffer(audio_queue, audio_buffer, start_time,
+ num_packets, desc);
+}
+
+void PCMQueueInAudioInputStream::HandleInputBuffer(
+ AudioQueueRef audio_queue,
+ AudioQueueBufferRef audio_buffer,
+ const AudioTimeStamp* start_time,
+ UInt32 num_packets,
+ const AudioStreamPacketDescription* packet_desc) {
+ DCHECK_EQ(audio_queue_, audio_queue);
+ DCHECK(audio_buffer->mAudioData);
+ if (!callback_) {
+ // This can happen if Stop() was called without start.
+ DCHECK_EQ(0U, audio_buffer->mAudioDataByteSize);
+ return;
+ }
+
+ if (audio_buffer->mAudioDataByteSize) {
+ // The AudioQueue API may use a large internal buffer and repeatedly call us
+ // back to back once that internal buffer is filled. When this happens the
+ // renderer client does not have enough time to read data back from the
+ // shared memory before the next write comes along. If HandleInputBuffer()
+ // is called too frequently, Sleep() at least 5ms to ensure the shared
+ // memory doesn't get trampled.
+ // TODO(dalecurtis): This is a HACK. Long term the AudioQueue path is going
+ // away in favor of the AudioUnit based AUAudioInputStream(). Tracked by
+ // http://crbug.com/161383.
+ base::TimeDelta elapsed = base::TimeTicks::Now() - last_fill_;
+ const base::TimeDelta kMinDelay = base::TimeDelta::FromMilliseconds(5);
+ if (elapsed < kMinDelay)
+ base::PlatformThread::Sleep(kMinDelay - elapsed);
+
+ callback_->OnData(this,
+ reinterpret_cast<const uint8*>(audio_buffer->mAudioData),
+ audio_buffer->mAudioDataByteSize,
+ audio_buffer->mAudioDataByteSize,
+ 0.0);
+
+ last_fill_ = base::TimeTicks::Now();
+ }
+ // Recycle the buffer.
+ OSStatus err = QueueNextBuffer(audio_buffer);
+ if (err != noErr) {
+ if (err == kAudioQueueErr_EnqueueDuringReset) {
+ // This is the error you get if you try to enqueue a buffer and the
+ // queue has been closed. Not really a problem if indeed the queue
+ // has been closed.
+ // TODO(joth): PCMQueueOutAudioOutputStream uses callback_ to provide an
+ // extra guard for this situation, but it seems to introduce more
+ // complications than it solves (memory barrier issues accessing it from
+ // multiple threads, loses the means to indicate OnClosed to client).
+ // Should determine if we need to do something equivalent here.
+ return;
+ }
+ HandleError(err);
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_input_mac.h b/chromium/media/audio/mac/audio_input_mac.h
new file mode 100644
index 00000000000..77eb65b0315
--- /dev/null
+++ b/chromium/media/audio/mac/audio_input_mac.h
@@ -0,0 +1,88 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_INPUT_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_INPUT_MAC_H_
+
+#include <AudioToolbox/AudioFormat.h>
+#include <AudioToolbox/AudioQueue.h>
+
+#include "base/compiler_specific.h"
+#include "base/time/time.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerBase;
+
+// Implementation of AudioInputStream for Mac OS X using the audio queue service
+// present in OS 10.5 and later. Design reflects PCMQueueOutAudioOutputStream.
+class PCMQueueInAudioInputStream : public AudioInputStream {
+ public:
+ // Parameters as per AudioManager::MakeAudioInputStream.
+ PCMQueueInAudioInputStream(AudioManagerBase* manager,
+ const AudioParameters& params);
+ virtual ~PCMQueueInAudioInputStream();
+
+ // Implementation of AudioInputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioInputCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual double GetMaxVolume() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual double GetVolume() OVERRIDE;
+ virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
+ virtual bool GetAutomaticGainControl() OVERRIDE;
+
+ private:
+ // Issue the OnError to |callback_|;
+ void HandleError(OSStatus err);
+
+ // Allocates and prepares the memory that will be used for recording.
+ bool SetupBuffers();
+
+ // Sends a buffer to the audio driver for recording.
+ OSStatus QueueNextBuffer(AudioQueueBufferRef audio_buffer);
+
+ // Callback from OS, delegates to non-static version below.
+ static void HandleInputBufferStatic(
+ void* data,
+ AudioQueueRef audio_queue,
+ AudioQueueBufferRef audio_buffer,
+ const AudioTimeStamp* start_time,
+ UInt32 num_packets,
+ const AudioStreamPacketDescription* desc);
+
+ // Handles callback from OS. Will be called on OS internal thread.
+ void HandleInputBuffer(AudioQueueRef audio_queue,
+ AudioQueueBufferRef audio_buffer,
+ const AudioTimeStamp* start_time,
+ UInt32 num_packets,
+ const AudioStreamPacketDescription* packet_desc);
+
+ static const int kNumberBuffers = 3;
+
+ // Manager that owns this stream, used for closing down.
+ AudioManagerBase* manager_;
+ // We use the callback mostly to periodically supply the recorded audio data.
+ AudioInputCallback* callback_;
+ // Structure that holds the stream format details such as bitrate.
+ AudioStreamBasicDescription format_;
+ // Handle to the OS audio queue object.
+ AudioQueueRef audio_queue_;
+ // Size in bytes of each buffer allocated in SetupBuffers().
+ uint32 buffer_size_bytes_;
+ // True iff Start() has been called successfully.
+ bool started_;
+ // Used to determine if we need to slow down |callback_| calls.
+ base::TimeTicks last_fill_;
+
+ DISALLOW_COPY_AND_ASSIGN(PCMQueueInAudioInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_INPUT_MAC_H_
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.cc b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
new file mode 100644
index 00000000000..17a87b0a7dc
--- /dev/null
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
@@ -0,0 +1,664 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_low_latency_input_mac.h"
+
+#include <CoreServices/CoreServices.h>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/data_buffer.h"
+
+namespace media {
+
+static const int kMinIntervalBetweenVolumeUpdatesMs = 1000;
+
+static std::ostream& operator<<(std::ostream& os,
+ const AudioStreamBasicDescription& format) {
+ os << "sample rate : " << format.mSampleRate << std::endl
+ << "format ID : " << format.mFormatID << std::endl
+ << "format flags : " << format.mFormatFlags << std::endl
+ << "bytes per packet : " << format.mBytesPerPacket << std::endl
+ << "frames per packet : " << format.mFramesPerPacket << std::endl
+ << "bytes per frame : " << format.mBytesPerFrame << std::endl
+ << "channels per frame: " << format.mChannelsPerFrame << std::endl
+ << "bits per channel : " << format.mBitsPerChannel;
+ return os;
+}
+
+// See "Technical Note TN2091 - Device input using the HAL Output Audio Unit"
+// http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
+// for more details and background regarding this implementation.
+
+AUAudioInputStream::AUAudioInputStream(
+ AudioManagerMac* manager, const AudioParameters& params,
+ AudioDeviceID audio_device_id)
+ : manager_(manager),
+ sink_(NULL),
+ audio_unit_(0),
+ input_device_id_(audio_device_id),
+ started_(false),
+ hardware_latency_frames_(0),
+ fifo_delay_bytes_(0),
+ number_of_channels_in_frame_(0) {
+ DCHECK(manager_);
+
+ // Set up the desired (output) format specified by the client.
+ format_.mSampleRate = params.sample_rate();
+ format_.mFormatID = kAudioFormatLinearPCM;
+ format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
+ kLinearPCMFormatFlagIsSignedInteger;
+ format_.mBitsPerChannel = params.bits_per_sample();
+ format_.mChannelsPerFrame = params.channels();
+ format_.mFramesPerPacket = 1; // uncompressed audio
+ format_.mBytesPerPacket = (format_.mBitsPerChannel *
+ params.channels()) / 8;
+ format_.mBytesPerFrame = format_.mBytesPerPacket;
+ format_.mReserved = 0;
+
+ DVLOG(1) << "Desired ouput format: " << format_;
+
+ // Set number of sample frames per callback used by the internal audio layer.
+ // An internal FIFO is then utilized to adapt the internal size to the size
+ // requested by the client.
+ // Note that we use the same native buffer size as for the output side here
+ // since the AUHAL implementation requires that both capture and render side
+ // use the same buffer size. See http://crbug.com/154352 for more details.
+ // TODO(xians): Get the audio parameters from the right device.
+ const AudioParameters parameters =
+ manager_->GetInputStreamParameters(AudioManagerBase::kDefaultDeviceId);
+ number_of_frames_ = parameters.frames_per_buffer();
+ DVLOG(1) << "Size of data buffer in frames : " << number_of_frames_;
+
+ // Derive size (in bytes) of the buffers that we will render to.
+ UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame;
+ DVLOG(1) << "Size of data buffer in bytes : " << data_byte_size;
+
+ // Allocate AudioBuffers to be used as storage for the received audio.
+ // The AudioBufferList structure works as a placeholder for the
+ // AudioBuffer structure, which holds a pointer to the actual data buffer.
+ audio_data_buffer_.reset(new uint8[data_byte_size]);
+ audio_buffer_list_.mNumberBuffers = 1;
+
+ AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers;
+ audio_buffer->mNumberChannels = params.channels();
+ audio_buffer->mDataByteSize = data_byte_size;
+ audio_buffer->mData = audio_data_buffer_.get();
+
+ // Set up an internal FIFO buffer that will accumulate recorded audio frames
+ // until a requested size is ready to be sent to the client.
+ // It is not possible to ask for less than |kAudioFramesPerCallback| number of
+ // audio frames.
+ const size_t requested_size_frames =
+ params.GetBytesPerBuffer() / format_.mBytesPerPacket;
+ DCHECK_GE(requested_size_frames, number_of_frames_);
+ requested_size_bytes_ = requested_size_frames * format_.mBytesPerFrame;
+ DVLOG(1) << "Requested buffer size in bytes : " << requested_size_bytes_;
+ DLOG_IF(INFO, requested_size_frames > number_of_frames_) << "FIFO is used";
+
+ const int number_of_bytes = number_of_frames_ * format_.mBytesPerFrame;
+ fifo_delay_bytes_ = requested_size_bytes_ - number_of_bytes;
+
+ // Allocate some extra memory to avoid memory reallocations.
+  // Ensure that the size is an even multiple of |number_of_frames_| and
+ // larger than |requested_size_frames|.
+ // Example: number_of_frames_=128, requested_size_frames=480 =>
+ // allocated space equals 4*128=512 audio frames
+ const int max_forward_capacity = number_of_bytes *
+ ((requested_size_frames / number_of_frames_) + 1);
+ fifo_.reset(new media::SeekableBuffer(0, max_forward_capacity));
+
+ data_ = new media::DataBuffer(requested_size_bytes_);
+}
+
+AUAudioInputStream::~AUAudioInputStream() {}
+
+// Obtain and open the AUHAL AudioOutputUnit for recording.
+bool AUAudioInputStream::Open() {
+ // Verify that we are not already opened.
+ if (audio_unit_)
+ return false;
+
+ // Verify that we have a valid device.
+ if (input_device_id_ == kAudioObjectUnknown) {
+ NOTREACHED() << "Device ID is unknown";
+ return false;
+ }
+
+  // Start by obtaining an AudioOutputUnit using an AUHAL component description.
+
+ Component comp;
+ ComponentDescription desc;
+
+ // Description for the Audio Unit we want to use (AUHAL in this case).
+ desc.componentType = kAudioUnitType_Output;
+ desc.componentSubType = kAudioUnitSubType_HALOutput;
+ desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ desc.componentFlags = 0;
+ desc.componentFlagsMask = 0;
+ comp = FindNextComponent(0, &desc);
+ DCHECK(comp);
+
+ // Get access to the service provided by the specified Audio Unit.
+ OSStatus result = OpenAComponent(comp, &audio_unit_);
+ if (result) {
+ HandleError(result);
+ return false;
+ }
+
+ // Enable IO on the input scope of the Audio Unit.
+
+ // After creating the AUHAL object, we must enable IO on the input scope
+ // of the Audio Unit to obtain the device input. Input must be explicitly
+ // enabled with the kAudioOutputUnitProperty_EnableIO property on Element 1
+  // of the AUHAL. Because the AUHAL can be used for both input and output,
+ // we must also disable IO on the output scope.
+
+ UInt32 enableIO = 1;
+
+ // Enable input on the AUHAL.
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input,
+ 1, // input element 1
+ &enableIO, // enable
+ sizeof(enableIO));
+ if (result) {
+ HandleError(result);
+ return false;
+ }
+
+ // Disable output on the AUHAL.
+ enableIO = 0;
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0, // output element 0
+ &enableIO, // disable
+ sizeof(enableIO));
+ if (result) {
+ HandleError(result);
+ return false;
+ }
+
+ // Next, set the audio device to be the Audio Unit's current device.
+ // Note that, devices can only be set to the AUHAL after enabling IO.
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global,
+ 0,
+ &input_device_id_,
+ sizeof(input_device_id_));
+ if (result) {
+ HandleError(result);
+ return false;
+ }
+
+ // Register the input procedure for the AUHAL.
+ // This procedure will be called when the AUHAL has received new data
+ // from the input device.
+ AURenderCallbackStruct callback;
+ callback.inputProc = InputProc;
+ callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global,
+ 0,
+ &callback,
+ sizeof(callback));
+ if (result) {
+ HandleError(result);
+ return false;
+ }
+
+  // Set up the desired (output) format.
+ // For obtaining input from a device, the device format is always expressed
+ // on the output scope of the AUHAL's Element 1.
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output,
+ 1,
+ &format_,
+ sizeof(format_));
+ if (result) {
+ HandleError(result);
+ return false;
+ }
+
+ // Set the desired number of frames in the IO buffer (output scope).
+ // WARNING: Setting this value changes the frame size for all audio units in
+ // the current process. It's imperative that the input and output frame sizes
+ // be the same as the frames_per_buffer() returned by
+ // GetInputStreamParameters().
+ // TODO(henrika): Due to http://crrev.com/159666 this is currently not true
+ // and should be fixed, a CHECK() should be added at that time.
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioDevicePropertyBufferFrameSize,
+ kAudioUnitScope_Output,
+ 1,
+ &number_of_frames_, // size is set in the ctor
+ sizeof(number_of_frames_));
+ if (result) {
+ HandleError(result);
+ return false;
+ }
+
+ // Finally, initialize the audio unit and ensure that it is ready to render.
+ // Allocates memory according to the maximum number of audio frames
+ // it can produce in response to a single render call.
+ result = AudioUnitInitialize(audio_unit_);
+ if (result) {
+ HandleError(result);
+ return false;
+ }
+
+ // The hardware latency is fixed and will not change during the call.
+ hardware_latency_frames_ = GetHardwareLatency();
+
+ // The master channel is 0, Left and right are channels 1 and 2.
+ // And the master channel is not counted in |number_of_channels_in_frame_|.
+ number_of_channels_in_frame_ = GetNumberOfChannelsFromStream();
+
+ return true;
+}
+
+void AUAudioInputStream::Start(AudioInputCallback* callback) {
+ DCHECK(callback);
+ DLOG_IF(ERROR, !audio_unit_) << "Open() has not been called successfully";
+ if (started_ || !audio_unit_)
+ return;
+ sink_ = callback;
+ StartAgc();
+ OSStatus result = AudioOutputUnitStart(audio_unit_);
+ if (result == noErr) {
+ started_ = true;
+ }
+ OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
+ << "Failed to start acquiring data";
+}
+
+void AUAudioInputStream::Stop() {
+ if (!started_)
+ return;
+ StopAgc();
+ OSStatus result = AudioOutputUnitStop(audio_unit_);
+ if (result == noErr) {
+ started_ = false;
+ }
+ OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
+ << "Failed to stop acquiring data";
+}
+
+void AUAudioInputStream::Close() {
+  // It is valid to call Close() before calling Open() or Start().
+ // It is also valid to call Close() after Start() has been called.
+ if (started_) {
+ Stop();
+ }
+ if (audio_unit_) {
+ // Deallocate the audio unit’s resources.
+ AudioUnitUninitialize(audio_unit_);
+
+ // Terminates our connection to the AUHAL component.
+ CloseComponent(audio_unit_);
+ audio_unit_ = 0;
+ }
+ if (sink_) {
+ sink_->OnClose(this);
+ sink_ = NULL;
+ }
+
+ // Inform the audio manager that we have been closed. This can cause our
+ // destruction.
+ manager_->ReleaseInputStream(this);
+}
+
+double AUAudioInputStream::GetMaxVolume() {
+ // Verify that we have a valid device.
+ if (input_device_id_ == kAudioObjectUnknown) {
+ NOTREACHED() << "Device ID is unknown";
+ return 0.0;
+ }
+
+ // Query if any of the master, left or right channels has volume control.
+ for (int i = 0; i <= number_of_channels_in_frame_; ++i) {
+ // If the volume is settable, the valid volume range is [0.0, 1.0].
+ if (IsVolumeSettableOnChannel(i))
+ return 1.0;
+ }
+
+ // Volume control is not available for the audio stream.
+ return 0.0;
+}
+
+void AUAudioInputStream::SetVolume(double volume) {
+ DVLOG(1) << "SetVolume(volume=" << volume << ")";
+ DCHECK_GE(volume, 0.0);
+ DCHECK_LE(volume, 1.0);
+
+ // Verify that we have a valid device.
+ if (input_device_id_ == kAudioObjectUnknown) {
+ NOTREACHED() << "Device ID is unknown";
+ return;
+ }
+
+ Float32 volume_float32 = static_cast<Float32>(volume);
+ AudioObjectPropertyAddress property_address = {
+ kAudioDevicePropertyVolumeScalar,
+ kAudioDevicePropertyScopeInput,
+ kAudioObjectPropertyElementMaster
+ };
+
+ // Try to set the volume for master volume channel.
+ if (IsVolumeSettableOnChannel(kAudioObjectPropertyElementMaster)) {
+ OSStatus result = AudioObjectSetPropertyData(input_device_id_,
+ &property_address,
+ 0,
+ NULL,
+ sizeof(volume_float32),
+ &volume_float32);
+ if (result != noErr) {
+ DLOG(WARNING) << "Failed to set volume to " << volume_float32;
+ }
+ return;
+ }
+
+ // There is no master volume control, try to set volume for each channel.
+ int successful_channels = 0;
+ for (int i = 1; i <= number_of_channels_in_frame_; ++i) {
+ property_address.mElement = static_cast<UInt32>(i);
+ if (IsVolumeSettableOnChannel(i)) {
+ OSStatus result = AudioObjectSetPropertyData(input_device_id_,
+ &property_address,
+ 0,
+ NULL,
+ sizeof(volume_float32),
+ &volume_float32);
+ if (result == noErr)
+ ++successful_channels;
+ }
+ }
+
+ DLOG_IF(WARNING, successful_channels == 0)
+ << "Failed to set volume to " << volume_float32;
+
+ // Update the AGC volume level based on the last setting above. Note that,
+ // the volume-level resolution is not infinite and it is therefore not
+ // possible to assume that the volume provided as input parameter can be
+ // used directly. Instead, a new query to the audio hardware is required.
+ // This method does nothing if AGC is disabled.
+ UpdateAgcVolume();
+}
+
+double AUAudioInputStream::GetVolume() {
+ // Verify that we have a valid device.
+ if (input_device_id_ == kAudioObjectUnknown){
+ NOTREACHED() << "Device ID is unknown";
+ return 0.0;
+ }
+
+ AudioObjectPropertyAddress property_address = {
+ kAudioDevicePropertyVolumeScalar,
+ kAudioDevicePropertyScopeInput,
+ kAudioObjectPropertyElementMaster
+ };
+
+ if (AudioObjectHasProperty(input_device_id_, &property_address)) {
+ // The device supports master volume control, get the volume from the
+ // master channel.
+ Float32 volume_float32 = 0.0;
+ UInt32 size = sizeof(volume_float32);
+ OSStatus result = AudioObjectGetPropertyData(input_device_id_,
+ &property_address,
+ 0,
+ NULL,
+ &size,
+ &volume_float32);
+ if (result == noErr)
+ return static_cast<double>(volume_float32);
+ } else {
+ // There is no master volume control, try to get the average volume of
+ // all the channels.
+ Float32 volume_float32 = 0.0;
+ int successful_channels = 0;
+ for (int i = 1; i <= number_of_channels_in_frame_; ++i) {
+ property_address.mElement = static_cast<UInt32>(i);
+ if (AudioObjectHasProperty(input_device_id_, &property_address)) {
+ Float32 channel_volume = 0;
+ UInt32 size = sizeof(channel_volume);
+ OSStatus result = AudioObjectGetPropertyData(input_device_id_,
+ &property_address,
+ 0,
+ NULL,
+ &size,
+ &channel_volume);
+ if (result == noErr) {
+ volume_float32 += channel_volume;
+ ++successful_channels;
+ }
+ }
+ }
+
+ // Get the average volume of the channels.
+ if (successful_channels != 0)
+ return static_cast<double>(volume_float32 / successful_channels);
+ }
+
+ DLOG(WARNING) << "Failed to get volume";
+ return 0.0;
+}
+
+// AUHAL AudioDeviceOutput unit callback
+OSStatus AUAudioInputStream::InputProc(void* user_data,
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ // Verify that the correct bus is used (Input bus/Element 1)
+ DCHECK_EQ(bus_number, static_cast<UInt32>(1));
+ AUAudioInputStream* audio_input =
+ reinterpret_cast<AUAudioInputStream*>(user_data);
+ DCHECK(audio_input);
+ if (!audio_input)
+ return kAudioUnitErr_InvalidElement;
+
+ // Receive audio from the AUHAL from the output scope of the Audio Unit.
+ OSStatus result = AudioUnitRender(audio_input->audio_unit(),
+ flags,
+ time_stamp,
+ bus_number,
+ number_of_frames,
+ audio_input->audio_buffer_list());
+ if (result)
+ return result;
+
+ // Deliver recorded data to the consumer as a callback.
+ return audio_input->Provide(number_of_frames,
+ audio_input->audio_buffer_list(),
+ time_stamp);
+}
+
+OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames,
+ AudioBufferList* io_data,
+ const AudioTimeStamp* time_stamp) {
+ // Update the capture latency.
+ double capture_latency_frames = GetCaptureLatency(time_stamp);
+
+ // The AGC volume level is updated once every second on a separate thread.
+ // Note that, |volume| is also updated each time SetVolume() is called
+ // through IPC by the render-side AGC.
+ double normalized_volume = 0.0;
+ GetAgcVolume(&normalized_volume);
+
+ AudioBuffer& buffer = io_data->mBuffers[0];
+ uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
+ uint32 capture_delay_bytes = static_cast<uint32>
+ ((capture_latency_frames + 0.5) * format_.mBytesPerFrame);
+ // Account for the extra delay added by the FIFO.
+ capture_delay_bytes += fifo_delay_bytes_;
+ DCHECK(audio_data);
+ if (!audio_data)
+ return kAudioUnitErr_InvalidElement;
+
+ // Accumulate captured audio in FIFO until we can match the output size
+ // requested by the client.
+ fifo_->Append(audio_data, buffer.mDataByteSize);
+
+ // Deliver recorded data to the client as soon as the FIFO contains a
+ // sufficient amount.
+ if (fifo_->forward_bytes() >= requested_size_bytes_) {
+ // Read from FIFO into temporary data buffer.
+ fifo_->Read(data_->writable_data(), requested_size_bytes_);
+
+ // Deliver data packet, delay estimation and volume level to the user.
+ sink_->OnData(this,
+ data_->data(),
+ requested_size_bytes_,
+ capture_delay_bytes,
+ normalized_volume);
+ }
+
+ return noErr;
+}
+
+int AUAudioInputStream::HardwareSampleRate() {
+ // Determine the default input device's sample-rate.
+ AudioDeviceID device_id = kAudioObjectUnknown;
+ UInt32 info_size = sizeof(device_id);
+
+ AudioObjectPropertyAddress default_input_device_address = {
+ kAudioHardwarePropertyDefaultInputDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+ OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+ &default_input_device_address,
+ 0,
+ 0,
+ &info_size,
+ &device_id);
+ if (result != noErr)
+ return 0.0;
+
+ Float64 nominal_sample_rate;
+ info_size = sizeof(nominal_sample_rate);
+
+ AudioObjectPropertyAddress nominal_sample_rate_address = {
+ kAudioDevicePropertyNominalSampleRate,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+ result = AudioObjectGetPropertyData(device_id,
+ &nominal_sample_rate_address,
+ 0,
+ 0,
+ &info_size,
+ &nominal_sample_rate);
+ if (result != noErr)
+ return 0.0;
+
+ return static_cast<int>(nominal_sample_rate);
+}
+
+double AUAudioInputStream::GetHardwareLatency() {
+ if (!audio_unit_ || input_device_id_ == kAudioObjectUnknown) {
+ DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown";
+ return 0.0;
+ }
+
+ // Get audio unit latency.
+ Float64 audio_unit_latency_sec = 0.0;
+ UInt32 size = sizeof(audio_unit_latency_sec);
+ OSStatus result = AudioUnitGetProperty(audio_unit_,
+ kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global,
+ 0,
+ &audio_unit_latency_sec,
+ &size);
+ OSSTATUS_DLOG_IF(WARNING, result != noErr, result)
+ << "Could not get audio unit latency";
+
+ // Get input audio device latency.
+ AudioObjectPropertyAddress property_address = {
+ kAudioDevicePropertyLatency,
+ kAudioDevicePropertyScopeInput,
+ kAudioObjectPropertyElementMaster
+ };
+ UInt32 device_latency_frames = 0;
+ size = sizeof(device_latency_frames);
+ result = AudioObjectGetPropertyData(input_device_id_,
+ &property_address,
+ 0,
+ NULL,
+ &size,
+ &device_latency_frames);
+ DLOG_IF(WARNING, result != noErr) << "Could not get audio device latency.";
+
+ return static_cast<double>((audio_unit_latency_sec *
+ format_.mSampleRate) + device_latency_frames);
+}
+
+double AUAudioInputStream::GetCaptureLatency(
+ const AudioTimeStamp* input_time_stamp) {
+  // Get the delay between the actual recording instant and the time
+ // when the data packet is provided as a callback.
+ UInt64 capture_time_ns = AudioConvertHostTimeToNanos(
+ input_time_stamp->mHostTime);
+ UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+ double delay_frames = static_cast<double>
+ (1e-9 * (now_ns - capture_time_ns) * format_.mSampleRate);
+
+ // Total latency is composed by the dynamic latency and the fixed
+ // hardware latency.
+ return (delay_frames + hardware_latency_frames_);
+}
+
+int AUAudioInputStream::GetNumberOfChannelsFromStream() {
+ // Get the stream format, to be able to read the number of channels.
+ AudioObjectPropertyAddress property_address = {
+ kAudioDevicePropertyStreamFormat,
+ kAudioDevicePropertyScopeInput,
+ kAudioObjectPropertyElementMaster
+ };
+ AudioStreamBasicDescription stream_format;
+ UInt32 size = sizeof(stream_format);
+ OSStatus result = AudioObjectGetPropertyData(input_device_id_,
+ &property_address,
+ 0,
+ NULL,
+ &size,
+ &stream_format);
+ if (result != noErr) {
+ DLOG(WARNING) << "Could not get stream format";
+ return 0;
+ }
+
+ return static_cast<int>(stream_format.mChannelsPerFrame);
+}
+
+void AUAudioInputStream::HandleError(OSStatus err) {
+ NOTREACHED() << "error " << GetMacOSStatusErrorString(err)
+ << " (" << err << ")";
+ if (sink_)
+ sink_->OnError(this);
+}
+
+bool AUAudioInputStream::IsVolumeSettableOnChannel(int channel) {
+ Boolean is_settable = false;
+ AudioObjectPropertyAddress property_address = {
+ kAudioDevicePropertyVolumeScalar,
+ kAudioDevicePropertyScopeInput,
+ static_cast<UInt32>(channel)
+ };
+ OSStatus result = AudioObjectIsPropertySettable(input_device_id_,
+ &property_address,
+ &is_settable);
+ return (result == noErr) ? is_settable : false;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.h b/chromium/media/audio/mac/audio_low_latency_input_mac.h
new file mode 100644
index 00000000000..736bf082f5b
--- /dev/null
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.h
@@ -0,0 +1,169 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Implementation of AudioInputStream for Mac OS X using the special AUHAL
+// input Audio Unit present in OS 10.4 and later.
+// The AUHAL input Audio Unit is for low-latency audio I/O.
+//
+// Overview of operation:
+//
+// - An object of AUAudioInputStream is created by the AudioManager
+// factory: audio_man->MakeAudioInputStream().
+// - Next some thread will call Open(), at that point the underlying
+// AUHAL output Audio Unit is created and configured.
+// - Then some thread will call Start(sink).
+// Then the Audio Unit is started which creates its own thread which
+// periodically will provide the sink with more data as buffers are being
+// produced/recorded.
+// - At some point some thread will call Stop(), which we handle by directly
+// stopping the AUHAL output Audio Unit.
+// - The same thread that called stop will call Close() where we cleanup
+// and notify the audio manager, which likely will destroy this object.
+//
+// Implementation notes:
+//
+// - It is recommended to first acquire the native sample rate of the default
+// input device and then use the same rate when creating this object.
+// Use AUAudioInputStream::HardwareSampleRate() to retrieve the sample rate.
+// - Calling Close() also leads to self destruction.
+// - The latency consists of two parts:
+// 1) Hardware latency, which includes Audio Unit latency, audio device
+// latency;
+// 2) The delay between the actual recording instant and the time when the
+// data packet is provided as a callback.
+//
+#ifndef MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_INPUT_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_INPUT_MAC_H_
+
+#include <AudioUnit/AudioUnit.h>
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/atomicops.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/agc_audio_stream.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/seekable_buffer.h"
+
+namespace media {
+
+class AudioManagerMac;
+class DataBuffer;
+
+class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
+ public:
+ // The ctor takes all the usual parameters, plus |manager| which is the
+  // audio manager who is creating this object.
+ AUAudioInputStream(AudioManagerMac* manager,
+ const AudioParameters& params,
+ AudioDeviceID audio_device_id);
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioInputStream::Close().
+ virtual ~AUAudioInputStream();
+
+ // Implementation of AudioInputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioInputCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual double GetMaxVolume() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual double GetVolume() OVERRIDE;
+
+ // Returns the current hardware sample rate for the default input device.
+ MEDIA_EXPORT static int HardwareSampleRate();
+
+ bool started() const { return started_; }
+ AudioUnit audio_unit() { return audio_unit_; }
+ AudioBufferList* audio_buffer_list() { return &audio_buffer_list_; }
+
+ private:
+ // AudioOutputUnit callback.
+ static OSStatus InputProc(void* user_data,
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ // Pushes recorded data to consumer of the input audio stream.
+ OSStatus Provide(UInt32 number_of_frames, AudioBufferList* io_data,
+ const AudioTimeStamp* time_stamp);
+
+ // Gets the fixed capture hardware latency and store it during initialization.
+ // Returns 0 if not available.
+ double GetHardwareLatency();
+
+ // Gets the current capture delay value.
+ double GetCaptureLatency(const AudioTimeStamp* input_time_stamp);
+
+ // Gets the number of channels for a stream of audio data.
+ int GetNumberOfChannelsFromStream();
+
+ // Issues the OnError() callback to the |sink_|.
+ void HandleError(OSStatus err);
+
+  // Helper function to check if the volume control is available on specific
+ // channel.
+ bool IsVolumeSettableOnChannel(int channel);
+
+ // Our creator, the audio manager needs to be notified when we close.
+ AudioManagerMac* manager_;
+
+ // Contains the desired number of audio frames in each callback.
+ size_t number_of_frames_;
+
+ // Pointer to the object that will receive the recorded audio samples.
+ AudioInputCallback* sink_;
+
+ // Structure that holds the desired output format of the stream.
+ // Note that, this format can differ from the device(=input) format.
+ AudioStreamBasicDescription format_;
+
+ // The special Audio Unit called AUHAL, which allows us to pass audio data
+ // directly from a microphone, through the HAL, and to our application.
+ // The AUHAL also enables selection of non default devices.
+ AudioUnit audio_unit_;
+
+ // The UID refers to the current input audio device.
+ AudioDeviceID input_device_id_;
+
+ // Provides a mechanism for encapsulating one or more buffers of audio data.
+ AudioBufferList audio_buffer_list_;
+
+ // Temporary storage for recorded data. The InputProc() renders into this
+ // array as soon as a frame of the desired buffer size has been recorded.
+ scoped_ptr<uint8[]> audio_data_buffer_;
+
+  // True after successful Start(), false after successful Stop().
+ bool started_;
+
+ // Fixed capture hardware latency in frames.
+ double hardware_latency_frames_;
+
+ // Delay due to the FIFO in bytes.
+ int fifo_delay_bytes_;
+
+ // The number of channels in each frame of audio data, which is used
+ // when querying the volume of each channel.
+ int number_of_channels_in_frame_;
+
+ // Accumulates recorded data packets until the requested size has been stored.
+ scoped_ptr<media::SeekableBuffer> fifo_;
+
+ // Intermediate storage of data from the FIFO before sending it to the
+ // client using the OnData() callback.
+ scoped_refptr<media::DataBuffer> data_;
+
+ // The client requests that the recorded data shall be delivered using
+ // OnData() callbacks where each callback contains this amount of bytes.
+ int requested_size_bytes_;
+
+ DISALLOW_COPY_AND_ASSIGN(AUAudioInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_INPUT_MAC_H_
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
new file mode 100644
index 00000000000..9b5985117d7
--- /dev/null
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
@@ -0,0 +1,317 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/environment.h"
+#include "base/message_loop/message_loop.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/mac/audio_low_latency_input_mac.h"
+#include "media/base/seekable_buffer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::AtLeast;
+using ::testing::Ge;
+using ::testing::NotNull;
+
+namespace media {
+
+ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
+ if (++*count >= limit) {
+ loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+ }
+}
+
+class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
+ public:
+ MOCK_METHOD5(OnData, void(AudioInputStream* stream,
+ const uint8* src, uint32 size,
+ uint32 hardware_delay_bytes, double volume));
+ MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+ MOCK_METHOD1(OnError, void(AudioInputStream* stream));
+};
+
+// This audio sink implementation should be used for manual tests only since
+// the recorded data is stored on a raw binary data file.
+// The last test (WriteToFileAudioSink) - which is disabled by default -
+// can use this audio sink to store the captured data on a file for offline
+// analysis.
+class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
+ public:
+ // Allocate space for ~10 seconds of data @ 48kHz in stereo:
+ // 2 bytes per sample, 2 channels, 10ms @ 48kHz, 10 seconds <=> 1920000 bytes.
+ static const int kMaxBufferSize = 2 * 2 * 480 * 100 * 10;
+
+ explicit WriteToFileAudioSink(const char* file_name)
+ : buffer_(0, kMaxBufferSize),
+ file_(fopen(file_name, "wb")),
+ bytes_to_write_(0) {
+ }
+
+ virtual ~WriteToFileAudioSink() {
+ int bytes_written = 0;
+ while (bytes_written < bytes_to_write_) {
+ const uint8* chunk;
+ int chunk_size;
+
+ // Stop writing if no more data is available.
+ if (!buffer_.GetCurrentChunk(&chunk, &chunk_size))
+ break;
+
+ // Write recorded data chunk to the file and prepare for next chunk.
+ fwrite(chunk, 1, chunk_size, file_);
+ buffer_.Seek(chunk_size);
+ bytes_written += chunk_size;
+ }
+ fclose(file_);
+ }
+
+ // AudioInputStream::AudioInputCallback implementation.
+ virtual void OnData(AudioInputStream* stream,
+ const uint8* src, uint32 size,
+ uint32 hardware_delay_bytes, double volume) OVERRIDE {
+    // Store recorded data in a temporary buffer to avoid making blocking
+ // fwrite() calls in the audio callback. The complete buffer will be
+ // written to file in the destructor.
+ if (buffer_.Append(src, size)) {
+ bytes_to_write_ += size;
+ }
+ }
+
+ virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
+ virtual void OnError(AudioInputStream* stream) OVERRIDE {}
+
+ private:
+ media::SeekableBuffer buffer_;
+ FILE* file_;
+ int bytes_to_write_;
+};
+
+class MacAudioInputTest : public testing::Test {
+ protected:
+ MacAudioInputTest() : audio_manager_(AudioManager::Create()) {}
+ virtual ~MacAudioInputTest() {}
+
+ // Convenience method which ensures that we are not running on the build
+ // bots and that at least one valid input device can be found.
+ bool CanRunAudioTests() {
+ bool has_input = audio_manager_->HasAudioInputDevices();
+ if (!has_input)
+ LOG(WARNING) << "No input devices detected";
+ return has_input;
+ }
+
+ // Convenience method which creates a default AudioInputStream object using
+ // a 10ms frame size and a sample rate which is set to the hardware sample
+ // rate.
+ AudioInputStream* CreateDefaultAudioInputStream() {
+ int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
+ int samples_per_packet = fs / 100;
+ AudioInputStream* ais = audio_manager_->MakeAudioInputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, fs, 16, samples_per_packet),
+ AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(ais);
+ return ais;
+ }
+
+ // Convenience method which creates an AudioInputStream object with a
+ // specified channel layout.
+ AudioInputStream* CreateAudioInputStream(ChannelLayout channel_layout) {
+ int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
+ int samples_per_packet = fs / 100;
+ AudioInputStream* ais = audio_manager_->MakeAudioInputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ channel_layout, fs, 16, samples_per_packet),
+ AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(ais);
+ return ais;
+ }
+
+ scoped_ptr<AudioManager> audio_manager_;
+};
+
+// Test Create(), Close().
+TEST_F(MacAudioInputTest, AUAudioInputStreamCreateAndClose) {
+ if (!CanRunAudioTests())
+ return;
+ AudioInputStream* ais = CreateDefaultAudioInputStream();
+ ais->Close();
+}
+
+// Test Open(), Close().
+TEST_F(MacAudioInputTest, AUAudioInputStreamOpenAndClose) {
+ if (!CanRunAudioTests())
+ return;
+ AudioInputStream* ais = CreateDefaultAudioInputStream();
+ EXPECT_TRUE(ais->Open());
+ ais->Close();
+}
+
+// Test Open(), Start(), Close().
+TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartAndClose) {
+ if (!CanRunAudioTests())
+ return;
+ AudioInputStream* ais = CreateDefaultAudioInputStream();
+ EXPECT_TRUE(ais->Open());
+ MockAudioInputCallback sink;
+ ais->Start(&sink);
+ EXPECT_CALL(sink, OnClose(ais))
+ .Times(1);
+ ais->Close();
+}
+
+// Test Open(), Start(), Stop(), Close().
+TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartStopAndClose) {
+ if (!CanRunAudioTests())
+ return;
+ AudioInputStream* ais = CreateDefaultAudioInputStream();
+ EXPECT_TRUE(ais->Open());
+ MockAudioInputCallback sink;
+ ais->Start(&sink);
+ ais->Stop();
+ EXPECT_CALL(sink, OnClose(ais))
+ .Times(1);
+ ais->Close();
+}
+
+// Test some additional calling sequences.
+TEST_F(MacAudioInputTest, AUAudioInputStreamMiscCallingSequences) {
+ if (!CanRunAudioTests())
+ return;
+ AudioInputStream* ais = CreateDefaultAudioInputStream();
+ AUAudioInputStream* auais = static_cast<AUAudioInputStream*>(ais);
+
+ // Open(), Open() should fail the second time.
+ EXPECT_TRUE(ais->Open());
+ EXPECT_FALSE(ais->Open());
+
+ MockAudioInputCallback sink;
+
+ // Start(), Start() is a valid calling sequence (second call does nothing).
+ ais->Start(&sink);
+ EXPECT_TRUE(auais->started());
+ ais->Start(&sink);
+ EXPECT_TRUE(auais->started());
+
+ // Stop(), Stop() is a valid calling sequence (second call does nothing).
+ ais->Stop();
+ EXPECT_FALSE(auais->started());
+ ais->Stop();
+ EXPECT_FALSE(auais->started());
+
+ EXPECT_CALL(sink, OnClose(ais))
+ .Times(1);
+ ais->Close();
+}
+
+// Verify that recording starts and stops correctly in mono using mocked sink.
+TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyMonoRecording) {
+ if (!CanRunAudioTests())
+ return;
+
+ int count = 0;
+ base::MessageLoopForUI loop;
+
+ // Create an audio input stream which records in mono.
+ AudioInputStream* ais = CreateAudioInputStream(CHANNEL_LAYOUT_MONO);
+ EXPECT_TRUE(ais->Open());
+
+ int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
+ int samples_per_packet = fs / 100;
+ int bits_per_sample = 16;
+ uint32 bytes_per_packet = samples_per_packet * (bits_per_sample / 8);
+
+ MockAudioInputCallback sink;
+
+ // We use 10ms packets and will run the test until ten packets are received.
+ // All should contain valid packets of the same size and a valid delay
+ // estimate.
+ EXPECT_CALL(sink, OnData(ais, NotNull(), bytes_per_packet, _, _))
+ .Times(AtLeast(10))
+ .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
+ ais->Start(&sink);
+ loop.Run();
+ ais->Stop();
+
+  // Verify that the sink receives OnClose() call when calling Close().
+ EXPECT_CALL(sink, OnClose(ais))
+ .Times(1);
+ ais->Close();
+}
+
+// Verify that recording starts and stops correctly in stereo using mocked sink.
+TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyStereoRecording) {
+ if (!CanRunAudioTests())
+ return;
+
+ int count = 0;
+ base::MessageLoopForUI loop;
+
+ // Create an audio input stream which records in stereo.
+ AudioInputStream* ais = CreateAudioInputStream(CHANNEL_LAYOUT_STEREO);
+ EXPECT_TRUE(ais->Open());
+
+ int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
+ int samples_per_packet = fs / 100;
+ int bits_per_sample = 16;
+ uint32 bytes_per_packet = 2 * samples_per_packet * (bits_per_sample / 8);
+
+ MockAudioInputCallback sink;
+
+ // We use 10ms packets and will run the test until ten packets are received.
+ // All should contain valid packets of the same size and a valid delay
+ // estimate.
+ // TODO(henrika): http://crbug.com/154352 forced us to run the capture side
+ // using a native buffer size of 128 audio frames and combine it with a FIFO
+ // to match the requested size by the client. This change might also have
+ // modified the delay estimates since the existing Ge(bytes_per_packet) for
+// parameter #4 no longer passes. I am removing this restriction here to
+ // ensure that we can land the patch but will revisit this test again when
+ // more analysis of the delay estimates are done.
+ EXPECT_CALL(sink, OnData(ais, NotNull(), bytes_per_packet, _, _))
+ .Times(AtLeast(10))
+ .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
+ ais->Start(&sink);
+ loop.Run();
+ ais->Stop();
+
+  // Verify that the sink receives OnClose() call when calling Close().
+ EXPECT_CALL(sink, OnClose(ais))
+ .Times(1);
+ ais->Close();
+}
+
+// This test is intended for manual tests and should only be enabled
+// when it is required to store the captured data on a local file.
+// By default, GTest will print out YOU HAVE 1 DISABLED TEST.
+// To include disabled tests in test execution, just invoke the test program
+// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
+// environment variable to a value greater than 0.
+TEST_F(MacAudioInputTest, DISABLED_AUAudioInputStreamRecordToFile) {
+ if (!CanRunAudioTests())
+ return;
+ const char* file_name = "out_stereo_10sec.pcm";
+
+ int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
+ AudioInputStream* ais = CreateDefaultAudioInputStream();
+ EXPECT_TRUE(ais->Open());
+
+ fprintf(stderr, " File name : %s\n", file_name);
+ fprintf(stderr, " Sample rate: %d\n", fs);
+ WriteToFileAudioSink file_sink(file_name);
+ fprintf(stderr, " >> Speak into the mic while recording...\n");
+ ais->Start(&file_sink);
+ base::PlatformThread::Sleep(TestTimeouts::action_timeout());
+ ais->Stop();
+ fprintf(stderr, " >> Recording has stopped.\n");
+ ais->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_low_latency_output_mac.cc b/chromium/media/audio/mac/audio_low_latency_output_mac.cc
new file mode 100644
index 00000000000..afa480aefb9
--- /dev/null
+++ b/chromium/media/audio/mac/audio_low_latency_output_mac.cc
@@ -0,0 +1,416 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_low_latency_output_mac.h"
+
+#include <CoreServices/CoreServices.h>
+
+#include "base/basictypes.h"
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/media_switches.h"
+
+namespace media {
+
+static std::ostream& operator<<(std::ostream& os,
+ const AudioStreamBasicDescription& format) {
+ os << "sample rate : " << format.mSampleRate << std::endl
+ << "format ID : " << format.mFormatID << std::endl
+ << "format flags : " << format.mFormatFlags << std::endl
+ << "bytes per packet : " << format.mBytesPerPacket << std::endl
+ << "frames per packet : " << format.mFramesPerPacket << std::endl
+ << "bytes per frame : " << format.mBytesPerFrame << std::endl
+ << "channels per frame: " << format.mChannelsPerFrame << std::endl
+ << "bits per channel : " << format.mBitsPerChannel;
+ return os;
+}
+
+static AudioObjectPropertyAddress kDefaultOutputDeviceAddress = {
+ kAudioHardwarePropertyDefaultOutputDevice,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+};
+
+// Overview of operation:
+// 1) An object of AUAudioOutputStream is created by the AudioManager
+// factory: audio_man->MakeAudioStream().
+// 2) Next some thread will call Open(), at that point the underlying
+// default output Audio Unit is created and configured.
+// 3) Then some thread will call Start(source).
+// Then the Audio Unit is started which creates its own thread which
+// periodically will call the source for more data as buffers are being
+// consumed.
+// 4) At some point some thread will call Stop(), which we handle by directly
+// stopping the default output Audio Unit.
+// 5) The same thread that called stop will call Close() where we cleanup
+// and notify the audio manager, which likely will destroy this object.
+
+AUAudioOutputStream::AUAudioOutputStream(
+ AudioManagerMac* manager, const AudioParameters& params)
+ : manager_(manager),
+ source_(NULL),
+ output_unit_(0),
+ output_device_id_(kAudioObjectUnknown),
+ volume_(1),
+ hardware_latency_frames_(0),
+ stopped_(false),
+ audio_bus_(AudioBus::Create(params)) {
+ // We must have a manager.
+ DCHECK(manager_);
+
+ // A frame is one sample across all channels. In interleaved audio the per
+ // frame fields identify the set of n |channels|. In uncompressed audio, a
+ // packet is always one frame.
+ format_.mSampleRate = params.sample_rate();
+ format_.mFormatID = kAudioFormatLinearPCM;
+ format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
+ kLinearPCMFormatFlagIsSignedInteger;
+ format_.mBitsPerChannel = params.bits_per_sample();
+ format_.mChannelsPerFrame = params.channels();
+ format_.mFramesPerPacket = 1;
+ format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
+ format_.mBytesPerFrame = format_.mBytesPerPacket;
+ format_.mReserved = 0;
+
+ DVLOG(1) << "Desired ouput format: " << format_;
+
+ // Calculate the number of sample frames per callback.
+ number_of_frames_ = params.frames_per_buffer();
+ DVLOG(1) << "Number of frames per callback: " << number_of_frames_;
+}
+
+AUAudioOutputStream::~AUAudioOutputStream() {
+}
+
+bool AUAudioOutputStream::Open() {
+  // Obtain the current default output device selected by the user.
+ UInt32 size = sizeof(output_device_id_);
+ OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+ &kDefaultOutputDeviceAddress,
+ 0,
+ 0,
+ &size,
+ &output_device_id_);
+ if (result != noErr || output_device_id_ == kAudioObjectUnknown) {
+ OSSTATUS_DLOG(ERROR, result)
+ << "Could not get default audio output device.";
+ return false;
+ }
+
+ // Open and initialize the DefaultOutputUnit.
+ AudioComponent comp;
+ AudioComponentDescription desc;
+
+ desc.componentType = kAudioUnitType_Output;
+ desc.componentSubType = kAudioUnitSubType_DefaultOutput;
+ desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ desc.componentFlags = 0;
+ desc.componentFlagsMask = 0;
+ comp = AudioComponentFindNext(0, &desc);
+ if (!comp)
+ return false;
+
+ result = AudioComponentInstanceNew(comp, &output_unit_);
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result) << "AudioComponentInstanceNew() failed.";
+ return false;
+ }
+
+ result = AudioUnitInitialize(output_unit_);
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result) << "AudioUnitInitialize() failed.";
+ return false;
+ }
+
+ hardware_latency_frames_ = GetHardwareLatency();
+
+ return Configure();
+}
+
+bool AUAudioOutputStream::Configure() {
+ // Set the render callback.
+ AURenderCallbackStruct input;
+ input.inputProc = InputProc;
+ input.inputProcRefCon = this;
+ OSStatus result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Global,
+ 0,
+ &input,
+ sizeof(input));
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result)
+ << "AudioUnitSetProperty(kAudioUnitProperty_SetRenderCallback) failed.";
+ return false;
+ }
+
+ // Set the stream format.
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input,
+ 0,
+ &format_,
+ sizeof(format_));
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result)
+ << "AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed.";
+ return false;
+ }
+
+ // Set the buffer frame size.
+ // WARNING: Setting this value changes the frame size for all audio units in
+ // the current process. It's imperative that the input and output frame sizes
+ // be the same as the frames_per_buffer() returned by
+ // GetDefaultOutputStreamParameters.
+ // See http://crbug.com/154352 for details.
+ const AudioParameters hw_params =
+ manager_->GetDefaultOutputStreamParameters();
+ if (number_of_frames_ != static_cast<size_t>(hw_params.frames_per_buffer())) {
+ DLOG(ERROR) << "Audio buffer size does not match hardware buffer size.";
+ return false;
+ }
+
+ UInt32 buffer_size = number_of_frames_;
+ result = AudioUnitSetProperty(
+ output_unit_,
+ kAudioDevicePropertyBufferFrameSize,
+ kAudioUnitScope_Output,
+ 0,
+ &buffer_size,
+ sizeof(buffer_size));
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result)
+ << "AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
+ return false;
+ }
+
+ return true;
+}
+
+void AUAudioOutputStream::Close() {
+ if (output_unit_)
+ AudioComponentInstanceDispose(output_unit_);
+
+ // Inform the audio manager that we have been closed. This can cause our
+ // destruction.
+ manager_->ReleaseOutputStream(this);
+}
+
+void AUAudioOutputStream::Start(AudioSourceCallback* callback) {
+ DCHECK(callback);
+ if (!output_unit_) {
+ DLOG(ERROR) << "Open() has not been called successfully";
+ return;
+ }
+
+ stopped_ = false;
+ {
+ base::AutoLock auto_lock(source_lock_);
+ source_ = callback;
+ }
+
+ AudioOutputUnitStart(output_unit_);
+}
+
+void AUAudioOutputStream::Stop() {
+ if (stopped_)
+ return;
+
+ AudioOutputUnitStop(output_unit_);
+
+ base::AutoLock auto_lock(source_lock_);
+ source_ = NULL;
+ stopped_ = true;
+}
+
+void AUAudioOutputStream::SetVolume(double volume) {
+ if (!output_unit_)
+ return;
+ volume_ = static_cast<float>(volume);
+
+ // TODO(crogers): set volume property
+}
+
+void AUAudioOutputStream::GetVolume(double* volume) {
+ if (!output_unit_)
+ return;
+ *volume = volume_;
+}
+
+// Pulls on our provider to get rendered audio stream.
+// Note to future hackers of this function: Do not add locks here because this
+// is running on a real-time thread (for low-latency).
+OSStatus AUAudioOutputStream::Render(UInt32 number_of_frames,
+ AudioBufferList* io_data,
+ const AudioTimeStamp* output_time_stamp) {
+ // Update the playout latency.
+ double playout_latency_frames = GetPlayoutLatency(output_time_stamp);
+
+ AudioBuffer& buffer = io_data->mBuffers[0];
+ uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
+ uint32 hardware_pending_bytes = static_cast<uint32>
+ ((playout_latency_frames + 0.5) * format_.mBytesPerFrame);
+
+ // Unfortunately AUAudioInputStream and AUAudioOutputStream share the frame
+ // size set by kAudioDevicePropertyBufferFrameSize above on a per process
+ // basis. What this means is that the |number_of_frames| value may be larger
+ // or smaller than the value set during Configure(). In this case either
+ // audio input or audio output will be broken, so just output silence.
+ // TODO(crogers): Figure out what can trigger a change in |number_of_frames|.
+ // See http://crbug.com/154352 for details.
+ if (number_of_frames != static_cast<UInt32>(audio_bus_->frames())) {
+ memset(audio_data, 0, number_of_frames * format_.mBytesPerFrame);
+ return noErr;
+ }
+
+ int frames_filled = 0;
+ {
+ // Render() shouldn't be called except between AudioOutputUnitStart() and
+ // AudioOutputUnitStop() calls, but crash reports have shown otherwise:
+ // http://crbug.com/178765. We use |source_lock_| to prevent races and
+ // crashes in Render() when |source_| is cleared.
+ base::AutoLock auto_lock(source_lock_);
+ if (!source_) {
+ memset(audio_data, 0, number_of_frames * format_.mBytesPerFrame);
+ return noErr;
+ }
+
+ frames_filled = source_->OnMoreData(
+ audio_bus_.get(), AudioBuffersState(0, hardware_pending_bytes));
+ }
+
+ // Note: If this ever changes to output raw float the data must be clipped and
+ // sanitized since it may come from an untrusted source such as NaCl.
+ audio_bus_->Scale(volume_);
+ audio_bus_->ToInterleaved(
+ frames_filled, format_.mBitsPerChannel / 8, audio_data);
+
+ return noErr;
+}
+
+// DefaultOutputUnit callback
+OSStatus AUAudioOutputStream::InputProc(void* user_data,
+ AudioUnitRenderActionFlags*,
+ const AudioTimeStamp* output_time_stamp,
+ UInt32,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data) {
+ AUAudioOutputStream* audio_output =
+ static_cast<AUAudioOutputStream*>(user_data);
+ if (!audio_output)
+ return -1;
+
+ return audio_output->Render(number_of_frames, io_data, output_time_stamp);
+}
+
+int AUAudioOutputStream::HardwareSampleRate() {
+ // Determine the default output device's sample-rate.
+ AudioDeviceID device_id = kAudioObjectUnknown;
+ UInt32 info_size = sizeof(device_id);
+ OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+ &kDefaultOutputDeviceAddress,
+ 0,
+ 0,
+ &info_size,
+ &device_id);
+ if (result != noErr || device_id == kAudioObjectUnknown) {
+ OSSTATUS_DLOG(WARNING, result)
+ << "Could not get default audio output device.";
+ return 0;
+ }
+
+ Float64 nominal_sample_rate;
+ info_size = sizeof(nominal_sample_rate);
+
+ AudioObjectPropertyAddress nominal_sample_rate_address = {
+ kAudioDevicePropertyNominalSampleRate,
+ kAudioObjectPropertyScopeGlobal,
+ kAudioObjectPropertyElementMaster
+ };
+ result = AudioObjectGetPropertyData(device_id,
+ &nominal_sample_rate_address,
+ 0,
+ 0,
+ &info_size,
+ &nominal_sample_rate);
+ if (result != noErr) {
+ OSSTATUS_DLOG(WARNING, result)
+ << "Could not get default sample rate for device: " << device_id;
+ return 0;
+ }
+
+ return static_cast<int>(nominal_sample_rate);
+}
+
+double AUAudioOutputStream::GetHardwareLatency() {
+ if (!output_unit_ || output_device_id_ == kAudioObjectUnknown) {
+ DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown";
+ return 0.0;
+ }
+
+ // Get audio unit latency.
+ Float64 audio_unit_latency_sec = 0.0;
+ UInt32 size = sizeof(audio_unit_latency_sec);
+ OSStatus result = AudioUnitGetProperty(output_unit_,
+ kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global,
+ 0,
+ &audio_unit_latency_sec,
+ &size);
+ if (result != noErr) {
+ OSSTATUS_DLOG(WARNING, result) << "Could not get audio unit latency";
+ return 0.0;
+ }
+
+ // Get output audio device latency.
+ AudioObjectPropertyAddress property_address = {
+ kAudioDevicePropertyLatency,
+ kAudioDevicePropertyScopeOutput,
+ kAudioObjectPropertyElementMaster
+ };
+ UInt32 device_latency_frames = 0;
+ size = sizeof(device_latency_frames);
+ result = AudioObjectGetPropertyData(output_device_id_,
+ &property_address,
+ 0,
+ NULL,
+ &size,
+ &device_latency_frames);
+ if (result != noErr) {
+ OSSTATUS_DLOG(WARNING, result) << "Could not get audio unit latency";
+ return 0.0;
+ }
+
+ return static_cast<double>((audio_unit_latency_sec *
+ format_.mSampleRate) + device_latency_frames);
+}
+
+double AUAudioOutputStream::GetPlayoutLatency(
+ const AudioTimeStamp* output_time_stamp) {
+ // Ensure mHostTime is valid.
+ if ((output_time_stamp->mFlags & kAudioTimeStampHostTimeValid) == 0)
+ return 0;
+
+ // Get the delay between the moment getting the callback and the scheduled
+ // time stamp that tells when the data is going to be played out.
+ UInt64 output_time_ns = AudioConvertHostTimeToNanos(
+ output_time_stamp->mHostTime);
+ UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
+
+ // Prevent overflow leading to huge delay information; occurs regularly on
+ // the bots, probably less so in the wild.
+ if (now_ns > output_time_ns)
+ return 0;
+
+ double delay_frames = static_cast<double>
+ (1e-9 * (output_time_ns - now_ns) * format_.mSampleRate);
+
+ return (delay_frames + hardware_latency_frames_);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_low_latency_output_mac.h b/chromium/media/audio/mac/audio_low_latency_output_mac.h
new file mode 100644
index 00000000000..27f3b3a837a
--- /dev/null
+++ b/chromium/media/audio/mac/audio_low_latency_output_mac.h
@@ -0,0 +1,115 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Implementation notes:
+//
+// - It is recommended to first acquire the native sample rate of the default
+// output device and then use the same rate when creating this object.
+// Use AUAudioOutputStream::HardwareSampleRate() to retrieve the sample rate.
+// - Calling Close() also leads to self destruction.
+// - The latency consists of two parts:
+// 1) Hardware latency, which includes Audio Unit latency, audio device
+// latency;
+// 2) The delay between the moment getting the callback and the scheduled time
+// stamp that tells when the data is going to be played out.
+//
+#ifndef MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
+
+#include <AudioUnit/AudioUnit.h>
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerMac;
+
+// Implementation of AudioOutputStream for Mac OS X using the
+// default output Audio Unit present in OS 10.4 and later.
+// The default output Audio Unit is for low-latency audio I/O.
+class AUAudioOutputStream : public AudioOutputStream {
+ public:
+ // The ctor takes all the usual parameters, plus |manager| which is the
+ // the audio manager who is creating this object.
+ AUAudioOutputStream(AudioManagerMac* manager,
+ const AudioParameters& params);
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioOutputStream::Close().
+ virtual ~AUAudioOutputStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ static int HardwareSampleRate();
+
+ private:
+ // DefaultOutputUnit callback.
+ static OSStatus InputProc(void* user_data,
+ AudioUnitRenderActionFlags* flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 bus_number,
+ UInt32 number_of_frames,
+ AudioBufferList* io_data);
+
+ OSStatus Render(UInt32 number_of_frames, AudioBufferList* io_data,
+ const AudioTimeStamp* output_time_stamp);
+
+ // Sets up the stream format for the default output Audio Unit.
+ bool Configure();
+
+ // Gets the fixed playout device hardware latency and stores it. Returns 0
+ // if not available.
+ double GetHardwareLatency();
+
+ // Gets the current playout latency value.
+ double GetPlayoutLatency(const AudioTimeStamp* output_time_stamp);
+
+ // Our creator, the audio manager needs to be notified when we close.
+ AudioManagerMac* manager_;
+
+ size_t number_of_frames_;
+
+ // Pointer to the object that will provide the audio samples.
+ AudioSourceCallback* source_;
+
+ // Protects |source_|. Necessary since Render() calls seem to be in flight
+ // when |output_unit_| is supposedly stopped. See http://crbug.com/178765.
+ base::Lock source_lock_;
+
+ // Structure that holds the stream format details such as bitrate.
+ AudioStreamBasicDescription format_;
+
+ // The default output Audio Unit which talks to the audio hardware.
+ AudioUnit output_unit_;
+
+ // The UID refers to the current output audio device.
+ AudioDeviceID output_device_id_;
+
+ // Volume level from 0 to 1.
+ float volume_;
+
+ // Fixed playout hardware latency in frames.
+ double hardware_latency_frames_;
+
+ // The flag used to stop the streaming.
+ bool stopped_;
+
+ // Container for retrieving data from AudioSourceCallback::OnMoreData().
+ scoped_ptr<AudioBus> audio_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(AUAudioOutputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
diff --git a/chromium/media/audio/mac/audio_manager_mac.cc b/chromium/media/audio/mac/audio_manager_mac.cc
new file mode 100644
index 00000000000..c0c18ee2cce
--- /dev/null
+++ b/chromium/media/audio/mac/audio_manager_mac.cc
@@ -0,0 +1,610 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_manager_mac.h"
+
+#include <CoreAudio/AudioHardware.h>
+#include <string>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/mac/mac_logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/strings/sys_string_conversions.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/mac/audio_auhal_mac.h"
+#include "media/audio/mac/audio_input_mac.h"
+#include "media/audio/mac/audio_low_latency_input_mac.h"
+#include "media/audio/mac/audio_low_latency_output_mac.h"
+#include "media/audio/mac/audio_synchronized_mac.h"
+#include "media/audio/mac/audio_unified_mac.h"
+#include "media/base/bind_to_loop.h"
+#include "media/base/channel_layout.h"
+#include "media/base/limits.h"
+#include "media/base/media_switches.h"
+
+namespace media {
+
+// Maximum number of output streams that can be open simultaneously.
+static const int kMaxOutputStreams = 50;
+
+// Default buffer size in samples for low-latency input and output streams.
+static const int kDefaultLowLatencyBufferSize = 128;
+
+// Default sample-rate on most Apple hardware, used as a fallback when the
+// hardware sample rate cannot be queried.
+static const int kFallbackSampleRate = 44100;
+
+// Returns the I/O buffer size (in frames) to use for |output_sample_rate|.
+// A user-specified buffer size always wins; otherwise the default
+// low-latency size is scaled up for sample rates above 48 kHz.
+static int ChooseBufferSize(int output_sample_rate) {
+  int buffer_size = kDefaultLowLatencyBufferSize;
+  const int user_buffer_size = GetUserBufferSize();
+  if (user_buffer_size) {
+    buffer_size = user_buffer_size;
+  } else if (output_sample_rate > 48000) {
+    // The default buffer size is too small for higher sample rates and may lead
+    // to glitching. Adjust upwards by multiples of the default size.
+    if (output_sample_rate <= 96000)
+      buffer_size = 2 * kDefaultLowLatencyBufferSize;
+    else if (output_sample_rate <= 192000)
+      buffer_size = 4 * kDefaultLowLatencyBufferSize;
+  }
+
+  return buffer_size;
+}
+
+// Returns true if the system reports a default audio device for the
+// direction chosen by |selector| (default input or default output device).
+static bool HasAudioHardware(AudioObjectPropertySelector selector) {
+  AudioDeviceID output_device_id = kAudioObjectUnknown;
+  const AudioObjectPropertyAddress property_address = {
+    selector,
+    kAudioObjectPropertyScopeGlobal,  // mScope
+    kAudioObjectPropertyElementMaster  // mElement
+  };
+  UInt32 output_device_id_size = static_cast<UInt32>(sizeof(output_device_id));
+  OSStatus err = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+                                            &property_address,
+                                            0,  // inQualifierDataSize
+                                            NULL,  // inQualifierData
+                                            &output_device_id_size,
+                                            &output_device_id);
+  return err == kAudioHardwareNoError &&
+      output_device_id != kAudioObjectUnknown;
+}
+
+// Returns true if the default input device is the same as
+// the default output device. Returns false if either default device cannot
+// be determined.
+bool AudioManagerMac::HasUnifiedDefaultIO() {
+  AudioDeviceID input_id, output_id;
+  if (!GetDefaultInputDevice(&input_id) || !GetDefaultOutputDevice(&output_id))
+    return false;
+
+  return input_id == output_id;
+}
+
+// Appends the name and unique-id of every audio device that has at least one
+// stream in the requested direction (|is_input|) to |device_names|. The list
+// is cleared first and left empty if the system device list cannot be
+// queried; individual devices whose properties cannot be read are skipped.
+static void GetAudioDeviceInfo(bool is_input,
+                               media::AudioDeviceNames* device_names) {
+  DCHECK(device_names);
+  device_names->clear();
+
+  // Query the number of total devices.
+  AudioObjectPropertyAddress property_address = {
+    kAudioHardwarePropertyDevices,
+    kAudioObjectPropertyScopeGlobal,
+    kAudioObjectPropertyElementMaster
+  };
+  UInt32 size = 0;
+  OSStatus result = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
+                                                   &property_address,
+                                                   0,
+                                                   NULL,
+                                                   &size);
+  if (result || !size)
+    return;
+
+  int device_count = size / sizeof(AudioDeviceID);
+
+  // Get the array of device ids for all the devices, which includes both
+  // input devices and output devices.
+  scoped_ptr_malloc<AudioDeviceID>
+      devices(reinterpret_cast<AudioDeviceID*>(malloc(size)));
+  AudioDeviceID* device_ids = devices.get();
+  result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+                                      &property_address,
+                                      0,
+                                      NULL,
+                                      &size,
+                                      device_ids);
+  if (result)
+    return;
+
+  // Iterate over all available devices to gather information.
+  for (int i = 0; i < device_count; ++i) {
+    // Get the number of input or output channels of the device.
+    property_address.mScope = is_input ?
+        kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput;
+    property_address.mSelector = kAudioDevicePropertyStreams;
+    size = 0;
+    result = AudioObjectGetPropertyDataSize(device_ids[i],
+                                            &property_address,
+                                            0,
+                                            NULL,
+                                            &size);
+    // Skip devices that have no streams in the requested direction.
+    if (result || !size)
+      continue;
+
+    // Get device UID.
+    CFStringRef uid = NULL;
+    size = sizeof(uid);
+    property_address.mSelector = kAudioDevicePropertyDeviceUID;
+    property_address.mScope = kAudioObjectPropertyScopeGlobal;
+    result = AudioObjectGetPropertyData(device_ids[i],
+                                        &property_address,
+                                        0,
+                                        NULL,
+                                        &size,
+                                        &uid);
+    if (result)
+      continue;
+
+    // Get device name.
+    CFStringRef name = NULL;
+    property_address.mSelector = kAudioObjectPropertyName;
+    property_address.mScope = kAudioObjectPropertyScopeGlobal;
+    result = AudioObjectGetPropertyData(device_ids[i],
+                                        &property_address,
+                                        0,
+                                        NULL,
+                                        &size,
+                                        &name);
+    if (result) {
+      // Release the UID obtained above before skipping this device.
+      if (uid)
+        CFRelease(uid);
+      continue;
+    }
+
+    // Store the device name and UID.
+    media::AudioDeviceName device_name;
+    device_name.device_name = base::SysCFStringRefToUTF8(name);
+    device_name.unique_id = base::SysCFStringRefToUTF8(uid);
+    device_names->push_back(device_name);
+
+    // We are responsible for releasing the returned CFObject. See the
+    // comment in the AudioHardware.h for constant
+    // kAudioDevicePropertyDeviceUID.
+    if (uid)
+      CFRelease(uid);
+    if (name)
+      CFRelease(name);
+  }
+}
+
+// Maps |device_id| (a device unique-id string, or
+// AudioManagerBase::kDefaultDeviceId for the default device) to a CoreAudio
+// AudioDeviceID for the direction given by |is_input|. Returns
+// kAudioObjectUnknown (after logging) on failure.
+static AudioDeviceID GetAudioDeviceIdByUId(bool is_input,
+                                           const std::string& device_id) {
+  AudioObjectPropertyAddress property_address = {
+    kAudioHardwarePropertyDevices,
+    kAudioObjectPropertyScopeGlobal,
+    kAudioObjectPropertyElementMaster
+  };
+  AudioDeviceID audio_device_id = kAudioObjectUnknown;
+  UInt32 device_size = sizeof(audio_device_id);
+  OSStatus result = -1;
+
+  if (device_id == AudioManagerBase::kDefaultDeviceId) {
+    // Default Device.
+    property_address.mSelector = is_input ?
+        kAudioHardwarePropertyDefaultInputDevice :
+        kAudioHardwarePropertyDefaultOutputDevice;
+
+    result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+                                        &property_address,
+                                        0,
+                                        0,
+                                        &device_size,
+                                        &audio_device_id);
+  } else {
+    // Non-default device: translate the UID string to a device id via the
+    // kAudioHardwarePropertyDeviceForUID translation property.
+    base::ScopedCFTypeRef<CFStringRef> uid(
+        base::SysUTF8ToCFStringRef(device_id));
+    AudioValueTranslation value;
+    value.mInputData = &uid;
+    value.mInputDataSize = sizeof(CFStringRef);
+    value.mOutputData = &audio_device_id;
+    value.mOutputDataSize = device_size;
+    UInt32 translation_size = sizeof(AudioValueTranslation);
+
+    property_address.mSelector = kAudioHardwarePropertyDeviceForUID;
+    result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+                                        &property_address,
+                                        0,
+                                        0,
+                                        &translation_size,
+                                        &value);
+  }
+
+  if (result) {
+    OSSTATUS_DLOG(WARNING, result) << "Unable to query device " << device_id
+                                   << " for AudioDeviceID";
+  }
+
+  return audio_device_id;
+}
+
+AudioManagerMac::AudioManagerMac()
+    : current_sample_rate_(0) {
+  // Baseline value until CreateDeviceListener() runs on the audio thread and
+  // queries the real default output device.
+  current_output_device_ = kAudioDeviceUnknown;
+
+  SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+
+  // Task must be posted last to avoid races from handing out "this" to the
+  // audio thread. Always PostTask even if we're on the right thread since
+  // AudioManager creation is on the startup path and this may be slow.
+  GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
+      &AudioManagerMac::CreateDeviceListener, base::Unretained(this)));
+}
+
+AudioManagerMac::~AudioManagerMac() {
+  // Tear down the device listener on the audio thread, where it was created.
+  if (GetMessageLoop()->BelongsToCurrentThread()) {
+    DestroyDeviceListener();
+  } else {
+    // It's safe to post a task here since Shutdown() will wait for all tasks to
+    // complete before returning.
+    GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
+        &AudioManagerMac::DestroyDeviceListener, base::Unretained(this)));
+  }
+
+  // Per the comment above, Shutdown() drains pending tasks, so the posted
+  // DestroyDeviceListener task runs before destruction completes.
+  Shutdown();
+}
+
+// AudioManager implementation: true if a default output device exists.
+bool AudioManagerMac::HasAudioOutputDevices() {
+  return HasAudioHardware(kAudioHardwarePropertyDefaultOutputDevice);
+}
+
+// AudioManager implementation: true if a default input device exists.
+bool AudioManagerMac::HasAudioInputDevices() {
+  return HasAudioHardware(kAudioHardwarePropertyDefaultInputDevice);
+}
+
+// TODO(crogers): There are several places on the OSX specific code which
+// could benefit from these helper functions.
+
+// Convenience wrappers over GetDefaultDevice().
+bool AudioManagerMac::GetDefaultInputDevice(
+    AudioDeviceID* device) {
+  return GetDefaultDevice(device, true);
+}
+
+bool AudioManagerMac::GetDefaultOutputDevice(
+    AudioDeviceID* device) {
+  return GetDefaultDevice(device, false);
+}
+
+// Fills |*device| with the system default input (|input| == true) or default
+// output device. Returns false and logs if the query fails or the device is
+// unknown.
+bool AudioManagerMac::GetDefaultDevice(
+    AudioDeviceID* device, bool input) {
+  CHECK(device);
+
+  // Obtain the current default input or output device selected by the user.
+  AudioObjectPropertyAddress pa;
+  pa.mSelector = input ? kAudioHardwarePropertyDefaultInputDevice :
+      kAudioHardwarePropertyDefaultOutputDevice;
+  pa.mScope = kAudioObjectPropertyScopeGlobal;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+
+  UInt32 size = sizeof(*device);
+
+  OSStatus result = AudioObjectGetPropertyData(
+      kAudioObjectSystemObject,
+      &pa,
+      0,
+      0,
+      &size,
+      device);
+
+  if ((result != kAudioHardwareNoError) || (*device == kAudioDeviceUnknown)) {
+    DLOG(ERROR) << "Error getting default AudioDevice.";
+    return false;
+  }
+
+  return true;
+}
+
+// Returns the channel count of the default output device in |*channels|.
+bool AudioManagerMac::GetDefaultOutputChannels(
+    int* channels) {
+  AudioDeviceID device;
+  if (!GetDefaultOutputDevice(&device))
+    return false;
+
+  return GetDeviceChannels(device,
+                           kAudioDevicePropertyScopeOutput,
+                           channels);
+}
+
+// Queries the stream configuration of |device| for |scope| and derives the
+// channel count, handling both interleaved (all channels in one buffer) and
+// non-interleaved (one buffer per channel) layouts.
+bool AudioManagerMac::GetDeviceChannels(
+    AudioDeviceID device,
+    AudioObjectPropertyScope scope,
+    int* channels) {
+  CHECK(channels);
+
+  // Get stream configuration.
+  AudioObjectPropertyAddress pa;
+  pa.mSelector = kAudioDevicePropertyStreamConfiguration;
+  pa.mScope = scope;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+
+  UInt32 size;
+  OSStatus result = AudioObjectGetPropertyDataSize(device, &pa, 0, 0, &size);
+  if (result != noErr || !size)
+    return false;
+
+  // Allocate storage; the property returns a variable-length AudioBufferList.
+  scoped_ptr<uint8[]> list_storage(new uint8[size]);
+  AudioBufferList& buffer_list =
+      *reinterpret_cast<AudioBufferList*>(list_storage.get());
+
+  result = AudioObjectGetPropertyData(
+      device,
+      &pa,
+      0,
+      0,
+      &size,
+      &buffer_list);
+  if (result != noErr)
+    return false;
+
+  // Determine the number of channels for the requested scope.
+  int channels_per_frame = buffer_list.mNumberBuffers > 0 ?
+      buffer_list.mBuffers[0].mNumberChannels : 0;
+  if (channels_per_frame == 1 && buffer_list.mNumberBuffers > 1) {
+    // Non-interleaved.
+    *channels = buffer_list.mNumberBuffers;
+  } else {
+    // Interleaved.
+    *channels = channels_per_frame;
+  }
+
+  return true;
+}
+
+// Returns the nominal sample rate of |device_id|, or 0 (after logging) if it
+// cannot be queried.
+int AudioManagerMac::HardwareSampleRateForDevice(AudioDeviceID device_id) {
+  Float64 nominal_sample_rate;
+  UInt32 info_size = sizeof(nominal_sample_rate);
+
+  static const AudioObjectPropertyAddress kNominalSampleRateAddress = {
+      kAudioDevicePropertyNominalSampleRate,
+      kAudioObjectPropertyScopeGlobal,
+      kAudioObjectPropertyElementMaster
+  };
+  OSStatus result = AudioObjectGetPropertyData(
+      device_id,
+      &kNominalSampleRateAddress,
+      0,
+      0,
+      &info_size,
+      &nominal_sample_rate);
+  if (result != noErr) {
+    OSSTATUS_DLOG(WARNING, result)
+        << "Could not get default sample rate for device: " << device_id;
+    return 0;
+  }
+
+  return static_cast<int>(nominal_sample_rate);
+}
+
+int AudioManagerMac::HardwareSampleRate() {
+  // Determine the default output device's sample-rate.
+  AudioDeviceID device_id = kAudioObjectUnknown;
+  // Fall back to a typical Apple hardware rate when no default output device
+  // is available.
+  if (!GetDefaultOutputDevice(&device_id))
+    return kFallbackSampleRate;
+
+  return HardwareSampleRateForDevice(device_id);
+}
+
+// AudioManager implementation: enumerates capture devices, prepending the
+// synthetic default-device entry when any real devices exist.
+void AudioManagerMac::GetAudioInputDeviceNames(
+    media::AudioDeviceNames* device_names) {
+  GetAudioDeviceInfo(true, device_names);
+  if (!device_names->empty()) {
+    // Prepend the default device to the list since we always want it to be
+    // on the top of the list for all platforms. There is no duplicate
+    // counting here since the default device has been abstracted out before.
+    media::AudioDeviceName name;
+    name.device_name = AudioManagerBase::kDefaultDeviceName;
+    name.unique_id = AudioManagerBase::kDefaultDeviceId;
+    device_names->push_front(name);
+  }
+}
+
+// AudioManager implementation: returns the capture parameters for
+// |device_id|, falling back to stereo and kFallbackSampleRate when the
+// device cannot be fully queried. Returns empty AudioParameters when the
+// device id cannot be resolved.
+AudioParameters AudioManagerMac::GetInputStreamParameters(
+    const std::string& device_id) {
+  // Due to the sharing of the input and output buffer sizes, we need to choose
+  // the input buffer size based on the output sample rate. See
+  // http://crbug.com/154352.
+  const int buffer_size = ChooseBufferSize(
+      AUAudioOutputStream::HardwareSampleRate());
+
+  AudioDeviceID device = GetAudioDeviceIdByUId(true, device_id);
+  if (device == kAudioObjectUnknown) {
+    DLOG(ERROR) << "Invalid device " << device_id;
+    return AudioParameters();
+  }
+
+  int channels = 0;
+  ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+  if (GetDeviceChannels(device, kAudioDevicePropertyScopeInput, &channels) &&
+      channels <= 2) {
+    channel_layout = GuessChannelLayout(channels);
+  } else {
+    DLOG(ERROR) << "Failed to get the device channels, use stereo as default "
+                << "for device " << device_id;
+  }
+
+  int sample_rate = HardwareSampleRateForDevice(device);
+  if (!sample_rate)
+    sample_rate = kFallbackSampleRate;
+
+  // TODO(xians): query the native channel layout for the specific device.
+  // 16 bits per sample.
+  return AudioParameters(
+      AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+      sample_rate, 16, buffer_size);
+}
+
+AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
+    const AudioParameters& params) {
+  // There is no separate linear path on Mac; delegate to low-latency.
+  return MakeLowLatencyOutputStream(params, std::string());
+}
+
+// AudioManagerBase implementation: chooses between AUHALStream (output-only,
+// unified default I/O, or an aggregate device) and AudioSynchronizedStream
+// (arbitrary combinations of input and output devices).
+AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
+    const AudioParameters& params, const std::string& input_device_id) {
+  // Handle basic output with no input channels.
+  if (params.input_channels() == 0) {
+    AudioDeviceID device = kAudioObjectUnknown;
+    GetDefaultOutputDevice(&device);
+    return new AUHALStream(this, params, device);
+  }
+
+  // TODO(crogers): support more than stereo input.
+  if (params.input_channels() != 2) {
+    // WebAudio is currently hard-coded to 2 channels so we should not
+    // see this case.
+    NOTREACHED() << "Only stereo input is currently supported!";
+    return NULL;
+  }
+
+  AudioDeviceID device = kAudioObjectUnknown;
+  if (HasUnifiedDefaultIO()) {
+    // For I/O, the simplest case is when the default input and output
+    // devices are the same.
+    GetDefaultOutputDevice(&device);
+    LOG(INFO) << "UNIFIED: default input and output devices are identical";
+  } else {
+    // Some audio hardware is presented as separate input and output devices
+    // even though they are really the same physical hardware and
+    // share the same "clock domain" at the lowest levels of the driver.
+    // A common of example of this is the "built-in" audio hardware:
+    // "Built-in Line Input"
+    // "Built-in Output"
+    // We would like to use an "aggregate" device for these situations, since
+    // CoreAudio will make the most efficient use of the shared "clock domain"
+    // so we get the lowest latency and use fewer threads.
+    device = aggregate_device_manager_.GetDefaultAggregateDevice();
+    if (device != kAudioObjectUnknown)
+      LOG(INFO) << "Using AGGREGATE audio device";
+  }
+
+  if (device != kAudioObjectUnknown &&
+      input_device_id == AudioManagerBase::kDefaultDeviceId)
+    return new AUHALStream(this, params, device);
+
+  // Fallback to AudioSynchronizedStream which will handle completely
+  // different and arbitrary combinations of input and output devices
+  // even running at different sample-rates.
+  // kAudioDeviceUnknown translates to "use default" here.
+  // TODO(crogers): consider tracking UMA stats on AUHALStream
+  // versus AudioSynchronizedStream.
+  AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, input_device_id);
+  if (audio_device_id == kAudioObjectUnknown)
+    return NULL;
+
+  return new AudioSynchronizedStream(this,
+                                     params,
+                                     audio_device_id,
+                                     kAudioDeviceUnknown);
+}
+
+AudioInputStream* AudioManagerMac::MakeLinearInputStream(
+    const AudioParameters& params, const std::string& device_id) {
+  DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+  // Linear capture uses the PCM-queue based implementation.
+  return new PCMQueueInAudioInputStream(this, params);
+}
+
+// Returns NULL when |device_id| cannot be resolved to an AudioDeviceID.
+AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
+    const AudioParameters& params, const std::string& device_id) {
+  DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+  // Gets the AudioDeviceID that refers to the AudioOutputDevice with the device
+  // unique id. This AudioDeviceID is used to set the device for Audio Unit.
+  AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, device_id);
+  AudioInputStream* stream = NULL;
+  if (audio_device_id != kAudioObjectUnknown)
+    stream = new AUAudioInputStream(this, params, audio_device_id);
+
+  return stream;
+}
+
+// AudioManagerBase implementation: derives the preferred output parameters
+// from the default output hardware, constrained by |input_params| when
+// synchronized I/O is requested.
+AudioParameters AudioManagerMac::GetPreferredOutputStreamParameters(
+    const AudioParameters& input_params) {
+  int hardware_channels = 2;
+  if (!GetDefaultOutputChannels(&hardware_channels)) {
+    // Fallback to stereo.
+    hardware_channels = 2;
+  }
+
+  ChannelLayout channel_layout = GuessChannelLayout(hardware_channels);
+
+  const int hardware_sample_rate = AUAudioOutputStream::HardwareSampleRate();
+  const int buffer_size = ChooseBufferSize(hardware_sample_rate);
+
+  int input_channels = 0;
+  if (input_params.IsValid()) {
+    input_channels = input_params.input_channels();
+
+    if (input_channels > 0) {
+      // TODO(crogers): given the limitations of the AudioOutputStream
+      // back-ends used with synchronized I/O, we hard-code to stereo.
+      // Specifically, this is a limitation of AudioSynchronizedStream which
+      // can be removed as part of the work to consolidate these back-ends.
+      channel_layout = CHANNEL_LAYOUT_STEREO;
+    }
+  }
+
+  // 16 bits per sample; buffer size is tied to the hardware sample rate.
+  AudioParameters params(
+      AudioParameters::AUDIO_PCM_LOW_LATENCY,
+      channel_layout,
+      input_channels,
+      hardware_sample_rate,
+      16,
+      buffer_size);
+
+  // Layouts we cannot name are passed through as a discrete channel count.
+  if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)
+    params.SetDiscreteChannels(hardware_channels);
+
+  return params;
+}
+
+// Must run on the audio thread; creates the device-change listener.
+void AudioManagerMac::CreateDeviceListener() {
+  DCHECK(GetMessageLoop()->BelongsToCurrentThread());
+
+  // Get a baseline for the sample-rate and current device,
+  // so we can intelligently handle device notifications only when necessary.
+  current_sample_rate_ = HardwareSampleRate();
+  if (!GetDefaultOutputDevice(&current_output_device_))
+    current_output_device_ = kAudioDeviceUnknown;
+
+  output_device_listener_.reset(new AudioDeviceListenerMac(base::Bind(
+      &AudioManagerMac::HandleDeviceChanges, base::Unretained(this))));
+}
+
+// Must run on the audio thread (where the listener was created).
+void AudioManagerMac::DestroyDeviceListener() {
+  DCHECK(GetMessageLoop()->BelongsToCurrentThread());
+  output_device_listener_.reset();
+}
+
+void AudioManagerMac::HandleDeviceChanges() {
+  // Bounce to the audio thread if called from any other thread.
+  if (!GetMessageLoop()->BelongsToCurrentThread()) {
+    GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
+        &AudioManagerMac::HandleDeviceChanges, base::Unretained(this)));
+    return;
+  }
+
+  int new_sample_rate = HardwareSampleRate();
+  AudioDeviceID new_output_device;
+  GetDefaultOutputDevice(&new_output_device);
+
+  // Ignore notifications where neither the sample rate nor the default
+  // output device actually changed.
+  if (current_sample_rate_ == new_sample_rate &&
+      current_output_device_ == new_output_device)
+    return;
+
+  current_sample_rate_ = new_sample_rate;
+  current_output_device_ = new_output_device;
+  NotifyAllOutputDeviceChangeListeners();
+}
+
+// Platform factory: creates the Mac AudioManager instance.
+AudioManager* CreateAudioManager() {
+  return new AudioManagerMac();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_manager_mac.h b/chromium/media/audio/mac/audio_manager_mac.h
new file mode 100644
index 00000000000..cd3cc2e94b5
--- /dev/null
+++ b/chromium/media/audio/mac/audio_manager_mac.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_MANAGER_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_MANAGER_MAC_H_
+
+#include <CoreAudio/AudioHardware.h>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/mac/aggregate_device_manager.h"
+#include "media/audio/mac/audio_device_listener_mac.h"
+
+namespace media {
+
+// Mac OS X implementation of the AudioManager singleton. This class is internal
+// to the audio output and only internal users can call methods not exposed by
+// the AudioManager class.
+class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
+ public:
+  AudioManagerMac();
+
+  // Implementation of AudioManager.
+  virtual bool HasAudioOutputDevices() OVERRIDE;
+  virtual bool HasAudioInputDevices() OVERRIDE;
+  virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
+      OVERRIDE;
+  virtual AudioParameters GetInputStreamParameters(
+      const std::string& device_id) OVERRIDE;
+
+  // Implementation of AudioManagerBase.
+  virtual AudioOutputStream* MakeLinearOutputStream(
+      const AudioParameters& params) OVERRIDE;
+  virtual AudioOutputStream* MakeLowLatencyOutputStream(
+      const AudioParameters& params,
+      const std::string& input_device_id) OVERRIDE;
+  virtual AudioInputStream* MakeLinearInputStream(
+      const AudioParameters& params, const std::string& device_id) OVERRIDE;
+  virtual AudioInputStream* MakeLowLatencyInputStream(
+      const AudioParameters& params, const std::string& device_id) OVERRIDE;
+
+  // Static helpers for querying CoreAudio default devices and their
+  // properties. Each returns false (or 0) on failure.
+  static bool GetDefaultInputDevice(AudioDeviceID* device);
+  static bool GetDefaultOutputDevice(AudioDeviceID* device);
+  static bool GetDefaultDevice(AudioDeviceID* device, bool input);
+
+  static bool GetDefaultOutputChannels(int* channels);
+
+  static bool GetDeviceChannels(AudioDeviceID device,
+                                AudioObjectPropertyScope scope,
+                                int* channels);
+
+  // Returns the nominal sample rate of |device_id|, or 0 on failure.
+  static int HardwareSampleRateForDevice(AudioDeviceID device_id);
+  // Sample rate of the default output device (kFallbackSampleRate if none).
+  static int HardwareSampleRate();
+
+  // Notify streams of a device change if the default output device or its
+  // sample rate has changed, otherwise does nothing.
+  void HandleDeviceChanges();
+
+ protected:
+  virtual ~AudioManagerMac();
+
+  virtual AudioParameters GetPreferredOutputStreamParameters(
+      const AudioParameters& input_params) OVERRIDE;
+
+ private:
+  // Returns true if the default input and output devices are the same.
+  bool HasUnifiedDefaultIO();
+
+  // Helper methods for constructing AudioDeviceListenerMac on the audio thread.
+  void CreateDeviceListener();
+  void DestroyDeviceListener();
+
+  scoped_ptr<AudioDeviceListenerMac> output_device_listener_;
+
+  // Track the output sample-rate and the default output device
+  // so we can intelligently handle device notifications only when necessary.
+  int current_sample_rate_;
+  AudioDeviceID current_output_device_;
+
+  AggregateDeviceManager aggregate_device_manager_;
+
+  DISALLOW_COPY_AND_ASSIGN(AudioManagerMac);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_MANAGER_MAC_H_
diff --git a/chromium/media/audio/mac/audio_synchronized_mac.cc b/chromium/media/audio/mac/audio_synchronized_mac.cc
new file mode 100644
index 00000000000..a2484ca67fe
--- /dev/null
+++ b/chromium/media/audio/mac/audio_synchronized_mac.cc
@@ -0,0 +1,977 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_synchronized_mac.h"
+
+#include <CoreServices/CoreServices.h>
+#include <algorithm>
+
+#include "base/basictypes.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/channel_mixer.h"
+
+namespace media {
+
+// Hardware I/O buffer size in frames requested from the input and output
+// devices.
+static const int kHardwareBufferSize = 128;
+
+// Capacity in frames of the FIFO that decouples the input and output
+// callbacks.
+static const int kFifoSize = 16384;
+
+// TODO(crogers): handle the non-stereo case.
+static const int kChannels = 2;
+
+// This value was determined empirically for minimum latency while still
+// guarding against FIFO under-runs.
+static const int kBaseTargetFifoFrames = 256 + 64;
+
+// If the input and output sample-rate don't match, then we need to maintain
+// an additional safety margin due to the callback timing jitter and the
+// varispeed buffering. This value was empirically tuned.
+static const int kAdditionalTargetFifoFrames = 128;
+
+// Zero-fills the data of every buffer in |buffer_list|.
+static void ZeroBufferList(AudioBufferList* buffer_list) {
+  for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
+    memset(buffer_list->mBuffers[i].mData,
+           0,
+           buffer_list->mBuffers[i].mDataByteSize);
+}
+
+// Points |bus|'s channels at |buffer_list|'s sample data (no copying) and
+// sets the bus length to |frames|. If |buffer_list| has fewer channels than
+// |bus|, its last channel is duplicated for the remainder.
+static void WrapBufferList(AudioBufferList* buffer_list,
+                           AudioBus* bus,
+                           int frames) {
+  DCHECK(buffer_list);
+  DCHECK(bus);
+  int channels = bus->channels();
+  int buffer_list_channels = buffer_list->mNumberBuffers;
+
+  // Copy pointers from AudioBufferList.
+  int source_idx = 0;
+  for (int i = 0; i < channels; ++i) {
+    bus->SetChannelData(
+        i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));
+
+    // It's ok to pass in a |buffer_list| with fewer channels, in which
+    // case we just duplicate the last channel.
+    if (source_idx < buffer_list_channels - 1)
+      ++source_idx;
+  }
+
+  // Finally set the actual length.
+  bus->set_frames(frames);
+}
+
+// Member initialization only; the AudioUnits are created and configured in
+// Open(). |first_input_time_| == -1 appears to mark "no input callback seen
+// yet" (it is reset in Start()) — confirm against the callback code.
+AudioSynchronizedStream::AudioSynchronizedStream(
+    AudioManagerMac* manager,
+    const AudioParameters& params,
+    AudioDeviceID input_id,
+    AudioDeviceID output_id)
+    : manager_(manager),
+      params_(params),
+      input_sample_rate_(0),
+      output_sample_rate_(0),
+      input_id_(input_id),
+      output_id_(output_id),
+      input_buffer_list_(NULL),
+      fifo_(kChannels, kFifoSize),
+      target_fifo_frames_(kBaseTargetFifoFrames),
+      average_delta_(0.0),
+      fifo_rate_compensation_(1.0),
+      input_unit_(0),
+      varispeed_unit_(0),
+      output_unit_(0),
+      first_input_time_(-1),
+      is_running_(false),
+      hardware_buffer_size_(kHardwareBufferSize),
+      channels_(kChannels) {
+  VLOG(1) << "AudioSynchronizedStream::AudioSynchronizedStream()";
+}
+
+// Close() must already have been called; it uninitializes, closes and nulls
+// all three AudioUnits.
+AudioSynchronizedStream::~AudioSynchronizedStream() {
+  DCHECK(!input_unit_);
+  DCHECK(!output_unit_);
+  DCHECK(!varispeed_unit_);
+}
+
+// Creates the input, varispeed and output AudioUnits, configures their
+// devices, callbacks and stream formats, then primes the FIFO with
+// |target_fifo_frames_| frames of silence. Returns false (after logging)
+// on any failure.
+bool AudioSynchronizedStream::Open() {
+  if (params_.channels() != kChannels) {
+    LOG(ERROR) << "Only stereo output is currently supported.";
+    return false;
+  }
+
+  // Create the input, output, and varispeed AudioUnits.
+  OSStatus result = CreateAudioUnits();
+  if (result != noErr) {
+    LOG(ERROR) << "Cannot create AudioUnits.";
+    return false;
+  }
+
+  result = SetupInput(input_id_);
+  if (result != noErr) {
+    LOG(ERROR) << "Error configuring input AudioUnit.";
+    return false;
+  }
+
+  result = SetupOutput(output_id_);
+  if (result != noErr) {
+    LOG(ERROR) << "Error configuring output AudioUnit.";
+    return false;
+  }
+
+  result = SetupCallbacks();
+  if (result != noErr) {
+    LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
+    return false;
+  }
+
+  result = SetupStreamFormats();
+  if (result != noErr) {
+    LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
+    return false;
+  }
+
+  AllocateInputData();
+
+  // Final initialization of the AudioUnits.
+  result = AudioUnitInitialize(input_unit_);
+  if (result != noErr) {
+    LOG(ERROR) << "Error initializing input AudioUnit.";
+    return false;
+  }
+
+  result = AudioUnitInitialize(output_unit_);
+  if (result != noErr) {
+    LOG(ERROR) << "Error initializing output AudioUnit.";
+    return false;
+  }
+
+  result = AudioUnitInitialize(varispeed_unit_);
+  if (result != noErr) {
+    LOG(ERROR) << "Error initializing varispeed AudioUnit.";
+    return false;
+  }
+
+  if (input_sample_rate_ != output_sample_rate_) {
+    // Add extra safety margin.
+    target_fifo_frames_ += kAdditionalTargetFifoFrames;
+  }
+
+  // Buffer initial silence corresponding to target I/O buffering.
+  fifo_.Clear();
+  scoped_ptr<AudioBus> silence =
+      AudioBus::Create(channels_, target_fifo_frames_);
+  silence->Zero();
+  fifo_.Push(silence.get());
+
+  return true;
+}
+
+// Releases the input buffer list and all three AudioUnits, then notifies
+// the manager, which may delete |this|.
+void AudioSynchronizedStream::Close() {
+  DCHECK(!is_running_);
+
+  if (input_buffer_list_) {
+    free(input_buffer_list_);
+    input_buffer_list_ = 0;
+    input_bus_.reset(NULL);
+    wrapper_bus_.reset(NULL);
+  }
+
+  if (input_unit_) {
+    AudioUnitUninitialize(input_unit_);
+    CloseComponent(input_unit_);
+  }
+
+  if (output_unit_) {
+    AudioUnitUninitialize(output_unit_);
+    CloseComponent(output_unit_);
+  }
+
+  if (varispeed_unit_) {
+    AudioUnitUninitialize(varispeed_unit_);
+    CloseComponent(varispeed_unit_);
+  }
+
+  input_unit_ = NULL;
+  output_unit_ = NULL;
+  varispeed_unit_ = NULL;
+
+  // Inform the audio manager that we have been closed. This can cause our
+  // destruction.
+  manager_->ReleaseOutputStream(this);
+}
+
+// Registers |callback| as the sample source and starts the input and output
+// AudioUnits. No-op when already running or not fully opened.
+void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
+  DCHECK(callback);
+  DCHECK(input_unit_);
+  DCHECK(output_unit_);
+  DCHECK(varispeed_unit_);
+
+  if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
+    return;
+
+  source_ = callback;
+
+  // Reset state variables each time we Start().
+  fifo_rate_compensation_ = 1.0;
+  average_delta_ = 0.0;
+
+  OSStatus result = noErr;
+
+  if (!is_running_) {
+    first_input_time_ = -1;
+
+    result = AudioOutputUnitStart(input_unit_);
+    OSSTATUS_DCHECK(result == noErr, result);
+
+    // Only start the output unit if the input unit started cleanly.
+    if (result == noErr) {
+      result = AudioOutputUnitStart(output_unit_);
+      OSSTATUS_DCHECK(result == noErr, result);
+    }
+  }
+
+  is_running_ = true;
+}
+
+// Stops both AudioUnits; |is_running_| is cleared only if stopping
+// succeeded.
+void AudioSynchronizedStream::Stop() {
+  OSStatus result = noErr;
+  if (is_running_) {
+    result = AudioOutputUnitStop(input_unit_);
+    OSSTATUS_DCHECK(result == noErr, result);
+
+    if (result == noErr) {
+      result = AudioOutputUnitStop(output_unit_);
+      OSSTATUS_DCHECK(result == noErr, result);
+    }
+  }
+
+  if (result == noErr)
+    is_running_ = false;
+}
+
+bool AudioSynchronizedStream::IsRunning() {
+  return is_running_;
+}
+
+// TODO(crogers): implement - or remove from AudioOutputStream.
+void AudioSynchronizedStream::SetVolume(double volume) {}
+void AudioSynchronizedStream::GetVolume(double* volume) {}
+
+// Resolves |output_id| (using the system default if unknown), sets its
+// buffer frame size to |hardware_buffer_size_|, and makes it the current
+// device of the output AudioUnit.
+OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
+    AudioDeviceID output_id) {
+  OSStatus result = noErr;
+
+  // Get the default output device if device is unknown.
+  if (output_id == kAudioDeviceUnknown) {
+    AudioObjectPropertyAddress pa;
+    pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+    pa.mScope = kAudioObjectPropertyScopeGlobal;
+    pa.mElement = kAudioObjectPropertyElementMaster;
+    UInt32 size = sizeof(output_id);
+
+    result = AudioObjectGetPropertyData(
+        kAudioObjectSystemObject,
+        &pa,
+        0,
+        0,
+        &size,
+        &output_id);
+
+    OSSTATUS_DCHECK(result == noErr, result);
+    if (result != noErr)
+      return result;
+  }
+
+  // Set the render frame size.
+  UInt32 frame_size = hardware_buffer_size_;
+  AudioObjectPropertyAddress pa;
+  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+  // NOTE(review): ScopeInput is used here even though |output_id| is an
+  // output device; kAudioDevicePropertyScopeOutput looks intended — confirm.
+  pa.mScope = kAudioDevicePropertyScopeInput;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+  result = AudioObjectSetPropertyData(
+      output_id,
+      &pa,
+      0,
+      0,
+      sizeof(frame_size),
+      &frame_size);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  output_info_.Initialize(output_id, false);
+
+  // Set the Current Device to the Default Output Unit.
+  result = AudioUnitSetProperty(
+      output_unit_,
+      kAudioOutputUnitProperty_CurrentDevice,
+      kAudioUnitScope_Global,
+      0,
+      &output_info_.id_,
+      sizeof(output_info_.id_));
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  return result;
+}
+
+// Resolves |input_id| (using the system default if unknown), sets its buffer
+// frame size to |hardware_buffer_size_|, and makes it the current device of
+// the input AUHAL.
+OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
+    AudioDeviceID input_id) {
+  OSStatus result = noErr;
+
+  // Get the default input device if device is unknown.
+  if (input_id == kAudioDeviceUnknown) {
+    AudioObjectPropertyAddress pa;
+    pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+    pa.mScope = kAudioObjectPropertyScopeGlobal;
+    pa.mElement = kAudioObjectPropertyElementMaster;
+    UInt32 size = sizeof(input_id);
+
+    result = AudioObjectGetPropertyData(
+        kAudioObjectSystemObject,
+        &pa,
+        0,
+        0,
+        &size,
+        &input_id);
+
+    OSSTATUS_DCHECK(result == noErr, result);
+    if (result != noErr)
+      return result;
+  }
+
+  // Set the render frame size.
+  UInt32 frame_size = hardware_buffer_size_;
+  AudioObjectPropertyAddress pa;
+  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+  pa.mScope = kAudioDevicePropertyScopeInput;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+  result = AudioObjectSetPropertyData(
+      input_id,
+      &pa,
+      0,
+      0,
+      sizeof(frame_size),
+      &frame_size);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  input_info_.Initialize(input_id, true);
+
+  // Set the Current Device to the AUHAL.
+  // This should be done only after I/O has been enabled on the AUHAL.
+  result = AudioUnitSetProperty(
+      input_unit_,
+      kAudioOutputUnitProperty_CurrentDevice,
+      kAudioUnitScope_Global,
+      0,
+      &input_info_.id_,
+      sizeof(input_info_.id_));
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  return result;
+}
+
+// Locates and opens the three AudioUnits used by the stream: a varispeed
+// format converter, the input AUHAL and the default output unit.  Uses the
+// legacy Component Manager API (FindNextComponent/OpenAComponent).
+// Returns -1 when a component cannot be found, otherwise the OSStatus
+// from opening the component.
+OSStatus AudioSynchronizedStream::CreateAudioUnits() {
+  // Q: Why do we need a varispeed unit?
+  // A: If the input device and the output device are running at
+  // different sample rates and/or on different clocks, we will need
+  // to compensate to avoid a pitch change and
+  // to avoid buffer under and over runs.
+  ComponentDescription varispeed_desc;
+  varispeed_desc.componentType = kAudioUnitType_FormatConverter;
+  varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
+  varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+  varispeed_desc.componentFlags = 0;
+  varispeed_desc.componentFlagsMask = 0;
+
+  Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
+  if (varispeed_comp == NULL)
+    return -1;
+
+  OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Open input AudioUnit (the AUHAL used for capture).
+  ComponentDescription input_desc;
+  input_desc.componentType = kAudioUnitType_Output;
+  input_desc.componentSubType = kAudioUnitSubType_HALOutput;
+  input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+  input_desc.componentFlags = 0;
+  input_desc.componentFlagsMask = 0;
+
+  Component input_comp = FindNextComponent(NULL, &input_desc);
+  if (input_comp == NULL)
+    return -1;
+
+  result = OpenAComponent(input_comp, &input_unit_);
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Open output AudioUnit (the system default output unit).
+  ComponentDescription output_desc;
+  output_desc.componentType = kAudioUnitType_Output;
+  output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
+  output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+  output_desc.componentFlags = 0;
+  output_desc.componentFlagsMask = 0;
+
+  Component output_comp = FindNextComponent(NULL, &output_desc);
+  if (output_comp == NULL)
+    return -1;
+
+  result = OpenAComponent(output_comp, &output_unit_);
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  return noErr;
+}
+
+// Prepares the input AUHAL: initializes it, enables capture (and disables
+// render) on it via EnableIO(), then binds it to |input_id|.  The
+// initialize -> EnableIO -> CurrentDevice ordering is deliberate; see the
+// comments below and in SetInputDeviceAsCurrent().
+OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
+  // The AUHAL used for input needs to be initialized
+  // before anything is done to it.
+  OSStatus result = AudioUnitInitialize(input_unit_);
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // We must enable the Audio Unit (AUHAL) for input and disable output
+  // BEFORE setting the AUHAL's current device.
+  result = EnableIO();
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  result = SetInputDeviceAsCurrent(input_id);
+  OSSTATUS_DCHECK(result == noErr, result);
+
+  return result;
+}
+
+// Configures the input AUHAL's I/O: enables input on element 1 (the AUHAL's
+// input element) and disables output on element 0 (its output element), so
+// the unit acts purely as a capture unit.
+OSStatus AudioSynchronizedStream::EnableIO() {
+  // Enable input on the AUHAL.
+  UInt32 enable_io = 1;
+  OSStatus result = AudioUnitSetProperty(
+      input_unit_,
+      kAudioOutputUnitProperty_EnableIO,
+      kAudioUnitScope_Input,
+      1,  // input element
+      &enable_io,
+      sizeof(enable_io));
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Disable Output on the AUHAL.
+  enable_io = 0;
+  result = AudioUnitSetProperty(
+      input_unit_,
+      kAudioOutputUnitProperty_EnableIO,
+      kAudioUnitScope_Output,
+      0,  // output element
+      &enable_io,
+      sizeof(enable_io));
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  return result;
+}
+
+// Binds the default output unit to |output_id| and disables its
+// start-timestamps-at-zero behavior; continuous timestamps are required to
+// keep input and output in sync across sample-rate changes.
+OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
+  OSStatus result = noErr;
+
+  result = SetOutputDeviceAsCurrent(output_id);
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Tell the output unit not to reset timestamps.
+  // Otherwise sample rate changes will cause sync loss.
+  UInt32 start_at_zero = 0;
+  result = AudioUnitSetProperty(
+      output_unit_,
+      kAudioOutputUnitProperty_StartTimestampsAtZero,
+      kAudioUnitScope_Global,
+      0,
+      &start_at_zero,
+      sizeof(start_at_zero));
+
+  OSSTATUS_DCHECK(result == noErr, result);
+
+  return result;
+}
+
+// Registers the three static callbacks with their respective units:
+//  - InputProc: invoked by the AUHAL when new capture data is available,
+//  - OutputProc: render callback pulling data for the output unit,
+//  - VarispeedProc: render callback feeding the varispeed from the FIFO.
+// Each receives |this| as its refCon.
+OSStatus AudioSynchronizedStream::SetupCallbacks() {
+  // Set the input callback.
+  AURenderCallbackStruct callback;
+  callback.inputProc = InputProc;
+  callback.inputProcRefCon = this;
+  OSStatus result = AudioUnitSetProperty(
+      input_unit_,
+      kAudioOutputUnitProperty_SetInputCallback,
+      kAudioUnitScope_Global,
+      0,
+      &callback,
+      sizeof(callback));
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Set the output callback.
+  callback.inputProc = OutputProc;
+  callback.inputProcRefCon = this;
+  result = AudioUnitSetProperty(
+      output_unit_,
+      kAudioUnitProperty_SetRenderCallback,
+      kAudioUnitScope_Input,
+      0,
+      &callback,
+      sizeof(callback));
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Set the varispeed callback.
+  callback.inputProc = VarispeedProc;
+  callback.inputProcRefCon = this;
+  result = AudioUnitSetProperty(
+      varispeed_unit_,
+      kAudioUnitProperty_SetRenderCallback,
+      kAudioUnitScope_Input,
+      0,
+      &callback,
+      sizeof(callback));
+
+  OSSTATUS_DCHECK(result == noErr, result);
+
+  return result;
+}
+
+// Negotiates a common client-side stream format across the input AUHAL,
+// the varispeed converter and the output unit:
+//  - channel count = min(input device channels, output device channels),
+//  - the AUHAL's client side and the varispeed input run at the input
+//    device's nominal sample rate,
+//  - the varispeed output and the output unit run at the output device's
+//    nominal rate.
+// Fails with kAudioDeviceUnsupportedFormatError when the requested sample
+// rate does not match the output hardware rate.
+OSStatus AudioSynchronizedStream::SetupStreamFormats() {
+  AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
+
+  // Get the input device's stream format (hardware side of the AUHAL's
+  // input element 1).
+  UInt32 property_size = sizeof(asbd_dev1_in);
+  OSStatus result = AudioUnitGetProperty(
+      input_unit_,
+      kAudioUnitProperty_StreamFormat,
+      kAudioUnitScope_Input,
+      1,
+      &asbd_dev1_in,
+      &property_size);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Get the Stream Format (client side of the AUHAL's input element).
+  property_size = sizeof(asbd);
+  result = AudioUnitGetProperty(
+      input_unit_,
+      kAudioUnitProperty_StreamFormat,
+      kAudioUnitScope_Output,
+      1,
+      &asbd,
+      &property_size);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Get the output device's stream format (hardware side of the output
+  // unit, element 0).
+  property_size = sizeof(asbd_dev2_out);
+  result = AudioUnitGetProperty(
+      output_unit_,
+      kAudioUnitProperty_StreamFormat,
+      kAudioUnitScope_Output,
+      0,
+      &asbd_dev2_out,
+      &property_size);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Set the format of all the AUs to the input/output devices channel count.
+  // For a simple case, you want to set this to
+  // the lower of count of the channels in the input device vs output device.
+  asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
+                                    asbd_dev2_out.mChannelsPerFrame);
+
+  // We must get the sample rate of the input device and set it to the
+  // stream format of AUHAL.
+  Float64 rate = 0;
+  property_size = sizeof(rate);
+
+  AudioObjectPropertyAddress pa;
+  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+  pa.mScope = kAudioObjectPropertyScopeWildcard;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+  result = AudioObjectGetPropertyData(
+      input_info_.id_,
+      &pa,
+      0,
+      0,
+      &property_size,
+      &rate);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  input_sample_rate_ = rate;
+
+  asbd.mSampleRate = rate;
+  property_size = sizeof(asbd);
+
+  // Set the new formats to the AUs...
+  result = AudioUnitSetProperty(
+      input_unit_,
+      kAudioUnitProperty_StreamFormat,
+      kAudioUnitScope_Output,
+      1,
+      &asbd,
+      property_size);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  result = AudioUnitSetProperty(
+      varispeed_unit_,
+      kAudioUnitProperty_StreamFormat,
+      kAudioUnitScope_Input,
+      0,
+      &asbd,
+      property_size);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Set the correct sample rate for the output device,
+  // but keep the channel count the same.
+  property_size = sizeof(rate);
+
+  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+  pa.mScope = kAudioObjectPropertyScopeWildcard;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+  result = AudioObjectGetPropertyData(
+      output_info_.id_,
+      &pa,
+      0,
+      0,
+      &property_size,
+      &rate);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  output_sample_rate_ = rate;
+
+  // The requested sample-rate must match the hardware sample-rate.
+  if (output_sample_rate_ != params_.sample_rate()) {
+    LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
+        <<  " must match the hardware sample-rate: " << output_sample_rate_;
+    return kAudioDeviceUnsupportedFormatError;
+  }
+
+  asbd.mSampleRate = rate;
+  property_size = sizeof(asbd);
+
+  // Set the new audio stream formats for the rest of the AUs...
+  result = AudioUnitSetProperty(
+      varispeed_unit_,
+      kAudioUnitProperty_StreamFormat,
+      kAudioUnitScope_Output,
+      0,
+      &asbd,
+      property_size);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  result = AudioUnitSetProperty(
+      output_unit_,
+      kAudioUnitProperty_StreamFormat,
+      kAudioUnitScope_Input,
+      0,
+      &asbd,
+      property_size);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  return result;
+}
+
+// Allocates |input_buffer_list_| — one non-interleaved AudioBuffer per
+// hardware input channel, each pointing into |input_bus_| — plus the
+// wrapper bus and, when the hardware channel count differs from the
+// requested input channel count, a channel mixer and its scratch bus.
+void AudioSynchronizedStream::AllocateInputData() {
+  // Get the native number of input channels that the hardware supports.
+  int hardware_channels = 0;
+  bool got_hardware_channels = AudioManagerMac::GetDeviceChannels(
+      input_id_, kAudioDevicePropertyScopeInput, &hardware_channels);
+  if (!got_hardware_channels || hardware_channels > 2) {
+    // Only mono and stereo are supported on the input side. When it fails to
+    // get the native channel number or the native channel number is bigger
+    // than 2, we open the device in stereo mode.
+    hardware_channels = 2;
+  }
+
+  // Allocate storage for the AudioBufferList used for the
+  // input data from the input AudioUnit.
+  // We allocate enough space for with one AudioBuffer per channel.
+  size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
+      (sizeof(AudioBuffer) * hardware_channels);
+
+  // Owned by this object; presumably freed when the stream is torn down —
+  // the matching free() is not visible in this part of the file.
+  input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
+  input_buffer_list_->mNumberBuffers = hardware_channels;
+
+  input_bus_ = AudioBus::Create(hardware_channels, hardware_buffer_size_);
+  wrapper_bus_ = AudioBus::CreateWrapper(channels_);
+  if (hardware_channels != params_.input_channels()) {
+    ChannelLayout hardware_channel_layout =
+        GuessChannelLayout(hardware_channels);
+    ChannelLayout requested_channel_layout =
+        GuessChannelLayout(params_.input_channels());
+    channel_mixer_.reset(new ChannelMixer(hardware_channel_layout,
+                                          requested_channel_layout));
+    mixer_bus_ = AudioBus::Create(params_.input_channels(),
+                                  hardware_buffer_size_);
+  }
+
+  // Allocate buffers for AudioBufferList.
+  UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
+  for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
+    input_buffer_list_->mBuffers[i].mNumberChannels = 1;
+    input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
+    input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
+  }
+}
+
+// Called on the capture thread by the input AUHAL.  Renders the newly
+// available capture data into |input_buffer_list_| / |input_bus_| and
+// pushes it into the FIFO, via the channel mixer when one is configured.
+// If the FIFO does not have room for a full block, the captured data is
+// silently dropped.
+OSStatus AudioSynchronizedStream::HandleInputCallback(
+    AudioUnitRenderActionFlags* io_action_flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 number_of_frames,
+    AudioBufferList* io_data) {
+  TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
+
+  // Remember when the first capture callback fired; the output callback
+  // emits silence until this is set.
+  if (first_input_time_ < 0.0)
+    first_input_time_ = time_stamp->mSampleTime;
+
+  // Get the new audio input data.
+  OSStatus result = AudioUnitRender(
+      input_unit_,
+      io_action_flags,
+      time_stamp,
+      bus_number,
+      number_of_frames,
+      input_buffer_list_);
+
+  // TODO(xians): Add back the DCHECK after synchronize IO supports all
+  // combination of input and output params. See http://issue/246521.
+  if (result != noErr)
+    return result;
+
+  // Buffer input into FIFO.
+  int available_frames = fifo_.max_frames() - fifo_.frames();
+  if (input_bus_->frames() <= available_frames) {
+    if (channel_mixer_) {
+      channel_mixer_->Transform(input_bus_.get(), mixer_bus_.get());
+      fifo_.Push(mixer_bus_.get());
+    } else {
+      fifo_.Push(input_bus_.get());
+    }
+  }
+
+  return result;
+}
+
+// Render callback of the varispeed unit, running on the output thread.
+// Feeds the varispeed converter from the FIFO, or produces silence when
+// fewer than |number_of_frames| frames are buffered.
+OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
+    AudioUnitRenderActionFlags* io_action_flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 number_of_frames,
+    AudioBufferList* io_data) {
+  // Create a wrapper bus on the AudioBufferList.
+  WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
+
+  if (fifo_.frames() < static_cast<int>(number_of_frames)) {
+    // We don't DCHECK here, since this is a possible run-time condition
+    // if the machine is bogged down.
+    wrapper_bus_->Zero();
+    return noErr;
+  }
+
+  // Read from the FIFO to feed the varispeed.
+  fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);
+
+  return noErr;
+}
+
+// Render callback of the output unit, running on the output thread.
+// Emits silence until the first input callback has fired (or when the
+// requested frame count is unexpected).  Otherwise it nudges the varispeed
+// playback rate so the FIFO converges toward |target_fifo_frames_|
+// (compensating clock drift and sample-rate mismatch), renders the
+// captured audio through the varispeed into |io_data|, then lets
+// |source_| process/render in place via OnMoreIOData().
+OSStatus AudioSynchronizedStream::HandleOutputCallback(
+    AudioUnitRenderActionFlags* io_action_flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 number_of_frames,
+    AudioBufferList* io_data) {
+  // Input callback hasn't run yet or we've suddenly changed sample-rates
+  // -> silence.
+  if (first_input_time_ < 0.0 ||
+      static_cast<int>(number_of_frames) != params_.frames_per_buffer()) {
+    ZeroBufferList(io_data);
+    return noErr;
+  }
+
+  // Use the varispeed playback rate to offset small discrepancies
+  // in hardware clocks, and also any differences in sample-rate
+  // between input and output devices.
+
+  // Calculate a varispeed rate scalar factor to compensate for drift between
+  // input and output.  We use the actual number of frames still in the FIFO
+  // compared with the ideal value of |target_fifo_frames_|.
+  int delta = fifo_.frames() - target_fifo_frames_;
+
+  // Average |delta| because it can jitter back/forth quite frequently
+  // by +/- the hardware buffer-size *if* the input and output callbacks are
+  // happening at almost exactly the same time.  Also, if the input and output
+  // sample-rates are different then |delta| will jitter quite a bit due to
+  // the rate conversion happening in the varispeed, plus the jittering of
+  // the callbacks.  The average value is what's important here.
+  average_delta_ += (delta - average_delta_) * 0.1;
+
+  // Compute a rate compensation which always attracts us back to the
+  // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
+  const double kCorrectionTimeSeconds = 0.1;
+  double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
+  fifo_rate_compensation_ =
+      (correction_time_frames + average_delta_) / correction_time_frames;
+
+  // Adjust for FIFO drift.
+  OSStatus result = AudioUnitSetParameter(
+      varispeed_unit_,
+      kVarispeedParam_PlaybackRate,
+      kAudioUnitScope_Global,
+      0,
+      fifo_rate_compensation_,
+      0);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Render to the output using the varispeed.  This pulls on
+  // VarispeedProc, which consumes from the FIFO.
+  result = AudioUnitRender(
+      varispeed_unit_,
+      io_action_flags,
+      time_stamp,
+      0,
+      number_of_frames,
+      io_data);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+  if (result != noErr)
+    return result;
+
+  // Create a wrapper bus on the AudioBufferList.
+  WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
+
+  // Process in-place!
+  source_->OnMoreIOData(wrapper_bus_.get(),
+                        wrapper_bus_.get(),
+                        AudioBuffersState(0, 0));
+
+  return noErr;
+}
+
+// Static trampoline registered via kAudioOutputUnitProperty_SetInputCallback;
+// forwards to HandleInputCallback() on the instance passed as |user_data|.
+OSStatus AudioSynchronizedStream::InputProc(
+    void* user_data,
+    AudioUnitRenderActionFlags* io_action_flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 number_of_frames,
+    AudioBufferList* io_data) {
+  AudioSynchronizedStream* self =
+      static_cast<AudioSynchronizedStream*>(user_data);
+  DCHECK(self);
+
+  return self->HandleInputCallback(io_action_flags,
+                                   time_stamp,
+                                   bus_number,
+                                   number_of_frames,
+                                   io_data);
+}
+
+// Static trampoline used as the varispeed unit's render callback; forwards
+// to HandleVarispeedCallback() on the instance passed as |user_data|.
+OSStatus AudioSynchronizedStream::VarispeedProc(
+    void* user_data,
+    AudioUnitRenderActionFlags* io_action_flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 number_of_frames,
+    AudioBufferList* io_data) {
+  AudioSynchronizedStream* self =
+      static_cast<AudioSynchronizedStream*>(user_data);
+  DCHECK(self);
+
+  return self->HandleVarispeedCallback(io_action_flags,
+                                       time_stamp,
+                                       bus_number,
+                                       number_of_frames,
+                                       io_data);
+}
+
+// Static trampoline used as the output unit's render callback; forwards
+// to HandleOutputCallback() on the instance passed as |user_data|.
+OSStatus AudioSynchronizedStream::OutputProc(
+    void* user_data,
+    AudioUnitRenderActionFlags* io_action_flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 number_of_frames,
+    AudioBufferList* io_data) {
+  AudioSynchronizedStream* self =
+      static_cast<AudioSynchronizedStream*>(user_data);
+  DCHECK(self);
+
+  return self->HandleOutputCallback(io_action_flags,
+                                    time_stamp,
+                                    bus_number,
+                                    number_of_frames,
+                                    io_data);
+}
+
+// Caches |id| and |is_input| and queries the device's current buffer frame
+// size.  Passing kAudioDeviceUnknown leaves the info uninitialized (see
+// IsInitialized()).
+void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
+    AudioDeviceID id, bool is_input) {
+  id_ = id;
+  is_input_ = is_input;
+  if (id_ == kAudioDeviceUnknown)
+    return;
+
+  UInt32 property_size = sizeof(buffer_size_frames_);
+
+  AudioObjectPropertyAddress pa;
+  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+  pa.mScope = kAudioObjectPropertyScopeWildcard;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+  OSStatus result = AudioObjectGetPropertyData(
+      id_,
+      &pa,
+      0,
+      0,
+      &property_size,
+      &buffer_size_frames_);
+
+  OSSTATUS_DCHECK(result == noErr, result);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_synchronized_mac.h b/chromium/media/audio/mac/audio_synchronized_mac.h
new file mode 100644
index 00000000000..a6db48e3037
--- /dev/null
+++ b/chromium/media/audio/mac/audio_synchronized_mac.h
@@ -0,0 +1,216 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
+
+#include <AudioToolbox/AudioToolbox.h>
+#include <AudioUnit/AudioUnit.h>
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_fifo.h"
+
+namespace media {
+
+class AudioManagerMac;
+class ChannelMixer;
+
+// AudioSynchronizedStream allows arbitrary combinations of input and output
+// devices running off different clocks and using different drivers, with
+// potentially differing sample-rates. It implements AudioOutputStream
+// and shuttles its synchronized I/O data using AudioSourceCallback.
+//
+// It is required to first acquire the native sample rate of the selected
+// output device and then use the same rate when creating this object.
+//
+// ............................................................................
+// Theory of Operation:
+// .
+// INPUT THREAD . OUTPUT THREAD
+// +-----------------+ +------+ .
+// | Input AudioUnit | --> | | .
+// +-----------------+ | | .
+// | FIFO | .
+// | | +-----------+
+// | | -----> | Varispeed |
+// | | +-----------+
+// +------+ . |
+// . | +-----------+
+// . OnMoreIOData() --> | Output AU |
+// . +-----------+
+//
+// The input AudioUnit's InputProc is called on one thread which feeds the
+// FIFO. The output AudioUnit's OutputProc is called on a second thread
+// which pulls on the varispeed to get the current input data. The varispeed
+// handles mismatches between input and output sample-rate and also clock drift
+// between the input and output drivers. The varispeed consumes its data from
+// the FIFO and adjusts its rate dynamically according to the amount
+// of data buffered in the FIFO. If the FIFO starts getting too much data
+// buffered then the varispeed will speed up slightly to compensate
+// and similarly if the FIFO doesn't have enough data buffered then the
+// varispeed will slow down slightly.
+//
+// Finally, once the input data is available then OnMoreIOData() is called
+// which is given this input, and renders the output which is finally sent
+// to the Output AudioUnit.
+class AudioSynchronizedStream : public AudioOutputStream {
+ public:
+  // The ctor takes all the usual parameters, plus |manager| which is the
+  // the audio manager who is creating this object.
+  AudioSynchronizedStream(AudioManagerMac* manager,
+                          const AudioParameters& params,
+                          AudioDeviceID input_id,
+                          AudioDeviceID output_id);
+
+  virtual ~AudioSynchronizedStream();
+
+  // Implementation of AudioOutputStream.
+  virtual bool Open() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual void GetVolume(double* volume) OVERRIDE;
+
+  OSStatus SetInputDeviceAsCurrent(AudioDeviceID input_id);
+  OSStatus SetOutputDeviceAsCurrent(AudioDeviceID output_id);
+  AudioDeviceID GetInputDeviceID() { return input_info_.id_; }
+  AudioDeviceID GetOutputDeviceID() { return output_info_.id_; }
+
+  bool IsRunning();
+
+ private:
+  // Initialization.
+  OSStatus CreateAudioUnits();
+  OSStatus SetupInput(AudioDeviceID input_id);
+  OSStatus EnableIO();
+  OSStatus SetupOutput(AudioDeviceID output_id);
+  OSStatus SetupCallbacks();
+  OSStatus SetupStreamFormats();
+  void AllocateInputData();
+
+  // Handlers for the AudioUnit callbacks.
+  OSStatus HandleInputCallback(AudioUnitRenderActionFlags* io_action_flags,
+                               const AudioTimeStamp* time_stamp,
+                               UInt32 bus_number,
+                               UInt32 number_of_frames,
+                               AudioBufferList* io_data);
+
+  OSStatus HandleVarispeedCallback(AudioUnitRenderActionFlags* io_action_flags,
+                                   const AudioTimeStamp* time_stamp,
+                                   UInt32 bus_number,
+                                   UInt32 number_of_frames,
+                                   AudioBufferList* io_data);
+
+  OSStatus HandleOutputCallback(AudioUnitRenderActionFlags* io_action_flags,
+                                const AudioTimeStamp* time_stamp,
+                                UInt32 bus_number,
+                                UInt32 number_of_frames,
+                                AudioBufferList* io_data);
+
+  // AudioUnit callbacks.
+  static OSStatus InputProc(void* user_data,
+                            AudioUnitRenderActionFlags* io_action_flags,
+                            const AudioTimeStamp* time_stamp,
+                            UInt32 bus_number,
+                            UInt32 number_of_frames,
+                            AudioBufferList* io_data);
+
+  static OSStatus VarispeedProc(void* user_data,
+                                AudioUnitRenderActionFlags* io_action_flags,
+                                const AudioTimeStamp* time_stamp,
+                                UInt32 bus_number,
+                                UInt32 number_of_frames,
+                                AudioBufferList* io_data);
+
+  static OSStatus OutputProc(void* user_data,
+                             AudioUnitRenderActionFlags* io_action_flags,
+                             const AudioTimeStamp* time_stamp,
+                             UInt32 bus_number,
+                             UInt32 number_of_frames,
+                             AudioBufferList* io_data);
+
+  // Our creator.
+  AudioManagerMac* manager_;
+
+  // Client parameters.
+  AudioParameters params_;
+
+  // Nominal sample rates of the selected input and output devices.
+  double input_sample_rate_;
+  double output_sample_rate_;
+
+  // Pointer to the object that will provide the audio samples.
+  AudioSourceCallback* source_;
+
+  // Values used in Open().
+  AudioDeviceID input_id_;
+  AudioDeviceID output_id_;
+
+  // The input AudioUnit renders its data here.
+  AudioBufferList* input_buffer_list_;
+
+  // Holds the actual data for |input_buffer_list_|.
+  scoped_ptr<AudioBus> input_bus_;
+
+  // Used to overlay AudioBufferLists.
+  scoped_ptr<AudioBus> wrapper_bus_;
+
+  // Per-device bookkeeping: device id, direction and the device's buffer
+  // size in frames.
+  class AudioDeviceInfo {
+   public:
+    AudioDeviceInfo()
+        : id_(kAudioDeviceUnknown),
+          is_input_(false),
+          buffer_size_frames_(0) {}
+    // Note: parameter names match the definition in the .cc file and
+    // Chromium naming style.
+    void Initialize(AudioDeviceID id, bool is_input);
+    bool IsInitialized() const { return id_ != kAudioDeviceUnknown; }
+
+    AudioDeviceID id_;
+    bool is_input_;
+    UInt32 buffer_size_frames_;
+  };
+
+  AudioDeviceInfo input_info_;
+  AudioDeviceInfo output_info_;
+
+  // Used for input to output buffering.
+  AudioFifo fifo_;
+
+  // The optimal number of frames we'd like to keep in the FIFO at all times.
+  int target_fifo_frames_;
+
+  // A running average of the measured delta between actual number of frames
+  // in the FIFO versus |target_fifo_frames_|.
+  double average_delta_;
+
+  // A varispeed rate scalar which is calculated based on FIFO drift.
+  double fifo_rate_compensation_;
+
+  // AudioUnits.
+  AudioUnit input_unit_;
+  AudioUnit varispeed_unit_;
+  AudioUnit output_unit_;
+
+  // Sample time of the first input callback; negative until input starts.
+  double first_input_time_;
+
+  // Set between Start() and Stop().
+  bool is_running_;
+  // Device buffer size in frames, and client-side channel count.
+  int hardware_buffer_size_;
+  int channels_;
+
+  // Channel mixer used to transform mono to stereo data.  It is only created
+  // if the input_hardware_channels is mono.
+  scoped_ptr<ChannelMixer> channel_mixer_;
+  scoped_ptr<AudioBus> mixer_bus_;
+
+  DISALLOW_COPY_AND_ASSIGN(AudioSynchronizedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
diff --git a/chromium/media/audio/mac/audio_unified_mac.cc b/chromium/media/audio/mac/audio_unified_mac.cc
new file mode 100644
index 00000000000..67ec2fe6f3e
--- /dev/null
+++ b/chromium/media/audio/mac/audio_unified_mac.cc
@@ -0,0 +1,398 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mac/audio_unified_mac.h"
+
+#include <CoreServices/CoreServices.h>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/mac/mac_logging.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/mac/audio_manager_mac.h"
+
+namespace media {
+
+// TODO(crogers): support more than hard-coded stereo input.
+// Ideally we would like to receive this value as a constructor argument.
+// Number of input channels requested from the hardware (see Open()).
+static const int kDefaultInputChannels = 2;
+
+// Constructs a unified I/O stream driven by the user's default output
+// device.  The client format is interleaved signed-integer PCM derived
+// from |params|; the input side is hard-coded to kDefaultInputChannels.
+// No CoreAudio resources are acquired until Open() is called.
+AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
+    AudioManagerMac* manager, const AudioParameters& params)
+    : manager_(manager),
+      source_(NULL),
+      client_input_channels_(kDefaultInputChannels),
+      volume_(1.0f),
+      input_channels_(0),
+      output_channels_(0),
+      input_channels_per_frame_(0),
+      output_channels_per_frame_(0),
+      io_proc_id_(0),
+      device_(kAudioObjectUnknown),
+      is_playing_(false) {
+  DCHECK(manager_);
+
+  // A frame is one sample across all channels. In interleaved audio the per
+  // frame fields identify the set of n |channels|. In uncompressed audio, a
+  // packet is always one frame.
+  format_.mSampleRate = params.sample_rate();
+  format_.mFormatID = kAudioFormatLinearPCM;
+  format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
+      kLinearPCMFormatFlagIsSignedInteger;
+  format_.mBitsPerChannel = params.bits_per_sample();
+  format_.mChannelsPerFrame = params.channels();
+  format_.mFramesPerPacket = 1;
+  format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
+  format_.mBytesPerFrame = format_.mBytesPerPacket;
+  format_.mReserved = 0;
+
+  // Calculate the number of sample frames per callback.
+  number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
+
+  input_bus_ = AudioBus::Create(client_input_channels_,
+                                params.frames_per_buffer());
+  output_bus_ = AudioBus::Create(params);
+}
+
+AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
+  // Close() must have run first; it resets |device_| to kAudioObjectUnknown.
+  DCHECK_EQ(device_, kAudioObjectUnknown);
+}
+
+// Opens the unified input/output stream on the user's default output
+// device:
+//  1. Resolves the default output device and verifies its nominal sample
+//     rate matches the requested format.
+//  2. Forces the device's input and output buffer frame sizes to
+//     |number_of_frames_|.
+//  3. Reads the input and output stream configurations to determine the
+//     channel counts (handling interleaved and non-interleaved layouts)
+//     and verifies the hardware supports the requested channels.
+//  4. Registers RenderProc as the device's I/O proc.
+// Returns false on any failure.
+bool AudioHardwareUnifiedStream::Open() {
+  // Obtain the current output device selected by the user.
+  AudioObjectPropertyAddress pa;
+  pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+  pa.mScope = kAudioObjectPropertyScopeGlobal;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+
+  UInt32 size = sizeof(device_);
+
+  OSStatus result = AudioObjectGetPropertyData(
+      kAudioObjectSystemObject,
+      &pa,
+      0,
+      0,
+      &size,
+      &device_);
+
+  if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) {
+    LOG(ERROR) << "Cannot open unified AudioDevice.";
+    return false;
+  }
+
+  // The requested sample-rate must match the hardware sample-rate.
+  Float64 sample_rate = 0.0;
+  size = sizeof(sample_rate);
+
+  pa.mSelector = kAudioDevicePropertyNominalSampleRate;
+  pa.mScope = kAudioObjectPropertyScopeWildcard;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+
+  result = AudioObjectGetPropertyData(
+      device_,
+      &pa,
+      0,
+      0,
+      &size,
+      &sample_rate);
+
+  if (result != noErr || sample_rate != format_.mSampleRate) {
+    LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate
+        <<  " must match the hardware sample-rate: " << sample_rate;
+    return false;
+  }
+
+  // Configure buffer frame size.
+  UInt32 frame_size = number_of_frames_;
+
+  pa.mSelector = kAudioDevicePropertyBufferFrameSize;
+  pa.mScope = kAudioDevicePropertyScopeInput;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+  result = AudioObjectSetPropertyData(
+      device_,
+      &pa,
+      0,
+      0,
+      sizeof(frame_size),
+      &frame_size);
+
+  if (result != noErr) {
+    LOG(ERROR) << "Unable to set input buffer frame size: "  << frame_size;
+    return false;
+  }
+
+  pa.mScope = kAudioDevicePropertyScopeOutput;
+  result = AudioObjectSetPropertyData(
+      device_,
+      &pa,
+      0,
+      0,
+      sizeof(frame_size),
+      &frame_size);
+
+  if (result != noErr) {
+    LOG(ERROR) << "Unable to set output buffer frame size: "  << frame_size;
+    return false;
+  }
+
+  DVLOG(1) << "Sample rate: " << sample_rate;
+  DVLOG(1) << "Frame size: " << frame_size;
+
+  // Determine the number of input and output channels.
+  // We handle both the interleaved and non-interleaved cases.
+
+  // Get input stream configuration.
+  pa.mSelector = kAudioDevicePropertyStreamConfiguration;
+  pa.mScope = kAudioDevicePropertyScopeInput;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+
+  result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
+  OSSTATUS_DCHECK(result == noErr, result);
+
+  if (result == noErr && size > 0) {
+    // Allocate storage.
+    scoped_ptr<uint8[]> input_list_storage(new uint8[size]);
+    AudioBufferList& input_list =
+        *reinterpret_cast<AudioBufferList*>(input_list_storage.get());
+
+    result = AudioObjectGetPropertyData(
+        device_,
+        &pa,
+        0,
+        0,
+        &size,
+        &input_list);
+    OSSTATUS_DCHECK(result == noErr, result);
+
+    if (result == noErr) {
+      // Determine number of input channels.
+      input_channels_per_frame_ = input_list.mNumberBuffers > 0 ?
+          input_list.mBuffers[0].mNumberChannels : 0;
+      if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) {
+        // Non-interleaved.
+        input_channels_ = input_list.mNumberBuffers;
+      } else {
+        // Interleaved.
+        input_channels_ = input_channels_per_frame_;
+      }
+    }
+  }
+
+  DVLOG(1) << "Input channels: " << input_channels_;
+  DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_;
+
+  // The hardware must have at least the requested input channels.
+  if (result != noErr || client_input_channels_ > input_channels_) {
+    LOG(ERROR) << "AudioDevice does not support requested input channels.";
+    return false;
+  }
+
+  // Get output stream configuration.
+  pa.mSelector = kAudioDevicePropertyStreamConfiguration;
+  pa.mScope = kAudioDevicePropertyScopeOutput;
+  pa.mElement = kAudioObjectPropertyElementMaster;
+
+  result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
+  OSSTATUS_DCHECK(result == noErr, result);
+
+  if (result == noErr && size > 0) {
+    // Allocate storage.
+    scoped_ptr<uint8[]> output_list_storage(new uint8[size]);
+    AudioBufferList& output_list =
+        *reinterpret_cast<AudioBufferList*>(output_list_storage.get());
+
+    result = AudioObjectGetPropertyData(
+        device_,
+        &pa,
+        0,
+        0,
+        &size,
+        &output_list);
+    OSSTATUS_DCHECK(result == noErr, result);
+
+    if (result == noErr) {
+      // Determine number of output channels.  Guard against an empty
+      // buffer list before touching mBuffers[0], mirroring the input
+      // path above.
+      output_channels_per_frame_ = output_list.mNumberBuffers > 0 ?
+          output_list.mBuffers[0].mNumberChannels : 0;
+      if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) {
+        // Non-interleaved.
+        output_channels_ = output_list.mNumberBuffers;
+      } else {
+        // Interleaved.
+        output_channels_ = output_channels_per_frame_;
+      }
+    }
+  }
+
+  DVLOG(1) << "Output channels: " << output_channels_;
+  DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_;
+
+  // The hardware must have at least the requested output channels.
+  if (result != noErr ||
+      output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) {
+    LOG(ERROR) << "AudioDevice does not support requested output channels.";
+    return false;
+  }
+
+  // Setup the I/O proc.
+  result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_);
+  if (result != noErr) {
+    LOG(ERROR) << "Error creating IOProc.";
+    return false;
+  }
+
+  return true;
+}
+
+void AudioHardwareUnifiedStream::Close() {
+ DCHECK(!is_playing_);
+
+ OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ io_proc_id_ = 0;
+ device_ = kAudioObjectUnknown;
+
+ // Inform the audio manager that we have been closed. This can cause our
+ // destruction.
+ manager_->ReleaseOutputStream(this);
+}
+
+void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) {
+ DCHECK(callback);
+ DCHECK_NE(device_, kAudioObjectUnknown);
+ DCHECK(!is_playing_);
+ if (device_ == kAudioObjectUnknown || is_playing_)
+ return;
+
+ source_ = callback;
+
+ OSStatus result = AudioDeviceStart(device_, io_proc_id_);
+ OSSTATUS_DCHECK(result == noErr, result);
+
+ if (result == noErr)
+ is_playing_ = true;
+}
+
+void AudioHardwareUnifiedStream::Stop() {
+ if (!is_playing_)
+ return;
+
+ if (device_ != kAudioObjectUnknown) {
+ OSStatus result = AudioDeviceStop(device_, io_proc_id_);
+ OSSTATUS_DCHECK(result == noErr, result);
+ }
+
+ is_playing_ = false;
+ source_ = NULL;
+}
+
+void AudioHardwareUnifiedStream::SetVolume(double volume) {
+ volume_ = static_cast<float>(volume);
+ // TODO(crogers): set volume property
+}
+
+void AudioHardwareUnifiedStream::GetVolume(double* volume) {
+ *volume = volume_;
+}
+
+// Pulls on our provider with optional input, asking it to render output.
+// Note to future hackers of this function: Do not add locks here because this
+// is running on a real-time thread (for low-latency).
+OSStatus AudioHardwareUnifiedStream::Render(
+ AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time) {
+ // Convert the input data accounting for possible interleaving.
+ // TODO(crogers): it's better to simply memcpy() if source is already planar.
+ if (input_channels_ >= client_input_channels_) {
+ for (int channel_index = 0; channel_index < client_input_channels_;
+ ++channel_index) {
+ float* source;
+
+ int source_channel_index = channel_index;
+
+ if (input_channels_per_frame_ > 1) {
+ // Interleaved.
+ source = static_cast<float*>(input_data->mBuffers[0].mData) +
+ source_channel_index;
+ } else {
+ // Non-interleaved.
+ source = static_cast<float*>(
+ input_data->mBuffers[source_channel_index].mData);
+ }
+
+ float* p = input_bus_->channel(channel_index);
+ for (int i = 0; i < number_of_frames_; ++i) {
+ p[i] = *source;
+ source += input_channels_per_frame_;
+ }
+ }
+ } else if (input_channels_) {
+ input_bus_->Zero();
+ }
+
+ // Give the client optional input data and have it render the output data.
+ source_->OnMoreIOData(input_bus_.get(),
+ output_bus_.get(),
+ AudioBuffersState(0, 0));
+
+ // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.
+
+ // Handle interleaving as necessary.
+ // TODO(crogers): it's better to simply memcpy() if dest is already planar.
+
+ for (int channel_index = 0;
+ channel_index < static_cast<int>(format_.mChannelsPerFrame);
+ ++channel_index) {
+ float* dest;
+
+ int dest_channel_index = channel_index;
+
+ if (output_channels_per_frame_ > 1) {
+ // Interleaved.
+ dest = static_cast<float*>(output_data->mBuffers[0].mData) +
+ dest_channel_index;
+ } else {
+ // Non-interleaved.
+ dest = static_cast<float*>(
+ output_data->mBuffers[dest_channel_index].mData);
+ }
+
+ float* p = output_bus_->channel(channel_index);
+ for (int i = 0; i < number_of_frames_; ++i) {
+ *dest = p[i];
+ dest += output_channels_per_frame_;
+ }
+ }
+
+ return noErr;
+}
+
+OSStatus AudioHardwareUnifiedStream::RenderProc(
+ AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time,
+ void* user_data) {
+ AudioHardwareUnifiedStream* audio_output =
+ static_cast<AudioHardwareUnifiedStream*>(user_data);
+ DCHECK(audio_output);
+ if (!audio_output)
+ return -1;
+
+ return audio_output->Render(
+ device,
+ now,
+ input_data,
+ input_time,
+ output_data,
+ output_time);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/mac/audio_unified_mac.h b/chromium/media/audio/mac/audio_unified_mac.h
new file mode 100644
index 00000000000..ff090e3be1a
--- /dev/null
+++ b/chromium/media/audio/mac/audio_unified_mac.h
@@ -0,0 +1,100 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
+#define MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
+
+#include <CoreAudio/CoreAudio.h>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerMac;
+
+// Implementation of AudioOutputStream for Mac OS X using the
+// CoreAudio AudioHardware API suitable for low-latency unified audio I/O
+// when using devices which support *both* input and output
+// in the same driver. This is the case with professional
+// USB and Firewire devices.
+//
+// Please note that it's required to first get the native sample-rate of the
+// default output device and use that sample-rate when creating this object.
+class AudioHardwareUnifiedStream : public AudioOutputStream {
+ public:
+ // The ctor takes all the usual parameters, plus |manager| which is the
+ // the audio manager who is creating this object.
+ AudioHardwareUnifiedStream(AudioManagerMac* manager,
+ const AudioParameters& params);
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioOutputStream::Close().
+ virtual ~AudioHardwareUnifiedStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ int input_channels() const { return input_channels_; }
+ int output_channels() const { return output_channels_; }
+
+ private:
+ OSStatus Render(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time);
+
+ static OSStatus RenderProc(AudioDeviceID device,
+ const AudioTimeStamp* now,
+ const AudioBufferList* input_data,
+ const AudioTimeStamp* input_time,
+ AudioBufferList* output_data,
+ const AudioTimeStamp* output_time,
+ void* user_data);
+
+ // Our creator, the audio manager needs to be notified when we close.
+ AudioManagerMac* manager_;
+
+ // Pointer to the object that will provide the audio samples.
+ AudioSourceCallback* source_;
+
+ // Structure that holds the stream format details such as bitrate.
+ AudioStreamBasicDescription format_;
+
+ // Hardware buffer size.
+ int number_of_frames_;
+
+ // Number of audio channels provided to the client via OnMoreIOData().
+ int client_input_channels_;
+
+ // Volume level from 0 to 1.
+ float volume_;
+
+ // Number of input and output channels queried from the hardware.
+ int input_channels_;
+ int output_channels_;
+ int input_channels_per_frame_;
+ int output_channels_per_frame_;
+
+ AudioDeviceIOProcID io_proc_id_;
+ AudioDeviceID device_;
+ bool is_playing_;
+
+ // Intermediate buffers used with call to OnMoreIOData().
+ scoped_ptr<AudioBus> input_bus_;
+ scoped_ptr<AudioBus> output_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioHardwareUnifiedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
diff --git a/chromium/media/audio/mock_audio_manager.cc b/chromium/media/audio/mock_audio_manager.cc
new file mode 100644
index 00000000000..60898bd61b8
--- /dev/null
+++ b/chromium/media/audio/mock_audio_manager.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mock_audio_manager.h"
+
+#include "base/logging.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+MockAudioManager::MockAudioManager(base::MessageLoopProxy* message_loop_proxy)
+ : message_loop_proxy_(message_loop_proxy) {
+}
+
+MockAudioManager::~MockAudioManager() {
+}
+
+bool MockAudioManager::HasAudioOutputDevices() {
+ return true;
+}
+
+bool MockAudioManager::HasAudioInputDevices() {
+ return true;
+}
+
+string16 MockAudioManager::GetAudioInputDeviceModel() {
+ return string16();
+}
+
+void MockAudioManager::ShowAudioInputSettings() {
+}
+
+void MockAudioManager::GetAudioInputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+}
+
+media::AudioOutputStream* MockAudioManager::MakeAudioOutputStream(
+ const media::AudioParameters& params,
+ const std::string& input_device_id) {
+ NOTREACHED();
+ return NULL;
+}
+
+media::AudioOutputStream* MockAudioManager::MakeAudioOutputStreamProxy(
+ const media::AudioParameters& params,
+ const std::string& input_device_id) {
+ NOTREACHED();
+ return NULL;
+}
+
+media::AudioInputStream* MockAudioManager::MakeAudioInputStream(
+ const media::AudioParameters& params,
+ const std::string& device_id) {
+ NOTREACHED();
+ return NULL;
+}
+
+scoped_refptr<base::MessageLoopProxy> MockAudioManager::GetMessageLoop() {
+ return message_loop_proxy_;
+}
+
+scoped_refptr<base::MessageLoopProxy> MockAudioManager::GetWorkerLoop() {
+ return message_loop_proxy_;
+}
+
+void MockAudioManager::AddOutputDeviceChangeListener(
+ AudioDeviceListener* listener) {
+}
+
+void MockAudioManager::RemoveOutputDeviceChangeListener(
+ AudioDeviceListener* listener) {
+}
+
+AudioParameters MockAudioManager::GetDefaultOutputStreamParameters() {
+ return AudioParameters();
+}
+
+AudioParameters MockAudioManager::GetInputStreamParameters(
+ const std::string& device_id) {
+ return AudioParameters();
+}
+
+} // namespace media.
diff --git a/chromium/media/audio/mock_audio_manager.h b/chromium/media/audio/mock_audio_manager.h
new file mode 100644
index 00000000000..eee84b1643f
--- /dev/null
+++ b/chromium/media/audio/mock_audio_manager.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MOCK_AUDIO_MANAGER_H_
+#define MEDIA_AUDIO_MOCK_AUDIO_MANAGER_H_
+
+#include "media/audio/audio_manager.h"
+
+namespace media {
+
+// This class is a simple mock around AudioManager, used exclusively for tests,
+// which has the following purposes:
+// 1) Avoids using the actual (system and platform dependent) AudioManager.
+//    Some bots do not have input devices, so using the actual AudioManager
+//    would cause failures in classes which expect them.
+// 2) Allows the mock audio events to be dispatched on an arbitrary thread,
+// rather than forcing them on the audio thread, easing their handling in
+// browser tests (Note: sharing a thread can cause deadlocks on production
+// classes if WaitableEvents or any other form of lock is used for
+// synchronization purposes).
+class MockAudioManager : public media::AudioManager {
+ public:
+ explicit MockAudioManager(base::MessageLoopProxy* message_loop_proxy);
+
+ virtual bool HasAudioOutputDevices() OVERRIDE;
+
+ virtual bool HasAudioInputDevices() OVERRIDE;
+
+ virtual string16 GetAudioInputDeviceModel() OVERRIDE;
+
+ virtual void ShowAudioInputSettings() OVERRIDE;
+
+ virtual void GetAudioInputDeviceNames(
+ media::AudioDeviceNames* device_names) OVERRIDE;
+
+ virtual media::AudioOutputStream* MakeAudioOutputStream(
+ const media::AudioParameters& params,
+ const std::string& input_device_id) OVERRIDE;
+
+ virtual media::AudioOutputStream* MakeAudioOutputStreamProxy(
+ const media::AudioParameters& params,
+ const std::string& input_device_id) OVERRIDE;
+
+ virtual media::AudioInputStream* MakeAudioInputStream(
+ const media::AudioParameters& params,
+ const std::string& device_id) OVERRIDE;
+
+ virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
+ virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() OVERRIDE;
+
+ virtual void AddOutputDeviceChangeListener(
+ AudioDeviceListener* listener) OVERRIDE;
+ virtual void RemoveOutputDeviceChangeListener(
+ AudioDeviceListener* listener) OVERRIDE;
+
+ virtual AudioParameters GetDefaultOutputStreamParameters() OVERRIDE;
+ virtual AudioParameters GetInputStreamParameters(
+ const std::string& device_id) OVERRIDE;
+
+ private:
+ virtual ~MockAudioManager();
+
+ scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockAudioManager);
+};
+
+} // namespace media.
+
+#endif // MEDIA_AUDIO_MOCK_AUDIO_MANAGER_H_
diff --git a/chromium/media/audio/null_audio_sink.cc b/chromium/media/audio/null_audio_sink.cc
new file mode 100644
index 00000000000..607d7d861e2
--- /dev/null
+++ b/chromium/media/audio/null_audio_sink.cc
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/null_audio_sink.h"
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/audio/fake_audio_consumer.h"
+#include "media/base/audio_hash.h"
+
+namespace media {
+
+NullAudioSink::NullAudioSink(
+ const scoped_refptr<base::MessageLoopProxy>& message_loop)
+ : initialized_(false),
+ playing_(false),
+ callback_(NULL),
+ message_loop_(message_loop) {
+}
+
+NullAudioSink::~NullAudioSink() {}
+
+void NullAudioSink::Initialize(const AudioParameters& params,
+ RenderCallback* callback) {
+ DCHECK(!initialized_);
+ fake_consumer_.reset(new FakeAudioConsumer(message_loop_, params));
+ callback_ = callback;
+ initialized_ = true;
+}
+
+void NullAudioSink::Start() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(!playing_);
+}
+
+void NullAudioSink::Stop() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ // Stop may be called at any time, so we have to check before stopping.
+ if (fake_consumer_)
+ fake_consumer_->Stop();
+}
+
+void NullAudioSink::Play() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(initialized_);
+
+ if (playing_)
+ return;
+
+ fake_consumer_->Start(base::Bind(
+ &NullAudioSink::CallRender, base::Unretained(this)));
+ playing_ = true;
+}
+
+void NullAudioSink::Pause() {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ if (!playing_)
+ return;
+
+ fake_consumer_->Stop();
+ playing_ = false;
+}
+
+bool NullAudioSink::SetVolume(double volume) {
+ // Audio is always muted.
+ return volume == 0.0;
+}
+
+void NullAudioSink::CallRender(AudioBus* audio_bus) {
+ DCHECK(message_loop_->BelongsToCurrentThread());
+
+ int frames_received = callback_->Render(audio_bus, 0);
+ if (!audio_hash_ || frames_received <= 0)
+ return;
+
+ audio_hash_->Update(audio_bus, frames_received);
+}
+
+void NullAudioSink::StartAudioHashForTesting() {
+ DCHECK(!initialized_);
+ audio_hash_.reset(new AudioHash());
+}
+
+std::string NullAudioSink::GetAudioHashForTesting() {
+ return audio_hash_ ? audio_hash_->ToString() : std::string();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/null_audio_sink.h b/chromium/media/audio/null_audio_sink.h
new file mode 100644
index 00000000000..072414606ff
--- /dev/null
+++ b/chromium/media/audio/null_audio_sink.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_NULL_AUDIO_SINK_H_
+#define MEDIA_AUDIO_NULL_AUDIO_SINK_H_
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+class AudioBus;
+class AudioHash;
+class FakeAudioConsumer;
+
+class MEDIA_EXPORT NullAudioSink
+ : NON_EXPORTED_BASE(public AudioRendererSink) {
+ public:
+ NullAudioSink(const scoped_refptr<base::MessageLoopProxy>& message_loop);
+
+ // AudioRendererSink implementation.
+ virtual void Initialize(const AudioParameters& params,
+ RenderCallback* callback) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Pause() OVERRIDE;
+ virtual void Play() OVERRIDE;
+ virtual bool SetVolume(double volume) OVERRIDE;
+
+ // Enables audio frame hashing. Must be called prior to Initialize().
+ void StartAudioHashForTesting();
+
+ // Returns the hash of all audio frames seen since construction.
+ std::string GetAudioHashForTesting();
+
+ protected:
+ virtual ~NullAudioSink();
+
+ private:
+ // Task that periodically calls Render() to consume audio data.
+ void CallRender(AudioBus* audio_bus);
+
+ bool initialized_;
+ bool playing_;
+ RenderCallback* callback_;
+
+ // Controls whether or not a running hash is computed for audio frames.
+ scoped_ptr<AudioHash> audio_hash_;
+
+ scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_ptr<FakeAudioConsumer> fake_consumer_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullAudioSink);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_NULL_AUDIO_SINK_H_
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.cc b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
new file mode 100644
index 00000000000..4005aeb98f0
--- /dev/null
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
@@ -0,0 +1,154 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/openbsd/audio_manager_openbsd.h"
+
+#include <fcntl.h>
+
+#include "base/command_line.h"
+#include "base/file_path.h"
+#include "base/stl_util.h"
+#include "media/audio/audio_output_dispatcher.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/pulse/pulse_output.h"
+#include "media/audio/pulse/pulse_stubs.h"
+#include "media/base/channel_layout.h"
+#include "media/base/limits.h"
+#include "media/base/media_switches.h"
+
+using media_audio_pulse::kModulePulse;
+using media_audio_pulse::InitializeStubs;
+using media_audio_pulse::StubPathMap;
+
+namespace media {
+
+// Maximum number of output streams that can be open simultaneously.
+static const int kMaxOutputStreams = 50;
+
+// Default sample rate for input and output streams.
+static const int kDefaultSampleRate = 48000;
+
+static const base::FilePath::CharType kPulseLib[] =
+ FILE_PATH_LITERAL("libpulse.so.0");
+
+// Implementation of AudioManager.
+static bool HasAudioHardware() {
+ int fd;
+ const char *file;
+
+ if ((file = getenv("AUDIOCTLDEVICE")) == 0 || *file == '\0')
+ file = "/dev/audioctl";
+
+ if ((fd = open(file, O_RDONLY)) < 0)
+ return false;
+
+ close(fd);
+ return true;
+}
+
+bool AudioManagerOpenBSD::HasAudioOutputDevices() {
+ return HasAudioHardware();
+}
+
+bool AudioManagerOpenBSD::HasAudioInputDevices() {
+ return HasAudioHardware();
+}
+
+AudioParameters AudioManagerOpenBSD::GetInputStreamParameters(
+ const std::string& device_id) {
+ static const int kDefaultInputBufferSize = 1024;
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kDefaultSampleRate, 16, kDefaultInputBufferSize);
+}
+
+AudioManagerOpenBSD::AudioManagerOpenBSD()
+ : pulse_library_is_initialized_(false) {
+ SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+ StubPathMap paths;
+
+  // Check if the pulse library is available.
+ paths[kModulePulse].push_back(kPulseLib);
+ if (!InitializeStubs(paths)) {
+ DLOG(WARNING) << "Failed on loading the Pulse library and symbols";
+ return;
+ }
+
+ pulse_library_is_initialized_ = true;
+}
+
+AudioManagerOpenBSD::~AudioManagerOpenBSD() {
+ Shutdown();
+}
+
+AudioOutputStream* AudioManagerOpenBSD::MakeLinearOutputStream(
+ const AudioParameters& params) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
+ return MakeOutputStream(params);
+}
+
+AudioOutputStream* AudioManagerOpenBSD::MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& input_device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
+ return MakeOutputStream(params);
+}
+
+AudioInputStream* AudioManagerOpenBSD::MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
+ NOTIMPLEMENTED();
+ return NULL;
+}
+
+AudioInputStream* AudioManagerOpenBSD::MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
+ NOTIMPLEMENTED();
+ return NULL;
+}
+
+AudioParameters AudioManagerOpenBSD::GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) {
+ static const int kDefaultOutputBufferSize = 512;
+
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ int sample_rate = kDefaultSampleRate;
+ int buffer_size = kDefaultOutputBufferSize;
+ int bits_per_sample = 16;
+ int input_channels = 0;
+ if (input_params.IsValid()) {
+ sample_rate = input_params.sample_rate();
+ bits_per_sample = input_params.bits_per_sample();
+ channel_layout = input_params.channel_layout();
+ input_channels = input_params.input_channels();
+ buffer_size = std::min(buffer_size, input_params.frames_per_buffer());
+ }
+
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ buffer_size = user_buffer_size;
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
+ sample_rate, bits_per_sample, buffer_size);
+}
+
+AudioOutputStream* AudioManagerOpenBSD::MakeOutputStream(
+ const AudioParameters& params) {
+ if (pulse_library_is_initialized_)
+ return new PulseAudioOutputStream(params, this);
+
+ return NULL;
+}
+
+// TODO(xians): Merge AudioManagerOpenBSD with AudioManagerPulse;
+// static
+AudioManager* CreateAudioManager() {
+ return new AudioManagerOpenBSD();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.h b/chromium/media/audio/openbsd/audio_manager_openbsd.h
new file mode 100644
index 00000000000..a1adcb6c86c
--- /dev/null
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_
+#define MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_
+
+#include <set>
+
+#include "base/compiler_specific.h"
+#include "media/audio/audio_manager_base.h"
+
+namespace media {
+
+class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
+ public:
+ AudioManagerOpenBSD();
+
+ // Implementation of AudioManager.
+ virtual bool HasAudioOutputDevices() OVERRIDE;
+ virtual bool HasAudioInputDevices() OVERRIDE;
+ virtual AudioParameters GetInputStreamParameters(
+ const std::string& device_id) OVERRIDE;
+
+ // Implementation of AudioManagerBase.
+ virtual AudioOutputStream* MakeLinearOutputStream(
+ const AudioParameters& params) OVERRIDE;
+ virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& input_device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+
+ protected:
+ virtual ~AudioManagerOpenBSD();
+
+ virtual AudioParameters GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) OVERRIDE;
+
+ private:
+ // Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
+ AudioOutputStream* MakeOutputStream(const AudioParameters& params);
+
+ // Flag to indicate whether the pulse library has been initialized or not.
+ bool pulse_library_is_initialized_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioManagerOpenBSD);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.cc b/chromium/media/audio/pulse/audio_manager_pulse.cc
new file mode 100644
index 00000000000..dcdd3282228
--- /dev/null
+++ b/chromium/media/audio/pulse/audio_manager_pulse.cc
@@ -0,0 +1,318 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/pulse/audio_manager_pulse.h"
+
+#include "base/command_line.h"
+#include "base/environment.h"
+#include "base/files/file_path.h"
+#include "base/logging.h"
+#include "base/nix/xdg_util.h"
+#include "base/stl_util.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/linux/audio_manager_linux.h"
+#include "media/audio/pulse/pulse_input.h"
+#include "media/audio/pulse/pulse_output.h"
+#include "media/audio/pulse/pulse_unified.h"
+#include "media/audio/pulse/pulse_util.h"
+#include "media/base/channel_layout.h"
+
+#if defined(DLOPEN_PULSEAUDIO)
+#include "media/audio/pulse/pulse_stubs.h"
+
+using media_audio_pulse::kModulePulse;
+using media_audio_pulse::InitializeStubs;
+using media_audio_pulse::StubPathMap;
+#endif // defined(DLOPEN_PULSEAUDIO)
+
+namespace media {
+
+using pulse::AutoPulseLock;
+using pulse::WaitForOperationCompletion;
+
+// Maximum number of output streams that can be open simultaneously.
+static const int kMaxOutputStreams = 50;
+
+static const base::FilePath::CharType kPulseLib[] =
+ FILE_PATH_LITERAL("libpulse.so.0");
+
+// static
+AudioManager* AudioManagerPulse::Create() {
+ scoped_ptr<AudioManagerPulse> ret(new AudioManagerPulse());
+ if (ret->Init())
+ return ret.release();
+
+ DVLOG(1) << "PulseAudio is not available on the OS";
+ return NULL;
+}
+
+AudioManagerPulse::AudioManagerPulse()
+ : input_mainloop_(NULL),
+ input_context_(NULL),
+ devices_(NULL),
+ native_input_sample_rate_(0) {
+ SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+}
+
+AudioManagerPulse::~AudioManagerPulse() {
+ Shutdown();
+
+ // The Pulse objects are the last things to be destroyed since Shutdown()
+ // needs them.
+ DestroyPulse();
+}
+
+// Implementation of AudioManager.
+bool AudioManagerPulse::HasAudioOutputDevices() {
+ DCHECK(input_mainloop_);
+ DCHECK(input_context_);
+ media::AudioDeviceNames devices;
+ AutoPulseLock auto_lock(input_mainloop_);
+ devices_ = &devices;
+ pa_operation* operation = pa_context_get_sink_info_list(
+ input_context_, OutputDevicesInfoCallback, this);
+ WaitForOperationCompletion(input_mainloop_, operation);
+ return !devices.empty();
+}
+
+bool AudioManagerPulse::HasAudioInputDevices() {
+ media::AudioDeviceNames devices;
+ GetAudioInputDeviceNames(&devices);
+ return !devices.empty();
+}
+
+void AudioManagerPulse::ShowAudioInputSettings() {
+ AudioManagerLinux::ShowLinuxAudioInputSettings();
+}
+
+void AudioManagerPulse::GetAudioInputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ DCHECK(input_mainloop_);
+ DCHECK(input_context_);
+ AutoPulseLock auto_lock(input_mainloop_);
+ devices_ = device_names;
+ pa_operation* operation = pa_context_get_source_info_list(
+ input_context_, InputDevicesInfoCallback, this);
+ WaitForOperationCompletion(input_mainloop_, operation);
+
+  // Prepend the default device to the top of the list if the list is not empty.
+ if (!device_names->empty()) {
+ device_names->push_front(
+ AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
+ AudioManagerBase::kDefaultDeviceId));
+ }
+}
+
+AudioParameters AudioManagerPulse::GetInputStreamParameters(
+ const std::string& device_id) {
+ static const int kDefaultInputBufferSize = 1024;
+
+ // TODO(xians): add support for querying native channel layout for pulse.
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ GetNativeSampleRate(), 16, kDefaultInputBufferSize);
+}
+
+AudioOutputStream* AudioManagerPulse::MakeLinearOutputStream(
+ const AudioParameters& params) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return MakeOutputStream(params, std::string());
+}
+
+AudioOutputStream* AudioManagerPulse::MakeLowLatencyOutputStream(
+ const AudioParameters& params, const std::string& input_device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ return MakeOutputStream(params, input_device_id);
+}
+
+AudioInputStream* AudioManagerPulse::MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return MakeInputStream(params, device_id);
+}
+
+AudioInputStream* AudioManagerPulse::MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ return MakeInputStream(params, device_id);
+}
+
+AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) {
+ static const int kDefaultOutputBufferSize = 512;
+
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ int buffer_size = kDefaultOutputBufferSize;
+ int bits_per_sample = 16;
+ int input_channels = 0;
+ int sample_rate;
+ if (input_params.IsValid()) {
+ bits_per_sample = input_params.bits_per_sample();
+ channel_layout = input_params.channel_layout();
+ input_channels = input_params.input_channels();
+ buffer_size = std::min(buffer_size, input_params.frames_per_buffer());
+ sample_rate = input_params.sample_rate();
+ } else {
+ sample_rate = GetNativeSampleRate();
+ }
+
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ buffer_size = user_buffer_size;
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
+ sample_rate, bits_per_sample, buffer_size);
+}
+
+AudioOutputStream* AudioManagerPulse::MakeOutputStream(
+ const AudioParameters& params, const std::string& input_device_id) {
+ if (params.input_channels()) {
+ return new PulseAudioUnifiedStream(params, input_device_id, this);
+ }
+
+ return new PulseAudioOutputStream(params, this);
+}
+
+AudioInputStream* AudioManagerPulse::MakeInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ return new PulseAudioInputStream(this, device_id, params,
+ input_mainloop_, input_context_);
+}
+
+int AudioManagerPulse::GetNativeSampleRate() {
+ DCHECK(input_mainloop_);
+ DCHECK(input_context_);
+ AutoPulseLock auto_lock(input_mainloop_);
+ pa_operation* operation = pa_context_get_server_info(
+ input_context_, SampleRateInfoCallback, this);
+ WaitForOperationCompletion(input_mainloop_, operation);
+
+ return native_input_sample_rate_;
+}
+
+bool AudioManagerPulse::Init() {
+ DCHECK(!input_mainloop_);
+
+#if defined(DLOPEN_PULSEAUDIO)
+ StubPathMap paths;
+
+  // Check if the pulse library is available.
+ paths[kModulePulse].push_back(kPulseLib);
+ if (!InitializeStubs(paths)) {
+ DLOG(WARNING) << "Failed on loading the Pulse library and symbols";
+ return false;
+ }
+#endif // defined(DLOPEN_PULSEAUDIO)
+
+ // Create a mainloop API and connect to the default server.
+ // The mainloop is the internal asynchronous API event loop.
+ input_mainloop_ = pa_threaded_mainloop_new();
+ if (!input_mainloop_)
+ return false;
+
+ // Start the threaded mainloop.
+ if (pa_threaded_mainloop_start(input_mainloop_))
+ return false;
+
+ // Lock the event loop object, effectively blocking the event loop thread
+ // from processing events. This is necessary.
+ AutoPulseLock auto_lock(input_mainloop_);
+
+ pa_mainloop_api* pa_mainloop_api =
+ pa_threaded_mainloop_get_api(input_mainloop_);
+ input_context_ = pa_context_new(pa_mainloop_api, "Chrome input");
+ if (!input_context_)
+ return false;
+
+ pa_context_set_state_callback(input_context_, &pulse::ContextStateCallback,
+ input_mainloop_);
+ if (pa_context_connect(input_context_, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL)) {
+ DLOG(ERROR) << "Failed to connect to the context. Error: "
+ << pa_strerror(pa_context_errno(input_context_));
+ return false;
+ }
+
+ // Wait until |input_context_| is ready. pa_threaded_mainloop_wait() must be
+ // called after pa_context_get_state() in case the context is already ready,
+ // otherwise pa_threaded_mainloop_wait() will hang indefinitely.
+ while (true) {
+ pa_context_state_t context_state = pa_context_get_state(input_context_);
+ if (!PA_CONTEXT_IS_GOOD(context_state))
+ return false;
+ if (context_state == PA_CONTEXT_READY)
+ break;
+ pa_threaded_mainloop_wait(input_mainloop_);
+ }
+
+ return true;
+}
+
+void AudioManagerPulse::DestroyPulse() {
+ if (!input_mainloop_) {
+ DCHECK(!input_context_);
+ return;
+ }
+
+ {
+ AutoPulseLock auto_lock(input_mainloop_);
+ if (input_context_) {
+ // Clear our state callback.
+ pa_context_set_state_callback(input_context_, NULL, NULL);
+ pa_context_disconnect(input_context_);
+ pa_context_unref(input_context_);
+ input_context_ = NULL;
+ }
+ }
+
+ pa_threaded_mainloop_stop(input_mainloop_);
+ pa_threaded_mainloop_free(input_mainloop_);
+ input_mainloop_ = NULL;
+}
+
+void AudioManagerPulse::InputDevicesInfoCallback(pa_context* context,
+ const pa_source_info* info,
+ int error, void *user_data) {
+ AudioManagerPulse* manager = reinterpret_cast<AudioManagerPulse*>(user_data);
+
+ if (error) {
+ // Signal the pulse object that it is done.
+ pa_threaded_mainloop_signal(manager->input_mainloop_, 0);
+ return;
+ }
+
+ // Exclude the output devices.
+ if (info->monitor_of_sink == PA_INVALID_INDEX) {
+ manager->devices_->push_back(media::AudioDeviceName(info->description,
+ info->name));
+ }
+}
+
+void AudioManagerPulse::OutputDevicesInfoCallback(pa_context* context,
+ const pa_sink_info* info,
+ int error, void *user_data) {
+ AudioManagerPulse* manager = reinterpret_cast<AudioManagerPulse*>(user_data);
+
+ if (error) {
+ // Signal the pulse object that it is done.
+ pa_threaded_mainloop_signal(manager->input_mainloop_, 0);
+ return;
+ }
+
+ manager->devices_->push_back(media::AudioDeviceName(info->description,
+ info->name));
+}
+
+void AudioManagerPulse::SampleRateInfoCallback(pa_context* context,
+ const pa_server_info* info,
+ void* user_data) {
+ AudioManagerPulse* manager = reinterpret_cast<AudioManagerPulse*>(user_data);
+
+ manager->native_input_sample_rate_ = info->sample_spec.rate;
+ pa_threaded_mainloop_signal(manager->input_mainloop_, 0);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.h b/chromium/media/audio/pulse/audio_manager_pulse.h
new file mode 100644
index 00000000000..6dfebaeff39
--- /dev/null
+++ b/chromium/media/audio/pulse/audio_manager_pulse.h
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_PULSE_AUDIO_MANAGER_PULSE_H_
+#define MEDIA_AUDIO_PULSE_AUDIO_MANAGER_PULSE_H_
+
+#include <pulse/pulseaudio.h>
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "media/audio/audio_manager_base.h"
+
+namespace media {
+
+// AudioManager implementation backed by the PulseAudio asynchronous API
+// (the pa_threaded_mainloop model). A single threaded mainloop/context
+// pair is shared by device enumeration, sample-rate queries and the
+// streams this manager creates.
+class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
+ public:
+  AudioManagerPulse();
+  virtual ~AudioManagerPulse();
+
+  static AudioManager* Create();
+
+  // Implementation of AudioManager.
+  virtual bool HasAudioOutputDevices() OVERRIDE;
+  virtual bool HasAudioInputDevices() OVERRIDE;
+  virtual void ShowAudioInputSettings() OVERRIDE;
+  virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
+      OVERRIDE;
+  virtual AudioParameters GetInputStreamParameters(
+      const std::string& device_id) OVERRIDE;
+
+  // Implementation of AudioManagerBase.
+  virtual AudioOutputStream* MakeLinearOutputStream(
+      const AudioParameters& params) OVERRIDE;
+  virtual AudioOutputStream* MakeLowLatencyOutputStream(
+      const AudioParameters& params,
+      const std::string& input_device_id) OVERRIDE;
+  virtual AudioInputStream* MakeLinearInputStream(
+      const AudioParameters& params, const std::string& device_id) OVERRIDE;
+  virtual AudioInputStream* MakeLowLatencyInputStream(
+      const AudioParameters& params, const std::string& device_id) OVERRIDE;
+
+ protected:
+  virtual AudioParameters GetPreferredOutputStreamParameters(
+      const AudioParameters& input_params) OVERRIDE;
+
+ private:
+  // Initializes the PulseAudio connection; counterpart of DestroyPulse().
+  bool Init();
+  // Tears down |input_context_| and |input_mainloop_|.
+  void DestroyPulse();
+
+  // Callback to get the devices' info like names, used by GetInputDevices().
+  static void InputDevicesInfoCallback(pa_context* context,
+                                       const pa_source_info* info,
+                                       int error, void* user_data);
+  static void OutputDevicesInfoCallback(pa_context* context,
+                                        const pa_sink_info* info,
+                                        int error, void* user_data);
+
+  // Callback to get the native sample rate of PulseAudio, used by
+  // GetNativeSampleRate().
+  static void SampleRateInfoCallback(pa_context* context,
+                                     const pa_server_info* info,
+                                     void* user_data);
+
+  // Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
+  AudioOutputStream* MakeOutputStream(const AudioParameters& params,
+                                      const std::string& input_device_id);
+
+  // Called by MakeLinearInputStream and MakeLowLatencyInputStream.
+  AudioInputStream* MakeInputStream(const AudioParameters& params,
+                                    const std::string& device_id);
+
+  // Gets the native sample rate of Pulse.
+  int GetNativeSampleRate();
+
+  // Threaded mainloop and context shared by all Pulse operations here.
+  pa_threaded_mainloop* input_mainloop_;
+  pa_context* input_context_;
+  // NOTE(review): presumably a non-owning scratch pointer aimed at the
+  // caller's list while an enumeration callback runs -- confirm against
+  // GetAudioInputDeviceNames().
+  AudioDeviceNames* devices_;
+  // Server sample rate cached by SampleRateInfoCallback().
+  int native_input_sample_rate_;
+
+  DISALLOW_COPY_AND_ASSIGN(AudioManagerPulse);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_PULSE_AUDIO_MANAGER_PULSE_H_
diff --git a/chromium/media/audio/pulse/pulse.sigs b/chromium/media/audio/pulse/pulse.sigs
new file mode 100644
index 00000000000..b5d927c754c
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse.sigs
@@ -0,0 +1,52 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+#------------------------------------------------
+# Functions from pulse used in media code.
+#------------------------------------------------
+pa_mainloop_api* pa_threaded_mainloop_get_api(pa_threaded_mainloop* m);
+void pa_threaded_mainloop_free(pa_threaded_mainloop* m);
+pa_threaded_mainloop* pa_threaded_mainloop_new();
+void pa_threaded_mainloop_lock(pa_threaded_mainloop* m);
+int pa_threaded_mainloop_in_thread(pa_threaded_mainloop* m);
+void pa_threaded_mainloop_signal(pa_threaded_mainloop* m, int wait_for_accept);
+int pa_threaded_mainloop_start(pa_threaded_mainloop* m);
+void pa_threaded_mainloop_stop(pa_threaded_mainloop* m);
+void pa_threaded_mainloop_unlock(pa_threaded_mainloop* m);
+void pa_threaded_mainloop_wait(pa_threaded_mainloop* m);
+pa_channel_map* pa_channel_map_init(pa_channel_map* m);
+int pa_context_connect(pa_context* c, const char* server, pa_context_flags_t flags, const pa_spawn_api* api);
+void pa_context_disconnect(pa_context* c);
+pa_operation* pa_context_get_server_info(pa_context* c, pa_server_info_cb_t cb, void* userdata);
+pa_operation* pa_context_get_source_info_by_index(pa_context* c, uint32_t idx, pa_source_info_cb_t cb, void* userdata);
+pa_operation* pa_context_get_source_info_list(pa_context* c, pa_source_info_cb_t cb, void* userdata);
+pa_operation* pa_context_get_sink_info_list(pa_context* c, pa_sink_info_cb_t cb, void* userdata);
+pa_context_state_t pa_context_get_state(pa_context* c);
+pa_context* pa_context_new(pa_mainloop_api* mainloop, const char* name);
+pa_operation* pa_context_set_source_volume_by_index(pa_context* c, uint32_t idx, const pa_cvolume* volume, pa_context_success_cb_t cb, void* userdata);
+void pa_context_set_state_callback(pa_context* c, pa_context_notify_cb_t cb, void* userdata);
+pa_operation_state_t pa_operation_get_state(pa_operation* o);
+void pa_context_unref(pa_context* c);
+void pa_operation_unref(pa_operation* o);
+int pa_stream_begin_write(pa_stream* p, void** data, size_t* nbytes);
+int pa_stream_connect_playback(pa_stream* s, const char* dev, const pa_buffer_attr* attr, pa_stream_flags_t flags, const pa_cvolume* volume,pa_stream* sync_stream);
+int pa_stream_connect_record(pa_stream* s, const char* dev, const pa_buffer_attr* attr, pa_stream_flags_t flags);
+pa_operation* pa_stream_cork(pa_stream* s, int b, pa_stream_success_cb_t cb, void* userdata);
+int pa_stream_disconnect(pa_stream* s);
+int pa_stream_drop(pa_stream *p);
+pa_operation* pa_stream_flush(pa_stream* s, pa_stream_success_cb_t cb, void* userdata);
+uint32_t pa_stream_get_device_index(pa_stream* s);
+int pa_stream_get_latency(pa_stream* s, pa_usec_t* r_usec, int* negative);
+pa_stream_state_t pa_stream_get_state(pa_stream* p);
+pa_stream* pa_stream_new(pa_context* c, const char* name, const pa_sample_spec* ss, const pa_channel_map * map);
+size_t pa_stream_readable_size(pa_stream *p);
+int pa_stream_peek(pa_stream* p, const void** data, size_t* nbytes);
+void pa_stream_set_read_callback(pa_stream* p, pa_stream_request_cb_t cb, void* userdata);
+void pa_stream_set_state_callback(pa_stream* s, pa_stream_notify_cb_t cb, void* userdata);
+int pa_stream_write(pa_stream* p, const void* data, size_t nbytes, pa_free_cb_t free_cb, int64_t offset, pa_seek_mode_t seek);
+void pa_stream_set_write_callback(pa_stream *p, pa_stream_request_cb_t cb, void *userdata);
+void pa_stream_unref(pa_stream* s);
+int pa_context_errno(pa_context *c);
+const char* pa_strerror(int error);
+pa_cvolume* pa_cvolume_set(pa_cvolume* a, unsigned channels, pa_volume_t v);
diff --git a/chromium/media/audio/pulse/pulse_input.cc b/chromium/media/audio/pulse/pulse_input.cc
new file mode 100644
index 00000000000..54dfc1e05ab
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse_input.cc
@@ -0,0 +1,292 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/pulse/pulse_input.h"
+
+#include <pulse/pulseaudio.h>
+
+#include "base/logging.h"
+#include "media/audio/pulse/audio_manager_pulse.h"
+#include "media/audio/pulse/pulse_util.h"
+#include "media/base/seekable_buffer.h"
+
+namespace media {
+
+using pulse::AutoPulseLock;
+using pulse::WaitForOperationCompletion;
+
+// |mainloop| and |context| are owned by |audio_manager| and must outlive
+// this stream (they are held as weak pointers; see pulse_input.h). No
+// PulseAudio resources are created until Open().
+PulseAudioInputStream::PulseAudioInputStream(AudioManagerPulse* audio_manager,
+                                             const std::string& device_name,
+                                             const AudioParameters& params,
+                                             pa_threaded_mainloop* mainloop,
+                                             pa_context* context)
+    : audio_manager_(audio_manager),
+      callback_(NULL),
+      device_name_(device_name),
+      params_(params),
+      channels_(0),
+      volume_(0.0),
+      stream_started_(false),
+      pa_mainloop_(mainloop),
+      pa_context_(context),
+      handle_(NULL),
+      context_state_changed_(false) {
+  DCHECK(mainloop);
+  DCHECK(context);
+}
+
+PulseAudioInputStream::~PulseAudioInputStream() {
+  // All internal structures should already have been freed in Close(),
+  // which calls AudioManagerPulse::ReleaseInputStream() which deletes
+  // this object.
+  DCHECK(!handle_);
+}
+
+// Creates the PulseAudio record stream and the intermediate buffers.
+// Returns false if the stream could not be created.
+bool PulseAudioInputStream::Open() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  AutoPulseLock auto_lock(pa_mainloop_);
+  // StreamNotifyCallback is installed as the stream state callback and
+  // will signal the mainloop on state changes.
+  if (!pulse::CreateInputStream(pa_mainloop_, pa_context_, &handle_, params_,
+                                device_name_, &StreamNotifyCallback, this)) {
+    return false;
+  }
+
+  DCHECK(handle_);
+
+  // |buffer_| accumulates raw data drained from PulseAudio (capacity of
+  // two packets) until a full packet can be delivered in ReadData().
+  buffer_.reset(new media::SeekableBuffer(0, 2 * params_.GetBytesPerBuffer()));
+  audio_data_buffer_.reset(new uint8[params_.GetBytesPerBuffer()]);
+  return true;
+}
+
+// Starts capturing: installs the read callback, uncorks the stream and
+// begins delivering data to |callback|. A no-op if already started.
+void PulseAudioInputStream::Start(AudioInputCallback* callback) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(callback);
+  DCHECK(handle_);
+
+  // AGC needs to be started out of the lock.
+  StartAgc();
+
+  AutoPulseLock auto_lock(pa_mainloop_);
+
+  if (stream_started_)
+    return;
+
+  // Clean up the old buffer.
+  pa_stream_drop(handle_);
+  buffer_->Clear();
+
+  // Start the streaming.
+  callback_ = callback;
+  pa_stream_set_read_callback(handle_, &ReadCallback, this);
+  // NOTE(review): the return value of pa_stream_readable_size() is
+  // discarded and the call has no side effect -- looks removable.
+  pa_stream_readable_size(handle_);
+  stream_started_ = true;
+
+  // Uncork (resume) the stream; no completion callback is needed, but we
+  // still wait for the operation to finish.
+  pa_operation* operation = pa_stream_cork(handle_, 0, NULL, NULL);
+  WaitForOperationCompletion(pa_mainloop_, operation);
+}
+
+// Stops capturing: flushes pending data, removes the read callback and
+// corks (pauses) the stream. A no-op if not started.
+void PulseAudioInputStream::Stop() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  AutoPulseLock auto_lock(pa_mainloop_);
+  if (!stream_started_)
+    return;
+
+  // NOTE(review): StopAgc() runs while holding the pulse lock, whereas
+  // Start() deliberately calls StartAgc() outside it -- confirm there is
+  // no lock-ordering issue.
+  StopAgc();
+
+  // Set the flag to false to stop filling new data to soundcard.
+  stream_started_ = false;
+
+  // Discard any data still queued in the stream before pausing it.
+  pa_operation* operation = pa_stream_flush(handle_,
+                                            &pulse::StreamSuccessCallback,
+                                            pa_mainloop_);
+  WaitForOperationCompletion(pa_mainloop_, operation);
+
+  // Stop the stream.
+  pa_stream_set_read_callback(handle_, NULL, NULL);
+  operation = pa_stream_cork(handle_, 1, &pulse::StreamSuccessCallback,
+                             pa_mainloop_);
+  WaitForOperationCompletion(pa_mainloop_, operation);
+}
+
+// Releases all PulseAudio stream resources, notifies the client and asks
+// the manager to delete this object. Must be the last call on the stream.
+void PulseAudioInputStream::Close() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  {
+    AutoPulseLock auto_lock(pa_mainloop_);
+    if (handle_) {
+      // Disable all the callbacks before disconnecting.
+      pa_stream_set_state_callback(handle_, NULL, NULL);
+      // NOTE(review): pa_stream_flush() returns a pa_operation* which is
+      // never unreffed here (the completion callback is NULL), leaking
+      // the operation object.
+      pa_stream_flush(handle_, NULL, NULL);
+
+      if (pa_stream_get_state(handle_) != PA_STREAM_UNCONNECTED)
+        pa_stream_disconnect(handle_);
+
+      // Release PulseAudio structures.
+      pa_stream_unref(handle_);
+      handle_ = NULL;
+    }
+  }
+
+  if (callback_)
+    callback_->OnClose(this);
+
+  // Signal to the manager that we're closed and can be removed.
+  // This should be the last call in the function as it deletes "this".
+  audio_manager_->ReleaseInputStream(this);
+}
+
+double PulseAudioInputStream::GetMaxVolume() {
+  // PA_VOLUME_NORM is PulseAudio's 100% (unamplified) volume; volumes in
+  // this class are expressed on that raw pa_volume_t scale.
+  return static_cast<double>(PA_VOLUME_NORM);
+}
+
+// Sets the capture device (source) volume. |volume| is passed through to
+// PulseAudio unscaled, i.e. in pa_volume_t units on the
+// [0, PA_VOLUME_NORM] scale reported by GetMaxVolume().
+void PulseAudioInputStream::SetVolume(double volume) {
+  AutoPulseLock auto_lock(pa_mainloop_);
+  if (!handle_)
+    return;
+
+  size_t index = pa_stream_get_device_index(handle_);
+  pa_operation* operation = NULL;
+  if (!channels_) {
+    // Get the number of channels for the source only when the |channels_| is 0.
+    // We are assuming the stream source is not changed on the fly here.
+    operation = pa_context_get_source_info_by_index(
+        pa_context_, index, &VolumeCallback, this);
+    WaitForOperationCompletion(pa_mainloop_, operation);
+    if (!channels_) {
+      DLOG(WARNING) << "Failed to get the number of channels for the source";
+      return;
+    }
+  }
+
+  // Apply the same volume to every channel of the source.
+  pa_cvolume pa_volume;
+  pa_cvolume_set(&pa_volume, channels_, volume);
+  operation = pa_context_set_source_volume_by_index(
+      pa_context_, index, &pa_volume, NULL, NULL);
+
+  // Don't need to wait for this task to complete.
+  pa_operation_unref(operation);
+}
+
+// Returns the source volume in pa_volume_t units (see GetMaxVolume()).
+// Synchronous when called off the pulse thread; when called from the
+// pulse thread itself it cannot block, so it kicks off an async query and
+// returns 0.0 -- |volume_| is refreshed later by VolumeCallback().
+double PulseAudioInputStream::GetVolume() {
+  if (pa_threaded_mainloop_in_thread(pa_mainloop_)) {
+    // When being called by the pulse thread, GetVolume() is asynchronous and
+    // called under AutoPulseLock.
+    if (!handle_)
+      return 0.0;
+
+    size_t index = pa_stream_get_device_index(handle_);
+    pa_operation* operation = pa_context_get_source_info_by_index(
+        pa_context_, index, &VolumeCallback, this);
+    // Do not wait for the operation since we can't block the pulse thread.
+    pa_operation_unref(operation);
+
+    // Return zero and the callback will asynchronously update the |volume_|.
+    return 0.0;
+  } else {
+    // Called by other thread, put an AutoPulseLock and wait for the operation.
+    AutoPulseLock auto_lock(pa_mainloop_);
+    if (!handle_)
+      return 0.0;
+
+    size_t index = pa_stream_get_device_index(handle_);
+    pa_operation* operation = pa_context_get_source_info_by_index(
+        pa_context_, index, &VolumeCallback, this);
+    WaitForOperationCompletion(pa_mainloop_, operation);
+
+    return volume_;
+  }
+}
+
+// static, used by pa_stream_set_read_callback.
+// |length| is unused: ReadData() drains everything readable itself.
+void PulseAudioInputStream::ReadCallback(pa_stream* handle,
+                                         size_t length,
+                                         void* user_data) {
+  PulseAudioInputStream* stream =
+      reinterpret_cast<PulseAudioInputStream*>(user_data);
+
+  stream->ReadData();
+}
+
+// static, used by pa_context_get_source_info_by_index.
+// Caches the source's channel count in |channels_| and stores the max
+// per-channel volume (raw pa_volume_t units) in |volume_|; callers
+// normalize by dividing by GetMaxVolume().
+void PulseAudioInputStream::VolumeCallback(pa_context* context,
+                                           const pa_source_info* info,
+                                           int error, void* user_data) {
+  PulseAudioInputStream* stream =
+      reinterpret_cast<PulseAudioInputStream*>(user_data);
+
+  if (error) {
+    // End-of-list/error sentinel: just wake the waiting thread.
+    pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
+    return;
+  }
+
+  if (stream->channels_ != info->channel_map.channels)
+    stream->channels_ = info->channel_map.channels;
+
+  pa_volume_t volume = PA_VOLUME_MUTED;  // Minimum possible value.
+  // Use the max volume of any channel as the volume.
+  for (int i = 0; i < stream->channels_; ++i) {
+    if (volume < info->volume.values[i])
+      volume = info->volume.values[i];
+  }
+
+  // It is safe to access |volume_| here since VolumeCallback() is running
+  // under PulseLock.
+  stream->volume_ = static_cast<double>(volume);
+}
+
+// static, used by pa_stream_set_state_callback.
+// Forwards PA_STREAM_FAILED to the client (if any) and wakes whichever
+// thread is blocked on the mainloop waiting for a state change.
+void PulseAudioInputStream::StreamNotifyCallback(pa_stream* s,
+                                                 void* user_data) {
+  PulseAudioInputStream* stream =
+      reinterpret_cast<PulseAudioInputStream*>(user_data);
+  if (s && stream->callback_ &&
+      pa_stream_get_state(s) == PA_STREAM_FAILED) {
+    stream->callback_->OnError(stream);
+  }
+
+  pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
+}
+
+// Drains all readable data from the PulseAudio stream into |buffer_| and
+// delivers it to the client in fixed-size packets. Runs on the pulse
+// thread (from ReadCallback) under the mainloop lock.
+void PulseAudioInputStream::ReadData() {
+  uint32 hardware_delay = pulse::GetHardwareLatencyInBytes(
+      handle_, params_.sample_rate(), params_.GetBytesPerFrame());
+
+  // Update the AGC volume level once every second. Note that,
+  // |volume| is also updated each time SetVolume() is called
+  // through IPC by the render-side AGC.
+  // We disregard the |normalized_volume| from GetAgcVolume()
+  // and use the value calculated by |volume_|.
+  double normalized_volume = 0.0;
+  GetAgcVolume(&normalized_volume);
+  normalized_volume = volume_ / GetMaxVolume();
+
+  do {
+    size_t length = 0;
+    const void* data = NULL;
+    pa_stream_peek(handle_, &data, &length);
+    // NOTE(review): per the pa_stream_peek() contract a NULL |data| with
+    // nonzero |length| indicates a hole in the stream; this exits the
+    // loop without calling pa_stream_drop() -- confirm that is intended.
+    if (!data || length == 0)
+      break;
+
+    buffer_->Append(reinterpret_cast<const uint8*>(data), length);
+
+    // Checks if we still have data.
+    pa_stream_drop(handle_);
+  } while (pa_stream_readable_size(handle_) > 0);
+
+  // Hand the buffered data to the client one packet at a time.
+  int packet_size = params_.GetBytesPerBuffer();
+  while (buffer_->forward_bytes() >= packet_size) {
+    buffer_->Read(audio_data_buffer_.get(), packet_size);
+    callback_->OnData(this, audio_data_buffer_.get(), packet_size,
+                      hardware_delay, normalized_volume);
+
+    if (buffer_->forward_bytes() < packet_size)
+      break;
+
+    // TODO(xians): Remove once PPAPI is using circular buffers.
+    DVLOG(1) << "OnData is being called consecutively, sleep 5ms to "
+             << "wait until render consumes the data";
+    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(5));
+  }
+
+  // Wake any thread blocked on the mainloop.
+  pa_threaded_mainloop_signal(pa_mainloop_, 0);
+}
+
+} // namespace media
diff --git a/chromium/media/audio/pulse/pulse_input.h b/chromium/media/audio/pulse/pulse_input.h
new file mode 100644
index 00000000000..7566eacf10b
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse_input.h
@@ -0,0 +1,85 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_PULSE_PULSE_INPUT_H_
+#define MEDIA_AUDIO_PULSE_PULSE_INPUT_H_
+
+#include <string>
+
+#include "base/threading/thread_checker.h"
+#include "media/audio/agc_audio_stream.h"
+#include "media/audio/audio_device_name.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+struct pa_context;
+struct pa_source_info;
+struct pa_stream;
+struct pa_threaded_mainloop;
+
+namespace media {
+
+class AudioManagerPulse;
+class SeekableBuffer;
+
+// Captures audio from a PulseAudio source using the asynchronous
+// pa_threaded_mainloop API. AGC support is mixed in via AgcAudioStream.
+// Public methods must be called on the thread that created the object
+// (enforced by |thread_checker_| DCHECKs in the implementation).
+class PulseAudioInputStream : public AgcAudioStream<AudioInputStream> {
+ public:
+  PulseAudioInputStream(AudioManagerPulse* audio_manager,
+                        const std::string& device_name,
+                        const AudioParameters& params,
+                        pa_threaded_mainloop* mainloop,
+                        pa_context* context);
+
+  virtual ~PulseAudioInputStream();
+
+  // Implementation of AudioInputStream.
+  virtual bool Open() OVERRIDE;
+  virtual void Start(AudioInputCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  virtual double GetMaxVolume() OVERRIDE;
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual double GetVolume() OVERRIDE;
+
+ private:
+  // PulseAudio Callbacks.
+  static void ReadCallback(pa_stream* handle, size_t length, void* user_data);
+  static void StreamNotifyCallback(pa_stream* stream, void* user_data);
+  static void VolumeCallback(pa_context* context, const pa_source_info* info,
+                             int error, void* user_data);
+
+  // Helper for the ReadCallback.
+  void ReadData();
+
+  AudioManagerPulse* audio_manager_;
+  AudioInputCallback* callback_;
+  std::string device_name_;
+  AudioParameters params_;
+  // Channel count of the source, cached lazily by VolumeCallback().
+  int channels_;
+  // Source volume in raw pa_volume_t units; see GetMaxVolume().
+  double volume_;
+  bool stream_started_;
+
+  // Holds the data from the OS.
+  scoped_ptr<media::SeekableBuffer> buffer_;
+
+  // Temporary storage for recorded data. It gets a packet of data from
+  // |buffer_| and deliver the data to OnData() callback.
+  scoped_ptr<uint8[]> audio_data_buffer_;
+
+  // PulseAudio API structs.
+  pa_threaded_mainloop* pa_mainloop_;  // Weak.
+  pa_context* pa_context_;  // Weak.
+  pa_stream_* handle_;
+
+  // Flag indicating the state of the context has been changed.
+  // NOTE(review): set in the constructor but never read in the visible
+  // implementation -- possibly dead.
+  bool context_state_changed_;
+
+  base::ThreadChecker thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(PulseAudioInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_PULSE_PULSE_INPUT_H_
diff --git a/chromium/media/audio/pulse/pulse_output.cc b/chromium/media/audio/pulse/pulse_output.cc
new file mode 100644
index 00000000000..c40d4f65051
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse_output.cc
@@ -0,0 +1,216 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/pulse/pulse_output.h"
+
+#include <pulse/pulseaudio.h>
+
+#include "base/message_loop/message_loop.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/pulse/pulse_util.h"
+
+namespace media {
+
+using pulse::AutoPulseLock;
+using pulse::WaitForOperationCompletion;
+
+// static, pa_stream_notify_cb
+// Also wakes any thread blocked on the mainloop waiting for a state
+// change (e.g. during stream creation).
+void PulseAudioOutputStream::StreamNotifyCallback(pa_stream* s, void* p_this) {
+  PulseAudioOutputStream* stream = static_cast<PulseAudioOutputStream*>(p_this);
+
+  // Forward unexpected failures to the AudioSourceCallback if available. All
+  // these variables are only modified under pa_threaded_mainloop_lock() so this
+  // should be thread safe.
+  if (s && stream->source_callback_ &&
+      pa_stream_get_state(s) == PA_STREAM_FAILED) {
+    stream->source_callback_->OnError(stream);
+  }
+
+  pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
+}
+
+// static, pa_stream_request_cb_t
+// Invoked on the pulse thread whenever the server wants more audio data.
+void PulseAudioOutputStream::StreamRequestCallback(pa_stream* s, size_t len,
+                                                   void* p_this) {
+  // Fulfill write request; must always result in a pa_stream_write() call.
+  static_cast<PulseAudioOutputStream*>(p_this)->FulfillWriteRequest(len);
+}
+
+// All PulseAudio setup (mainloop, context, stream) is deferred to Open();
+// the constructor only records parameters and allocates |audio_bus_|.
+PulseAudioOutputStream::PulseAudioOutputStream(const AudioParameters& params,
+                                               AudioManagerBase* manager)
+    : params_(params),
+      manager_(manager),
+      pa_context_(NULL),
+      pa_mainloop_(NULL),
+      pa_stream_(NULL),
+      volume_(1.0f),
+      source_callback_(NULL) {
+  DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+
+  CHECK(params_.IsValid());
+  audio_bus_ = AudioBus::Create(params_);
+}
+
+PulseAudioOutputStream::~PulseAudioOutputStream() {
+  // All internal structures should already have been freed in Close(), which
+  // calls AudioManagerBase::ReleaseOutputStream() which deletes this object.
+  // These DCHECKs verify that Reset() ran before destruction.
+  DCHECK(!pa_stream_);
+  DCHECK(!pa_context_);
+  DCHECK(!pa_mainloop_);
+}
+
+// Creates the mainloop, context and playback stream. Blocks inside
+// PulseAudio until the context/stream are ready (see the header warning).
+bool PulseAudioOutputStream::Open() {
+  DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+  return pulse::CreateOutputStream(&pa_mainloop_, &pa_context_, &pa_stream_,
+                                   params_, &StreamNotifyCallback,
+                                   &StreamRequestCallback, this);
+}
+
+// Close() helper: drains and tears down the stream, context and mainloop
+// created by Open(), leaving all three pointers NULL. Safe to call when
+// Open() failed or was never called.
+void PulseAudioOutputStream::Reset() {
+  if (!pa_mainloop_) {
+    DCHECK(!pa_stream_);
+    DCHECK(!pa_context_);
+    return;
+  }
+
+  {
+    AutoPulseLock auto_lock(pa_mainloop_);
+
+    // Close the stream.
+    if (pa_stream_) {
+      // Ensure all samples are played out before shutdown.
+      pa_operation* operation = pa_stream_flush(
+          pa_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
+      WaitForOperationCompletion(pa_mainloop_, operation);
+
+      // Release PulseAudio structures.
+      // NOTE(review): callbacks are cleared *after* pa_stream_disconnect()
+      // here, the opposite order from PulseAudioInputStream::Close() --
+      // confirm the ordering is intentional.
+      pa_stream_disconnect(pa_stream_);
+      pa_stream_set_write_callback(pa_stream_, NULL, NULL);
+      pa_stream_set_state_callback(pa_stream_, NULL, NULL);
+      pa_stream_unref(pa_stream_);
+      pa_stream_ = NULL;
+    }
+
+    if (pa_context_) {
+      pa_context_disconnect(pa_context_);
+      pa_context_set_state_callback(pa_context_, NULL, NULL);
+      pa_context_unref(pa_context_);
+      pa_context_ = NULL;
+    }
+  }
+
+  // The mainloop must be stopped and freed outside its own lock.
+  pa_threaded_mainloop_stop(pa_mainloop_);
+  pa_threaded_mainloop_free(pa_mainloop_);
+  pa_mainloop_ = NULL;
+}
+
+// Frees all PulseAudio resources and asks the manager to delete this
+// object. Must be the last call made on the stream.
+void PulseAudioOutputStream::Close() {
+  DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+
+  Reset();
+
+  // Signal to the manager that we're closed and can be removed.
+  // This should be the last call in the function as it deletes "this".
+  manager_->ReleaseOutputStream(this);
+}
+
+// Writes at least |requested_bytes| of audio to the stream in whole
+// buffer-sized chunks (so it may write slightly more than requested).
+// Outputs silence when no |source_callback_| is set or it under-delivers.
+// Runs on the pulse thread from StreamRequestCallback().
+void PulseAudioOutputStream::FulfillWriteRequest(size_t requested_bytes) {
+  // Signed so the loop terminates cleanly when we overshoot the request.
+  int bytes_remaining = requested_bytes;
+  while (bytes_remaining > 0) {
+    void* buffer = NULL;
+    size_t bytes_to_fill = params_.GetBytesPerBuffer();
+    CHECK_GE(pa_stream_begin_write(pa_stream_, &buffer, &bytes_to_fill), 0);
+    // Hard assumption that Pulse always grants a full packet of space.
+    CHECK_EQ(bytes_to_fill, static_cast<size_t>(params_.GetBytesPerBuffer()));
+
+    int frames_filled = 0;
+    if (source_callback_) {
+      uint32 hardware_delay = pulse::GetHardwareLatencyInBytes(
+          pa_stream_, params_.sample_rate(),
+          params_.GetBytesPerFrame());
+      frames_filled = source_callback_->OnMoreData(
+          audio_bus_.get(), AudioBuffersState(0, hardware_delay));
+    }
+
+    // Zero any unfilled data so it plays back as silence.
+    if (frames_filled < audio_bus_->frames()) {
+      audio_bus_->ZeroFramesPartial(
+          frames_filled, audio_bus_->frames() - frames_filled);
+    }
+
+    // Note: If this ever changes to output raw float the data must be clipped
+    // and sanitized since it may come from an untrusted source such as NaCl.
+    audio_bus_->Scale(volume_);
+    audio_bus_->ToInterleaved(
+        audio_bus_->frames(), params_.bits_per_sample() / 8, buffer);
+
+    if (pa_stream_write(pa_stream_, buffer, bytes_to_fill, NULL, 0LL,
+                        PA_SEEK_RELATIVE) < 0) {
+      if (source_callback_) {
+        source_callback_->OnError(this);
+      }
+    }
+
+    bytes_remaining -= bytes_to_fill;
+  }
+}
+
+// Begins playback by uncorking the stream; write callbacks then start
+// arriving via StreamRequestCallback().
+void PulseAudioOutputStream::Start(AudioSourceCallback* callback) {
+  DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+  CHECK(callback);
+  CHECK(pa_stream_);
+
+  AutoPulseLock auto_lock(pa_mainloop_);
+
+  // Ensure the context and stream are ready.
+  // NOTE(review): with &&, OnError fires only when *both* the context and
+  // the stream are not ready; if either alone must be ready, this should
+  // be || -- confirm intended.
+  if (pa_context_get_state(pa_context_) != PA_CONTEXT_READY &&
+      pa_stream_get_state(pa_stream_) != PA_STREAM_READY) {
+    callback->OnError(this);
+    return;
+  }
+
+  source_callback_ = callback;
+
+  // Uncork (resume) the stream.
+  pa_operation* operation = pa_stream_cork(
+      pa_stream_, 0, &pulse::StreamSuccessCallback, pa_mainloop_);
+  WaitForOperationCompletion(pa_mainloop_, operation);
+}
+
+// Pauses playback: detaches the source callback, flushes buffered audio
+// and corks the stream. Safe to call repeatedly.
+void PulseAudioOutputStream::Stop() {
+  DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+
+  // Cork (pause) the stream. Waiting for the main loop lock will ensure
+  // outstanding callbacks have completed.
+  AutoPulseLock auto_lock(pa_mainloop_);
+
+  // Set |source_callback_| to NULL so all FulfillWriteRequest() calls which may
+  // occur while waiting on the flush and cork exit immediately.
+  source_callback_ = NULL;
+
+  // Flush the stream prior to cork, doing so after will cause hangs. Write
+  // callbacks are suspended while inside pa_threaded_mainloop_lock() so this
+  // is all thread safe.
+  pa_operation* operation = pa_stream_flush(
+      pa_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
+  WaitForOperationCompletion(pa_mainloop_, operation);
+
+  operation = pa_stream_cork(pa_stream_, 1, &pulse::StreamSuccessCallback,
+                             pa_mainloop_);
+  WaitForOperationCompletion(pa_mainloop_, operation);
+}
+
+// Stores the [0.0, 1.0] volume; applied in software by audio_bus_->Scale()
+// in FulfillWriteRequest(), not via PulseAudio sink volume.
+void PulseAudioOutputStream::SetVolume(double volume) {
+  DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+
+  volume_ = static_cast<float>(volume);
+}
+
+// Returns the software volume last set via SetVolume().
+void PulseAudioOutputStream::GetVolume(double* volume) {
+  DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+
+  *volume = volume_;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/pulse/pulse_output.h b/chromium/media/audio/pulse/pulse_output.h
new file mode 100644
index 00000000000..583cce7e5bd
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse_output.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Creates an audio output stream based on the PulseAudio asynchronous API;
+// specifically using the pa_threaded_mainloop model.
+//
+// If the stream is successfully opened, Close() must be called before the
+// stream is deleted as Close() is responsible for ensuring resource cleanup
+// occurs.
+//
+// This object is designed so that all AudioOutputStream methods will be called
+// on the same thread that created the object.
+//
+// WARNING: This object blocks on internal PulseAudio calls in Open() while
+// waiting for PulseAudio's context structure to be ready. It also blocks in
+// inside PulseAudio in Start() and repeated during playback, waiting for
+// PulseAudio write callbacks to occur.
+
+#ifndef MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
+#define MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+struct pa_context;
+struct pa_operation;
+struct pa_stream;
+struct pa_threaded_mainloop;
+
+namespace media {
+class AudioManagerBase;
+
+class PulseAudioOutputStream : public AudioOutputStream {
+ public:
+  PulseAudioOutputStream(const AudioParameters& params,
+                         AudioManagerBase* manager);
+
+  virtual ~PulseAudioOutputStream();
+
+  // Implementation of AudioOutputStream.
+  virtual bool Open() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual void GetVolume(double* volume) OVERRIDE;
+
+ private:
+  // Called by PulseAudio when |pa_stream_| change state. If an unexpected
+  // failure state change happens and |source_callback_| is set
+  // this method will forward the error via OnError().
+  static void StreamNotifyCallback(pa_stream* s, void* p_this);
+
+  // Called by PulseAudio when it needs more audio data.
+  static void StreamRequestCallback(pa_stream* s, size_t len, void* p_this);
+
+  // Fulfill a write request from the write request callback. Outputs silence
+  // if the request could not be fulfilled.
+  void FulfillWriteRequest(size_t requested_bytes);
+
+  // Close() helper function to free internal structs.
+  void Reset();
+
+  // AudioParameters from the constructor.
+  const AudioParameters params_;
+
+  // Audio manager that created us. Used to report that we've closed.
+  // Non-owning; the manager outlives the stream.
+  AudioManagerBase* manager_;
+
+  // PulseAudio API structs. Owned by this object; created in Open() and
+  // released in Reset().
+  pa_context* pa_context_;
+  pa_threaded_mainloop* pa_mainloop_;
+  pa_stream* pa_stream_;
+
+  // Float representation of volume from 0.0 to 1.0.
+  float volume_;
+
+  // Callback to audio data source. Must only be modified while holding a lock
+  // on |pa_mainloop_| via pa_threaded_mainloop_lock().
+  AudioSourceCallback* source_callback_;
+
+  // Container for retrieving data from AudioSourceCallback::OnMoreData().
+  scoped_ptr<AudioBus> audio_bus_;
+
+  DISALLOW_COPY_AND_ASSIGN(PulseAudioOutputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
diff --git a/chromium/media/audio/pulse/pulse_stub_header.fragment b/chromium/media/audio/pulse/pulse_stub_header.fragment
new file mode 100644
index 00000000000..2a2d3e7552b
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse_stub_header.fragment
@@ -0,0 +1,8 @@
+// The extra include header needed in the generated stub file for defining
+// various Pulse types.
+
+extern "C" {
+
+#include <pulse/pulseaudio.h>
+
+}
diff --git a/chromium/media/audio/pulse/pulse_unified.cc b/chromium/media/audio/pulse/pulse_unified.cc
new file mode 100644
index 00000000000..c68a797469f
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse_unified.cc
@@ -0,0 +1,292 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/pulse/pulse_unified.h"
+
+#include "base/message_loop/message_loop.h"
+#include "base/time/time.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/pulse/pulse_util.h"
+#include "media/base/seekable_buffer.h"
+
+namespace media {
+
+using pulse::AutoPulseLock;
+using pulse::WaitForOperationCompletion;
+
+static const int kFifoSizeInPackets = 10;
+
+// static, pa_stream_notify_cb
+void PulseAudioUnifiedStream::StreamNotifyCallback(pa_stream* s,
+ void* user_data) {
+ PulseAudioUnifiedStream* stream =
+ static_cast<PulseAudioUnifiedStream*>(user_data);
+
+ // Forward unexpected failures to the AudioSourceCallback if available. All
+ // these variables are only modified under pa_threaded_mainloop_lock() so this
+ // should be thread safe.
+ if (s && stream->source_callback_ &&
+ pa_stream_get_state(s) == PA_STREAM_FAILED) {
+ stream->source_callback_->OnError(stream);
+ }
+
+ pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
+}
+
+// static, used by pa_stream_set_read_callback.
+void PulseAudioUnifiedStream::ReadCallback(pa_stream* handle, size_t length,
+ void* user_data) {
+ static_cast<PulseAudioUnifiedStream*>(user_data)->ReadData();
+}
+
+PulseAudioUnifiedStream::PulseAudioUnifiedStream(
+ const AudioParameters& params,
+ const std::string& input_device_id,
+ AudioManagerBase* manager)
+ : params_(params),
+ input_device_id_(input_device_id),
+ manager_(manager),
+ pa_context_(NULL),
+ pa_mainloop_(NULL),
+ input_stream_(NULL),
+ output_stream_(NULL),
+ volume_(1.0f),
+ source_callback_(NULL) {
+ DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ CHECK(params_.IsValid());
+ input_bus_ = AudioBus::Create(params_);
+ output_bus_ = AudioBus::Create(params_);
+}
+
+PulseAudioUnifiedStream::~PulseAudioUnifiedStream() {
+ // All internal structures should already have been freed in Close(), which
+ // calls AudioManagerBase::ReleaseOutputStream() which deletes this object.
+ DCHECK(!input_stream_);
+ DCHECK(!output_stream_);
+ DCHECK(!pa_context_);
+ DCHECK(!pa_mainloop_);
+}
+
+bool PulseAudioUnifiedStream::Open() {
+ DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ // Prepare the recording buffers for the callbacks.
+ fifo_.reset(new media::SeekableBuffer(
+ 0, kFifoSizeInPackets * params_.GetBytesPerBuffer()));
+ input_data_buffer_.reset(new uint8[params_.GetBytesPerBuffer()]);
+
+ if (!pulse::CreateOutputStream(&pa_mainloop_, &pa_context_, &output_stream_,
+ params_, &StreamNotifyCallback, NULL, this))
+ return false;
+
+ if (!pulse::CreateInputStream(pa_mainloop_, pa_context_, &input_stream_,
+ params_, input_device_id_,
+ &StreamNotifyCallback, this))
+ return false;
+
+ DCHECK(pa_mainloop_);
+ DCHECK(pa_context_);
+ DCHECK(input_stream_);
+ DCHECK(output_stream_);
+ return true;
+}
+
+void PulseAudioUnifiedStream::Reset() {
+ if (!pa_mainloop_) {
+ DCHECK(!input_stream_);
+ DCHECK(!output_stream_);
+ DCHECK(!pa_context_);
+ return;
+ }
+
+ {
+ AutoPulseLock auto_lock(pa_mainloop_);
+
+ // Close the input stream.
+ if (input_stream_) {
+ // Disable all the callbacks before disconnecting.
+ pa_stream_set_state_callback(input_stream_, NULL, NULL);
+ pa_stream_flush(input_stream_, NULL, NULL);
+ pa_stream_disconnect(input_stream_);
+
+ // Release PulseAudio structures.
+ pa_stream_unref(input_stream_);
+ input_stream_ = NULL;
+ }
+
+    // Close the output stream.
+ if (output_stream_) {
+ // Release PulseAudio output stream structures.
+ pa_stream_set_state_callback(output_stream_, NULL, NULL);
+ pa_stream_disconnect(output_stream_);
+ pa_stream_unref(output_stream_);
+ output_stream_ = NULL;
+ }
+
+ if (pa_context_) {
+ pa_context_disconnect(pa_context_);
+ pa_context_set_state_callback(pa_context_, NULL, NULL);
+ pa_context_unref(pa_context_);
+ pa_context_ = NULL;
+ }
+ }
+
+ pa_threaded_mainloop_stop(pa_mainloop_);
+ pa_threaded_mainloop_free(pa_mainloop_);
+ pa_mainloop_ = NULL;
+}
+
+void PulseAudioUnifiedStream::Close() {
+ DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ Reset();
+
+ // Signal to the manager that we're closed and can be removed.
+ // This should be the last call in the function as it deletes "this".
+ manager_->ReleaseOutputStream(this);
+}
+
+void PulseAudioUnifiedStream::WriteData(size_t requested_bytes) {
+ CHECK_EQ(requested_bytes, static_cast<size_t>(params_.GetBytesPerBuffer()));
+
+ void* buffer = NULL;
+ int frames_filled = 0;
+ if (source_callback_) {
+ CHECK_GE(pa_stream_begin_write(
+ output_stream_, &buffer, &requested_bytes), 0);
+ uint32 hardware_delay = pulse::GetHardwareLatencyInBytes(
+ output_stream_, params_.sample_rate(),
+ params_.GetBytesPerFrame());
+ fifo_->Read(input_data_buffer_.get(), requested_bytes);
+ input_bus_->FromInterleaved(
+ input_data_buffer_.get(), params_.frames_per_buffer(), 2);
+
+ frames_filled = source_callback_->OnMoreIOData(
+ input_bus_.get(),
+ output_bus_.get(),
+ AudioBuffersState(0, hardware_delay));
+ }
+
+ // Zero the unfilled data so it plays back as silence.
+ if (frames_filled < output_bus_->frames()) {
+ output_bus_->ZeroFramesPartial(
+ frames_filled, output_bus_->frames() - frames_filled);
+ }
+
+ // Note: If this ever changes to output raw float the data must be clipped
+ // and sanitized since it may come from an untrusted source such as NaCl.
+ output_bus_->Scale(volume_);
+ output_bus_->ToInterleaved(
+ output_bus_->frames(), params_.bits_per_sample() / 8, buffer);
+
+ if (pa_stream_write(output_stream_, buffer, requested_bytes, NULL, 0LL,
+ PA_SEEK_RELATIVE) < 0) {
+ if (source_callback_) {
+ source_callback_->OnError(this);
+ }
+ }
+}
+
+void PulseAudioUnifiedStream::ReadData() {
+ do {
+ size_t length = 0;
+ const void* data = NULL;
+ pa_stream_peek(input_stream_, &data, &length);
+ if (!data || length == 0)
+ break;
+
+ fifo_->Append(reinterpret_cast<const uint8*>(data), length);
+
+ // Deliver the recording data to the renderer and drive the playout.
+ int packet_size = params_.GetBytesPerBuffer();
+ while (fifo_->forward_bytes() >= packet_size) {
+ WriteData(packet_size);
+ }
+
+    // Release the current fragment; the loop condition checks for more data.
+ pa_stream_drop(input_stream_);
+ } while (pa_stream_readable_size(input_stream_) > 0);
+
+ pa_threaded_mainloop_signal(pa_mainloop_, 0);
+}
+
+void PulseAudioUnifiedStream::Start(AudioSourceCallback* callback) {
+  DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+  CHECK(callback);
+  CHECK(input_stream_);
+  CHECK(output_stream_);
+  AutoPulseLock auto_lock(pa_mainloop_);
+
+  // Ensure the context and both streams are ready; fail if any is not.
+  if (pa_context_get_state(pa_context_) != PA_CONTEXT_READY ||
+      pa_stream_get_state(output_stream_) != PA_STREAM_READY ||
+      pa_stream_get_state(input_stream_) != PA_STREAM_READY) {
+    callback->OnError(this);
+    return;
+  }
+
+  source_callback_ = callback;
+
+  fifo_->Clear();
+
+  // Uncork (resume) the input stream.
+  pa_stream_set_read_callback(input_stream_, &ReadCallback, this);
+  pa_stream_readable_size(input_stream_);
+  pa_operation* operation = pa_stream_cork(input_stream_, 0, NULL, NULL);
+  WaitForOperationCompletion(pa_mainloop_, operation);
+
+  // Uncork (resume) the output stream.
+  // We use the recording stream to drive the playback, so we do not need to
+  // register the write callback using pa_stream_set_write_callback().
+  operation = pa_stream_cork(output_stream_, 0,
+                             &pulse::StreamSuccessCallback, pa_mainloop_);
+  WaitForOperationCompletion(pa_mainloop_, operation);
+}
+
+void PulseAudioUnifiedStream::Stop() {
+ DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+
+ // Cork (pause) the stream. Waiting for the main loop lock will ensure
+ // outstanding callbacks have completed.
+ AutoPulseLock auto_lock(pa_mainloop_);
+
+  // Set |source_callback_| to NULL so all WriteData() calls which may
+  // occur while waiting on the flush and cork exit immediately.
+ source_callback_ = NULL;
+
+ // Set the read callback to NULL before flushing the stream, otherwise it
+ // will cause deadlock on the operation.
+ pa_stream_set_read_callback(input_stream_, NULL, NULL);
+ pa_operation* operation = pa_stream_flush(
+ input_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+
+ operation = pa_stream_cork(input_stream_, 1, &pulse::StreamSuccessCallback,
+ pa_mainloop_);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+
+ // Flush the stream prior to cork, doing so after will cause hangs. Write
+ // callbacks are suspended while inside pa_threaded_mainloop_lock() so this
+ // is all thread safe.
+ operation = pa_stream_flush(
+ output_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+
+ operation = pa_stream_cork(output_stream_, 1, &pulse::StreamSuccessCallback,
+ pa_mainloop_);
+ WaitForOperationCompletion(pa_mainloop_, operation);
+}
+
+void PulseAudioUnifiedStream::SetVolume(double volume) {
+ DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+
+ volume_ = static_cast<float>(volume);
+}
+
+void PulseAudioUnifiedStream::GetVolume(double* volume) {
+ DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+
+ *volume = volume_;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/pulse/pulse_unified.h b/chromium/media/audio/pulse/pulse_unified.h
new file mode 100644
index 00000000000..a800d099a10
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse_unified.h
@@ -0,0 +1,90 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
+#define MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
+
+#include <pulse/pulseaudio.h>
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_fifo.h"
+
+namespace media {
+
+class AudioManagerBase;
+class SeekableBuffer;
+
+class PulseAudioUnifiedStream : public AudioOutputStream {
+ public:
+ PulseAudioUnifiedStream(const AudioParameters& params,
+ const std::string& input_device_id,
+ AudioManagerBase* manager);
+
+ virtual ~PulseAudioUnifiedStream();
+
+ // Implementation of PulseAudioUnifiedStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ private:
+  // Called by PulseAudio when |input_stream_| or |output_stream_| changes
+  // state. If an unexpected failure state change happens and
+  // |source_callback_| is set this method will forward the error via OnError().
+ static void StreamNotifyCallback(pa_stream* s, void* user_data);
+
+ // Called by PulseAudio recording stream when it has data.
+ static void ReadCallback(pa_stream* s, size_t length, void* user_data);
+
+ // Helpers for ReadCallback() to read and write data.
+ void WriteData(size_t requested_bytes);
+ void ReadData();
+
+ // Close() helper function to free internal structs.
+ void Reset();
+
+ // AudioParameters from the constructor.
+ const AudioParameters params_;
+
+ // Device unique ID of the input device.
+ const std::string input_device_id_;
+
+ // Audio manager that created us. Used to report that we've closed.
+ AudioManagerBase* manager_;
+
+ // PulseAudio API structs.
+ pa_context* pa_context_;
+ pa_threaded_mainloop* pa_mainloop_;
+ pa_stream* input_stream_;
+ pa_stream* output_stream_;
+
+ // Float representation of volume from 0.0 to 1.0.
+ float volume_;
+
+ // Callback to audio data source. Must only be modified while holding a lock
+ // on |pa_mainloop_| via pa_threaded_mainloop_lock().
+ AudioSourceCallback* source_callback_;
+
+ scoped_ptr<AudioBus> input_bus_;
+ scoped_ptr<AudioBus> output_bus_;
+
+ // Used for input to output buffering.
+ scoped_ptr<media::SeekableBuffer> fifo_;
+
+ // Temporary storage for recorded data. It gets a packet of data from
+  // |fifo_| and delivers the data to OnMoreIOData() callback.
+ scoped_ptr<uint8[]> input_data_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(PulseAudioUnifiedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
diff --git a/chromium/media/audio/pulse/pulse_util.cc b/chromium/media/audio/pulse/pulse_util.cc
new file mode 100644
index 00000000000..96831cfabe3
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse_util.cc
@@ -0,0 +1,315 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/pulse/pulse_util.h"
+
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+namespace pulse {
+
+namespace {
+
+pa_channel_position ChromiumToPAChannelPosition(Channels channel) {
+ switch (channel) {
+ // PulseAudio does not differentiate between left/right and
+ // stereo-left/stereo-right, both translate to front-left/front-right.
+ case LEFT:
+ return PA_CHANNEL_POSITION_FRONT_LEFT;
+ case RIGHT:
+ return PA_CHANNEL_POSITION_FRONT_RIGHT;
+ case CENTER:
+ return PA_CHANNEL_POSITION_FRONT_CENTER;
+ case LFE:
+ return PA_CHANNEL_POSITION_LFE;
+ case BACK_LEFT:
+ return PA_CHANNEL_POSITION_REAR_LEFT;
+ case BACK_RIGHT:
+ return PA_CHANNEL_POSITION_REAR_RIGHT;
+ case LEFT_OF_CENTER:
+ return PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
+ case RIGHT_OF_CENTER:
+ return PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
+ case BACK_CENTER:
+ return PA_CHANNEL_POSITION_REAR_CENTER;
+ case SIDE_LEFT:
+ return PA_CHANNEL_POSITION_SIDE_LEFT;
+ case SIDE_RIGHT:
+ return PA_CHANNEL_POSITION_SIDE_RIGHT;
+ case CHANNELS_MAX:
+ return PA_CHANNEL_POSITION_INVALID;
+ default:
+ NOTREACHED() << "Invalid channel: " << channel;
+ return PA_CHANNEL_POSITION_INVALID;
+ }
+}
+
+} // namespace
+
+// static, pa_stream_success_cb_t
+void StreamSuccessCallback(pa_stream* s, int error, void* mainloop) {
+ pa_threaded_mainloop* pa_mainloop =
+ static_cast<pa_threaded_mainloop*>(mainloop);
+ pa_threaded_mainloop_signal(pa_mainloop, 0);
+}
+
+// |pa_context| state changed callback.
+void ContextStateCallback(pa_context* context, void* mainloop) {
+ pa_threaded_mainloop* pa_mainloop =
+ static_cast<pa_threaded_mainloop*>(mainloop);
+ pa_threaded_mainloop_signal(pa_mainloop, 0);
+}
+
+pa_sample_format_t BitsToPASampleFormat(int bits_per_sample) {
+ switch (bits_per_sample) {
+ case 8:
+ return PA_SAMPLE_U8;
+ case 16:
+ return PA_SAMPLE_S16LE;
+ case 24:
+ return PA_SAMPLE_S24LE;
+ case 32:
+ return PA_SAMPLE_S32LE;
+ default:
+ NOTREACHED() << "Invalid bits per sample: " << bits_per_sample;
+ return PA_SAMPLE_INVALID;
+ }
+}
+
+pa_channel_map ChannelLayoutToPAChannelMap(ChannelLayout channel_layout) {
+ pa_channel_map channel_map;
+ pa_channel_map_init(&channel_map);
+
+ channel_map.channels = ChannelLayoutToChannelCount(channel_layout);
+ for (Channels ch = LEFT; ch < CHANNELS_MAX;
+ ch = static_cast<Channels>(ch + 1)) {
+ int channel_index = ChannelOrder(channel_layout, ch);
+ if (channel_index < 0)
+ continue;
+
+ channel_map.map[channel_index] = ChromiumToPAChannelPosition(ch);
+ }
+
+ return channel_map;
+}
+
+void WaitForOperationCompletion(pa_threaded_mainloop* pa_mainloop,
+ pa_operation* operation) {
+ if (!operation) {
+ DLOG(WARNING) << "Operation is NULL";
+ return;
+ }
+
+ while (pa_operation_get_state(operation) == PA_OPERATION_RUNNING)
+ pa_threaded_mainloop_wait(pa_mainloop);
+
+ pa_operation_unref(operation);
+}
+
+int GetHardwareLatencyInBytes(pa_stream* stream,
+ int sample_rate,
+ int bytes_per_frame) {
+ DCHECK(stream);
+ int negative = 0;
+ pa_usec_t latency_micros = 0;
+ if (pa_stream_get_latency(stream, &latency_micros, &negative) != 0)
+ return 0;
+
+ if (negative)
+ return 0;
+
+ return latency_micros * sample_rate * bytes_per_frame /
+ base::Time::kMicrosecondsPerSecond;
+}
+
+// Helper macro for CreateInput/OutputStream() to avoid code spam and
+// string bloat.
+#define RETURN_ON_FAILURE(expression, message) do { \
+ if (!(expression)) { \
+ DLOG(ERROR) << message; \
+ return false; \
+ } \
+} while(0)
+
+bool CreateInputStream(pa_threaded_mainloop* mainloop,
+ pa_context* context,
+ pa_stream** stream,
+ const AudioParameters& params,
+ const std::string& device_id,
+ pa_stream_notify_cb_t stream_callback,
+ void* user_data) {
+ DCHECK(mainloop);
+ DCHECK(context);
+
+ // Set sample specifications.
+ pa_sample_spec sample_specifications;
+ sample_specifications.format = BitsToPASampleFormat(
+ params.bits_per_sample());
+ sample_specifications.rate = params.sample_rate();
+ sample_specifications.channels = params.channels();
+
+ // Get channel mapping and open recording stream.
+ pa_channel_map source_channel_map = ChannelLayoutToPAChannelMap(
+ params.channel_layout());
+ pa_channel_map* map = (source_channel_map.channels != 0) ?
+ &source_channel_map : NULL;
+
+ // Create a new recording stream.
+ *stream = pa_stream_new(context, "RecordStream", &sample_specifications, map);
+ RETURN_ON_FAILURE(*stream, "failed to create PA recording stream");
+
+ pa_stream_set_state_callback(*stream, stream_callback, user_data);
+
+ // Set server-side capture buffer metrics. Detailed documentation on what
+ // values should be chosen can be found at
+ // freedesktop.org/software/pulseaudio/doxygen/structpa__buffer__attr.html.
+ pa_buffer_attr buffer_attributes;
+ const unsigned int buffer_size = params.GetBytesPerBuffer();
+ buffer_attributes.maxlength = static_cast<uint32_t>(-1);
+ buffer_attributes.tlength = buffer_size;
+ buffer_attributes.minreq = buffer_size;
+ buffer_attributes.prebuf = static_cast<uint32_t>(-1);
+ buffer_attributes.fragsize = buffer_size;
+ int flags = PA_STREAM_AUTO_TIMING_UPDATE |
+ PA_STREAM_INTERPOLATE_TIMING |
+ PA_STREAM_ADJUST_LATENCY |
+ PA_STREAM_START_CORKED;
+ RETURN_ON_FAILURE(
+ pa_stream_connect_record(
+ *stream,
+ device_id == AudioManagerBase::kDefaultDeviceId ?
+ NULL : device_id.c_str(),
+ &buffer_attributes,
+ static_cast<pa_stream_flags_t>(flags)) == 0,
+ "pa_stream_connect_record FAILED ");
+
+ // Wait for the stream to be ready.
+ while (true) {
+ pa_stream_state_t stream_state = pa_stream_get_state(*stream);
+ RETURN_ON_FAILURE(
+ PA_STREAM_IS_GOOD(stream_state), "Invalid PulseAudio stream state");
+ if (stream_state == PA_STREAM_READY)
+ break;
+ pa_threaded_mainloop_wait(mainloop);
+ }
+
+ return true;
+}
+
+bool CreateOutputStream(pa_threaded_mainloop** mainloop,
+ pa_context** context,
+ pa_stream** stream,
+ const AudioParameters& params,
+ pa_stream_notify_cb_t stream_callback,
+ pa_stream_request_cb_t write_callback,
+ void* user_data) {
+ DCHECK(!*mainloop);
+ DCHECK(!*context);
+
+ *mainloop = pa_threaded_mainloop_new();
+ RETURN_ON_FAILURE(*mainloop, "Failed to create PulseAudio main loop.");
+
+ pa_mainloop_api* pa_mainloop_api = pa_threaded_mainloop_get_api(*mainloop);
+ *context = pa_context_new(pa_mainloop_api, "Chromium");
+ RETURN_ON_FAILURE(*context, "Failed to create PulseAudio context.");
+
+ // A state callback must be set before calling pa_threaded_mainloop_lock() or
+ // pa_threaded_mainloop_wait() calls may lead to dead lock.
+ pa_context_set_state_callback(*context, &ContextStateCallback, *mainloop);
+
+ // Lock the main loop while setting up the context. Failure to do so may lead
+ // to crashes as the PulseAudio thread tries to run before things are ready.
+ AutoPulseLock auto_lock(*mainloop);
+
+ RETURN_ON_FAILURE(pa_threaded_mainloop_start(*mainloop) == 0,
+ "Failed to start PulseAudio main loop.");
+ RETURN_ON_FAILURE(
+ pa_context_connect(*context, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL) == 0,
+ "Failed to connect PulseAudio context.");
+
+ // Wait until |pa_context_| is ready. pa_threaded_mainloop_wait() must be
+ // called after pa_context_get_state() in case the context is already ready,
+ // otherwise pa_threaded_mainloop_wait() will hang indefinitely.
+ while (true) {
+ pa_context_state_t context_state = pa_context_get_state(*context);
+ RETURN_ON_FAILURE(
+ PA_CONTEXT_IS_GOOD(context_state), "Invalid PulseAudio context state.");
+ if (context_state == PA_CONTEXT_READY)
+ break;
+ pa_threaded_mainloop_wait(*mainloop);
+ }
+
+ // Set sample specifications.
+ pa_sample_spec sample_specifications;
+ sample_specifications.format = BitsToPASampleFormat(
+ params.bits_per_sample());
+ sample_specifications.rate = params.sample_rate();
+ sample_specifications.channels = params.channels();
+
+ // Get channel mapping and open playback stream.
+ pa_channel_map* map = NULL;
+ pa_channel_map source_channel_map = ChannelLayoutToPAChannelMap(
+ params.channel_layout());
+ if (source_channel_map.channels != 0) {
+ // The source data uses a supported channel map so we will use it rather
+ // than the default channel map (NULL).
+ map = &source_channel_map;
+ }
+ *stream = pa_stream_new(*context, "Playback", &sample_specifications, map);
+ RETURN_ON_FAILURE(*stream, "failed to create PA playback stream");
+
+ pa_stream_set_state_callback(*stream, stream_callback, user_data);
+
+ // Even though we start the stream corked above, PulseAudio will issue one
+ // stream request after setup. write_callback() must fulfill the write.
+ pa_stream_set_write_callback(*stream, write_callback, user_data);
+
+ // Pulse is very finicky with the small buffer sizes used by Chrome. The
+ // settings below are mostly found through trial and error. Essentially we
+ // want Pulse to auto size its internal buffers, but call us back nearly every
+ // |minreq| bytes. |tlength| should be a multiple of |minreq|; too low and
+ // Pulse will issue callbacks way too fast, too high and we don't get
+ // callbacks frequently enough.
+ pa_buffer_attr pa_buffer_attributes;
+ pa_buffer_attributes.maxlength = static_cast<uint32_t>(-1);
+ pa_buffer_attributes.minreq = params.GetBytesPerBuffer();
+ pa_buffer_attributes.prebuf = static_cast<uint32_t>(-1);
+ pa_buffer_attributes.tlength = params.GetBytesPerBuffer() * 3;
+ pa_buffer_attributes.fragsize = static_cast<uint32_t>(-1);
+
+ // Connect playback stream. Like pa_buffer_attr, the pa_stream_flags have a
+ // huge impact on the performance of the stream and were chosen through trial
+ // and error.
+ RETURN_ON_FAILURE(
+ pa_stream_connect_playback(
+ *stream, NULL, &pa_buffer_attributes,
+ static_cast<pa_stream_flags_t>(
+ PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY |
+ PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_NOT_MONOTONIC |
+ PA_STREAM_START_CORKED),
+ NULL, NULL) == 0,
+ "pa_stream_connect_playback FAILED ");
+
+ // Wait for the stream to be ready.
+ while (true) {
+ pa_stream_state_t stream_state = pa_stream_get_state(*stream);
+ RETURN_ON_FAILURE(
+ PA_STREAM_IS_GOOD(stream_state), "Invalid PulseAudio stream state");
+ if (stream_state == PA_STREAM_READY)
+ break;
+ pa_threaded_mainloop_wait(*mainloop);
+ }
+
+ return true;
+}
+
+#undef RETURN_ON_FAILURE
+
+} // namespace pulse
+
+} // namespace media
diff --git a/chromium/media/audio/pulse/pulse_util.h b/chromium/media/audio/pulse/pulse_util.h
new file mode 100644
index 00000000000..da0cb0f42d7
--- /dev/null
+++ b/chromium/media/audio/pulse/pulse_util.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_PULSE_PULSE_UTIL_H_
+#define MEDIA_AUDIO_PULSE_PULSE_UTIL_H_
+
+#include <pulse/pulseaudio.h>
+
+#include "base/basictypes.h"
+#include "media/audio/audio_device_name.h"
+#include "media/base/channel_layout.h"
+
+namespace media {
+
+class AudioParameters;
+
+namespace pulse {
+
+// A helper class that acquires pa_threaded_mainloop_lock() while in scope.
+class AutoPulseLock {
+ public:
+ explicit AutoPulseLock(pa_threaded_mainloop* pa_mainloop)
+ : pa_mainloop_(pa_mainloop) {
+ pa_threaded_mainloop_lock(pa_mainloop_);
+ }
+
+ ~AutoPulseLock() {
+ pa_threaded_mainloop_unlock(pa_mainloop_);
+ }
+
+ private:
+ pa_threaded_mainloop* pa_mainloop_;
+ DISALLOW_COPY_AND_ASSIGN(AutoPulseLock);
+};
+
+// Triggers pa_threaded_mainloop_signal() to avoid deadlocks.
+void StreamSuccessCallback(pa_stream* s, int error, void* mainloop);
+void ContextStateCallback(pa_context* context, void* mainloop);
+
+pa_sample_format_t BitsToPASampleFormat(int bits_per_sample);
+
+pa_channel_map ChannelLayoutToPAChannelMap(ChannelLayout channel_layout);
+
+void WaitForOperationCompletion(pa_threaded_mainloop* mainloop,
+ pa_operation* operation);
+
+int GetHardwareLatencyInBytes(pa_stream* stream,
+ int sample_rate,
+ int bytes_per_frame);
+
+// Create a recording stream for the threaded mainloop, return true if success,
+// otherwise false. |mainloop| and |context| have to be from a valid Pulse
+// threaded mainloop and the handle of the created stream will be returned by
+// |stream|.
+bool CreateInputStream(pa_threaded_mainloop* mainloop,
+ pa_context* context,
+ pa_stream** stream,
+ const AudioParameters& params,
+ const std::string& device_id,
+ pa_stream_notify_cb_t stream_callback,
+ void* user_data);
+
+// Create a playback stream for the threaded mainloop, return true if success,
+// otherwise false. This function will create a new Pulse threaded mainloop,
+// and the handles of the mainloop, context and stream will be returned by
+// |mainloop|, |context| and |stream|.
+bool CreateOutputStream(pa_threaded_mainloop** mainloop,
+ pa_context** context,
+ pa_stream** stream,
+ const AudioParameters& params,
+ pa_stream_notify_cb_t stream_callback,
+ pa_stream_request_cb_t write_callback,
+ void* user_data);
+
+} // namespace pulse
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_PULSE_PULSE_UTIL_H_
diff --git a/chromium/media/audio/sample_rates.cc b/chromium/media/audio/sample_rates.cc
new file mode 100644
index 00000000000..a082a938ab8
--- /dev/null
+++ b/chromium/media/audio/sample_rates.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/sample_rates.h"
+
+namespace media {
+
+AudioSampleRate AsAudioSampleRate(int sample_rate) {
+ switch (sample_rate) {
+ case 8000: return k8000Hz;
+ case 16000: return k16000Hz;
+ case 32000: return k32000Hz;
+ case 48000: return k48000Hz;
+ case 96000: return k96000Hz;
+ case 11025: return k11025Hz;
+ case 22050: return k22050Hz;
+ case 44100: return k44100Hz;
+ case 88200: return k88200Hz;
+ case 176400: return k176400Hz;
+ case 192000: return k192000Hz;
+ }
+ return kUnexpectedAudioSampleRate;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/sample_rates.h b/chromium/media/audio/sample_rates.h
new file mode 100644
index 00000000000..7c29e548b34
--- /dev/null
+++ b/chromium/media/audio/sample_rates.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SAMPLE_RATES_H_
+#define MEDIA_AUDIO_SAMPLE_RATES_H_
+
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Enumeration used for histogramming sample rates into distinct buckets.
+// Logged to UMA, so never reuse a value, always add new/greater ones!
+enum AudioSampleRate {
+ k8000Hz = 0,
+ k16000Hz = 1,
+ k32000Hz = 2,
+ k48000Hz = 3,
+ k96000Hz = 4,
+ k11025Hz = 5,
+ k22050Hz = 6,
+ k44100Hz = 7,
+ k88200Hz = 8,
+ k176400Hz = 9,
+ k192000Hz = 10,
+ kUnexpectedAudioSampleRate // Must always be last!
+};
+
+// Helper method to convert integral values to their respective enum values,
+// or kUnexpectedAudioSampleRate if no match exists.
+MEDIA_EXPORT AudioSampleRate AsAudioSampleRate(int sample_rate);
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_SAMPLE_RATES_H_
diff --git a/chromium/media/audio/scoped_loop_observer.cc b/chromium/media/audio/scoped_loop_observer.cc
new file mode 100644
index 00000000000..01187ec8f99
--- /dev/null
+++ b/chromium/media/audio/scoped_loop_observer.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/scoped_loop_observer.h"
+
+#include "base/bind.h"
+#include "base/synchronization/waitable_event.h"
+
+namespace media {
+
+ScopedLoopObserver::ScopedLoopObserver(
+ const scoped_refptr<base::MessageLoopProxy>& loop)
+ : loop_(loop) {
+ ObserveLoopDestruction(true, NULL);
+}
+
+ScopedLoopObserver::~ScopedLoopObserver() {
+ ObserveLoopDestruction(false, NULL);
+}
+
+void ScopedLoopObserver::ObserveLoopDestruction(bool enable,
+ base::WaitableEvent* done) {
+ // Note: |done| may be NULL.
+ if (loop_->BelongsToCurrentThread()) {
+ base::MessageLoop* loop = base::MessageLoop::current();
+ if (enable) {
+ loop->AddDestructionObserver(this);
+ } else {
+ loop->RemoveDestructionObserver(this);
+ }
+ } else {
+ base::WaitableEvent event(false, false);
+ if (loop_->PostTask(FROM_HERE,
+ base::Bind(&ScopedLoopObserver::ObserveLoopDestruction,
+ base::Unretained(this), enable, &event))) {
+ event.Wait();
+ } else {
+ // The message loop's thread has already terminated, so no need to wait.
+ }
+ }
+
+ if (done)
+ done->Signal();
+}
+
+} // namespace media.
diff --git a/chromium/media/audio/scoped_loop_observer.h b/chromium/media/audio/scoped_loop_observer.h
new file mode 100644
index 00000000000..7aaab542225
--- /dev/null
+++ b/chromium/media/audio/scoped_loop_observer.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
+#define MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
+
+namespace base {
+class WaitableEvent;
+}
+
+namespace media {
+
+// A common base class for AudioOutputDevice and AudioInputDevice that manages
+// a message loop pointer and monitors it for destruction. If the object goes
+// out of scope before the message loop, the object will automatically remove
+// itself from the message loop's list of destruction observers.
+// NOTE: The class that inherits from this class must implement the
+// WillDestroyCurrentMessageLoop virtual method from DestructionObserver.
+class ScopedLoopObserver
+ : public base::MessageLoop::DestructionObserver {
+ public:
+ explicit ScopedLoopObserver(
+ const scoped_refptr<base::MessageLoopProxy>& message_loop);
+
+ protected:
+ virtual ~ScopedLoopObserver();
+
+ // Accessor to the loop that's used by the derived class.
+ const scoped_refptr<base::MessageLoopProxy>& message_loop() { return loop_; }
+
+ private:
+ // Call to add or remove ourselves from the list of destruction observers for
+ // the message loop.
+ void ObserveLoopDestruction(bool enable, base::WaitableEvent* done);
+
+ // A pointer to the message loop's proxy. In case the loop gets destroyed
+ // before this object goes out of scope, PostTask etc will fail but not crash.
+ scoped_refptr<base::MessageLoopProxy> loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedLoopObserver);
+};
+
+} // namespace media.
+
+#endif // MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
diff --git a/chromium/media/audio/shared_memory_util.cc b/chromium/media/audio/shared_memory_util.cc
new file mode 100644
index 00000000000..b65df03e2e1
--- /dev/null
+++ b/chromium/media/audio/shared_memory_util.cc
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/shared_memory_util.h"
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+
+using base::subtle::Atomic32;
+
+static const uint32 kUnknownDataSize = static_cast<uint32>(-1);
+
+namespace media {
+
+// Returns the shared-memory region size needed to hold |packet_size| bytes of
+// audio data plus the trailing Atomic32 word that stores the actual data size.
+uint32 TotalSharedMemorySizeInBytes(uint32 packet_size) {
+  // Need to reserve extra 4 bytes for size of data.
+  return packet_size + sizeof(Atomic32);
+}
+
+// Inverse of TotalSharedMemorySizeInBytes(): strips the trailing size word
+// from the created region size to recover the usable packet size.
+uint32 PacketSizeInBytes(uint32 shared_memory_created_size) {
+  return shared_memory_created_size - sizeof(Atomic32);
+}
+
+// Reads the data-size word stored at the end of the shared-memory buffer.
+// The Acquire_Load pairs with the Release_Store in SetActualDataSizeInBytes()
+// so the reader observes the audio data written before the size was published.
+uint32 GetActualDataSizeInBytes(base::SharedMemory* shared_memory,
+                                uint32 packet_size) {
+  char* ptr = static_cast<char*>(shared_memory->memory()) + packet_size;
+  DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);  // Atomic32 alignment.
+
+  // Actual data size stored at the end of the buffer.
+  uint32 actual_data_size =
+      base::subtle::Acquire_Load(reinterpret_cast<volatile Atomic32*>(ptr));
+  // Clamp so a corrupt or hostile size word can never exceed the packet size.
+  // NOTE(review): std::min is used with no direct #include <algorithm>;
+  // presumably pulled in transitively -- confirm.
+  return std::min(actual_data_size, packet_size);
+}
+
+// Publishes |actual_data_size| in the trailing word of the buffer.  The
+// Release_Store pairs with the Acquire_Load in GetActualDataSizeInBytes() so
+// audio data written before this call is visible to the reader.
+void SetActualDataSizeInBytes(void* shared_memory_ptr,
+                              uint32 packet_size,
+                              uint32 actual_data_size) {
+  char* ptr = static_cast<char*>(shared_memory_ptr) + packet_size;
+  DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);  // Atomic32 alignment.
+
+  // Set actual data size at the end of the buffer.
+  base::subtle::Release_Store(reinterpret_cast<volatile Atomic32*>(ptr),
+                              actual_data_size);
+}
+
+// Convenience overload: writes the size word through |shared_memory|'s
+// mapped base address.
+void SetActualDataSizeInBytes(base::SharedMemory* shared_memory,
+                              uint32 packet_size,
+                              uint32 actual_data_size) {
+  SetActualDataSizeInBytes(shared_memory->memory(),
+                           packet_size, actual_data_size);
+}
+
+// Marks the buffer's size word with the kUnknownDataSize sentinel
+// (static_cast<uint32>(-1)), i.e. "no valid data published yet".
+void SetUnknownDataSize(base::SharedMemory* shared_memory,
+                        uint32 packet_size) {
+  SetActualDataSizeInBytes(shared_memory, packet_size, kUnknownDataSize);
+}
+
+// Returns true while the size word still holds the kUnknownDataSize sentinel,
+// i.e. no data size has been published since SetUnknownDataSize().
+bool IsUnknownDataSize(base::SharedMemory* shared_memory,
+                       uint32 packet_size) {
+  char* ptr = static_cast<char*>(shared_memory->memory()) + packet_size;
+  DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);  // Atomic32 alignment.
+
+  // Actual data size stored at the end of the buffer.
+  uint32 actual_data_size =
+      base::subtle::Acquire_Load(reinterpret_cast<volatile Atomic32*>(ptr));
+  return actual_data_size == kUnknownDataSize;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/shared_memory_util.h b/chromium/media/audio/shared_memory_util.h
new file mode 100644
index 00000000000..9186d5c9529
--- /dev/null
+++ b/chromium/media/audio/shared_memory_util.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SHARED_MEMORY_UTIL_H_
+#define MEDIA_AUDIO_SHARED_MEMORY_UTIL_H_
+
+#include "base/basictypes.h"
+#include "base/memory/shared_memory.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Value sent by the controller to the renderer in low-latency mode
+// indicating that the stream is paused.
+enum { kPauseMark = -1 };
+
+// Functions that handle data buffer passed between processes in the shared
+// memory. Called on both IPC sides. These are necessary because the shared
+// memory has a layout: the last word in the block is the data size in bytes.
+
+// Packet size -> total region size (adds the trailing size word).
+MEDIA_EXPORT uint32 TotalSharedMemorySizeInBytes(uint32 packet_size);
+// Total region size -> usable packet size (strips the trailing size word).
+MEDIA_EXPORT uint32 PacketSizeInBytes(uint32 shared_memory_created_size);
+// Atomically reads the published data size (clamped to |packet_size|).
+MEDIA_EXPORT uint32 GetActualDataSizeInBytes(base::SharedMemory* shared_memory,
+                                             uint32 packet_size);
+// Atomically publishes |actual_data_size| in the trailing word.
+MEDIA_EXPORT void SetActualDataSizeInBytes(base::SharedMemory* shared_memory,
+                                           uint32 packet_size,
+                                           uint32 actual_data_size);
+MEDIA_EXPORT void SetActualDataSizeInBytes(void* shared_memory_ptr,
+                                           uint32 packet_size,
+                                           uint32 actual_data_size);
+// Sets / tests the "no data published yet" sentinel.
+MEDIA_EXPORT void SetUnknownDataSize(base::SharedMemory* shared_memory,
+                                     uint32 packet_size);
+MEDIA_EXPORT bool IsUnknownDataSize(base::SharedMemory* shared_memory,
+                                    uint32 packet_size);
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_SHARED_MEMORY_UTIL_H_
diff --git a/chromium/media/audio/simple_sources.cc b/chromium/media/audio/simple_sources.cc
new file mode 100644
index 00000000000..7aa74d6e5f1
--- /dev/null
+++ b/chromium/media/audio/simple_sources.cc
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// MSVC++ requires this to be set before any other includes to get M_PI.
+#define _USE_MATH_DEFINES
+#include <cmath>
+
+#include "media/audio/simple_sources.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "media/audio/audio_util.h"
+
+namespace media {
+
+//////////////////////////////////////////////////////////////////////////////
+// SineWaveAudioSource implementation.
+
+// |f_| is stored as the normalized frequency: cycles per sample.
+SineWaveAudioSource::SineWaveAudioSource(int channels,
+                                         double freq, double sample_freq)
+    : channels_(channels),
+      f_(freq / sample_freq),
+      time_state_(0),
+      cap_(0),  // 0 means "uncapped"; see OnMoreData().
+      callbacks_(0),
+      errors_(0) {
+}
+
+// The implementation could be more efficient if a lookup table is constructed
+// but it is efficient enough for our simple needs.
+int SineWaveAudioSource::OnMoreData(AudioBus* audio_bus,
+                                    AudioBuffersState audio_buffers) {
+  base::AutoLock auto_lock(time_lock_);
+  callbacks_++;
+
+  // The table is filled with s(t) = kint16max*sin(Theta*t),
+  // where Theta = 2*PI*fs.
+  // NOTE(review): despite the comment above, the samples written below are
+  // plain floats in [-1, 1]; there is no kint16max scaling in this code.
+  // We store the discrete time value |t| in a member to ensure that the
+  // next pass starts at a correct state.
+  // When capped (cap_ > 0), serve at most cap_ total samples until Reset().
+  int max_frames = cap_ > 0 ?
+      std::min(audio_bus->frames(), cap_ - time_state_) : audio_bus->frames();
+  for (int i = 0; i < max_frames; ++i)
+    audio_bus->channel(0)[i] = sin(2.0 * M_PI * f_ * time_state_++);
+  // Every channel carries the identical waveform; copy channel 0 to the rest.
+  for (int i = 1; i < audio_bus->channels(); ++i) {
+    memcpy(audio_bus->channel(i), audio_bus->channel(0),
+           max_frames * sizeof(*audio_bus->channel(i)));
+  }
+  return max_frames;
+}
+
+// I/O variant: ignores |source| and renders the tone into |dest|.
+int SineWaveAudioSource::OnMoreIOData(AudioBus* source,
+                                      AudioBus* dest,
+                                      AudioBuffersState audio_buffers) {
+  return OnMoreData(dest, audio_buffers);
+}
+
+// Counts errors for later inspection by tests; see errors().
+void SineWaveAudioSource::OnError(AudioOutputStream* stream) {
+  errors_++;
+}
+
+// Limits the total number of samples OnMoreData() will serve; use Reset() to
+// rewind and serve more.
+void SineWaveAudioSource::CapSamples(int cap) {
+  base::AutoLock auto_lock(time_lock_);
+  DCHECK_GT(cap, 0);
+  cap_ = cap;
+}
+
+// Rewinds the waveform to t=0 so a capped source can serve samples again.
+void SineWaveAudioSource::Reset() {
+  base::AutoLock auto_lock(time_lock_);
+  time_state_ = 0;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/simple_sources.h b/chromium/media/audio/simple_sources.h
new file mode 100644
index 00000000000..449f875b5d6
--- /dev/null
+++ b/chromium/media/audio/simple_sources.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SIMPLE_SOURCES_H_
+#define MEDIA_AUDIO_SIMPLE_SOURCES_H_
+
+#include "base/synchronization/lock.h"
+#include "media/audio/audio_io.h"
+#include "media/base/seekable_buffer.h"
+
+namespace media {
+
+// An audio source that produces a pure sinusoidal tone.
+class MEDIA_EXPORT SineWaveAudioSource
+    : public AudioOutputStream::AudioSourceCallback {
+ public:
+  // |channels| is the number of audio channels, |freq| is the frequency in
+  // hertz and it has to be less than half of the sampling frequency
+  // |sample_freq| or else you will get aliasing.
+  SineWaveAudioSource(int channels, double freq, double sample_freq);
+  virtual ~SineWaveAudioSource() {}
+
+  // Return up to |cap| samples of data via OnMoreData(). Use Reset() to
+  // allow more data to be served.
+  void CapSamples(int cap);
+  void Reset();
+
+  // Implementation of AudioSourceCallback.
+  virtual int OnMoreData(AudioBus* audio_bus,
+                         AudioBuffersState audio_buffers) OVERRIDE;
+  virtual int OnMoreIOData(AudioBus* source,
+                           AudioBus* dest,
+                           AudioBuffersState audio_buffers) OVERRIDE;
+  virtual void OnError(AudioOutputStream* stream) OVERRIDE;
+
+  // The number of OnMoreData()+OnMoreIOData() and OnError() calls respectively.
+  int callbacks() { return callbacks_; }
+  int errors() { return errors_; }
+
+ protected:
+  int channels_;    // Channel count requested at construction.
+  double f_;        // Normalized frequency: freq / sample_freq.
+  int time_state_;  // Discrete time |t|; persists across OnMoreData() calls.
+  int cap_;         // Max total samples to serve; 0 means uncapped.
+  int callbacks_;   // Number of data callbacks observed.
+  int errors_;      // Number of OnError() calls observed.
+  base::Lock time_lock_;  // Guards time_state_ and cap_.
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_SIMPLE_SOURCES_H_
diff --git a/chromium/media/audio/simple_sources_unittest.cc b/chromium/media/audio/simple_sources_unittest.cc
new file mode 100644
index 00000000000..5ee86863371
--- /dev/null
+++ b/chromium/media/audio/simple_sources_unittest.cc
@@ -0,0 +1,78 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/simple_sources.h"
+#include "media/base/audio_bus.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Validate that the SineWaveAudioSource writes the expected values.
+TEST(SimpleSources, SineWaveAudioSource) {
+  static const uint32 samples = 1024;
+  static const uint32 bytes_per_sample = 2;
+  static const int freq = 200;
+
+  AudioParameters params(
+      AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+      AudioParameters::kTelephoneSampleRate, bytes_per_sample * 8, samples);
+
+  SineWaveAudioSource source(1, freq, params.sample_rate());
+  scoped_ptr<AudioBus> audio_bus = AudioBus::Create(params);
+  source.OnMoreData(audio_bus.get(), AudioBuffersState());
+  EXPECT_EQ(1, source.callbacks());
+  EXPECT_EQ(0, source.errors());
+
+  // Samples between zero crossings of a 200 Hz tone at the telephone rate.
+  uint32 half_period = AudioParameters::kTelephoneSampleRate / (freq * 2);
+
+  // Spot test positive incursion of sine wave.
+  EXPECT_NEAR(0, audio_bus->channel(0)[0],
+              std::numeric_limits<float>::epsilon());
+  EXPECT_FLOAT_EQ(0.15643446f, audio_bus->channel(0)[1]);
+  EXPECT_LT(audio_bus->channel(0)[1], audio_bus->channel(0)[2]);
+  EXPECT_LT(audio_bus->channel(0)[2], audio_bus->channel(0)[3]);
+  // Spot test negative incursion of sine wave.
+  EXPECT_NEAR(0, audio_bus->channel(0)[half_period],
+              std::numeric_limits<float>::epsilon());
+  EXPECT_FLOAT_EQ(-0.15643446f, audio_bus->channel(0)[half_period + 1]);
+  EXPECT_GT(audio_bus->channel(0)[half_period + 1],
+            audio_bus->channel(0)[half_period + 2]);
+  EXPECT_GT(audio_bus->channel(0)[half_period + 2],
+            audio_bus->channel(0)[half_period + 3]);
+}
+
+// Verify CapSamples() limits the served samples and Reset() rearms the cap.
+TEST(SimpleSources, SineWaveAudioCapped) {
+  SineWaveAudioSource source(1, 200, AudioParameters::kTelephoneSampleRate);
+
+  static const int kSampleCap = 100;
+  source.CapSamples(kSampleCap);
+
+  // Request twice the cap; only kSampleCap frames should be produced, then 0.
+  scoped_ptr<AudioBus> audio_bus = AudioBus::Create(1, 2 * kSampleCap);
+  EXPECT_EQ(source.OnMoreData(
+      audio_bus.get(), AudioBuffersState()), kSampleCap);
+  EXPECT_EQ(1, source.callbacks());
+  EXPECT_EQ(source.OnMoreData(audio_bus.get(), AudioBuffersState()), 0);
+  EXPECT_EQ(2, source.callbacks());
+  source.Reset();
+  EXPECT_EQ(source.OnMoreData(
+      audio_bus.get(), AudioBuffersState()), kSampleCap);
+  EXPECT_EQ(3, source.callbacks());
+  EXPECT_EQ(0, source.errors());
+}
+
+// OnError() should simply increment the error counter.
+TEST(SimpleSources, OnError) {
+  SineWaveAudioSource source(1, 200, AudioParameters::kTelephoneSampleRate);
+  source.OnError(NULL);
+  EXPECT_EQ(1, source.errors());
+  source.OnError(NULL);
+  EXPECT_EQ(2, source.errors());
+}
+
+} // namespace media
diff --git a/chromium/media/audio/test_audio_input_controller_factory.cc b/chromium/media/audio/test_audio_input_controller_factory.cc
new file mode 100644
index 00000000000..64bfb9f060d
--- /dev/null
+++ b/chromium/media/audio/test_audio_input_controller_factory.cc
@@ -0,0 +1,69 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/test_audio_input_controller_factory.h"
+#include "media/audio/audio_io.h"
+
+namespace media {
+
+// Forwards construction to the real AudioInputController and caches the
+// audio manager's message loop for use by Close().
+TestAudioInputController::TestAudioInputController(
+    TestAudioInputControllerFactory* factory,
+    AudioManager* audio_manager,
+    const AudioParameters& audio_parameters,
+    EventHandler* event_handler,
+    SyncWriter* sync_writer)
+    : AudioInputController(event_handler, sync_writer),
+      audio_parameters_(audio_parameters),
+      factory_(factory),
+      event_handler_(event_handler) {
+  message_loop_ = audio_manager->GetMessageLoop();
+}
+
+// Unregisters from the factory so a subsequent Create() call is allowed.
+TestAudioInputController::~TestAudioInputController() {
+  // Inform the factory so that it allows creating new instances in future.
+  factory_->OnTestAudioInputControllerDestroyed(this);
+}
+
+// Overridden to skip real capture; only notifies the test delegate (if any).
+void TestAudioInputController::Record() {
+  if (factory_->delegate_)
+    factory_->delegate_->TestAudioControllerOpened(this);
+}
+
+// Runs |closed_task| on the audio-manager loop instead of a real close, then
+// notifies the test delegate (if any).
+void TestAudioInputController::Close(const base::Closure& closed_task) {
+  message_loop_->PostTask(FROM_HERE, closed_task);
+  if (factory_->delegate_)
+    factory_->delegate_->TestAudioControllerClosed(this);
+}
+
+TestAudioInputControllerFactory::TestAudioInputControllerFactory()
+    : controller_(NULL),
+      delegate_(NULL) {
+}
+
+TestAudioInputControllerFactory::~TestAudioInputControllerFactory() {
+  // Any controller created by this factory must already have been destroyed.
+  DCHECK(!controller_);
+}
+
+// Creates a TestAudioInputController (with a NULL SyncWriter).  Only one live
+// instance is supported; the previous one must be destroyed before the next
+// Create().
+AudioInputController* TestAudioInputControllerFactory::Create(
+    AudioManager* audio_manager,
+    AudioInputController::EventHandler* event_handler,
+    AudioParameters params) {
+  DCHECK(!controller_);  // Only one test instance managed at a time.
+  controller_ = new TestAudioInputController(this, audio_manager, params,
+                                             event_handler, NULL);
+  return controller_;
+}
+
+// Installs |delegate| to receive controller open/close notifications.
+void TestAudioInputControllerFactory::SetDelegateForTests(
+    TestAudioInputControllerDelegate* delegate) {
+  delegate_ = delegate;
+}
+
+// Invoked from ~TestAudioInputController; clears the cached pointer so a new
+// controller may be created.
+void TestAudioInputControllerFactory::OnTestAudioInputControllerDestroyed(
+    TestAudioInputController* controller) {
+  DCHECK_EQ(controller_, controller);
+  controller_ = NULL;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/test_audio_input_controller_factory.h b/chromium/media/audio/test_audio_input_controller_factory.h
new file mode 100644
index 00000000000..0a179473c1c
--- /dev/null
+++ b/chromium/media/audio/test_audio_input_controller_factory.h
@@ -0,0 +1,121 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_TEST_AUDIO_INPUT_CONTROLLER_FACTORY_H_
+#define MEDIA_AUDIO_TEST_AUDIO_INPUT_CONTROLLER_FACTORY_H_
+
+#include "base/bind.h"
+#include "media/audio/audio_input_controller.h"
+
+namespace media {
+
+class TestAudioInputControllerFactory;
+
+// TestAudioInputController and TestAudioInputControllerFactory are used for
+// testing consumers of AudioInputController. TestAudioInputControllerFactory
+// is an AudioInputController::Factory that creates TestAudioInputControllers.
+//
+// TestAudioInputController::Record and Close are overridden to do nothing. It is
+// expected that you'll grab the EventHandler from the TestAudioInputController
+// and invoke the callback methods when appropriate. In this way it's easy to
+// mock an AudioInputController.
+//
+// Typical usage:
+// // Create and register factory.
+// TestAudioInputControllerFactory factory;
+// AudioInputController::set_factory_for_testing(&factory);
+//
+// // Do something that triggers creation of an AudioInputController.
+// TestAudioInputController* controller = factory.last_controller();
+// DCHECK(controller);
+//
+// // Notify event handler with whatever data you want.
+// controller->event_handler()->OnCreated(...);
+//
+// // Do something that triggers AudioInputController::Record to be called.
+// controller->event_handler()->OnData(...);
+// controller->event_handler()->OnError(...);
+//
+// // Make sure consumer of AudioInputController does the right thing.
+// ...
+// // Reset factory.
+// AudioInputController::set_factory_for_testing(NULL);
+
+class TestAudioInputController : public AudioInputController {
+ public:
+  // Test observer notified from Record() and Close().
+  class Delegate {
+   public:
+    virtual void TestAudioControllerOpened(
+        TestAudioInputController* controller) = 0;
+    virtual void TestAudioControllerClosed(
+        TestAudioInputController* controller) = 0;
+  };
+
+  TestAudioInputController(TestAudioInputControllerFactory* factory,
+                           AudioManager* audio_manager,
+                           const AudioParameters& audio_parameters,
+                           EventHandler* event_handler,
+                           SyncWriter* sync_writer);
+
+  // Returns the event handler installed on the AudioInputController.
+  EventHandler* event_handler() const { return event_handler_; }
+
+  // Notifies the TestAudioControllerOpened() event to the delegate (if any).
+  virtual void Record() OVERRIDE;
+
+  // Ensure that the closure is run on the audio-manager thread.
+  virtual void Close(const base::Closure& closed_task) OVERRIDE;
+
+ protected:
+  // Protected: destruction goes through the base class.  NOTE(review):
+  // deletion notifies the factory -- see the .cc.
+  virtual ~TestAudioInputController();
+
+ private:
+  AudioParameters audio_parameters_;
+
+  // These are not owned by us and expected to be valid for this object's
+  // lifetime.
+  TestAudioInputControllerFactory* factory_;
+  EventHandler* event_handler_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestAudioInputController);
+};
+
+typedef TestAudioInputController::Delegate TestAudioInputControllerDelegate;
+
+// Simple AudioInputController::Factory method that creates
+// TestAudioInputControllers.
+class TestAudioInputControllerFactory : public AudioInputController::Factory {
+ public:
+  TestAudioInputControllerFactory();
+  virtual ~TestAudioInputControllerFactory();
+
+  // AudioInputController::Factory methods.
+  // At most one controller may be live at a time (DCHECKed in the .cc).
+  virtual AudioInputController* Create(
+      AudioManager* audio_manager,
+      AudioInputController::EventHandler* event_handler,
+      AudioParameters params) OVERRIDE;
+
+  // Installs a delegate notified on controller Record()/Close().
+  void SetDelegateForTests(TestAudioInputControllerDelegate* delegate);
+
+  TestAudioInputController* controller() const { return controller_; }
+
+ private:
+  friend class TestAudioInputController;
+
+  // Invoked by a TestAudioInputController when it gets destroyed.
+  void OnTestAudioInputControllerDestroyed(
+      TestAudioInputController* controller);
+
+  // The caller of Create owns this object.
+  TestAudioInputController* controller_;
+
+  // The delegate for tests for receiving audio controller events.
+  TestAudioInputControllerDelegate* delegate_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestAudioInputControllerFactory);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_TEST_AUDIO_INPUT_CONTROLLER_FACTORY_H_
diff --git a/chromium/media/audio/virtual_audio_input_stream.cc b/chromium/media/audio/virtual_audio_input_stream.cc
new file mode 100644
index 00000000000..9c4e7a1f16f
--- /dev/null
+++ b/chromium/media/audio/virtual_audio_input_stream.cc
@@ -0,0 +1,188 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/virtual_audio_input_stream.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "media/audio/virtual_audio_output_stream.h"
+
+namespace media {
+
+// LoopbackAudioConverter works similar to AudioConverter and converts input
+// streams to different audio parameters. Then, the LoopbackAudioConverter can
+// be used as an input to another AudioConverter. This allows us to
+// use converted audio from AudioOutputStreams as input to an AudioConverter.
+// For example, this allows converting multiple streams into a common format and
+// using the converted audio as input to another AudioConverter (i.e. a mixer).
+class LoopbackAudioConverter : public AudioConverter::InputCallback {
+ public:
+  LoopbackAudioConverter(const AudioParameters& input_params,
+                         const AudioParameters& output_params)
+      : audio_converter_(input_params, output_params, false) {}
+
+  virtual ~LoopbackAudioConverter() {}
+
+  void AddInput(AudioConverter::InputCallback* input) {
+    audio_converter_.AddInput(input);
+  }
+
+  void RemoveInput(AudioConverter::InputCallback* input) {
+    audio_converter_.RemoveInput(input);
+  }
+
+ private:
+  // AudioConverter::InputCallback: fills |audio_bus| from the inner
+  // converter.  Returns 1.0 so this input is mixed at full volume.
+  virtual double ProvideInput(AudioBus* audio_bus,
+                              base::TimeDelta buffer_delay) OVERRIDE {
+    audio_converter_.Convert(audio_bus);
+    return 1.0;
+  }
+
+  AudioConverter audio_converter_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoopbackAudioConverter);
+};
+
+VirtualAudioInputStream::VirtualAudioInputStream(
+    const AudioParameters& params,
+    const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+    const AfterCloseCallback& after_close_cb)
+    : worker_loop_(worker_loop),
+      after_close_cb_(after_close_cb),
+      callback_(NULL),
+      buffer_(new uint8[params.GetBytesPerBuffer()]),
+      params_(params),
+      mixer_(params_, params_, false),  // Mixer input == output format.
+      num_attached_output_streams_(0),
+      fake_consumer_(worker_loop_, params_) {
+  DCHECK(params_.IsValid());
+  DCHECK(worker_loop_.get());
+
+  // VAIS can be constructed on any thread, but will DCHECK that all
+  // AudioInputStream methods are called from the same thread.
+  thread_checker_.DetachFromThread();
+}
+
+VirtualAudioInputStream::~VirtualAudioInputStream() {
+  // Close() must have run already: it is what clears |callback_|.
+  DCHECK(!callback_);
+
+  // Sanity-check: Contract for Add/RemoveOutputStream() requires that all
+  // output streams be removed before VirtualAudioInputStream is destroyed.
+  DCHECK_EQ(0, num_attached_output_streams_);
+
+  // Free the per-AudioParameters converters owned via |converters_|.
+  for (AudioConvertersMap::iterator it = converters_.begin();
+       it != converters_.end(); ++it) {
+    delete it->second;
+  }
+}
+
+// AudioInputStream: zeroes the transfer buffer; cannot fail.
+bool VirtualAudioInputStream::Open() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  memset(buffer_.get(), 0, params_.GetBytesPerBuffer());
+  return true;
+}
+
+// Begins periodic PumpAudio() callbacks on the worker loop.
+// base::Unretained(this) relies on Stop()/Close() halting the consumer
+// before this object is destroyed.
+void VirtualAudioInputStream::Start(AudioInputCallback* callback) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  callback_ = callback;
+  fake_consumer_.Start(base::Bind(
+      &VirtualAudioInputStream::PumpAudio, base::Unretained(this)));
+}
+
+// Halts PumpAudio() callbacks.  |callback_| is intentionally left set; it is
+// cleared (and OnClose() delivered) in Close().
+void VirtualAudioInputStream::Stop() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  fake_consumer_.Stop();
+}
+
+// Attaches |stream| as a mixer input, lazily creating (and registering with
+// the mixer) a converter for its AudioParameters on first use.
+void VirtualAudioInputStream::AddOutputStream(
+    VirtualAudioOutputStream* stream, const AudioParameters& output_params) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  base::AutoLock scoped_lock(converter_network_lock_);
+
+  AudioConvertersMap::iterator converter = converters_.find(output_params);
+  if (converter == converters_.end()) {
+    std::pair<AudioConvertersMap::iterator, bool> result = converters_.insert(
+        std::make_pair(output_params,
+                       new LoopbackAudioConverter(output_params, params_)));
+    converter = result.first;
+
+    // Add to main mixer if we just added a new AudioTransform.
+    mixer_.AddInput(converter->second);
+  }
+  converter->second->AddInput(stream);
+  ++num_attached_output_streams_;
+}
+
+// Detaches |stream|.  The per-params converter is kept in the mixer for
+// possible reuse; it is freed in the destructor.
+void VirtualAudioInputStream::RemoveOutputStream(
+    VirtualAudioOutputStream* stream, const AudioParameters& output_params) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  base::AutoLock scoped_lock(converter_network_lock_);
+
+  DCHECK(converters_.find(output_params) != converters_.end());
+  converters_[output_params]->RemoveInput(stream);
+
+  --num_attached_output_streams_;
+  DCHECK_LE(0, num_attached_output_streams_);
+}
+
+// Invoked periodically on the worker thread: mixes all attached output
+// streams into |audio_bus|, interleaves into |buffer_| and pushes it to
+// |callback_|.
+void VirtualAudioInputStream::PumpAudio(AudioBus* audio_bus) {
+  DCHECK(worker_loop_->BelongsToCurrentThread());
+  DCHECK(callback_);
+
+  {
+    // Hold the lock only for the mix; OnData() runs without it.
+    base::AutoLock scoped_lock(converter_network_lock_);
+    mixer_.Convert(audio_bus);
+  }
+  audio_bus->ToInterleaved(params_.frames_per_buffer(),
+                           params_.bits_per_sample() / 8,
+                           buffer_.get());
+  // OnData(stream, data, size, hardware_delay_bytes, volume).
+  callback_->OnData(this,
+                    buffer_.get(),
+                    params_.GetBytesPerBuffer(),
+                    params_.GetBytesPerBuffer(),
+                    1.0);
+}
+
+// Final teardown: stops pumping, delivers OnClose(), then runs the
+// after-close callback (which may delete |this|).
+void VirtualAudioInputStream::Close() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  Stop();  // Make sure callback_ is no longer being used.
+  if (callback_) {
+    callback_->OnClose(this);
+    callback_ = NULL;
+  }
+
+  // If a non-null AfterCloseCallback was provided to the constructor, invoke it
+  // here. The callback is moved to a stack-local first since |this| could be
+  // destroyed during Run().
+  if (!after_close_cb_.is_null()) {
+    const AfterCloseCallback cb = after_close_cb_;
+    after_close_cb_.Reset();
+    cb.Run(this);
+  }
+}
+
+// A loopback stream has no real volume or AGC controls; these are fixed stubs.
+double VirtualAudioInputStream::GetMaxVolume() {
+  return 1.0;
+}
+
+void VirtualAudioInputStream::SetVolume(double volume) {}
+
+double VirtualAudioInputStream::GetVolume() {
+  return 1.0;
+}
+
+void VirtualAudioInputStream::SetAutomaticGainControl(bool enabled) {}
+
+bool VirtualAudioInputStream::GetAutomaticGainControl() {
+  return false;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/virtual_audio_input_stream.h b/chromium/media/audio/virtual_audio_input_stream.h
new file mode 100644
index 00000000000..53a10738732
--- /dev/null
+++ b/chromium/media/audio/virtual_audio_input_stream.h
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_VIRTUAL_AUDIO_INPUT_STREAM_H_
+#define MEDIA_AUDIO_VIRTUAL_AUDIO_INPUT_STREAM_H_
+
+#include <map>
+#include <set>
+
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/fake_audio_consumer.h"
+#include "media/base/audio_converter.h"
+
+namespace base {
+class MessageLoopProxy;
+}
+
+namespace media {
+
+class LoopbackAudioConverter;
+class VirtualAudioOutputStream;
+
+// VirtualAudioInputStream converts and mixes audio from attached
+// VirtualAudioOutputStreams into a single stream. It will continuously render
+// audio until this VirtualAudioInputStream is stopped and closed.
+class MEDIA_EXPORT VirtualAudioInputStream : public AudioInputStream {
+ public:
+  // Callback invoked just after VirtualAudioInputStream is closed.
+  typedef base::Callback<void(VirtualAudioInputStream* vais)>
+      AfterCloseCallback;
+
+  // Construct a target for audio loopback which mixes multiple data streams
+  // into a single stream having the given |params|. |worker_loop| is the loop
+  // on which AudioInputCallback methods are called and may or may not be the
+  // single thread that invokes the AudioInputStream methods.
+  VirtualAudioInputStream(
+      const AudioParameters& params,
+      const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+      const AfterCloseCallback& after_close_cb);
+
+  virtual ~VirtualAudioInputStream();
+
+  // AudioInputStream:
+  virtual bool Open() OVERRIDE;
+  virtual void Start(AudioInputCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  virtual double GetMaxVolume() OVERRIDE;
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual double GetVolume() OVERRIDE;
+  virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
+  virtual bool GetAutomaticGainControl() OVERRIDE;
+
+  // Attaches a VirtualAudioOutputStream to be used as input. This
+  // VirtualAudioInputStream must outlive all attached streams, so any attached
+  // stream must be closed (which causes a detach) before
+  // VirtualAudioInputStream is destroyed.
+  virtual void AddOutputStream(VirtualAudioOutputStream* stream,
+                               const AudioParameters& output_params);
+
+  // Detaches a VirtualAudioOutputStream and removes it as input.
+  virtual void RemoveOutputStream(VirtualAudioOutputStream* stream,
+                                  const AudioParameters& output_params);
+
+ private:
+  friend class VirtualAudioInputStreamTest;
+
+  typedef std::map<AudioParameters, LoopbackAudioConverter*> AudioConvertersMap;
+
+  // Pulls audio data from all attached VirtualAudioOutputStreams, mixes and
+  // converts the streams into one, and pushes the result to |callback_|.
+  // Invoked on the worker thread.
+  void PumpAudio(AudioBus* audio_bus);
+
+  // Loop on which PumpAudio() runs (via |fake_consumer_|).
+  const scoped_refptr<base::MessageLoopProxy> worker_loop_;
+
+  // Invoked once from Close(); may delete |this| when run.
+  AfterCloseCallback after_close_cb_;
+
+  // Sink for captured data; set in Start(), cleared in Close(). Not owned.
+  AudioInputCallback* callback_;
+
+  // Non-const for testing.
+  scoped_ptr<uint8[]> buffer_;
+  AudioParameters params_;
+
+  // Guards concurrent access to the converter network: converters_, mixer_, and
+  // num_attached_output_streams_.
+  base::Lock converter_network_lock_;
+
+  // AudioConverters associated with the attached VirtualAudioOutputStreams,
+  // partitioned by common AudioParameters.
+  AudioConvertersMap converters_;
+
+  // AudioConverter that takes all the audio converters and mixes them into one
+  // final audio stream.
+  AudioConverter mixer_;
+
+  // Number of currently attached VirtualAudioOutputStreams.
+  int num_attached_output_streams_;
+
+  // Handles callback timing for consumption of audio data.
+  FakeAudioConsumer fake_consumer_;
+
+  base::ThreadChecker thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(VirtualAudioInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_VIRTUAL_AUDIO_INPUT_STREAM_H_
diff --git a/chromium/media/audio/virtual_audio_input_stream_unittest.cc b/chromium/media/audio/virtual_audio_input_stream_unittest.cc
new file mode 100644
index 00000000000..aab67cca571
--- /dev/null
+++ b/chromium/media/audio/virtual_audio_input_stream_unittest.cc
@@ -0,0 +1,358 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "base/rand_util.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/simple_sources.h"
+#include "media/audio/virtual_audio_input_stream.h"
+#include "media/audio/virtual_audio_output_stream.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::InvokeWithoutArgs;
+using ::testing::NotNull;
+
+namespace media {
+
+namespace {
+
+const AudioParameters kParams(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, 8000, 8, 10);
+
+// Test double for AudioInputStream::AudioInputCallback.  Signals an event on
+// every OnData() invocation so tests can block until audio has actually been
+// pushed to the callback.
+class MockInputCallback : public AudioInputStream::AudioInputCallback {
+ public:
+  MockInputCallback()
+      : data_pushed_(false, false) {
+    // Default action: any OnData() call (expected or not) signals the event.
+    ON_CALL(*this, OnData(_, _, _, _, _))
+        .WillByDefault(InvokeWithoutArgs(&data_pushed_,
+                                         &base::WaitableEvent::Signal));
+  }
+
+  virtual ~MockInputCallback() {}
+
+  MOCK_METHOD5(OnData, void(AudioInputStream* stream, const uint8* data,
+                            uint32 size, uint32 hardware_delay_bytes,
+                            double volume));
+  MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+  MOCK_METHOD1(OnError, void(AudioInputStream* stream));
+
+  // Blocks until at least three OnData() calls have been observed (the event
+  // is auto-reset, so each Wait() consumes one Signal()).
+  void WaitForDataPushes() {
+    for (int i = 0; i < 3; ++i) {
+      data_pushed_.Wait();
+    }
+  }
+
+ private:
+  // Auto-reset, initially unsignaled.
+  base::WaitableEvent data_pushed_;
+
+  DISALLOW_COPY_AND_ASSIGN(MockInputCallback);
+};
+
+// Sine-wave audio source that signals an event each time data is pulled from
+// it, allowing tests to block until the output side is being consumed.
+class TestAudioSource : public SineWaveAudioSource {
+ public:
+  TestAudioSource()
+      : SineWaveAudioSource(
+            kParams.channel_layout(), 200.0, kParams.sample_rate()),
+        data_pulled_(false, false) {}
+
+  virtual ~TestAudioSource() {}
+
+  // Delegates to SineWaveAudioSource, then signals |data_pulled_|.
+  virtual int OnMoreData(AudioBus* audio_bus,
+                         AudioBuffersState audio_buffers) OVERRIDE {
+    const int ret = SineWaveAudioSource::OnMoreData(audio_bus, audio_buffers);
+    data_pulled_.Signal();
+    return ret;
+  }
+
+  virtual int OnMoreIOData(AudioBus* source,
+                           AudioBus* dest,
+                           AudioBuffersState audio_buffers) OVERRIDE {
+    const int ret =
+        SineWaveAudioSource::OnMoreIOData(source, dest, audio_buffers);
+    data_pulled_.Signal();
+    return ret;
+  }
+
+  // Blocks until at least three data pulls have occurred.
+  void WaitForDataPulls() {
+    for (int i = 0; i < 3; ++i) {
+      data_pulled_.Wait();
+    }
+  }
+
+ private:
+  // Auto-reset, initially unsignaled.
+  base::WaitableEvent data_pulled_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestAudioSource);
+};
+
+} // namespace
+
+// Parameterized test fixture.  The bool parameter selects whether the
+// VirtualAudioInputStream's worker runs on a separate thread or directly on
+// the audio thread.  All stream operations are posted to a dedicated audio
+// thread via the RUN_ON_AUDIO_THREAD macro defined below; the main test
+// thread only waits on events.
+class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
+ public:
+  VirtualAudioInputStreamTest()
+      : audio_thread_(new base::Thread("AudioThread")),
+        worker_thread_(new base::Thread("AudioWorkerThread")),
+        stream_(NULL),
+        closed_stream_(false, false) {
+    audio_thread_->Start();
+    audio_message_loop_ = audio_thread_->message_loop_proxy();
+  }
+
+  virtual ~VirtualAudioInputStreamTest() {
+    // Drain any tasks still queued on the audio thread before checking that
+    // all output streams were stopped and closed by the test body.
+    SyncWithAudioThread();
+
+    DCHECK(output_streams_.empty());
+    DCHECK(stopped_output_streams_.empty());
+  }
+
+  // Creates and opens |stream_|.  The after-close callback deletes the stream
+  // when Close() runs.  Must be invoked on the audio thread.
+  void Create() {
+    const bool worker_is_separate_thread = GetParam();
+    stream_ = new VirtualAudioInputStream(
+        kParams, GetWorkerLoop(worker_is_separate_thread),
+        base::Bind(&base::DeletePointer<VirtualAudioInputStream>));
+    stream_->Open();
+  }
+
+  // Sets gmock expectations and starts capture.  Must run on the audio thread.
+  void Start() {
+    EXPECT_CALL(input_callback_, OnClose(_));
+    EXPECT_CALL(input_callback_, OnData(_, NotNull(), _, _, _))
+        .Times(AtLeast(1));
+
+    ASSERT_TRUE(!!stream_);
+    stream_->Start(&input_callback_);
+  }
+
+  // Creates, opens, and starts a VirtualAudioOutputStream that feeds |stream_|
+  // with |source_|.  The output stream deletes itself on Close().
+  void CreateAndStartOneOutputStream() {
+    ASSERT_TRUE(!!stream_);
+    AudioOutputStream* const output_stream = new VirtualAudioOutputStream(
+        kParams,
+        stream_,
+        base::Bind(&base::DeletePointer<VirtualAudioOutputStream>));
+    output_streams_.push_back(output_stream);
+
+    output_stream->Open();
+    output_stream->Start(&source_);
+  }
+
+  void Stop() {
+    ASSERT_TRUE(!!stream_);
+    stream_->Stop();
+  }
+
+  // Closes |stream_| (which self-deletes via its after-close callback) and
+  // signals |closed_stream_| so the main thread can stop waiting.
+  void Close() {
+    ASSERT_TRUE(!!stream_);
+    stream_->Close();
+    stream_ = NULL;
+    closed_stream_.Signal();
+  }
+
+  // Blocks until each attached output stream has been pulled from several
+  // times and data has been pushed to |input_callback_|.
+  void WaitForDataToFlow() {
+    // Wait until audio thread is idle before calling output_streams_.size().
+    SyncWithAudioThread();
+
+    const int count = output_streams_.size();
+    for (int i = 0; i < count; ++i) {
+      source_.WaitForDataPulls();
+    }
+
+    input_callback_.WaitForDataPushes();
+  }
+
+  void WaitUntilClosed() {
+    closed_stream_.Wait();
+  }
+
+  // Stops and closes the oldest output stream (it self-deletes on Close()).
+  void StopAndCloseOneOutputStream() {
+    ASSERT_TRUE(!output_streams_.empty());
+    AudioOutputStream* const output_stream = output_streams_.front();
+    ASSERT_TRUE(!!output_stream);
+    output_streams_.pop_front();
+
+    output_stream->Stop();
+    output_stream->Close();
+  }
+
+  // Stops (but does not close) the oldest output stream, parking it in
+  // |stopped_output_streams_| for a later restart.
+  void StopFirstOutputStream() {
+    ASSERT_TRUE(!output_streams_.empty());
+    AudioOutputStream* const output_stream = output_streams_.front();
+    ASSERT_TRUE(!!output_stream);
+    output_streams_.pop_front();
+    output_stream->Stop();
+    stopped_output_streams_.push_back(output_stream);
+  }
+
+  // Stops a random non-zero subset of the running output streams, always
+  // leaving at least one running.
+  void StopSomeOutputStreams() {
+    ASSERT_LE(2, static_cast<int>(output_streams_.size()));
+    for (int remaning = base::RandInt(1, output_streams_.size() - 1);
+         remaning > 0; --remaning) {
+      StopFirstOutputStream();
+    }
+  }
+
+  void RestartAllStoppedOutputStreams() {
+    typedef std::list<AudioOutputStream*>::const_iterator ConstIter;
+    for (ConstIter it = stopped_output_streams_.begin();
+         it != stopped_output_streams_.end(); ++it) {
+      (*it)->Start(&source_);
+      output_streams_.push_back(*it);
+    }
+    stopped_output_streams_.clear();
+  }
+
+  const scoped_refptr<base::MessageLoopProxy>& audio_message_loop() const {
+    return audio_message_loop_;
+  }
+
+  // Returns either the dedicated worker thread's loop or the audio thread's
+  // loop, depending on the test parameter.  Lazily starts the worker thread.
+  const scoped_refptr<base::MessageLoopProxy>& GetWorkerLoop(
+      bool worker_is_separate_thread) {
+    if (worker_is_separate_thread) {
+      if (!worker_thread_->IsRunning()) {
+        worker_thread_->Start();
+        worker_message_loop_ = worker_thread_->message_loop_proxy();
+      }
+      return worker_message_loop_;
+    } else {
+      return audio_message_loop_;
+    }
+  }
+
+ private:
+  // Posts a no-op signal task and waits for it, guaranteeing all previously
+  // posted audio-thread tasks have run.
+  void SyncWithAudioThread() {
+    base::WaitableEvent done(false, false);
+    audio_message_loop_->PostTask(
+        FROM_HERE,
+        base::Bind(&base::WaitableEvent::Signal, base::Unretained(&done)));
+    done.Wait();
+  }
+
+  scoped_ptr<base::Thread> audio_thread_;
+  scoped_refptr<base::MessageLoopProxy> audio_message_loop_;
+  scoped_ptr<base::Thread> worker_thread_;
+  scoped_refptr<base::MessageLoopProxy> worker_message_loop_;
+
+  // Owned by its after-close callback; NULL when not created or after Close().
+  VirtualAudioInputStream* stream_;
+  MockInputCallback input_callback_;
+  base::WaitableEvent closed_stream_;
+
+  // Running vs. stopped (but not yet closed) output streams.
+  std::list<AudioOutputStream*> output_streams_;
+  std::list<AudioOutputStream*> stopped_output_streams_;
+  TestAudioSource source_;
+
+  DISALLOW_COPY_AND_ASSIGN(VirtualAudioInputStreamTest);
+};
+
+// Posts the named zero-argument fixture method to the audio thread.  Returns
+// immediately; tests synchronize via the fixture's Wait*() helpers.
+#define RUN_ON_AUDIO_THREAD(method)  \
+  audio_message_loop()->PostTask(  \
+      FROM_HERE, base::Bind(&VirtualAudioInputStreamTest::method,  \
+                            base::Unretained(this)))
+
+// Minimal lifecycle: create/open then close, with no Start() in between.
+TEST_P(VirtualAudioInputStreamTest, CreateAndClose) {
+  RUN_ON_AUDIO_THREAD(Create);
+  RUN_ON_AUDIO_THREAD(Close);
+  WaitUntilClosed();
+}
+
+// With no output streams attached the input stream should still deliver
+// (silent) data to the callback.
+TEST_P(VirtualAudioInputStreamTest, NoOutputs) {
+  RUN_ON_AUDIO_THREAD(Create);
+  RUN_ON_AUDIO_THREAD(Start);
+  WaitForDataToFlow();
+  RUN_ON_AUDIO_THREAD(Stop);
+  RUN_ON_AUDIO_THREAD(Close);
+  WaitUntilClosed();
+}
+
+// One output stream attached; data must flow from source to input callback.
+TEST_P(VirtualAudioInputStreamTest, SingleOutput) {
+  RUN_ON_AUDIO_THREAD(Create);
+  RUN_ON_AUDIO_THREAD(Start);
+  RUN_ON_AUDIO_THREAD(CreateAndStartOneOutputStream);
+  WaitForDataToFlow();
+  RUN_ON_AUDIO_THREAD(StopAndCloseOneOutputStream);
+  RUN_ON_AUDIO_THREAD(Stop);
+  RUN_ON_AUDIO_THREAD(Close);
+  WaitUntilClosed();
+}
+
+// An output stream may be stopped and restarted without being closed; data
+// must flow again after the restart.
+TEST_P(VirtualAudioInputStreamTest, SingleOutputPausedAndRestarted) {
+  RUN_ON_AUDIO_THREAD(Create);
+  RUN_ON_AUDIO_THREAD(Start);
+  RUN_ON_AUDIO_THREAD(CreateAndStartOneOutputStream);
+  WaitForDataToFlow();
+  RUN_ON_AUDIO_THREAD(StopFirstOutputStream);
+  RUN_ON_AUDIO_THREAD(RestartAllStoppedOutputStreams);
+  WaitForDataToFlow();
+  RUN_ON_AUDIO_THREAD(StopAndCloseOneOutputStream);
+  RUN_ON_AUDIO_THREAD(Stop);
+  RUN_ON_AUDIO_THREAD(Close);
+  WaitUntilClosed();
+}
+
+// Several output streams attached, stopped, restarted, and closed in various
+// orders; also exercises closing an output after the input was stopped.
+TEST_P(VirtualAudioInputStreamTest, MultipleOutputs) {
+  RUN_ON_AUDIO_THREAD(Create);
+  RUN_ON_AUDIO_THREAD(Start);
+  RUN_ON_AUDIO_THREAD(CreateAndStartOneOutputStream);
+  WaitForDataToFlow();
+  RUN_ON_AUDIO_THREAD(CreateAndStartOneOutputStream);
+  RUN_ON_AUDIO_THREAD(CreateAndStartOneOutputStream);
+  WaitForDataToFlow();
+  RUN_ON_AUDIO_THREAD(StopFirstOutputStream);
+  RUN_ON_AUDIO_THREAD(StopFirstOutputStream);
+  WaitForDataToFlow();
+  RUN_ON_AUDIO_THREAD(StopFirstOutputStream);
+  RUN_ON_AUDIO_THREAD(RestartAllStoppedOutputStreams);
+  WaitForDataToFlow();
+  RUN_ON_AUDIO_THREAD(StopAndCloseOneOutputStream);
+  RUN_ON_AUDIO_THREAD(StopAndCloseOneOutputStream);
+  RUN_ON_AUDIO_THREAD(Stop);
+  RUN_ON_AUDIO_THREAD(StopAndCloseOneOutputStream);
+  RUN_ON_AUDIO_THREAD(Close);
+  WaitUntilClosed();
+}
+
+// A combination of all of the above tests with many output streams.
+// Randomly pauses/restarts subsets of streams across several iterations.
+TEST_P(VirtualAudioInputStreamTest, ComprehensiveTest) {
+  static const int kNumOutputs = 8;
+  static const int kHalfNumOutputs = kNumOutputs / 2;
+  static const int kPauseIterations = 5;
+
+  RUN_ON_AUDIO_THREAD(Create);
+  for (int i = 0; i < kHalfNumOutputs; ++i) {
+    RUN_ON_AUDIO_THREAD(CreateAndStartOneOutputStream);
+  }
+  RUN_ON_AUDIO_THREAD(Start);
+  WaitForDataToFlow();
+  for (int i = 0; i < kHalfNumOutputs; ++i) {
+    RUN_ON_AUDIO_THREAD(CreateAndStartOneOutputStream);
+  }
+  WaitForDataToFlow();
+  for (int i = 0; i < kPauseIterations; ++i) {
+    RUN_ON_AUDIO_THREAD(StopSomeOutputStreams);
+    WaitForDataToFlow();
+    RUN_ON_AUDIO_THREAD(RestartAllStoppedOutputStreams);
+    WaitForDataToFlow();
+  }
+  for (int i = 0; i < kHalfNumOutputs; ++i) {
+    RUN_ON_AUDIO_THREAD(StopAndCloseOneOutputStream);
+  }
+  RUN_ON_AUDIO_THREAD(Stop);
+  for (int i = 0; i < kHalfNumOutputs; ++i) {
+    RUN_ON_AUDIO_THREAD(StopAndCloseOneOutputStream);
+  }
+  RUN_ON_AUDIO_THREAD(Close);
+  WaitUntilClosed();
+}
+
+// Run every test twice: worker on the audio thread (false) and on its own
+// thread (true).
+INSTANTIATE_TEST_CASE_P(SingleVersusMultithreaded,
+                        VirtualAudioInputStreamTest,
+                        ::testing::Values(false, true));
+
+} // namespace media
diff --git a/chromium/media/audio/virtual_audio_output_stream.cc b/chromium/media/audio/virtual_audio_output_stream.cc
new file mode 100644
index 00000000000..43b83cf2be8
--- /dev/null
+++ b/chromium/media/audio/virtual_audio_output_stream.cc
@@ -0,0 +1,87 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/virtual_audio_output_stream.h"
+
+#include "base/logging.h"
+#include "media/audio/virtual_audio_input_stream.h"
+
+namespace media {
+
+// Constructs a loopback output stream that will feed |target| once started.
+// |target| is not owned and must outlive this instance.
+VirtualAudioOutputStream::VirtualAudioOutputStream(
+    const AudioParameters& params, VirtualAudioInputStream* target,
+    const AfterCloseCallback& after_close_cb)
+    : params_(params), target_input_stream_(target),
+      after_close_cb_(after_close_cb), callback_(NULL), volume_(1.0f) {
+  DCHECK(params_.IsValid());
+  DCHECK(target);
+
+  // VAOS can be constructed on any thread, but will DCHECK that all
+  // AudioOutputStream methods are called from the same thread.
+  thread_checker_.DetachFromThread();
+}
+
+// The stream must have been stopped (Stop()/Close() clear |callback_|) before
+// destruction.
+VirtualAudioOutputStream::~VirtualAudioOutputStream() {
+  DCHECK(!callback_);
+}
+
+// No underlying device to open; always succeeds.
+bool VirtualAudioOutputStream::Open() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  return true;
+}
+
+// Stores the source callback and attaches this stream as an input to the
+// target VirtualAudioInputStream's mixer.
+void VirtualAudioOutputStream::Start(AudioSourceCallback* callback) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK(!callback_);
+  callback_ = callback;
+  target_input_stream_->AddOutputStream(this, params_);
+}
+
+// Detaches from the target input stream.  Safe to call when not started
+// (no-op if |callback_| is already NULL).
+void VirtualAudioOutputStream::Stop() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (callback_) {
+    target_input_stream_->RemoveOutputStream(this, params_);
+    callback_ = NULL;
+  }
+}
+
+// Stops the stream (if needed) and runs the after-close callback, which
+// typically deletes |this|.
+void VirtualAudioOutputStream::Close() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  Stop();
+
+  // If a non-null AfterCloseCallback was provided to the constructor, invoke it
+  // here. The callback is moved to a stack-local first since |this| could be
+  // destroyed during Run().
+  if (!after_close_cb_.is_null()) {
+    const AfterCloseCallback cb = after_close_cb_;
+    after_close_cb_.Reset();
+    cb.Run(this);
+  }
+}
+
+// Stores the volume; it is applied as a scaling factor in ProvideInput().
+void VirtualAudioOutputStream::SetVolume(double volume) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  volume_ = volume;
+}
+
+void VirtualAudioOutputStream::GetVolume(double* volume) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  *volume = volume_;
+}
+
+// AudioConverter::InputCallback: pulls data from the source, zero-pads any
+// shortfall, and returns the per-stream volume as the mixer's scale factor
+// (0 if no frames were produced, effectively muting this input).
+double VirtualAudioOutputStream::ProvideInput(AudioBus* audio_bus,
+                                              base::TimeDelta buffer_delay) {
+  // Note: This method may be invoked on any one thread, depending on the
+  // platform.
+  DCHECK(callback_);
+
+  const int frames = callback_->OnMoreData(audio_bus, AudioBuffersState());
+  if (frames < audio_bus->frames())
+    audio_bus->ZeroFramesPartial(frames, audio_bus->frames() - frames);
+
+  return frames > 0 ? volume_ : 0;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/virtual_audio_output_stream.h b/chromium/media/audio/virtual_audio_output_stream.h
new file mode 100644
index 00000000000..0df6d5b2f75
--- /dev/null
+++ b/chromium/media/audio/virtual_audio_output_stream.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_VIRTUAL_AUDIO_OUTPUT_STREAM_H_
+#define MEDIA_AUDIO_VIRTUAL_AUDIO_OUTPUT_STREAM_H_
+
+#include "base/callback.h"
+#include "base/threading/thread_checker.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_converter.h"
+
+namespace media {
+
+class VirtualAudioInputStream;
+
+// VirtualAudioOutputStream attaches to a VirtualAudioInputStream when Start()
+// is called and is used as an audio source. VirtualAudioOutputStream also
+// implements an interface so it can be used as an input to AudioConverter so
+// that we can get audio frames that match the AudioParameters that
+// VirtualAudioInputStream expects.
+class MEDIA_EXPORT VirtualAudioOutputStream
+    : public AudioOutputStream,
+      public AudioConverter::InputCallback {
+ public:
+  // Callback invoked just after VirtualAudioOutputStream is closed.
+  typedef base::Callback<void(VirtualAudioOutputStream* vaos)>
+      AfterCloseCallback;
+
+  // Construct an audio loopback pathway to the given |target| (not owned).
+  // |target| must outlive this instance.
+  VirtualAudioOutputStream(const AudioParameters& params,
+                           VirtualAudioInputStream* target,
+                           const AfterCloseCallback& after_close_cb);
+
+  virtual ~VirtualAudioOutputStream();
+
+  // AudioOutputStream:
+  virtual bool Open() OVERRIDE;
+  virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual void GetVolume(double* volume) OVERRIDE;
+  virtual void Close() OVERRIDE;
+
+ private:
+  // AudioConverter::InputCallback:
+  // May be invoked on any thread (see implementation); returns the stream
+  // volume as the mix scale factor.
+  virtual double ProvideInput(AudioBus* audio_bus,
+                              base::TimeDelta buffer_delay) OVERRIDE;
+
+  const AudioParameters params_;
+  // Pointer to the VirtualAudioInputStream to attach to when Start() is called.
+  // This pointer should always be valid because VirtualAudioInputStream should
+  // outlive this class.
+  VirtualAudioInputStream* const target_input_stream_;
+
+  // Invoked (and reset) by Close(); may delete |this|.
+  AfterCloseCallback after_close_cb_;
+
+  // Source of audio while started; NULL otherwise.
+  AudioSourceCallback* callback_;
+  double volume_;
+
+  base::ThreadChecker thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(VirtualAudioOutputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_VIRTUAL_AUDIO_OUTPUT_STREAM_H_
diff --git a/chromium/media/audio/virtual_audio_output_stream_unittest.cc b/chromium/media/audio/virtual_audio_output_stream_unittest.cc
new file mode 100644
index 00000000000..1e3abd1c6bb
--- /dev/null
+++ b/chromium/media/audio/virtual_audio_output_stream_unittest.cc
@@ -0,0 +1,122 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/simple_sources.h"
+#include "media/audio/virtual_audio_input_stream.h"
+#include "media/audio/virtual_audio_output_stream.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+
+namespace media {
+
+namespace {
+const AudioParameters kParams(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO, 8000, 8, 128);
+}
+
+class MockVirtualAudioInputStream : public VirtualAudioInputStream {
+ public:
+ explicit MockVirtualAudioInputStream(
+ const scoped_refptr<base::MessageLoopProxy>& worker_loop)
+ : VirtualAudioInputStream(
+ kParams,
+ worker_loop,
+ base::Bind(&base::DeletePointer<VirtualAudioInputStream>)) {}
+ ~MockVirtualAudioInputStream() {}
+
+ MOCK_METHOD2(AddOutputStream, void(VirtualAudioOutputStream* stream,
+ const AudioParameters& output_params));
+ MOCK_METHOD2(RemoveOutputStream, void(VirtualAudioOutputStream* stream,
+ const AudioParameters& output_params));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockVirtualAudioInputStream);
+};
+
+class MockAudioDeviceListener : public AudioManager::AudioDeviceListener {
+ public:
+ MOCK_METHOD0(OnDeviceChange, void());
+};
+
+class VirtualAudioOutputStreamTest : public testing::Test {
+ public:
+ VirtualAudioOutputStreamTest()
+ : audio_thread_(new base::Thread("AudioThread")) {
+ audio_thread_->Start();
+ audio_message_loop_ = audio_thread_->message_loop_proxy();
+ }
+
+ const scoped_refptr<base::MessageLoopProxy>& audio_message_loop() const {
+ return audio_message_loop_;
+ }
+
+ void SyncWithAudioThread() {
+ base::WaitableEvent done(false, false);
+ audio_message_loop()->PostTask(
+ FROM_HERE, base::Bind(&base::WaitableEvent::Signal,
+ base::Unretained(&done)));
+ done.Wait();
+ }
+
+ private:
+ scoped_ptr<base::Thread> audio_thread_;
+ scoped_refptr<base::MessageLoopProxy> audio_message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(VirtualAudioOutputStreamTest);
+};
+
+TEST_F(VirtualAudioOutputStreamTest, StartStopStartStop) {
+ static const int kCycles = 3;
+
+ MockVirtualAudioInputStream* const input_stream =
+ new MockVirtualAudioInputStream(audio_message_loop());
+ audio_message_loop()->PostTask(
+ FROM_HERE, base::Bind(
+ base::IgnoreResult(&MockVirtualAudioInputStream::Open),
+ base::Unretained(input_stream)));
+
+ VirtualAudioOutputStream* const output_stream = new VirtualAudioOutputStream(
+ kParams,
+ input_stream,
+ base::Bind(&base::DeletePointer<VirtualAudioOutputStream>));
+
+ EXPECT_CALL(*input_stream, AddOutputStream(output_stream, _))
+ .Times(kCycles);
+ EXPECT_CALL(*input_stream, RemoveOutputStream(output_stream, _))
+ .Times(kCycles);
+
+ audio_message_loop()->PostTask(
+ FROM_HERE, base::Bind(base::IgnoreResult(&VirtualAudioOutputStream::Open),
+ base::Unretained(output_stream)));
+ SineWaveAudioSource source(CHANNEL_LAYOUT_STEREO, 200.0, 128);
+ for (int i = 0; i < kCycles; ++i) {
+ audio_message_loop()->PostTask(
+ FROM_HERE, base::Bind(&VirtualAudioOutputStream::Start,
+ base::Unretained(output_stream),
+ &source));
+ audio_message_loop()->PostTask(
+ FROM_HERE, base::Bind(&VirtualAudioOutputStream::Stop,
+ base::Unretained(output_stream)));
+ }
+ audio_message_loop()->PostTask(
+ FROM_HERE, base::Bind(&VirtualAudioOutputStream::Close,
+ base::Unretained(output_stream)));
+
+ audio_message_loop()->PostTask(
+ FROM_HERE, base::Bind(&MockVirtualAudioInputStream::Close,
+ base::Unretained(input_stream)));
+
+ SyncWithAudioThread();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_device_listener_win.cc b/chromium/media/audio/win/audio_device_listener_win.cc
new file mode 100644
index 00000000000..8734cf2b78f
--- /dev/null
+++ b/chromium/media/audio/win/audio_device_listener_win.cc
@@ -0,0 +1,159 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/win/audio_device_listener_win.h"
+
+#include <Audioclient.h>
+
+#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/system_monitor/system_monitor.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/windows_version.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/win/core_audio_util_win.h"
+
+using base::win::ScopedCoMem;
+
+namespace media {
+
+// Returns a human-readable name for an EDataFlow value, for logging.
+// Note: the non-render data-flow direction is eCapture; the original code
+// labeled it "eConsole", which is an ERole constant, not an EDataFlow.
+static std::string FlowToString(EDataFlow flow) {
+  switch (flow) {
+    case eRender: return "eRender";
+    case eCapture: return "eCapture";
+    case eAll: return "eAll";
+    default: return "undefined";
+  }
+}
+
+// Returns a human-readable name for an ERole value, for logging.
+static std::string RoleToString(ERole role) {
+  switch (role) {
+    case eConsole: return "eConsole";
+    case eMultimedia: return "eMultimedia";
+    case eCommunications: return "eCommunications";
+    default: return "undefined";
+  }
+}
+
+// Registers for endpoint notifications and snapshots the current default
+// render device id.  On any failure the listener is left inert:
+// |device_enumerator_| stays NULL so the destructor skips unregistration.
+AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
+    : listener_cb_(listener_cb) {
+  CHECK(CoreAudioUtil::IsSupported());
+
+  ScopedComPtr<IMMDeviceEnumerator> device_enumerator(
+      CoreAudioUtil::CreateDeviceEnumerator());
+  if (!device_enumerator)
+    return;
+
+  HRESULT hr = device_enumerator->RegisterEndpointNotificationCallback(this);
+  if (FAILED(hr)) {
+    LOG(ERROR)  << "RegisterEndpointNotificationCallback failed: "
+                << std::hex << hr;
+    return;
+  }
+
+  // Only keep the enumerator once registration has succeeded.
+  device_enumerator_ = device_enumerator;
+
+  ScopedComPtr<IMMDevice> device =
+      CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
+  if (!device) {
+    // Most probable reason for ending up here is that all audio devices are
+    // disabled or unplugged.
+    VLOG(1)  << "CoreAudioUtil::CreateDefaultDevice failed. No device?";
+    return;
+  }
+
+  AudioDeviceName device_name;
+  hr = CoreAudioUtil::GetDeviceName(device, &device_name);
+  if (FAILED(hr)) {
+    VLOG(1) << "Failed to retrieve the device id: " << std::hex << hr;
+    return;
+  }
+  default_render_device_id_ = device_name.unique_id;
+}
+
+// Unregisters the notification callback iff registration succeeded in the
+// constructor.  Must run on the construction thread.
+AudioDeviceListenerWin::~AudioDeviceListenerWin() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (device_enumerator_) {
+    HRESULT hr =
+        device_enumerator_->UnregisterEndpointNotificationCallback(this);
+    LOG_IF(ERROR, FAILED(hr)) << "UnregisterEndpointNotificationCallback() "
+                              << "failed: " << std::hex << hr;
+  }
+}
+
+// COM ref-counting is deliberately a no-op: lifetime is managed by the C++
+// owner, not by COM clients.
+STDMETHODIMP_(ULONG) AudioDeviceListenerWin::AddRef() {
+  return 1;
+}
+
+// See AddRef(): ref-counting is a no-op by design.
+STDMETHODIMP_(ULONG) AudioDeviceListenerWin::Release() {
+  return 1;
+}
+
+// Supports only IUnknown and IMMNotificationClient.  No AddRef() on success
+// because ref-counting is a no-op for this class (see AddRef/Release).
+STDMETHODIMP AudioDeviceListenerWin::QueryInterface(REFIID iid, void** object) {
+  if (iid == IID_IUnknown || iid == __uuidof(IMMNotificationClient)) {
+    *object = static_cast<IMMNotificationClient*>(this);
+    return S_OK;
+  }
+
+  *object = NULL;
+  return E_NOINTERFACE;
+}
+
+// Intentionally ignored; see TODO below.
+STDMETHODIMP AudioDeviceListenerWin::OnPropertyValueChanged(
+    LPCWSTR device_id, const PROPERTYKEY key) {
+  // TODO(dalecurtis): We need to handle changes for the current default device
+  // here.  It's tricky because this method may be called many (20+) times for
+  // a single change like sample rate.  http://crbug.com/153056
+  return S_OK;
+}
+
+STDMETHODIMP AudioDeviceListenerWin::OnDeviceAdded(LPCWSTR device_id) {
+  // We don't care when devices are added.
+  return S_OK;
+}
+
+STDMETHODIMP AudioDeviceListenerWin::OnDeviceRemoved(LPCWSTR device_id) {
+  // We don't care when devices are removed.
+  return S_OK;
+}
+
+// Forwards device active/not-present transitions to the SystemMonitor.
+// NOTE(review): this reports DEVTYPE_AUDIO_CAPTURE for *any* device's state
+// change, including render devices -- confirm that is intended.
+STDMETHODIMP AudioDeviceListenerWin::OnDeviceStateChanged(LPCWSTR device_id,
+                                                          DWORD new_state) {
+  // Ignore intermediate states (disabled/unplugged); only fully active or
+  // fully absent transitions are interesting.
+  if (new_state != DEVICE_STATE_ACTIVE && new_state != DEVICE_STATE_NOTPRESENT)
+    return S_OK;
+
+  base::SystemMonitor* monitor = base::SystemMonitor::Get();
+  if (monitor)
+    monitor->ProcessDevicesChanged(base::SystemMonitor::DEVTYPE_AUDIO_CAPTURE);
+
+  return S_OK;
+}
+
+// Invoked by the OS (on a multimedia thread) when a default device changes.
+// Runs |listener_cb_| when the default console render device id actually
+// changes.
+STDMETHODIMP AudioDeviceListenerWin::OnDefaultDeviceChanged(
+    EDataFlow flow, ERole role, LPCWSTR new_default_device_id) {
+  // Only listen for output (render) device changes for the console role.
+  // NOTE: the original check compared |flow| against the ERole constant
+  // eConsole and |role| against the EDataFlow constant eRender -- mismatched
+  // enum types that only worked because both constants equal 0.  The
+  // comparison below uses the matching enum types; runtime behavior is
+  // unchanged.
+  if (flow != eRender && role != eConsole)
+    return S_OK;
+
+  // If no device is now available, |new_default_device_id| will be NULL.
+  std::string new_device_id;
+  if (new_default_device_id)
+    new_device_id = WideToUTF8(new_default_device_id);
+
+  VLOG(1) << "OnDefaultDeviceChanged() "
+          << "new_default_device: "
+          << (new_default_device_id ?
+              CoreAudioUtil::GetFriendlyName(new_device_id) : "No device")
+          << ", flow: " << FlowToString(flow)
+          << ", role: " << RoleToString(role);
+
+  // Only fire a state change event if the device has actually changed.
+  // TODO(dalecurtis): This still seems to fire an extra event on my machine for
+  // an unplug event (probably others too); e.g., we get two transitions to a
+  // new default device id.
+  if (new_device_id.compare(default_render_device_id_) == 0)
+    return S_OK;
+
+  default_render_device_id_ = new_device_id;
+  listener_cb_.Run();
+
+  return S_OK;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_device_listener_win.h b/chromium/media/audio/win/audio_device_listener_win.h
new file mode 100644
index 00000000000..6a312519af9
--- /dev/null
+++ b/chromium/media/audio/win/audio_device_listener_win.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_WIN_AUDIO_DEVICE_LISTENER_WIN_H_
+#define MEDIA_AUDIO_WIN_AUDIO_DEVICE_LISTENER_WIN_H_
+
+#include <MMDeviceAPI.h>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/threading/thread_checker.h"
+#include "base/win/scoped_comptr.h"
+#include "media/base/media_export.h"
+
+using base::win::ScopedComPtr;
+
+namespace media {
+
+// IMMNotificationClient implementation for listening for default device changes
+// and forwarding to AudioManagerWin so it can notify downstream clients. Only
+// output (eRender) device changes are supported currently. Core Audio support
+// is required to construct this object. Must be constructed and destructed on
+// a single COM initialized thread.
+// TODO(dalecurtis, henrika): Support input device changes.
+class MEDIA_EXPORT AudioDeviceListenerWin : public IMMNotificationClient {
+ public:
+  // The listener callback will be called from a system level multimedia thread,
+  // thus the callee must be thread safe.  |listener| is a permanent callback
+  // and must outlive AudioDeviceListenerWin.
+  explicit AudioDeviceListenerWin(const base::Closure& listener_cb);
+  virtual ~AudioDeviceListenerWin();
+
+ private:
+  friend class AudioDeviceListenerWinTest;
+
+  // IMMNotificationClient implementation.
+  // AddRef/Release are deliberate no-ops: lifetime is owned by the creator,
+  // not by COM reference counting.
+  STDMETHOD_(ULONG, AddRef)();
+  STDMETHOD_(ULONG, Release)();
+  STDMETHOD(QueryInterface)(REFIID iid, void** object);
+  STDMETHOD(OnPropertyValueChanged)(LPCWSTR device_id, const PROPERTYKEY key);
+  STDMETHOD(OnDeviceAdded)(LPCWSTR device_id);
+  STDMETHOD(OnDeviceRemoved)(LPCWSTR device_id);
+  STDMETHOD(OnDeviceStateChanged)(LPCWSTR device_id, DWORD new_state);
+  STDMETHOD(OnDefaultDeviceChanged)(EDataFlow flow, ERole role,
+                                    LPCWSTR new_default_device_id);
+
+  base::Closure listener_cb_;
+  // NULL unless RegisterEndpointNotificationCallback succeeded.
+  ScopedComPtr<IMMDeviceEnumerator> device_enumerator_;
+  // Unique id of the current default render device; empty if none.
+  std::string default_render_device_id_;
+
+  // AudioDeviceListenerWin must be constructed and destructed on one thread.
+  base::ThreadChecker thread_checker_;
+
+  DISALLOW_COPY_AND_ASSIGN(AudioDeviceListenerWin);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_AUDIO_DEVICE_LISTENER_WIN_H_
diff --git a/chromium/media/audio/win/audio_device_listener_win_unittest.cc b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
new file mode 100644
index 00000000000..3076fff2513
--- /dev/null
+++ b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
@@ -0,0 +1,103 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/win/audio_device_listener_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::win::ScopedCOMInitializer;
+
+namespace media {
+
+static const char kNoDevice[] = "";
+static const char kFirstTestDevice[] = "test_device_0";
+static const char kSecondTestDevice[] = "test_device_1";
+
+// Fixture that drives AudioDeviceListenerWin by invoking its private
+// IMMNotificationClient methods directly (via friendship).
+class AudioDeviceListenerWinTest : public testing::Test {
+ public:
+  AudioDeviceListenerWinTest()
+      : com_init_(ScopedCOMInitializer::kMTA) {
+  }
+
+  virtual void SetUp() {
+    // Leaves |output_device_listener_| NULL on pre-Vista systems; each test
+    // re-checks IsSupported() before touching it.
+    if (!CoreAudioUtil::IsSupported())
+      return;
+
+    output_device_listener_.reset(new AudioDeviceListenerWin(base::Bind(
+        &AudioDeviceListenerWinTest::OnDeviceChange, base::Unretained(this))));
+  }
+
+  // Simulate a device change where no output devices are available.
+  bool SimulateNullDefaultOutputDeviceChange() {
+    // NOTE(review): eConsole is cast to EDataFlow and eRender to ERole --
+    // the enum constants are swapped (both are 0, so this works numerically);
+    // mirrors the same mix-up in the production guard.  Confirm intent.
+    return output_device_listener_->OnDefaultDeviceChanged(
+        static_cast<EDataFlow>(eConsole), static_cast<ERole>(eRender),
+        NULL) == S_OK;
+  }
+
+  bool SimulateDefaultOutputDeviceChange(const char* new_device_id) {
+    return output_device_listener_->OnDefaultDeviceChanged(
+        static_cast<EDataFlow>(eConsole), static_cast<ERole>(eRender),
+        ASCIIToWide(new_device_id).c_str()) == S_OK;
+  }
+
+  // Directly seeds the listener's cached default device id.
+  void SetOutputDeviceId(std::string new_device_id) {
+    output_device_listener_->default_render_device_id_ = new_device_id;
+  }
+
+  MOCK_METHOD0(OnDeviceChange, void());
+
+ private:
+  ScopedCOMInitializer com_init_;
+  scoped_ptr<AudioDeviceListenerWin> output_device_listener_;
+
+  DISALLOW_COPY_AND_ASSIGN(AudioDeviceListenerWinTest);
+};
+
+// Simulate a device change events and ensure we get the right callbacks.
+TEST_F(AudioDeviceListenerWinTest, OutputDeviceChange) {
+  if (!CoreAudioUtil::IsSupported())
+    return;
+
+  SetOutputDeviceId(kNoDevice);
+  EXPECT_CALL(*this, OnDeviceChange()).Times(1);
+  ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kFirstTestDevice));
+
+  testing::Mock::VerifyAndClear(this);
+  EXPECT_CALL(*this, OnDeviceChange()).Times(1);
+  ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kSecondTestDevice));
+
+  // The second device event should be ignored since the device id has not
+  // changed.
+  ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kSecondTestDevice));
+}
+
+// Ensure that null output device changes don't crash.  Simulates the situation
+// where we have no output devices.
+TEST_F(AudioDeviceListenerWinTest, NullOutputDeviceChange) {
+  if (!CoreAudioUtil::IsSupported())
+    return;
+
+  // No-device -> no-device: id is unchanged, so no callback is expected.
+  SetOutputDeviceId(kNoDevice);
+  EXPECT_CALL(*this, OnDeviceChange()).Times(0);
+  ASSERT_TRUE(SimulateNullDefaultOutputDeviceChange());
+
+  testing::Mock::VerifyAndClear(this);
+  EXPECT_CALL(*this, OnDeviceChange()).Times(1);
+  ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kFirstTestDevice));
+
+  // Device -> no-device transition must also fire the callback.
+  testing::Mock::VerifyAndClear(this);
+  EXPECT_CALL(*this, OnDeviceChange()).Times(1);
+  ASSERT_TRUE(SimulateNullDefaultOutputDeviceChange());
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
new file mode 100644
index 00000000000..e0819439109
--- /dev/null
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -0,0 +1,641 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/win/audio_low_latency_input_win.h"
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/utf_string_conversions.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/avrt_wrapper_win.h"
+
+using base::win::ScopedComPtr;
+using base::win::ScopedCOMInitializer;
+
+namespace media {
+
+WASAPIAudioInputStream::WASAPIAudioInputStream(
+ AudioManagerWin* manager, const AudioParameters& params,
+ const std::string& device_id)
+ : manager_(manager),
+ capture_thread_(NULL),
+ opened_(false),
+ started_(false),
+ endpoint_buffer_size_frames_(0),
+ device_id_(device_id),
+ sink_(NULL) {
+ DCHECK(manager_);
+
+ // Load the Avrt DLL if not already loaded. Required to support MMCSS.
+ bool avrt_init = avrt::Initialize();
+ DCHECK(avrt_init) << "Failed to load the Avrt.dll";
+
+ // Set up the desired capture format specified by the client.
+ format_.nSamplesPerSec = params.sample_rate();
+ format_.wFormatTag = WAVE_FORMAT_PCM;
+ format_.wBitsPerSample = params.bits_per_sample();
+ format_.nChannels = params.channels();
+ format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
+ format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
+ format_.cbSize = 0;
+
+ // Size in bytes of each audio frame.
+ frame_size_ = format_.nBlockAlign;
+ // Store size of audio packets which we expect to get from the audio
+ // endpoint device in each capture event.
+ packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign;
+ packet_size_bytes_ = params.GetBytesPerBuffer();
+ DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
+ DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
+
+ // All events are auto-reset events and non-signaled initially.
+
+ // Create the event which the audio engine will signal each time
+ // a buffer becomes ready to be processed by the client.
+ audio_samples_ready_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
+ DCHECK(audio_samples_ready_event_.IsValid());
+
+ // Create the event which will be set in Stop() when capturing shall stop.
+ stop_capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
+ DCHECK(stop_capture_event_.IsValid());
+
+ ms_to_frame_count_ = static_cast<double>(params.sample_rate()) / 1000.0;
+
+ LARGE_INTEGER performance_frequency;
+ if (QueryPerformanceFrequency(&performance_frequency)) {
+ perf_count_to_100ns_units_ =
+ (10000000.0 / static_cast<double>(performance_frequency.QuadPart));
+ } else {
+ LOG(ERROR) << "High-resolution performance counters are not supported.";
+ perf_count_to_100ns_units_ = 0.0;
+ }
+}
+
+WASAPIAudioInputStream::~WASAPIAudioInputStream() {}
+
+bool WASAPIAudioInputStream::Open() {
+ DCHECK(CalledOnValidThread());
+ // Verify that we are not already opened.
+ if (opened_)
+ return false;
+
+ // Obtain a reference to the IMMDevice interface of the capturing
+ // device with the specified unique identifier or role which was
+ // set at construction.
+ HRESULT hr = SetCaptureDevice();
+ if (FAILED(hr))
+ return false;
+
+ // Obtain an IAudioClient interface which enables us to create and initialize
+ // an audio stream between an audio application and the audio engine.
+ hr = ActivateCaptureDevice();
+ if (FAILED(hr))
+ return false;
+
+ // Retrieve the stream format which the audio engine uses for its internal
+ // processing/mixing of shared-mode streams. This function call is for
+ // diagnostic purposes only and only in debug mode.
+#ifndef NDEBUG
+ hr = GetAudioEngineStreamFormat();
+#endif
+
+ // Verify that the selected audio endpoint supports the specified format
+ // set during construction.
+ if (!DesiredFormatIsSupported()) {
+ return false;
+ }
+
+ // Initialize the audio stream between the client and the device using
+ // shared mode and a lowest possible glitch-free latency.
+ hr = InitializeAudioEngine();
+
+ opened_ = SUCCEEDED(hr);
+ return opened_;
+}
+
+void WASAPIAudioInputStream::Start(AudioInputCallback* callback) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(callback);
+ DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
+ if (!opened_)
+ return;
+
+ if (started_)
+ return;
+
+ sink_ = callback;
+
+ // Starts periodic AGC microphone measurements if the AGC has been enabled
+ // using SetAutomaticGainControl().
+ StartAgc();
+
+ // Create and start the thread that will drive the capturing by waiting for
+ // capture events.
+ capture_thread_ =
+ new base::DelegateSimpleThread(this, "wasapi_capture_thread");
+ capture_thread_->Start();
+
+ // Start streaming data between the endpoint buffer and the audio engine.
+ HRESULT hr = audio_client_->Start();
+ DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming.";
+
+ started_ = SUCCEEDED(hr);
+}
+
+void WASAPIAudioInputStream::Stop() {
+ DCHECK(CalledOnValidThread());
+ DVLOG(1) << "WASAPIAudioInputStream::Stop()";
+ if (!started_)
+ return;
+
+ // Stops periodic AGC microphone measurements.
+ StopAgc();
+
+ // Shut down the capture thread.
+ if (stop_capture_event_.IsValid()) {
+ SetEvent(stop_capture_event_.Get());
+ }
+
+ // Stop the input audio streaming.
+ HRESULT hr = audio_client_->Stop();
+ if (FAILED(hr)) {
+ LOG(ERROR) << "Failed to stop input streaming.";
+ }
+
+ // Wait until the thread completes and perform cleanup.
+ if (capture_thread_) {
+ SetEvent(stop_capture_event_.Get());
+ capture_thread_->Join();
+ capture_thread_ = NULL;
+ }
+
+ started_ = false;
+}
+
+void WASAPIAudioInputStream::Close() {
+ DVLOG(1) << "WASAPIAudioInputStream::Close()";
+  // It is valid to call Close() before calling Open() or Start().
+ // It is also valid to call Close() after Start() has been called.
+ Stop();
+ if (sink_) {
+ sink_->OnClose(this);
+ sink_ = NULL;
+ }
+
+ // Inform the audio manager that we have been closed. This will cause our
+ // destruction.
+ manager_->ReleaseInputStream(this);
+}
+
+double WASAPIAudioInputStream::GetMaxVolume() {
+  // Verify that Open() has been called successfully, to ensure that an audio
+ // session exists and that an ISimpleAudioVolume interface has been created.
+ DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
+ if (!opened_)
+ return 0.0;
+
+ // The effective volume value is always in the range 0.0 to 1.0, hence
+ // we can return a fixed value (=1.0) here.
+ return 1.0;
+}
+
+void WASAPIAudioInputStream::SetVolume(double volume) {
+ DVLOG(1) << "SetVolume(volume=" << volume << ")";
+ DCHECK(CalledOnValidThread());
+ DCHECK_GE(volume, 0.0);
+ DCHECK_LE(volume, 1.0);
+
+ DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
+ if (!opened_)
+ return;
+
+ // Set a new master volume level. Valid volume levels are in the range
+ // 0.0 to 1.0. Ignore volume-change events.
+ HRESULT hr = simple_audio_volume_->SetMasterVolume(static_cast<float>(volume),
+ NULL);
+ DLOG_IF(WARNING, FAILED(hr)) << "Failed to set new input master volume.";
+
+ // Update the AGC volume level based on the last setting above. Note that,
+ // the volume-level resolution is not infinite and it is therefore not
+ // possible to assume that the volume provided as input parameter can be
+ // used directly. Instead, a new query to the audio hardware is required.
+ // This method does nothing if AGC is disabled.
+ UpdateAgcVolume();
+}
+
+double WASAPIAudioInputStream::GetVolume() {
+ DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
+ if (!opened_)
+ return 0.0;
+
+ // Retrieve the current volume level. The value is in the range 0.0 to 1.0.
+ float level = 0.0f;
+ HRESULT hr = simple_audio_volume_->GetMasterVolume(&level);
+ DLOG_IF(WARNING, FAILED(hr)) << "Failed to get input master volume.";
+
+ return static_cast<double>(level);
+}
+
+// static
+int WASAPIAudioInputStream::HardwareSampleRate(
+ const std::string& device_id) {
+ base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
+ HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
+ if (FAILED(hr))
+ return 0;
+
+ return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
+}
+
+// static
+uint32 WASAPIAudioInputStream::HardwareChannelCount(
+ const std::string& device_id) {
+ base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
+ HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
+ if (FAILED(hr))
+ return 0;
+
+ return static_cast<uint32>(audio_engine_mix_format->nChannels);
+}
+
+// static
+HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
+ WAVEFORMATEX** device_format) {
+ // It is assumed that this static method is called from a COM thread, i.e.,
+ // CoInitializeEx() is not called here to avoid STA/MTA conflicts.
+ ScopedComPtr<IMMDeviceEnumerator> enumerator;
+ HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator), NULL,
+ CLSCTX_INPROC_SERVER);
+ if (FAILED(hr))
+ return hr;
+
+ ScopedComPtr<IMMDevice> endpoint_device;
+ if (device_id == AudioManagerBase::kDefaultDeviceId) {
+ // Retrieve the default capture audio endpoint.
+ hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
+ endpoint_device.Receive());
+ } else {
+ // Retrieve a capture endpoint device that is specified by an endpoint
+ // device-identification string.
+ hr = enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
+ endpoint_device.Receive());
+ }
+ if (FAILED(hr))
+ return hr;
+
+ ScopedComPtr<IAudioClient> audio_client;
+ hr = endpoint_device->Activate(__uuidof(IAudioClient),
+ CLSCTX_INPROC_SERVER,
+ NULL,
+ audio_client.ReceiveVoid());
+ return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr;
+}
+
+void WASAPIAudioInputStream::Run() {
+ ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
+
+ // Increase the thread priority.
+ capture_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
+
+ // Enable MMCSS to ensure that this thread receives prioritized access to
+ // CPU resources.
+ DWORD task_index = 0;
+ HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
+ &task_index);
+ bool mmcss_is_ok =
+ (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
+ if (!mmcss_is_ok) {
+ // Failed to enable MMCSS on this thread. It is not fatal but can lead
+ // to reduced QoS at high load.
+ DWORD err = GetLastError();
+ LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
+ }
+
+ // Allocate a buffer with a size that enables us to take care of cases like:
+ // 1) The recorded buffer size is smaller, or does not match exactly with,
+ // the selected packet size used in each callback.
+ // 2) The selected buffer size is larger than the recorded buffer size in
+ // each event.
+ size_t buffer_frame_index = 0;
+ size_t capture_buffer_size = std::max(
+ 2 * endpoint_buffer_size_frames_ * frame_size_,
+ 2 * packet_size_frames_ * frame_size_);
+ scoped_ptr<uint8[]> capture_buffer(new uint8[capture_buffer_size]);
+
+ LARGE_INTEGER now_count;
+ bool recording = true;
+ bool error = false;
+ double volume = GetVolume();
+ HANDLE wait_array[2] = {stop_capture_event_, audio_samples_ready_event_};
+
+ while (recording && !error) {
+ HRESULT hr = S_FALSE;
+
+ // Wait for a close-down event or a new capture event.
+ DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE);
+ switch (wait_result) {
+ case WAIT_FAILED:
+ error = true;
+ break;
+ case WAIT_OBJECT_0 + 0:
+ // |stop_capture_event_| has been set.
+ recording = false;
+ break;
+ case WAIT_OBJECT_0 + 1:
+ {
+ // |audio_samples_ready_event_| has been set.
+ BYTE* data_ptr = NULL;
+ UINT32 num_frames_to_read = 0;
+ DWORD flags = 0;
+ UINT64 device_position = 0;
+ UINT64 first_audio_frame_timestamp = 0;
+
+ // Retrieve the amount of data in the capture endpoint buffer,
+ // replace it with silence if required, create callbacks for each
+ // packet and store non-delivered data for the next event.
+ hr = audio_capture_client_->GetBuffer(&data_ptr,
+ &num_frames_to_read,
+ &flags,
+ &device_position,
+ &first_audio_frame_timestamp);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to get data from the capture buffer";
+ continue;
+ }
+
+ if (num_frames_to_read != 0) {
+ size_t pos = buffer_frame_index * frame_size_;
+ size_t num_bytes = num_frames_to_read * frame_size_;
+ DCHECK_GE(capture_buffer_size, pos + num_bytes);
+
+ if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+ // Clear out the local buffer since silence is reported.
+ memset(&capture_buffer[pos], 0, num_bytes);
+ } else {
+ // Copy captured data from audio engine buffer to local buffer.
+ memcpy(&capture_buffer[pos], data_ptr, num_bytes);
+ }
+
+ buffer_frame_index += num_frames_to_read;
+ }
+
+ hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
+ DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer";
+
+ // Derive a delay estimate for the captured audio packet.
+ // The value contains two parts (A+B), where A is the delay of the
+ // first audio frame in the packet and B is the extra delay
+ // contained in any stored data. Unit is in audio frames.
+ QueryPerformanceCounter(&now_count);
+ double audio_delay_frames =
+ ((perf_count_to_100ns_units_ * now_count.QuadPart -
+ first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ +
+ buffer_frame_index - num_frames_to_read;
+
+ // Get a cached AGC volume level which is updated once every second
+ // on the audio manager thread. Note that, |volume| is also updated
+ // each time SetVolume() is called through IPC by the render-side AGC.
+ GetAgcVolume(&volume);
+
+ // Deliver captured data to the registered consumer using a packet
+ // size which was specified at construction.
+ uint32 delay_frames = static_cast<uint32>(audio_delay_frames + 0.5);
+ while (buffer_frame_index >= packet_size_frames_) {
+ uint8* audio_data =
+ reinterpret_cast<uint8*>(capture_buffer.get());
+
+ // Deliver data packet, delay estimation and volume level to
+ // the user.
+ sink_->OnData(this,
+ audio_data,
+ packet_size_bytes_,
+ delay_frames * frame_size_,
+ volume);
+
+ // Store parts of the recorded data which can't be delivered
+ // using the current packet size. The stored section will be used
+ // either in the next while-loop iteration or in the next
+ // capture event.
+ memmove(&capture_buffer[0],
+ &capture_buffer[packet_size_bytes_],
+ (buffer_frame_index - packet_size_frames_) * frame_size_);
+
+ buffer_frame_index -= packet_size_frames_;
+ delay_frames -= packet_size_frames_;
+ }
+ }
+ break;
+ default:
+ error = true;
+ break;
+ }
+ }
+
+ if (recording && error) {
+ // TODO(henrika): perhaps it worth improving the cleanup here by e.g.
+ // stopping the audio client, joining the thread etc.?
+ NOTREACHED() << "WASAPI capturing failed with error code "
+ << GetLastError();
+ }
+
+ // Disable MMCSS.
+ if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
+ PLOG(WARNING) << "Failed to disable MMCSS";
+ }
+}
+
+void WASAPIAudioInputStream::HandleError(HRESULT err) {
+ NOTREACHED() << "Error code: " << err;
+ if (sink_)
+ sink_->OnError(this);
+}
+
+HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
+ ScopedComPtr<IMMDeviceEnumerator> enumerator;
+ HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
+ NULL,
+ CLSCTX_INPROC_SERVER,
+ __uuidof(IMMDeviceEnumerator),
+ enumerator.ReceiveVoid());
+ if (SUCCEEDED(hr)) {
+ // Retrieve the IMMDevice by using the specified role or the specified
+ // unique endpoint device-identification string.
+ // TODO(henrika): possibly add support for the eCommunications as well.
+ if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
+ // Retrieve the default capture audio endpoint for the specified role.
+ // Note that, in Windows Vista, the MMDevice API supports device roles
+ // but the system-supplied user interface programs do not.
+ hr = enumerator->GetDefaultAudioEndpoint(eCapture,
+ eConsole,
+ endpoint_device_.Receive());
+ } else {
+ // Retrieve a capture endpoint device that is specified by an endpoint
+ // device-identification string.
+ hr = enumerator->GetDevice(UTF8ToUTF16(device_id_).c_str(),
+ endpoint_device_.Receive());
+ }
+
+ if (FAILED(hr))
+ return hr;
+
+ // Verify that the audio endpoint device is active, i.e., the audio
+ // adapter that connects to the endpoint device is present and enabled.
+ DWORD state = DEVICE_STATE_DISABLED;
+ hr = endpoint_device_->GetState(&state);
+ if (SUCCEEDED(hr)) {
+ if (!(state & DEVICE_STATE_ACTIVE)) {
+ DLOG(ERROR) << "Selected capture device is not active.";
+ hr = E_ACCESSDENIED;
+ }
+ }
+ }
+
+ return hr;
+}
+
+HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() {
+ // Creates and activates an IAudioClient COM object given the selected
+ // capture endpoint device.
+ HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
+ CLSCTX_INPROC_SERVER,
+ NULL,
+ audio_client_.ReceiveVoid());
+ return hr;
+}
+
+HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() {
+ HRESULT hr = S_OK;
+#ifndef NDEBUG
+ // The GetMixFormat() method retrieves the stream format that the
+ // audio engine uses for its internal processing of shared-mode streams.
+ // The method always uses a WAVEFORMATEXTENSIBLE structure, instead
+ // of a stand-alone WAVEFORMATEX structure, to specify the format.
+ // An WAVEFORMATEXTENSIBLE structure can specify both the mapping of
+ // channels to speakers and the number of bits of precision in each sample.
+ base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex;
+ hr = audio_client_->GetMixFormat(
+ reinterpret_cast<WAVEFORMATEX**>(&format_ex));
+
+ // See http://msdn.microsoft.com/en-us/windows/hardware/gg463006#EFH
+ // for details on the WAVE file format.
+ WAVEFORMATEX format = format_ex->Format;
+ DVLOG(2) << "WAVEFORMATEX:";
+ DVLOG(2) << " wFormatTags : 0x" << std::hex << format.wFormatTag;
+ DVLOG(2) << " nChannels : " << format.nChannels;
+ DVLOG(2) << " nSamplesPerSec : " << format.nSamplesPerSec;
+ DVLOG(2) << " nAvgBytesPerSec: " << format.nAvgBytesPerSec;
+ DVLOG(2) << " nBlockAlign : " << format.nBlockAlign;
+ DVLOG(2) << " wBitsPerSample : " << format.wBitsPerSample;
+ DVLOG(2) << " cbSize : " << format.cbSize;
+
+ DVLOG(2) << "WAVEFORMATEXTENSIBLE:";
+ DVLOG(2) << " wValidBitsPerSample: " <<
+ format_ex->Samples.wValidBitsPerSample;
+ DVLOG(2) << " dwChannelMask : 0x" << std::hex <<
+ format_ex->dwChannelMask;
+ if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM)
+ DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_PCM";
+ else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)
+ DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_IEEE_FLOAT";
+ else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_WAVEFORMATEX)
+ DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_WAVEFORMATEX";
+#endif
+ return hr;
+}
+
+bool WASAPIAudioInputStream::DesiredFormatIsSupported() {
+ // An application that uses WASAPI to manage shared-mode streams can rely
+ // on the audio engine to perform only limited format conversions. The audio
+ // engine can convert between a standard PCM sample size used by the
+ // application and the floating-point samples that the engine uses for its
+ // internal processing. However, the format for an application stream
+ // typically must have the same number of channels and the same sample
+ // rate as the stream format used by the device.
+ // Many audio devices support both PCM and non-PCM stream formats. However,
+ // the audio engine can mix only PCM streams.
+ base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
+ HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
+ &format_,
+ &closest_match);
+ DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
+ << "but a closest match exists.";
+ return (hr == S_OK);
+}
+
+HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
+ // Initialize the audio stream between the client and the device.
+ // We connect indirectly through the audio engine by using shared mode
+ // and WASAPI is initialized in an event driven mode.
+  // Note that, |hnsBufferDuration| is set to 0, which ensures that the
+ // buffer is never smaller than the minimum buffer size needed to ensure
+ // that glitches do not occur between the periodic processing passes.
+ // This setting should lead to lowest possible latency.
+ HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
+ AUDCLNT_STREAMFLAGS_NOPERSIST,
+ 0, // hnsBufferDuration
+ 0,
+ &format_,
+ NULL);
+ if (FAILED(hr))
+ return hr;
+
+ // Retrieve the length of the endpoint buffer shared between the client
+ // and the audio engine. The buffer length determines the maximum amount
+ // of capture data that the audio engine can read from the endpoint buffer
+ // during a single processing pass.
+ // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
+ hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
+ if (FAILED(hr))
+ return hr;
+ DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
+ << " [frames]";
+
+#ifndef NDEBUG
+ // The period between processing passes by the audio engine is fixed for a
+ // particular audio endpoint device and represents the smallest processing
+ // quantum for the audio engine. This period plus the stream latency between
+ // the buffer and endpoint device represents the minimum possible latency
+ // that an audio application can achieve.
+ // TODO(henrika): possibly remove this section when all parts are ready.
+ REFERENCE_TIME device_period_shared_mode = 0;
+ REFERENCE_TIME device_period_exclusive_mode = 0;
+ HRESULT hr_dbg = audio_client_->GetDevicePeriod(
+ &device_period_shared_mode, &device_period_exclusive_mode);
+ if (SUCCEEDED(hr_dbg)) {
+ DVLOG(1) << "device period: "
+ << static_cast<double>(device_period_shared_mode / 10000.0)
+ << " [ms]";
+ }
+
+ REFERENCE_TIME latency = 0;
+ hr_dbg = audio_client_->GetStreamLatency(&latency);
+ if (SUCCEEDED(hr_dbg)) {
+ DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
+ << " [ms]";
+ }
+#endif
+
+ // Set the event handle that the audio engine will signal each time
+ // a buffer becomes ready to be processed by the client.
+ hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get());
+ if (FAILED(hr))
+ return hr;
+
+ // Get access to the IAudioCaptureClient interface. This interface
+ // enables us to read input data from the capture endpoint buffer.
+ hr = audio_client_->GetService(__uuidof(IAudioCaptureClient),
+ audio_capture_client_.ReceiveVoid());
+ if (FAILED(hr))
+ return hr;
+
+ // Obtain a reference to the ISimpleAudioVolume interface which enables
+ // us to control the master volume level of an audio session.
+ hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume),
+ simple_audio_volume_.ReceiveVoid());
+ return hr;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.h b/chromium/media/audio/win/audio_low_latency_input_win.h
new file mode 100644
index 00000000000..4f9c7fb6c88
--- /dev/null
+++ b/chromium/media/audio/win/audio_low_latency_input_win.h
@@ -0,0 +1,209 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Implementation of AudioInputStream for Windows using Windows Core Audio
+// WASAPI for low latency capturing.
+//
+// Overview of operation:
+//
+// - An object of WASAPIAudioInputStream is created by the AudioManager
+// factory.
+// - Next some thread will call Open(), at that point the underlying
+// Core Audio APIs are utilized to create two WASAPI interfaces called
+// IAudioClient and IAudioCaptureClient.
+// - Then some thread will call Start(sink).
+// A thread called "wasapi_capture_thread" is started and this thread listens
+// on an event signal which is set periodically by the audio engine for
+// each recorded data packet. As a result, data samples will be provided
+// to the registered sink.
+// - At some point, a thread will call Stop(), which stops and joins the
+// capture thread and at the same time stops audio streaming.
+// - The same thread that called stop will call Close() where we cleanup
+// and notify the audio manager, which likely will destroy this object.
+//
+// Implementation notes:
+//
+// - The minimum supported client is Windows Vista.
+// - This implementation is single-threaded, hence:
+// o Construction and destruction must take place from the same thread.
+// o It is recommended to call all APIs from the same thread as well.
+// - It is recommended to first acquire the native sample rate of the default
+// input device and then use the same rate when creating this object. Use
+// WASAPIAudioInputStream::HardwareSampleRate() to retrieve the sample rate.
+// - Calling Close() also leads to self destruction.
+//
+// Core Audio API details:
+//
+// - Utilized MMDevice interfaces:
+// o IMMDeviceEnumerator
+// o IMMDevice
+// - Utilized WASAPI interfaces:
+// o IAudioClient
+// o IAudioCaptureClient
+// - The stream is initialized in shared mode and the processing of the
+// audio buffer is event driven.
+// - The Multimedia Class Scheduler service (MMCSS) is utilized to boost
+// the priority of the capture thread.
+// - Audio applications that use the MMDevice API and WASAPI typically use
+// the ISimpleAudioVolume interface to manage stream volume levels on a
+//   per-session basis. It is also possible to use the IAudioEndpointVolume
+// interface to control the master volume level of an audio endpoint device.
+// This implementation is using the ISimpleAudioVolume interface.
+// MSDN states that "In rare cases, a specialized audio application might
+// require the use of the IAudioEndpointVolume".
+//
+#ifndef MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_INPUT_WIN_H_
+#define MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_INPUT_WIN_H_
+
+#include <Audioclient.h>
+#include <MMDeviceAPI.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/scoped_comptr.h"
+#include "base/win/scoped_handle.h"
+#include "media/audio/agc_audio_stream.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioManagerWin;
+
+// AudioInputStream implementation using Windows Core Audio APIs.
+class MEDIA_EXPORT WASAPIAudioInputStream
+ : public AgcAudioStream<AudioInputStream>,
+ public base::DelegateSimpleThread::Delegate,
+ NON_EXPORTED_BASE(public base::NonThreadSafe) {
+ public:
+  // The ctor takes all the usual parameters, plus |manager| which is
+  // the audio manager who is creating this object.
+ WASAPIAudioInputStream(AudioManagerWin* manager,
+ const AudioParameters& params,
+ const std::string& device_id);
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioInputStream::Close().
+ virtual ~WASAPIAudioInputStream();
+
+ // Implementation of AudioInputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioInputCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual double GetMaxVolume() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual double GetVolume() OVERRIDE;
+
+ // Retrieves the sample rate used by the audio engine for its internal
+  // processing/mixing of shared-mode streams given a specified device.
+ static int HardwareSampleRate(const std::string& device_id);
+
+ // Retrieves the number of audio channels used by the audio engine for its
+ // internal processing/mixing of shared-mode streams given a specified device.
+ static uint32 HardwareChannelCount(const std::string& device_id);
+
+ bool started() const { return started_; }
+
+ private:
+ // DelegateSimpleThread::Delegate implementation.
+ virtual void Run() OVERRIDE;
+
+ // Issues the OnError() callback to the |sink_|.
+ void HandleError(HRESULT err);
+
+ // The Open() method is divided into these sub methods.
+ HRESULT SetCaptureDevice();
+ HRESULT ActivateCaptureDevice();
+ HRESULT GetAudioEngineStreamFormat();
+ bool DesiredFormatIsSupported();
+ HRESULT InitializeAudioEngine();
+
+ // Retrieves the stream format that the audio engine uses for its internal
+ // processing/mixing of shared-mode streams.
+ static HRESULT GetMixFormat(const std::string& device_id,
+ WAVEFORMATEX** device_format);
+
+ // Our creator, the audio manager needs to be notified when we close.
+ AudioManagerWin* manager_;
+
+ // Capturing is driven by this thread (which has no message loop).
+ // All OnData() callbacks will be called from this thread.
+ base::DelegateSimpleThread* capture_thread_;
+
+ // Contains the desired audio format which is set up at construction.
+ WAVEFORMATEX format_;
+
+ bool opened_;
+ bool started_;
+
+ // Size in bytes of each audio frame (4 bytes for 16-bit stereo PCM)
+ size_t frame_size_;
+
+ // Size in audio frames of each audio packet where an audio packet
+ // is defined as the block of data which the user received in each
+ // OnData() callback.
+ size_t packet_size_frames_;
+
+ // Size in bytes of each audio packet.
+ size_t packet_size_bytes_;
+
+ // Length of the audio endpoint buffer.
+ uint32 endpoint_buffer_size_frames_;
+
+ // Contains the unique name of the selected endpoint device.
+ // Note that AudioManagerBase::kDefaultDeviceId represents the default
+ // device role and is not a valid ID as such.
+ std::string device_id_;
+
+ // Conversion factor used in delay-estimation calculations.
+ // Converts a raw performance counter value to 100-nanosecond unit.
+ double perf_count_to_100ns_units_;
+
+ // Conversion factor used in delay-estimation calculations.
+ // Converts from milliseconds to audio frames.
+ double ms_to_frame_count_;
+
+ // Pointer to the object that will receive the recorded audio samples.
+ AudioInputCallback* sink_;
+
+ // Windows Multimedia Device (MMDevice) API interfaces.
+
+ // An IMMDevice interface which represents an audio endpoint device.
+ base::win::ScopedComPtr<IMMDevice> endpoint_device_;
+
+  // Windows Audio Session API (WASAPI) interfaces.
+
+ // An IAudioClient interface which enables a client to create and initialize
+ // an audio stream between an audio application and the audio engine.
+ base::win::ScopedComPtr<IAudioClient> audio_client_;
+
+ // The IAudioCaptureClient interface enables a client to read input data
+ // from a capture endpoint buffer.
+ base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_;
+
+ // The ISimpleAudioVolume interface enables a client to control the
+ // master volume level of an audio session.
+ // The volume-level is a value in the range 0.0 to 1.0.
+ // This interface does only work with shared-mode streams.
+ base::win::ScopedComPtr<ISimpleAudioVolume> simple_audio_volume_;
+
+ // The audio engine will signal this event each time a buffer has been
+ // recorded.
+ base::win::ScopedHandle audio_samples_ready_event_;
+
+ // This event will be signaled when capturing shall stop.
+ base::win::ScopedHandle stop_capture_event_;
+
+ DISALLOW_COPY_AND_ASSIGN(WASAPIAudioInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_INPUT_WIN_H_
diff --git a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
new file mode 100644
index 00000000000..40990ec13d4
--- /dev/null
+++ b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
@@ -0,0 +1,405 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <mmsystem.h>
+
+#include "base/basictypes.h"
+#include "base/environment.h"
+#include "base/file_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
+#include "base/test/test_timeouts.h"
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager_base.h"
+#include "media/audio/win/audio_low_latency_input_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+#include "media/base/seekable_buffer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::win::ScopedCOMInitializer;
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::AtLeast;
+using ::testing::Gt;
+using ::testing::NotNull;
+
+namespace media {
+
+// Gmock action: increments |*count| and, once it reaches |limit|, posts a
+// quit task to |loop| so that the test's message loop exits.
+ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
+  if (++*count >= limit) {
+    loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+  }
+}
+
+// Mocked AudioInputCallback used to verify the OnData/OnClose/OnError
+// callback sequence of an AudioInputStream under test.
+class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
+ public:
+  MOCK_METHOD5(OnData, void(AudioInputStream* stream,
+                            const uint8* src, uint32 size,
+                            uint32 hardware_delay_bytes, double volume));
+  MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+  MOCK_METHOD1(OnError, void(AudioInputStream* stream));
+};
+
+// This audio sink implementation should be used for manual tests only since
+// the recorded data is stored on a raw binary data file.
+// Captured packets are buffered in memory during recording and flushed to
+// disk only in the destructor, to keep blocking file I/O out of the
+// real-time audio callback.
+class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
+ public:
+  // Allocate space for ~10 seconds of data @ 48kHz in stereo:
+  // 2 bytes per sample, 2 channels, 10ms @ 48kHz, 10 seconds <=> 1920000 bytes.
+  static const size_t kMaxBufferSize = 2 * 2 * 480 * 100 * 10;
+
+  // Opens |file_name| (relative to the directory of the test executable)
+  // for binary writing.
+  explicit WriteToFileAudioSink(const char* file_name)
+      : buffer_(0, kMaxBufferSize),
+        bytes_to_write_(0) {
+    base::FilePath file_path;
+    EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_path));
+    file_path = file_path.AppendASCII(file_name);
+    binary_file_ = file_util::OpenFile(file_path, "wb");
+    DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
+    LOG(INFO) << ">> Output file: " << file_path.value()
+              << " has been created.";
+  }
+
+  // Drains all buffered audio to the file and closes it.
+  virtual ~WriteToFileAudioSink() {
+    size_t bytes_written = 0;
+    while (bytes_written < bytes_to_write_) {
+      const uint8* chunk;
+      int chunk_size;
+
+      // Stop writing if no more data is available.
+      if (!buffer_.GetCurrentChunk(&chunk, &chunk_size))
+        break;
+
+      // Write recorded data chunk to the file and prepare for next chunk.
+      fwrite(chunk, 1, chunk_size, binary_file_);
+      buffer_.Seek(chunk_size);
+      bytes_written += chunk_size;
+    }
+    file_util::CloseFile(binary_file_);
+  }
+
+  // AudioInputStream::AudioInputCallback implementation.
+  virtual void OnData(AudioInputStream* stream,
+                      const uint8* src,
+                      uint32 size,
+                      uint32 hardware_delay_bytes,
+                      double volume) {
+    // Store data in a temporary buffer to avoid making blocking
+    // fwrite() calls in the audio callback. The complete buffer will be
+    // written to file in the destructor.
+    // Note: if Append() fails (buffer full), the packet is silently dropped.
+    if (buffer_.Append(src, size)) {
+      bytes_to_write_ += size;
+    }
+  }
+
+  virtual void OnClose(AudioInputStream* stream) {}
+  virtual void OnError(AudioInputStream* stream) {}
+
+ private:
+  media::SeekableBuffer buffer_;  // In-memory FIFO of captured audio.
+  FILE* binary_file_;             // Owned; opened in ctor, closed in dtor.
+  size_t bytes_to_write_;         // Total number of buffered bytes.
+};
+
+// Convenience method which ensures that we are not running on the build
+// bots and that at least one valid input device can be found. We also
+// verify that we are not running on XP since the low-latency (WASAPI-
+// based) version requires Windows Vista or higher.
+// Returns true only when the Core Audio layer is supported AND an input
+// device is present.
+static bool CanRunAudioTests(AudioManager* audio_man) {
+  if (!CoreAudioUtil::IsSupported()) {
+    LOG(WARNING) << "This test requires Windows Vista or higher.";
+    return false;
+  }
+  // TODO(henrika): note that we use Wave today to query the number of
+  // existing input devices.
+  bool input = audio_man->HasAudioInputDevices();
+  LOG_IF(WARNING, !input) << "No input device detected.";
+  return input;
+}
+
+// Convenience method which creates a default AudioInputStream object but
+// also allows the user to modify the default settings.
+// Defaults: low-latency PCM format, stereo, 16 bits per sample, native
+// mixing sample rate, 10ms packets.
+class AudioInputStreamWrapper {
+ public:
+  explicit AudioInputStreamWrapper(AudioManager* audio_manager)
+      : com_init_(ScopedCOMInitializer::kMTA),
+        audio_man_(audio_manager),
+        format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
+        channel_layout_(CHANNEL_LAYOUT_STEREO),
+        bits_per_sample_(16) {
+    // Use native/mixing sample rate and 10ms frame size as default.
+    sample_rate_ = static_cast<int>(
+        WASAPIAudioInputStream::HardwareSampleRate(
+            AudioManagerBase::kDefaultDeviceId));
+    samples_per_packet_ = sample_rate_ / 100;
+  }
+
+  ~AudioInputStreamWrapper() {}
+
+  // Creates AudioInputStream object using default parameters.
+  // NOTE(review): per the AudioInputStream contract the caller is expected
+  // to call Close() on the returned stream — confirm with audio_io.h.
+  AudioInputStream* Create() {
+    return CreateInputStream();
+  }
+
+  // Creates AudioInputStream object using non-default parameters where the
+  // frame size is modified.
+  AudioInputStream* Create(int samples_per_packet) {
+    samples_per_packet_ = samples_per_packet;
+    return CreateInputStream();
+  }
+
+  // Accessors for the parameters the next Create() call will use.
+  AudioParameters::Format format() const { return format_; }
+  int channels() const {
+    return ChannelLayoutToChannelCount(channel_layout_);
+  }
+  int bits_per_sample() const { return bits_per_sample_; }
+  int sample_rate() const { return sample_rate_; }
+  int samples_per_packet() const { return samples_per_packet_; }
+
+ private:
+  // Builds a stream for the default device from the current parameter set.
+  AudioInputStream* CreateInputStream() {
+    AudioInputStream* ais = audio_man_->MakeAudioInputStream(
+        AudioParameters(format_, channel_layout_, sample_rate_,
+                        bits_per_sample_, samples_per_packet_),
+        AudioManagerBase::kDefaultDeviceId);
+    EXPECT_TRUE(ais);
+    return ais;
+  }
+
+  ScopedCOMInitializer com_init_;
+  AudioManager* audio_man_;  // Not owned.
+  AudioParameters::Format format_;
+  ChannelLayout channel_layout_;
+  int bits_per_sample_;
+  int sample_rate_;
+  int samples_per_packet_;
+};
+
+// Convenience method which creates a default AudioInputStream object.
+static AudioInputStream* CreateDefaultAudioInputStream(
+    AudioManager* audio_manager) {
+  AudioInputStreamWrapper aisw(audio_manager);
+  AudioInputStream* ais = aisw.Create();
+  return ais;
+}
+
+// Verify that we can retrieve the current hardware/mixing sample rate
+// for all available input devices.
+TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunAudioTests(audio_manager.get()))
+    return;
+
+  // COM is required for the WASAPI calls below.
+  ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
+
+  // Retrieve a list of all available input devices.
+  media::AudioDeviceNames device_names;
+  audio_manager->GetAudioInputDeviceNames(&device_names);
+
+  // Scan all available input devices and repeat the same test for all of them.
+  for (media::AudioDeviceNames::const_iterator it = device_names.begin();
+       it != device_names.end(); ++it) {
+    // Retrieve the hardware sample rate given a specified audio input device.
+    // TODO(tommi): ensure that we don't have to cast here.
+    int fs = static_cast<int>(WASAPIAudioInputStream::HardwareSampleRate(
+        it->unique_id));
+    EXPECT_GE(fs, 0);
+  }
+}
+
+// Test Create(), Close() calling sequence.
+// Closing without opening must be a valid (no-op) sequence.
+TEST(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunAudioTests(audio_manager.get()))
+    return;
+  AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
+  ais->Close();
+}
+
+// Test Open(), Close() calling sequence.
+TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunAudioTests(audio_manager.get()))
+    return;
+  AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
+  EXPECT_TRUE(ais->Open());
+  ais->Close();
+}
+
+// Test Open(), Start(), Close() calling sequence.
+// Close() without a preceding Stop() must still trigger OnClose().
+TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunAudioTests(audio_manager.get()))
+    return;
+  AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
+  EXPECT_TRUE(ais->Open());
+  MockAudioInputCallback sink;
+  ais->Start(&sink);
+  EXPECT_CALL(sink, OnClose(ais))
+      .Times(1);
+  ais->Close();
+}
+
+// Test Open(), Start(), Stop(), Close() calling sequence.
+TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunAudioTests(audio_manager.get()))
+    return;
+  AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
+  EXPECT_TRUE(ais->Open());
+  MockAudioInputCallback sink;
+  ais->Start(&sink);
+  ais->Stop();
+  EXPECT_CALL(sink, OnClose(ais))
+      .Times(1);
+  ais->Close();
+}
+
+// Test some additional calling sequences.
+// Verifies that repeated Open()/Start()/Stop() calls are handled gracefully.
+TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunAudioTests(audio_manager.get()))
+    return;
+  AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
+  // Downcast so we can query the WASAPI-specific started() state.
+  WASAPIAudioInputStream* wais = static_cast<WASAPIAudioInputStream*>(ais);
+
+  // Open(), Open() should fail the second time.
+  EXPECT_TRUE(ais->Open());
+  EXPECT_FALSE(ais->Open());
+
+  MockAudioInputCallback sink;
+
+  // Start(), Start() is a valid calling sequence (second call does nothing).
+  ais->Start(&sink);
+  EXPECT_TRUE(wais->started());
+  ais->Start(&sink);
+  EXPECT_TRUE(wais->started());
+
+  // Stop(), Stop() is a valid calling sequence (second call does nothing).
+  ais->Stop();
+  EXPECT_FALSE(wais->started());
+  ais->Stop();
+  EXPECT_FALSE(wais->started());
+
+  EXPECT_CALL(sink, OnClose(ais))
+      .Times(1);
+  ais->Close();
+}
+
+// Verifies that recorded packets have the expected size and delay estimate
+// for 10ms (default), 20ms and 5ms packet sizes.
+TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunAudioTests(audio_manager.get()))
+    return;
+
+  int count = 0;
+  base::MessageLoopForUI loop;
+
+  // 10 ms packet size.
+
+  // Create default WASAPI input stream which records in stereo using
+  // the shared mixing rate. The default buffer size is 10ms.
+  AudioInputStreamWrapper aisw(audio_manager.get());
+  AudioInputStream* ais = aisw.Create();
+  EXPECT_TRUE(ais->Open());
+
+  MockAudioInputCallback sink;
+
+  // Derive the expected size in bytes of each recorded packet.
+  uint32 bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+      (aisw.bits_per_sample() / 8);
+
+  // We use 10ms packets and will run the test until ten packets are received.
+  // All should contain valid packets of the same size and a valid delay
+  // estimate.
+  EXPECT_CALL(sink, OnData(
+      ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+      .Times(AtLeast(10))
+      .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
+  ais->Start(&sink);
+  loop.Run();
+  ais->Stop();
+
+  // Store current packet size (to be used in the subsequent tests).
+  int samples_per_packet_10ms = aisw.samples_per_packet();
+
+  EXPECT_CALL(sink, OnClose(ais))
+      .Times(1);
+  ais->Close();
+
+  // 20 ms packet size.
+
+  count = 0;
+  ais = aisw.Create(2 * samples_per_packet_10ms);
+  EXPECT_TRUE(ais->Open());
+  bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+      (aisw.bits_per_sample() / 8);
+
+  EXPECT_CALL(sink, OnData(
+      ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+      .Times(AtLeast(10))
+      .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
+  ais->Start(&sink);
+  loop.Run();
+  ais->Stop();
+
+  EXPECT_CALL(sink, OnClose(ais))
+      .Times(1);
+  ais->Close();
+
+  // 5 ms packet size.
+
+  count = 0;
+  ais = aisw.Create(samples_per_packet_10ms / 2);
+  EXPECT_TRUE(ais->Open());
+  bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+      (aisw.bits_per_sample() / 8);
+
+  EXPECT_CALL(sink, OnData(
+      ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+      .Times(AtLeast(10))
+      .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
+  ais->Start(&sink);
+  loop.Run();
+  ais->Stop();
+
+  EXPECT_CALL(sink, OnClose(ais))
+      .Times(1);
+  ais->Close();
+}
+
+// This test is intended for manual tests and should only be enabled
+// when it is required to store the captured data on a local file.
+// By default, GTest will print out YOU HAVE 1 DISABLED TEST.
+// To include disabled tests in test execution, just invoke the test program
+// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
+// environment variable to a value greater than 0.
+TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunAudioTests(audio_manager.get()))
+    return;
+
+  // Name of the output PCM file containing captured data. The output file
+  // will be stored in the directory containing 'media_unittests.exe'.
+  // Example of full name: \src\build\Debug\out_stereo_10sec.pcm.
+  const char* file_name = "out_stereo_10sec.pcm";
+
+  AudioInputStreamWrapper aisw(audio_manager.get());
+  AudioInputStream* ais = aisw.Create();
+  EXPECT_TRUE(ais->Open());
+
+  LOG(INFO) << ">> Sample rate: " << aisw.sample_rate() << " [Hz]";
+  WriteToFileAudioSink file_sink(file_name);
+  LOG(INFO) << ">> Speak into the default microphone while recording.";
+  ais->Start(&file_sink);
+  // Record for the duration of the action timeout; the sink flushes to
+  // disk when it goes out of scope at the end of the test.
+  base::PlatformThread::Sleep(TestTimeouts::action_timeout());
+  ais->Stop();
+  LOG(INFO) << ">> Recording has stopped.";
+  ais->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
new file mode 100644
index 00000000000..b2098b02094
--- /dev/null
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -0,0 +1,685 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/win/audio_low_latency_output_win.h"
+
+#include <Functiondiscoverykeys_devpkey.h>
+
+#include "base/command_line.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/scoped_propvariant.h"
+#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/avrt_wrapper_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+#include "media/base/limits.h"
+#include "media/base/media_switches.h"
+
+using base::win::ScopedComPtr;
+using base::win::ScopedCOMInitializer;
+using base::win::ScopedCoMem;
+
+namespace media {
+
+// Bit mask of speaker positions (dwChannelMask from WAVEFORMATEXTENSIBLE).
+typedef uint32 ChannelConfig;
+
+// Retrieves an integer mask which corresponds to the channel layout the
+// audio engine uses for its internal processing/mixing of shared-mode
+// streams. This mask indicates which channels are present in the multi-
+// channel stream. The least significant bit corresponds with the Front Left
+// speaker, the next least significant bit corresponds to the Front Right
+// speaker, and so on, continuing in the order defined in KsMedia.h.
+// See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
+// for more details.
+// Returns 0 if the mix format cannot be retrieved.
+static ChannelConfig GetChannelConfig() {
+  WAVEFORMATPCMEX format;
+  return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat(
+      eRender, eConsole, &format)) ?
+      static_cast<int>(format.dwChannelMask) : 0;
+}
+
+// Compare two sets of audio parameters and return true if they are equal.
+// Note that bits_per_sample() is excluded from this comparison since Core
+// Audio can deal with most bit depths. As an example, if the native/mixing
+// bit depth is 32 bits (default), opening at 16 or 24 still works fine and
+// the audio engine will do the required conversion for us. Channel count is
+// excluded since Open() will fail anyways and it doesn't impact buffering.
+static bool CompareAudioParametersNoBitDepthOrChannels(
+    const media::AudioParameters& a, const media::AudioParameters& b) {
+  return (a.format() == b.format() &&
+          a.sample_rate() == b.sample_rate() &&
+          a.frames_per_buffer() == b.frames_per_buffer());
+}
+
+// Converts Microsoft's channel configuration to ChannelLayout.
+// This mapping is not perfect but the best we can do given the current
+// ChannelLayout enumerator and the Windows-specific speaker configurations
+// defined in ksmedia.h. Don't assume that the channel ordering in
+// ChannelLayout is exactly the same as the Windows specific configuration.
+// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
+// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
+// speakers are different in these two definitions.
+static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
+  switch (config) {
+    case KSAUDIO_SPEAKER_DIRECTOUT:
+      return CHANNEL_LAYOUT_NONE;
+    case KSAUDIO_SPEAKER_MONO:
+      return CHANNEL_LAYOUT_MONO;
+    case KSAUDIO_SPEAKER_STEREO:
+      return CHANNEL_LAYOUT_STEREO;
+    case KSAUDIO_SPEAKER_QUAD:
+      return CHANNEL_LAYOUT_QUAD;
+    case KSAUDIO_SPEAKER_SURROUND:
+      return CHANNEL_LAYOUT_4_0;
+    case KSAUDIO_SPEAKER_5POINT1:
+      return CHANNEL_LAYOUT_5_1_BACK;
+    case KSAUDIO_SPEAKER_5POINT1_SURROUND:
+      return CHANNEL_LAYOUT_5_1;
+    case KSAUDIO_SPEAKER_7POINT1:
+      return CHANNEL_LAYOUT_7_1_WIDE;
+    case KSAUDIO_SPEAKER_7POINT1_SURROUND:
+      return CHANNEL_LAYOUT_7_1;
+    default:
+      VLOG(1) << "Unsupported channel layout: " << config;
+      return CHANNEL_LAYOUT_UNSUPPORTED;
+  }
+}
+
+// static
+// Returns EXCLUSIVE if the --enable-exclusive-audio command-line switch is
+// present; SHARED (the default) otherwise.
+AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
+  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
+    return AUDCLNT_SHAREMODE_EXCLUSIVE;
+  return AUDCLNT_SHAREMODE_SHARED;
+}
+
+// static
+// Channel count of the shared-mode mix format, or 0 on failure.
+int WASAPIAudioOutputStream::HardwareChannelCount() {
+  WAVEFORMATPCMEX format;
+  return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat(
+      eRender, eConsole, &format)) ?
+      static_cast<int>(format.Format.nChannels) : 0;
+}
+
+// static
+ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() {
+  return ChannelConfigToChannelLayout(GetChannelConfig());
+}
+
+// static
+// Sample rate of the shared-mode mix format, or 0 on failure.
+int WASAPIAudioOutputStream::HardwareSampleRate() {
+  WAVEFORMATPCMEX format;
+  return SUCCEEDED(CoreAudioUtil::GetDefaultSharedModeMixFormat(
+      eRender, eConsole, &format)) ?
+      static_cast<int>(format.Format.nSamplesPerSec) : 0;
+}
+
+// Constructs the stream and caches the requested render format. No WASAPI
+// COM interfaces are created here — that happens in Open(). The only
+// resources acquired are the avrt.dll load and two event handles.
+WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
+                                                 const AudioParameters& params,
+                                                 ERole device_role)
+    : creating_thread_id_(base::PlatformThread::CurrentId()),
+      manager_(manager),
+      opened_(false),
+      audio_parameters_are_valid_(false),
+      volume_(1.0),
+      endpoint_buffer_size_frames_(0),
+      device_role_(device_role),
+      share_mode_(GetShareMode()),
+      num_written_frames_(0),
+      source_(NULL),
+      audio_bus_(AudioBus::Create(params)) {
+  DCHECK(manager_);
+  VLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
+  VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
+      << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
+
+  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+    // Verify that the input audio parameters are identical (bit depth and
+    // channel count are excluded) to the preferred (native) audio parameters.
+    // Open() will fail if this is not the case.
+    AudioParameters preferred_params;
+    HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
+        eRender, device_role, &preferred_params);
+    audio_parameters_are_valid_ = SUCCEEDED(hr) &&
+        CompareAudioParametersNoBitDepthOrChannels(params, preferred_params);
+    LOG_IF(WARNING, !audio_parameters_are_valid_)
+        << "Input and preferred parameters are not identical.";
+  }
+
+  // Load the Avrt DLL if not already loaded. Required to support MMCSS.
+  bool avrt_init = avrt::Initialize();
+  DCHECK(avrt_init) << "Failed to load the avrt.dll";
+
+  // Set up the desired render format specified by the client. We use the
+  // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
+  // and high precision data can be supported.
+
+  // Begin with the WAVEFORMATEX structure that specifies the basic format.
+  WAVEFORMATEX* format = &format_.Format;
+  format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+  format->nChannels = params.channels();
+  format->nSamplesPerSec = params.sample_rate();
+  format->wBitsPerSample = params.bits_per_sample();
+  format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
+  format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
+  format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+
+  // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
+  format_.Samples.wValidBitsPerSample = params.bits_per_sample();
+  format_.dwChannelMask = GetChannelConfig();
+  format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+
+  // Store size (in different units) of audio packets which we expect to
+  // get from the audio endpoint device in each render event.
+  packet_size_frames_ = params.frames_per_buffer();
+  packet_size_bytes_ = params.GetBytesPerBuffer();
+  packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate();
+  VLOG(1) << "Number of bytes per audio frame  : " << format->nBlockAlign;
+  VLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
+  VLOG(1) << "Number of bytes per packet       : " << packet_size_bytes_;
+  VLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;
+
+  // All events are auto-reset events and non-signaled initially.
+
+  // Create the event which the audio engine will signal each time
+  // a buffer becomes ready to be processed by the client.
+  audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
+  DCHECK(audio_samples_render_event_.IsValid());
+
+  // Create the event which will be set in Stop() when capturing shall stop.
+  stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
+  DCHECK(stop_render_event_.IsValid());
+}
+
+WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {}
+
+// Creates and initializes the IAudioClient/IAudioRenderClient pair for the
+// default render device. Returns true on success; safe to call twice (the
+// second call is a no-op returning true).
+bool WASAPIAudioOutputStream::Open() {
+  VLOG(1) << "WASAPIAudioOutputStream::Open()";
+  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+  if (opened_)
+    return true;
+
+
+  // Audio parameters must be identical to the preferred set of parameters
+  // if shared mode (default) is utilized.
+  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+    if (!audio_parameters_are_valid_) {
+      LOG(ERROR) << "Audio parameters are not valid.";
+      return false;
+    }
+  }
+
+  // Create an IAudioClient interface for the default rendering IMMDevice.
+  ScopedComPtr<IAudioClient> audio_client =
+      CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
+  if (!audio_client)
+    return false;
+
+  // Extra sanity to ensure that the provided device format is still valid.
+  if (!CoreAudioUtil::IsFormatSupported(audio_client,
+                                        share_mode_,
+                                        &format_)) {
+    return false;
+  }
+
+  HRESULT hr = S_FALSE;
+  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+    // Initialize the audio stream between the client and the device in shared
+    // mode and using event-driven buffer handling.
+    hr = CoreAudioUtil::SharedModeInitialize(
+        audio_client, &format_, audio_samples_render_event_.Get(),
+        &endpoint_buffer_size_frames_);
+    if (FAILED(hr))
+      return false;
+
+    // We know from experience that the best possible callback sequence is
+    // achieved when the packet size (given by the native device period)
+    // is an even multiple of the endpoint buffer size.
+    // Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441.
+    if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) {
+      LOG(ERROR) << "Bailing out due to non-perfect timing.";
+      return false;
+    }
+  } else {
+    // TODO(henrika): break out to CoreAudioUtil::ExclusiveModeInitialize()
+    // when removing the enable-exclusive-audio flag.
+    hr = ExclusiveModeInitialization(audio_client,
+                                     audio_samples_render_event_.Get(),
+                                     &endpoint_buffer_size_frames_);
+    if (FAILED(hr))
+      return false;
+
+    // The buffer scheme for exclusive mode streams is not designed for max
+    // flexibility. We only allow a "perfect match" between the packet size set
+    // by the user and the actual endpoint buffer size.
+    if (endpoint_buffer_size_frames_ != packet_size_frames_) {
+      LOG(ERROR) << "Bailing out due to non-perfect timing.";
+      return false;
+    }
+  }
+
+  // Create an IAudioRenderClient client for an initialized IAudioClient.
+  // The IAudioRenderClient interface enables us to write output data to
+  // a rendering endpoint buffer.
+  ScopedComPtr<IAudioRenderClient> audio_render_client =
+      CoreAudioUtil::CreateRenderClient(audio_client);
+  if (!audio_render_client)
+    return false;
+
+  // Store valid COM interfaces.
+  audio_client_ = audio_client;
+  audio_render_client_ = audio_render_client;
+
+  opened_ = true;
+  return true;
+}
+
+// Starts rendering: spins up the dedicated render thread, primes the
+// endpoint buffer with silence (shared mode) and starts the audio client.
+// Calling Start() while already started is a no-op (the callback must match).
+void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
+  VLOG(1) << "WASAPIAudioOutputStream::Start()";
+  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+  CHECK(callback);
+  CHECK(opened_);
+
+  if (render_thread_) {
+    CHECK_EQ(callback, source_);
+    return;
+  }
+
+  source_ = callback;
+
+  // Create and start the thread that will drive the rendering by waiting for
+  // render events.
+  render_thread_.reset(
+      new base::DelegateSimpleThread(this, "wasapi_render_thread"));
+  render_thread_->Start();
+  if (!render_thread_->HasBeenStarted()) {
+    LOG(ERROR) << "Failed to start WASAPI render thread.";
+    return;
+  }
+
+  // Ensure that the endpoint buffer is prepared with silence.
+  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+    if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
+             audio_client_, audio_render_client_)) {
+      LOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
+      return;
+    }
+  }
+  num_written_frames_ = endpoint_buffer_size_frames_;
+
+  // Start streaming data between the endpoint buffer and the audio engine.
+  HRESULT hr = audio_client_->Start();
+  if (FAILED(hr)) {
+    // Roll back: stop and join the render thread before reporting the error.
+    SetEvent(stop_render_event_.Get());
+    render_thread_->Join();
+    render_thread_.reset();
+    HandleError(hr);
+  }
+}
+
+// Stops rendering: halts the audio client, joins the render thread and
+// flushes the endpoint buffer. Calling Stop() when not started is a no-op.
+void WASAPIAudioOutputStream::Stop() {
+  VLOG(1) << "WASAPIAudioOutputStream::Stop()";
+  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+  if (!render_thread_)
+    return;
+
+  // Stop output audio streaming.
+  HRESULT hr = audio_client_->Stop();
+  if (FAILED(hr)) {
+    LOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+        << "Failed to stop output streaming: " << std::hex << hr;
+  }
+
+  // Wait until the thread completes and perform cleanup.
+  SetEvent(stop_render_event_.Get());
+  render_thread_->Join();
+  render_thread_.reset();
+
+  // Ensure that we don't quit the main thread loop immediately next
+  // time Start() is called.
+  ResetEvent(stop_render_event_.Get());
+
+  // Clear source callback, it'll be set again on the next Start() call.
+  source_ = NULL;
+
+  // Flush all pending data and reset the audio clock stream position to 0.
+  hr = audio_client_->Reset();
+  if (FAILED(hr)) {
+    LOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+        << "Failed to reset streaming: " << std::hex << hr;
+  }
+
+  // Extra safety check to ensure that the buffers are cleared.
+  // If the buffers are not cleared correctly, the next call to Start()
+  // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
+  // This check is only needed for shared-mode streams.
+  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+    UINT32 num_queued_frames = 0;
+    audio_client_->GetCurrentPadding(&num_queued_frames);
+    DCHECK_EQ(0u, num_queued_frames);
+  }
+}
+
+// Stops the stream (if running) and hands the object back to the manager,
+// which deletes it. Do not touch |this| after Close() returns.
+void WASAPIAudioOutputStream::Close() {
+  VLOG(1) << "WASAPIAudioOutputStream::Close()";
+  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+
+  // It is valid to call Close() before calling open or Start().
+  // It is also valid to call Close() after Start() has been called.
+  Stop();
+
+  // Inform the audio manager that we have been closed. This will cause our
+  // destruction.
+  manager_->ReleaseOutputStream(this);
+}
+
+// Stores the stream volume. Values outside [0.0, 1.0] are silently ignored.
+void WASAPIAudioOutputStream::SetVolume(double volume) {
+  VLOG(1) << "SetVolume(volume=" << volume << ")";
+  float volume_float = static_cast<float>(volume);
+  if (volume_float < 0.0f || volume_float > 1.0f) {
+    return;
+  }
+  volume_ = volume_float;
+}
+
+// Returns the volume last accepted by SetVolume() (1.0 by default).
+void WASAPIAudioOutputStream::GetVolume(double* volume) {
+  VLOG(1) << "GetVolume()";
+  *volume = static_cast<double>(volume_);
+}
+
+// DelegateSimpleThread::Delegate implementation. Body of the dedicated
+// "wasapi_render_thread": waits on |stop_render_event_| and
+// |audio_samples_render_event_| and renders one packet per render event
+// until stopped or an error breaks the loop.
+void WASAPIAudioOutputStream::Run() {
+  ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
+
+  // Increase the thread priority.
+  render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
+
+  // Enable MMCSS to ensure that this thread receives prioritized access to
+  // CPU resources.
+  DWORD task_index = 0;
+  HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
+                                                      &task_index);
+  bool mmcss_is_ok =
+      (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
+  if (!mmcss_is_ok) {
+    // Failed to enable MMCSS on this thread. It is not fatal but can lead
+    // to reduced QoS at high load.
+    DWORD err = GetLastError();
+    LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
+  }
+
+  HRESULT hr = S_FALSE;
+
+  bool playing = true;
+  bool error = false;
+  HANDLE wait_array[] = { stop_render_event_,
+                          audio_samples_render_event_ };
+  UINT64 device_frequency = 0;
+
+  // The IAudioClock interface enables us to monitor a stream's data
+  // rate and the current position in the stream. Allocate it before we
+  // start spinning.
+  ScopedComPtr<IAudioClock> audio_clock;
+  hr = audio_client_->GetService(__uuidof(IAudioClock),
+                                 audio_clock.ReceiveVoid());
+  if (SUCCEEDED(hr)) {
+    // The device frequency is the frequency generated by the hardware clock in
+    // the audio device. The GetFrequency() method reports a constant frequency.
+    hr = audio_clock->GetFrequency(&device_frequency);
+  }
+  error = FAILED(hr);
+  PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
+                        << std::hex << hr;
+
+  // Keep rendering audio until the stop event or the stream-switch event
+  // is signaled. An error event can also break the main thread loop.
+  while (playing && !error) {
+    // Wait for a close-down event, stream-switch event or a new render event.
+    DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
+                                               wait_array,
+                                               FALSE,
+                                               INFINITE);
+
+    switch (wait_result) {
+      case WAIT_OBJECT_0 + 0:
+        // |stop_render_event_| has been set.
+        playing = false;
+        break;
+      case WAIT_OBJECT_0 + 1:
+        // |audio_samples_render_event_| has been set.
+        RenderAudioFromSource(audio_clock, device_frequency);
+        break;
+      default:
+        // Unexpected wait result (e.g. WAIT_FAILED): abort the loop.
+        error = true;
+        break;
+    }
+  }
+
+  if (playing && error) {
+    // Stop audio rendering since something has gone wrong in our main thread
+    // loop. Note that, we are still in a "started" state, hence a Stop() call
+    // is required to join the thread properly.
+    audio_client_->Stop();
+    PLOG(ERROR) << "WASAPI rendering failed.";
+  }
+
+  // Disable MMCSS.
+  if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
+    PLOG(WARNING) << "Failed to disable MMCSS";
+  }
+}
+
+void WASAPIAudioOutputStream::RenderAudioFromSource(
+ IAudioClock* audio_clock, UINT64 device_frequency) {
+ TRACE_EVENT0("audio", "RenderAudioFromSource");
+
+ HRESULT hr = S_FALSE;
+ UINT32 num_queued_frames = 0;
+ uint8* audio_data = NULL;
+
+ // Contains how much new data we can write to the buffer without
+ // the risk of overwriting previously written data that the audio
+ // engine has not yet read from the buffer.
+ size_t num_available_frames = 0;
+
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+ // Get the padding value which represents the amount of rendering
+ // data that is queued up to play in the endpoint buffer.
+ hr = audio_client_->GetCurrentPadding(&num_queued_frames);
+ num_available_frames =
+ endpoint_buffer_size_frames_ - num_queued_frames;
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to retrieve amount of available space: "
+ << std::hex << hr;
+ return;
+ }
+ } else {
+ // While the stream is running, the system alternately sends one
+ // buffer or the other to the client. This form of double buffering
+ // is referred to as "ping-ponging". Each time the client receives
+ // a buffer from the system (triggers this event) the client must
+ // process the entire buffer. Calls to the GetCurrentPadding method
+ // are unnecessary because the packet size must always equal the
+ // buffer size. In contrast to the shared mode buffering scheme,
+ // the latency for an event-driven, exclusive-mode stream depends
+ // directly on the buffer size.
+ num_available_frames = endpoint_buffer_size_frames_;
+ }
+
+ // Check if there is enough available space to fit the packet size
+ // specified by the client.
+ if (num_available_frames < packet_size_frames_)
+ return;
+
+ DLOG_IF(ERROR, num_available_frames % packet_size_frames_ != 0)
+ << "Non-perfect timing detected (num_available_frames="
+ << num_available_frames << ", packet_size_frames="
+ << packet_size_frames_ << ")";
+
+ // Derive the number of packets we need to get from the client to
+ // fill up the available area in the endpoint buffer.
+ // |num_packets| will always be one for exclusive-mode streams and
+ // will be one in most cases for shared mode streams as well.
+ // However, we have found that two packets can sometimes be
+ // required.
+ size_t num_packets = (num_available_frames / packet_size_frames_);
+
+ for (size_t n = 0; n < num_packets; ++n) {
+ // Grab all available space in the rendering endpoint buffer
+ // into which the client can write a data packet.
+ hr = audio_render_client_->GetBuffer(packet_size_frames_,
+ &audio_data);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to use rendering audio buffer: "
+ << std::hex << hr;
+ return;
+ }
+
+ // Derive the audio delay which corresponds to the delay between
+ // a render event and the time when the first audio sample in a
+ // packet is played out through the speaker. This delay value
+ // can typically be utilized by an acoustic echo-control (AEC)
+ // unit at the render side.
+ UINT64 position = 0;
+ int audio_delay_bytes = 0;
+ hr = audio_clock->GetPosition(&position, NULL);
+ if (SUCCEEDED(hr)) {
+ // Stream position of the sample that is currently playing
+ // through the speaker.
+ double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
+ (static_cast<double>(position) / device_frequency);
+
+ // Stream position of the last sample written to the endpoint
+      // buffer. Note that the packet we are about to receive in
+ // the upcoming callback is also included.
+ size_t pos_last_sample_written_frames =
+ num_written_frames_ + packet_size_frames_;
+
+ // Derive the actual delay value which will be fed to the
+ // render client using the OnMoreData() callback.
+ audio_delay_bytes = (pos_last_sample_written_frames -
+ pos_sample_playing_frames) * format_.Format.nBlockAlign;
+ }
+
+ // Read a data packet from the registered client source and
+ // deliver a delay estimate in the same callback to the client.
+ // A time stamp is also stored in the AudioBuffersState. This
+ // time stamp can be used at the client side to compensate for
+ // the delay between the usage of the delay value and the time
+ // of generation.
+
+ int frames_filled = source_->OnMoreData(
+ audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes));
+ uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
+ DCHECK_LE(num_filled_bytes, packet_size_bytes_);
+
+ // Note: If this ever changes to output raw float the data must be
+ // clipped and sanitized since it may come from an untrusted
+ // source such as NaCl.
+ const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
+ audio_bus_->Scale(volume_);
+ audio_bus_->ToInterleaved(
+ frames_filled, bytes_per_sample, audio_data);
+
+
+ // Release the buffer space acquired in the GetBuffer() call.
+ // Render silence if we were not able to fill up the buffer totally.
+ DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
+ AUDCLNT_BUFFERFLAGS_SILENT : 0;
+ audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
+
+ num_written_frames_ += packet_size_frames_;
+ }
+}
+
+void WASAPIAudioOutputStream::HandleError(HRESULT err) {
+ CHECK((started() && GetCurrentThreadId() == render_thread_->tid()) ||
+ (!started() && GetCurrentThreadId() == creating_thread_id_));
+ NOTREACHED() << "Error code: " << std::hex << err;
+ if (source_)
+ source_->OnError(this);
+}
+
+HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
+ IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) {
+ DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);
+
+ float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
+ REFERENCE_TIME requested_buffer_duration =
+ static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);
+
+ DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
+ bool use_event = (event_handle != NULL &&
+ event_handle != INVALID_HANDLE_VALUE);
+ if (use_event)
+ stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ VLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;
+
+ // Initialize the audio stream between the client and the device.
+ // For an exclusive-mode stream that uses event-driven buffering, the
+ // caller must specify nonzero values for hnsPeriodicity and
+ // hnsBufferDuration, and the values of these two parameters must be equal.
+ // The Initialize method allocates two buffers for the stream. Each buffer
+ // is equal in duration to the value of the hnsBufferDuration parameter.
+ // Following the Initialize call for a rendering stream, the caller should
+ // fill the first of the two buffers before starting the stream.
+ HRESULT hr = S_FALSE;
+ hr = client->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
+ stream_flags,
+ requested_buffer_duration,
+ requested_buffer_duration,
+ reinterpret_cast<WAVEFORMATEX*>(&format_),
+ NULL);
+ if (FAILED(hr)) {
+ if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
+ LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";
+
+ UINT32 aligned_buffer_size = 0;
+ client->GetBufferSize(&aligned_buffer_size);
+ VLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
+
+ // Calculate new aligned periodicity. Each unit of reference time
+ // is 100 nanoseconds.
+ REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
+ (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
+ + 0.5);
+
+ // It is possible to re-activate and re-initialize the audio client
+ // at this stage but we bail out with an error code instead and
+ // combine it with a log message which informs about the suggested
+ // aligned buffer size which should be used instead.
+ VLOG(1) << "aligned_buffer_duration: "
+ << static_cast<double>(aligned_buffer_duration / 10000.0)
+ << " [ms]";
+ } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
+ // We will get this error if we try to use a smaller buffer size than
+ // the minimum supported size (usually ~3ms on Windows 7).
+ LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD";
+ }
+ return hr;
+ }
+
+ if (use_event) {
+ hr = client->SetEventHandle(event_handle);
+ if (FAILED(hr)) {
+ VLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
+ return hr;
+ }
+ }
+
+ UINT32 buffer_size_in_frames = 0;
+ hr = client->GetBufferSize(&buffer_size_in_frames);
+ if (FAILED(hr)) {
+ VLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
+ return hr;
+ }
+
+ *endpoint_buffer_size = buffer_size_in_frames;
+ VLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
+ return hr;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.h b/chromium/media/audio/win/audio_low_latency_output_win.h
new file mode 100644
index 00000000000..b0e990bb1a4
--- /dev/null
+++ b/chromium/media/audio/win/audio_low_latency_output_win.h
@@ -0,0 +1,262 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of AudioOutputStream for Windows using Windows Core Audio
+// WASAPI for low latency rendering.
+//
+// Overview of operation and performance:
+//
+// - An object of WASAPIAudioOutputStream is created by the AudioManager
+// factory.
+// - Next some thread will call Open(), at that point the underlying
+// Core Audio APIs are utilized to create two WASAPI interfaces called
+// IAudioClient and IAudioRenderClient.
+// - Then some thread will call Start(source).
+// A thread called "wasapi_render_thread" is started and this thread listens
+// on an event signal which is set periodically by the audio engine to signal
+// render events. As a result, OnMoreData() will be called and the registered
+// client is then expected to provide data samples to be played out.
+// - At some point, a thread will call Stop(), which stops and joins the
+// render thread and at the same time stops audio streaming.
+// - The same thread that called stop will call Close() where we cleanup
+// and notify the audio manager, which likely will destroy this object.
+// - A total typical delay of 35 ms contains three parts:
+// o Audio endpoint device period (~10 ms).
+// o Stream latency between the buffer and endpoint device (~5 ms).
+// o Endpoint buffer (~20 ms to ensure glitch-free rendering).
+//
+// Implementation notes:
+//
+// - The minimum supported client is Windows Vista.
+// - This implementation is single-threaded, hence:
+// o Construction and destruction must take place from the same thread.
+// o All APIs must be called from the creating thread as well.
+// - It is required to first acquire the native audio parameters of the default
+// output device and then use the same rate when creating this object. Use
+// e.g. WASAPIAudioOutputStream::HardwareSampleRate() to retrieve the sample
+// rate. Open() will fail unless "perfect" audio parameters are utilized.
+// - Calling Close() also leads to self destruction.
+// - Support for 8-bit audio has not yet been verified and tested.
+//
+// Core Audio API details:
+//
+// - The public API methods (Open(), Start(), Stop() and Close()) must be
+// called on constructing thread. The reason is that we want to ensure that
+// the COM environment is the same for all API implementations.
+// - Utilized MMDevice interfaces:
+// o IMMDeviceEnumerator
+// o IMMDevice
+// - Utilized WASAPI interfaces:
+// o IAudioClient
+// o IAudioRenderClient
+// - The stream is initialized in shared mode and the processing of the
+// audio buffer is event driven.
+// - The Multimedia Class Scheduler service (MMCSS) is utilized to boost
+// the priority of the render thread.
+// - Audio-rendering endpoint devices can have three roles:
+// Console (eConsole), Communications (eCommunications), and Multimedia
+// (eMultimedia). Search for "Device Roles" on MSDN for more details.
+//
+// Threading details:
+//
+// - It is assumed that this class is created on the audio thread owned
+// by the AudioManager.
+// - It is a requirement to call the following methods on the same audio
+// thread: Open(), Start(), Stop(), and Close().
+// - Audio rendering is performed on the audio render thread, owned by this
+// class, and the AudioSourceCallback::OnMoreData() method will be called
+// from this thread. Stream switching also takes place on the audio-render
+// thread.
+//
+// Experimental exclusive mode:
+//
+// - It is possible to open up a stream in exclusive mode by using the
+// --enable-exclusive-audio command line flag.
+// - The internal buffering scheme is less flexible for exclusive streams.
+// Hence, some manual tuning will be required before deciding what frame
+// size to use. See the WinAudioOutputTest unit test for more details.
+// - If an application opens a stream in exclusive mode, the application has
+// exclusive use of the audio endpoint device that plays the stream.
+// - Exclusive-mode should only be utilized when the lowest possible latency
+// is important.
+// - In exclusive mode, the client can choose to open the stream in any audio
+// format that the endpoint device supports, i.e. not limited to the device's
+// current (default) configuration.
+// - Initial measurements on Windows 7 (HP Z600 workstation) have shown that
+// the lowest possible latencies we can achieve on this machine are:
+// o ~3.3333ms @ 48kHz <=> 160 audio frames per buffer.
+// o ~3.6281ms @ 44.1kHz <=> 160 audio frames per buffer.
+// - See http://msdn.microsoft.com/en-us/library/windows/desktop/dd370844(v=vs.85).aspx
+// for more details.
+
+#ifndef MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_OUTPUT_WIN_H_
+#define MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_OUTPUT_WIN_H_
+
+#include <Audioclient.h>
+#include <MMDeviceAPI.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/scoped_comptr.h"
+#include "base/win/scoped_handle.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class AudioManagerWin;
+
+// AudioOutputStream implementation using Windows Core Audio APIs.
+class MEDIA_EXPORT WASAPIAudioOutputStream :
+ public AudioOutputStream,
+ public base::DelegateSimpleThread::Delegate {
+ public:
+  // The ctor takes all the usual parameters, plus |manager| which is
+  // the audio manager who is creating this object.
+ WASAPIAudioOutputStream(AudioManagerWin* manager,
+ const AudioParameters& params,
+ ERole device_role);
+
+ // The dtor is typically called by the AudioManager only and it is usually
+ // triggered by calling AudioOutputStream::Close().
+ virtual ~WASAPIAudioOutputStream();
+
+ // Implementation of AudioOutputStream.
+ virtual bool Open() OVERRIDE;
+ virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Close() OVERRIDE;
+ virtual void SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ // Retrieves the number of channels the audio engine uses for its internal
+ // processing/mixing of shared-mode streams for the default endpoint device.
+ static int HardwareChannelCount();
+
+ // Retrieves the channel layout the audio engine uses for its internal
+ // processing/mixing of shared-mode streams for the default endpoint device.
+ // Note that we convert an internal channel layout mask (see ChannelMask())
+ // into a Chrome-specific channel layout enumerator in this method, hence
+ // the match might not be perfect.
+ static ChannelLayout HardwareChannelLayout();
+
+ // Retrieves the sample rate the audio engine uses for its internal
+ // processing/mixing of shared-mode streams for the default endpoint device.
+ static int HardwareSampleRate();
+
+  // Returns AUDCLNT_SHAREMODE_EXCLUSIVE if --enable-exclusive-audio is used
+ // as command-line flag and AUDCLNT_SHAREMODE_SHARED otherwise (default).
+ static AUDCLNT_SHAREMODE GetShareMode();
+
+ bool started() const { return render_thread_.get() != NULL; }
+
+ private:
+ // DelegateSimpleThread::Delegate implementation.
+ virtual void Run() OVERRIDE;
+
+ // Core part of the thread loop which controls the actual rendering.
+ // Checks available amount of space in the endpoint buffer and reads
+ // data from the client to fill up the buffer without causing audio
+ // glitches.
+ void RenderAudioFromSource(IAudioClock* audio_clock, UINT64 device_frequency);
+
+ // Issues the OnError() callback to the |sink_|.
+ void HandleError(HRESULT err);
+
+  // Called when the device will be opened in exclusive mode using the
+  // application-specified format.
+ // TODO(henrika): rewrite and move to CoreAudioUtil when removing flag
+ // for exclusive audio mode.
+ HRESULT ExclusiveModeInitialization(IAudioClient* client,
+ HANDLE event_handle,
+ uint32* endpoint_buffer_size);
+
+ // Contains the thread ID of the creating thread.
+ base::PlatformThreadId creating_thread_id_;
+
+ // Our creator, the audio manager needs to be notified when we close.
+ AudioManagerWin* manager_;
+
+ // Rendering is driven by this thread (which has no message loop).
+ // All OnMoreData() callbacks will be called from this thread.
+ scoped_ptr<base::DelegateSimpleThread> render_thread_;
+
+ // Contains the desired audio format which is set up at construction.
+ // Extended PCM waveform format structure based on WAVEFORMATEXTENSIBLE.
+ // Use this for multiple channel and hi-resolution PCM data.
+ WAVEFORMATPCMEX format_;
+
+ // Set to true when stream is successfully opened.
+ bool opened_;
+
+ // We check if the input audio parameters are identical (bit depth is
+ // excluded) to the preferred (native) audio parameters during construction.
+ // Open() will fail if |audio_parameters_are_valid_| is false.
+ bool audio_parameters_are_valid_;
+
+ // Volume level from 0 to 1.
+ float volume_;
+
+ // Size in audio frames of each audio packet where an audio packet
+ // is defined as the block of data which the source is expected to deliver
+ // in each OnMoreData() callback.
+ size_t packet_size_frames_;
+
+ // Size in bytes of each audio packet.
+ size_t packet_size_bytes_;
+
+ // Size in milliseconds of each audio packet.
+ float packet_size_ms_;
+
+ // Length of the audio endpoint buffer.
+ uint32 endpoint_buffer_size_frames_;
+
+ // Defines the role that the system has assigned to an audio endpoint device.
+ ERole device_role_;
+
+ // The sharing mode for the connection.
+ // Valid values are AUDCLNT_SHAREMODE_SHARED and AUDCLNT_SHAREMODE_EXCLUSIVE
+ // where AUDCLNT_SHAREMODE_SHARED is the default.
+ AUDCLNT_SHAREMODE share_mode_;
+
+ // Counts the number of audio frames written to the endpoint buffer.
+ UINT64 num_written_frames_;
+
+ // Pointer to the client that will deliver audio samples to be played out.
+ AudioSourceCallback* source_;
+
+ // An IMMDeviceEnumerator interface which represents a device enumerator.
+ base::win::ScopedComPtr<IMMDeviceEnumerator> device_enumerator_;
+
+ // An IAudioClient interface which enables a client to create and initialize
+ // an audio stream between an audio application and the audio engine.
+ base::win::ScopedComPtr<IAudioClient> audio_client_;
+
+ // The IAudioRenderClient interface enables a client to write output
+ // data to a rendering endpoint buffer.
+ base::win::ScopedComPtr<IAudioRenderClient> audio_render_client_;
+
+ // The audio engine will signal this event each time a buffer becomes
+ // ready to be filled by the client.
+ base::win::ScopedHandle audio_samples_render_event_;
+
+ // This event will be signaled when rendering shall stop.
+ base::win::ScopedHandle stop_render_event_;
+
+ // Container for retrieving data from AudioSourceCallback::OnMoreData().
+ scoped_ptr<AudioBus> audio_bus_;
+
+ DISALLOW_COPY_AND_ASSIGN(WASAPIAudioOutputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_OUTPUT_WIN_H_
diff --git a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
new file mode 100644
index 00000000000..8c3e366c0cc
--- /dev/null
+++ b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -0,0 +1,703 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <mmsystem.h>
+
+#include "base/basictypes.h"
+#include "base/environment.h"
+#include "base/file_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/win/audio_low_latency_output_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/seekable_buffer.h"
+#include "media/base/test_data_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gmock_mutant.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::AtLeast;
+using ::testing::Between;
+using ::testing::CreateFunctor;
+using ::testing::DoAll;
+using ::testing::Gt;
+using ::testing::InvokeWithoutArgs;
+using ::testing::NotNull;
+using ::testing::Return;
+using base::win::ScopedCOMInitializer;
+
+namespace media {
+
+static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
+static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
+static const size_t kFileDurationMs = 20000;
+static const size_t kNumFileSegments = 2;
+static const int kBitsPerSample = 16;
+static const size_t kMaxDeltaSamples = 1000;
+static const char kDeltaTimeMsFileName[] = "delta_times_ms.txt";
+
+MATCHER_P(HasValidDelay, value, "") {
+ // It is difficult to come up with a perfect test condition for the delay
+ // estimation. For now, verify that the produced output delay is always
+ // larger than the selected buffer size.
+ return arg.hardware_delay_bytes >= value.hardware_delay_bytes;
+}
+
+// Used to terminate a loop from a different thread than the loop belongs to.
+// |loop| should be a MessageLoopProxy.
+ACTION_P(QuitLoop, loop) {
+ loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+}
+
+class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+ MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+};
+
+// This audio source implementation should be used for manual tests only since
+// it takes about 20 seconds to play out a file.
+class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
+ public:
+ explicit ReadFromFileAudioSource(const std::string& name)
+ : pos_(0),
+ previous_call_time_(base::TimeTicks::Now()),
+ text_file_(NULL),
+ elements_to_write_(0) {
+ // Reads a test file from media/test/data directory.
+ file_ = ReadTestDataFile(name);
+
+ // Creates an array that will store delta times between callbacks.
+ // The content of this array will be written to a text file at
+ // destruction and can then be used for off-line analysis of the exact
+ // timing of callbacks. The text file will be stored in media/test/data.
+ delta_times_.reset(new int[kMaxDeltaSamples]);
+ }
+
+ virtual ~ReadFromFileAudioSource() {
+ // Get complete file path to output file in directory containing
+ // media_unittests.exe.
+ base::FilePath file_name;
+ EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
+ file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
+
+ EXPECT_TRUE(!text_file_);
+ text_file_ = file_util::OpenFile(file_name, "wt");
+ DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
+
+ // Write the array which contains delta times to a text file.
+ size_t elements_written = 0;
+ while (elements_written < elements_to_write_) {
+ fprintf(text_file_, "%d\n", delta_times_[elements_written]);
+ ++elements_written;
+ }
+
+ file_util::CloseFile(text_file_);
+ }
+
+ // AudioOutputStream::AudioSourceCallback implementation.
+ virtual int OnMoreData(AudioBus* audio_bus,
+ AudioBuffersState buffers_state) {
+ // Store time difference between two successive callbacks in an array.
+ // These values will be written to a file in the destructor.
+ const base::TimeTicks now_time = base::TimeTicks::Now();
+ const int diff = (now_time - previous_call_time_).InMilliseconds();
+ previous_call_time_ = now_time;
+ if (elements_to_write_ < kMaxDeltaSamples) {
+ delta_times_[elements_to_write_] = diff;
+ ++elements_to_write_;
+ }
+
+ int max_size =
+ audio_bus->frames() * audio_bus->channels() * kBitsPerSample / 8;
+
+ // Use samples read from a data file and fill up the audio buffer
+ // provided to us in the callback.
+ if (pos_ + static_cast<int>(max_size) > file_size())
+ max_size = file_size() - pos_;
+ int frames = max_size / (audio_bus->channels() * kBitsPerSample / 8);
+ if (max_size) {
+ audio_bus->FromInterleaved(
+ file_->data() + pos_, frames, kBitsPerSample / 8);
+ pos_ += max_size;
+ }
+ return frames;
+ }
+
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) OVERRIDE {
+ NOTREACHED();
+ return 0;
+ }
+
+ virtual void OnError(AudioOutputStream* stream) {}
+
+ int file_size() { return file_->data_size(); }
+
+ private:
+ scoped_refptr<DecoderBuffer> file_;
+ scoped_ptr<int[]> delta_times_;
+ int pos_;
+ base::TimeTicks previous_call_time_;
+ FILE* text_file_;
+ size_t elements_to_write_;
+};
+
+static bool ExclusiveModeIsEnabled() {
+ return (WASAPIAudioOutputStream::GetShareMode() ==
+ AUDCLNT_SHAREMODE_EXCLUSIVE);
+}
+
+// Convenience method which ensures that we are not running on the build
+// bots and that at least one valid output device can be found. We also
+// verify that we are not running on XP since the low-latency (WASAPI-
+// based) version requires Windows Vista or higher.
+static bool CanRunAudioTests(AudioManager* audio_man) {
+ if (!CoreAudioUtil::IsSupported()) {
+ LOG(WARNING) << "This test requires Windows Vista or higher.";
+ return false;
+ }
+
+ // TODO(henrika): note that we use Wave today to query the number of
+ // existing output devices.
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output devices detected.";
+ return false;
+ }
+
+ return true;
+}
+
+// Convenience method which creates a default AudioOutputStream object but
+// also allows the user to modify the default settings.
+class AudioOutputStreamWrapper {
+ public:
+ explicit AudioOutputStreamWrapper(AudioManager* audio_manager)
+ : audio_man_(audio_manager),
+ format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
+ bits_per_sample_(kBitsPerSample) {
+ AudioParameters preferred_params;
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
+ eRender, eConsole, &preferred_params)));
+ channel_layout_ = preferred_params.channel_layout();
+ sample_rate_ = preferred_params.sample_rate();
+ samples_per_packet_ = preferred_params.frames_per_buffer();
+ }
+
+ ~AudioOutputStreamWrapper() {}
+
+ // Creates AudioOutputStream object using default parameters.
+ AudioOutputStream* Create() {
+ return CreateOutputStream();
+ }
+
+ // Creates AudioOutputStream object using non-default parameters where the
+ // frame size is modified.
+ AudioOutputStream* Create(int samples_per_packet) {
+ samples_per_packet_ = samples_per_packet;
+ return CreateOutputStream();
+ }
+
+ // Creates AudioOutputStream object using non-default parameters where the
+ // sample rate and frame size are modified.
+ AudioOutputStream* Create(int sample_rate, int samples_per_packet) {
+ sample_rate_ = sample_rate;
+ samples_per_packet_ = samples_per_packet;
+ return CreateOutputStream();
+ }
+
+ AudioParameters::Format format() const { return format_; }
+ int channels() const { return ChannelLayoutToChannelCount(channel_layout_); }
+ int bits_per_sample() const { return bits_per_sample_; }
+ int sample_rate() const { return sample_rate_; }
+ int samples_per_packet() const { return samples_per_packet_; }
+
+ private:
+ AudioOutputStream* CreateOutputStream() {
+ AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(
+ AudioParameters(format_, channel_layout_, sample_rate_,
+ bits_per_sample_, samples_per_packet_),
+ std::string());
+ EXPECT_TRUE(aos);
+ return aos;
+ }
+
+ AudioManager* audio_man_;
+ AudioParameters::Format format_;
+ ChannelLayout channel_layout_;
+ int bits_per_sample_;
+ int sample_rate_;
+ int samples_per_packet_;
+};
+
+// Convenience method which creates a default AudioOutputStream object.
+static AudioOutputStream* CreateDefaultAudioOutputStream(
+ AudioManager* audio_manager) {
+ AudioOutputStreamWrapper aosw(audio_manager);
+ AudioOutputStream* aos = aosw.Create();
+ return aos;
+}
+
+// Verify that we can retrieve the current hardware/mixing sample rate
+// for the default audio device.
+// TODO(henrika): modify this test when we support full device enumeration.
+TEST(WASAPIAudioOutputStreamTest, HardwareSampleRate) {
+ // Skip this test in exclusive mode since the resulting rate is only utilized
+ // for shared mode streams.
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()) || ExclusiveModeIsEnabled())
+ return;
+
+ // Default device intended for games, system notification sounds,
+ // and voice commands.
+ int fs = static_cast<int>(
+ WASAPIAudioOutputStream::HardwareSampleRate());
+ EXPECT_GE(fs, 0);
+}
+
+// Test Create(), Close() calling sequence.
+TEST(WASAPIAudioOutputStreamTest, CreateAndClose) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+ AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
+ aos->Close();
+}
+
+// Test Open(), Close() calling sequence.
+TEST(WASAPIAudioOutputStreamTest, OpenAndClose) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+ AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+}
+
+// Test Open(), Start(), Close() calling sequence.
+TEST(WASAPIAudioOutputStreamTest, OpenStartAndClose) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+ AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
+ EXPECT_TRUE(aos->Open());
+ MockAudioSourceCallback source;
+ EXPECT_CALL(source, OnError(aos))
+ .Times(0);
+ aos->Start(&source);
+ aos->Close();
+}
+
+// Test Open(), Start(), Stop(), Close() calling sequence.
+TEST(WASAPIAudioOutputStreamTest, OpenStartStopAndClose) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+ AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
+ EXPECT_TRUE(aos->Open());
+ MockAudioSourceCallback source;
+ EXPECT_CALL(source, OnError(aos))
+ .Times(0);
+ aos->Start(&source);
+ aos->Stop();
+ aos->Close();
+}
+
+// Test SetVolume(), GetVolume()
+TEST(WASAPIAudioOutputStreamTest, Volume) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+ AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
+
+ // Initial volume should be full volume (1.0).
+ double volume = 0.0;
+ aos->GetVolume(&volume);
+ EXPECT_EQ(1.0, volume);
+
+ // Verify some valid volume settings.
+ aos->SetVolume(0.0);
+ aos->GetVolume(&volume);
+ EXPECT_EQ(0.0, volume);
+
+ aos->SetVolume(0.5);
+ aos->GetVolume(&volume);
+ EXPECT_EQ(0.5, volume);
+
+ aos->SetVolume(1.0);
+ aos->GetVolume(&volume);
+ EXPECT_EQ(1.0, volume);
+
+  // Ensure that invalid volume settings have no effect.
+ aos->SetVolume(1.5);
+ aos->GetVolume(&volume);
+ EXPECT_EQ(1.0, volume);
+
+ aos->SetVolume(-0.5);
+ aos->GetVolume(&volume);
+ EXPECT_EQ(1.0, volume);
+
+ aos->Close();
+}
+
+// Test some additional calling sequences.
+TEST(WASAPIAudioOutputStreamTest, MiscCallingSequences) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+
+ AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
+ WASAPIAudioOutputStream* waos = static_cast<WASAPIAudioOutputStream*>(aos);
+
+ // Open(), Open() is a valid calling sequence (second call does nothing).
+ EXPECT_TRUE(aos->Open());
+ EXPECT_TRUE(aos->Open());
+
+ MockAudioSourceCallback source;
+
+ // Start(), Start() is a valid calling sequence (second call does nothing).
+ aos->Start(&source);
+ EXPECT_TRUE(waos->started());
+ aos->Start(&source);
+ EXPECT_TRUE(waos->started());
+
+ // Stop(), Stop() is a valid calling sequence (second call does nothing).
+ aos->Stop();
+ EXPECT_FALSE(waos->started());
+ aos->Stop();
+ EXPECT_FALSE(waos->started());
+
+ // Start(), Stop(), Start(), Stop().
+ aos->Start(&source);
+ EXPECT_TRUE(waos->started());
+ aos->Stop();
+ EXPECT_FALSE(waos->started());
+ aos->Start(&source);
+ EXPECT_TRUE(waos->started());
+ aos->Stop();
+ EXPECT_FALSE(waos->started());
+
+ aos->Close();
+}
+
+// Use preferred packet size and verify that rendering starts.
+TEST(WASAPIAudioOutputStreamTest, ValidPacketSize) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+
+ base::MessageLoopForUI loop;
+ MockAudioSourceCallback source;
+
+ // Create default WASAPI output stream which plays out in stereo using
+ // the shared mixing rate. The default buffer size is 10ms.
+ AudioOutputStreamWrapper aosw(audio_manager.get());
+ AudioOutputStream* aos = aosw.Create();
+ EXPECT_TRUE(aos->Open());
+
+ // Derive the expected size in bytes of each packet.
+ uint32 bytes_per_packet = aosw.channels() * aosw.samples_per_packet() *
+ (aosw.bits_per_sample() / 8);
+
+ // Set up expected minimum delay estimation.
+ AudioBuffersState state(0, bytes_per_packet);
+
+ // Wait for the first callback and verify its parameters.
+ EXPECT_CALL(source, OnMoreData(NotNull(), HasValidDelay(state)))
+ .WillOnce(DoAll(
+ QuitLoop(loop.message_loop_proxy()),
+ Return(aosw.samples_per_packet())));
+
+ aos->Start(&source);
+ loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
+ TestTimeouts::action_timeout());
+ loop.Run();
+ aos->Stop();
+ aos->Close();
+}
+
+// Use a non-preferred packet size and verify that Open() fails.
+TEST(WASAPIAudioOutputStreamTest, InvalidPacketSize) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+
+ if (ExclusiveModeIsEnabled())
+ return;
+
+ AudioParameters preferred_params;
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
+ eRender, eConsole, &preferred_params)));
+ int too_large_packet_size = 2 * preferred_params.frames_per_buffer();
+
+ AudioOutputStreamWrapper aosw(audio_manager.get());
+ AudioOutputStream* aos = aosw.Create(too_large_packet_size);
+ EXPECT_FALSE(aos->Open());
+
+ aos->Close();
+}
+
+// This test is intended for manual tests and should only be enabled
+// when it is required to play out data from a local PCM file.
+// By default, GTest will print out YOU HAVE 1 DISABLED TEST.
+// To include disabled tests in test execution, just invoke the test program
+// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
+// environment variable to a value greater than 0.
+// The test files are approximately 20 seconds long.
+TEST(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+
+ AudioOutputStreamWrapper aosw(audio_manager.get());
+ AudioOutputStream* aos = aosw.Create();
+ EXPECT_TRUE(aos->Open());
+
+ std::string file_name;
+ if (aosw.sample_rate() == 48000) {
+ file_name = kSpeechFile_16b_s_48k;
+ } else if (aosw.sample_rate() == 44100) {
+ file_name = kSpeechFile_16b_s_44k;
+ } else if (aosw.sample_rate() == 96000) {
+ // Use 48kHz file at 96kHz as well. Will sound like Donald Duck.
+ file_name = kSpeechFile_16b_s_48k;
+ } else {
+ FAIL() << "This test supports 44.1, 48kHz and 96kHz only.";
+ return;
+ }
+ ReadFromFileAudioSource file_source(file_name);
+
+ LOG(INFO) << "File name : " << file_name.c_str();
+ LOG(INFO) << "Sample rate : " << aosw.sample_rate();
+ LOG(INFO) << "Bits per sample: " << aosw.bits_per_sample();
+ LOG(INFO) << "#channels : " << aosw.channels();
+ LOG(INFO) << "File size : " << file_source.file_size();
+ LOG(INFO) << "#file segments : " << kNumFileSegments;
+ LOG(INFO) << ">> Listen to the stereo file while playing...";
+
+ for (int i = 0; i < kNumFileSegments; i++) {
+ // Each segment will start with a short (~20ms) block of zeros, hence
+ // some short glitches might be heard in this test if kNumFileSegments
+ // is larger than one. The exact length of the silence period depends on
+ // the selected sample rate.
+ aos->Start(&file_source);
+ base::PlatformThread::Sleep(
+ base::TimeDelta::FromMilliseconds(kFileDurationMs / kNumFileSegments));
+ aos->Stop();
+ }
+
+ LOG(INFO) << ">> Stereo file playout has stopped.";
+ aos->Close();
+}
+
+// Verify that we can open the output stream in exclusive mode using a
+// certain set of audio parameters and a sample rate of 48kHz.
+// The expected outcomes of each setting in this test have been derived
+// manually using log outputs (--v=1).
+TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt48kHz) {
+ if (!ExclusiveModeIsEnabled())
+ return;
+
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+
+ AudioOutputStreamWrapper aosw(audio_manager.get());
+
+ // 10ms @ 48kHz shall work.
+ // Note that, this is the same size as we can use for shared-mode streaming
+ // but here the endpoint buffer delay is only 10ms instead of 20ms.
+ AudioOutputStream* aos = aosw.Create(48000, 480);
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+
+ // 5ms @ 48kHz does not work due to misalignment.
+ // This test will propose an aligned buffer size of 5.3333ms.
+  // Note that we must call Close() even if Open() fails since Close() also
+ // deletes the object and we want to create a new object in the next test.
+ aos = aosw.Create(48000, 240);
+ EXPECT_FALSE(aos->Open());
+ aos->Close();
+
+ // 5.3333ms @ 48kHz should work (see test above).
+ aos = aosw.Create(48000, 256);
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+
+ // 2.6667ms is smaller than the minimum supported size (=3ms).
+ aos = aosw.Create(48000, 128);
+ EXPECT_FALSE(aos->Open());
+ aos->Close();
+
+ // 3ms does not correspond to an aligned buffer size.
+ // This test will propose an aligned buffer size of 3.3333ms.
+ aos = aosw.Create(48000, 144);
+ EXPECT_FALSE(aos->Open());
+ aos->Close();
+
+ // 3.3333ms @ 48kHz <=> smallest possible buffer size we can use.
+ aos = aosw.Create(48000, 160);
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+}
+
+// Verify that we can open the output stream in exclusive mode using a
+// certain set of audio parameters and a sample rate of 44.1kHz.
+// The expected outcomes of each setting in this test have been derived
+// manually using log outputs (--v=1).
+TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt44kHz) {
+ if (!ExclusiveModeIsEnabled())
+ return;
+
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+
+ AudioOutputStreamWrapper aosw(audio_manager.get());
+
+ // 10ms @ 44.1kHz does not work due to misalignment.
+ // This test will propose an aligned buffer size of 10.1587ms.
+ AudioOutputStream* aos = aosw.Create(44100, 441);
+ EXPECT_FALSE(aos->Open());
+ aos->Close();
+
+ // 10.1587ms @ 44.1kHz shall work (see test above).
+ aos = aosw.Create(44100, 448);
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+
+ // 5.8050ms @ 44.1 should work.
+ aos = aosw.Create(44100, 256);
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+
+  // 4.9887ms @ 44.1kHz does not work due to misalignment.
+ // This test will propose an aligned buffer size of 5.0794ms.
+  // Note that we must call Close() even if Open() fails since Close() also
+ // deletes the object and we want to create a new object in the next test.
+ aos = aosw.Create(44100, 220);
+ EXPECT_FALSE(aos->Open());
+ aos->Close();
+
+ // 5.0794ms @ 44.1kHz shall work (see test above).
+ aos = aosw.Create(44100, 224);
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+
+ // 2.9025ms is smaller than the minimum supported size (=3ms).
+ aos = aosw.Create(44100, 132);
+ EXPECT_FALSE(aos->Open());
+ aos->Close();
+
+ // 3.01587ms is larger than the minimum size but is not aligned.
+ // This test will propose an aligned buffer size of 3.6281ms.
+ aos = aosw.Create(44100, 133);
+ EXPECT_FALSE(aos->Open());
+ aos->Close();
+
+ // 3.6281ms @ 44.1kHz <=> smallest possible buffer size we can use.
+ aos = aosw.Create(44100, 160);
+ EXPECT_TRUE(aos->Open());
+ aos->Close();
+}
+
+// Verify that we can open and start the output stream in exclusive mode at
+// the lowest possible delay at 48kHz.
+TEST(WASAPIAudioOutputStreamTest, ExclusiveModeMinBufferSizeAt48kHz) {
+ if (!ExclusiveModeIsEnabled())
+ return;
+
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+
+ base::MessageLoopForUI loop;
+ MockAudioSourceCallback source;
+
+ // Create exclusive-mode WASAPI output stream which plays out in stereo
+ // using the minimum buffer size at 48kHz sample rate.
+ AudioOutputStreamWrapper aosw(audio_manager.get());
+ AudioOutputStream* aos = aosw.Create(48000, 160);
+ EXPECT_TRUE(aos->Open());
+
+ // Derive the expected size in bytes of each packet.
+ uint32 bytes_per_packet = aosw.channels() * aosw.samples_per_packet() *
+ (aosw.bits_per_sample() / 8);
+
+ // Set up expected minimum delay estimation.
+ AudioBuffersState state(0, bytes_per_packet);
+
+ // Wait for the first callback and verify its parameters.
+ EXPECT_CALL(source, OnMoreData(NotNull(), HasValidDelay(state)))
+ .WillOnce(DoAll(
+ QuitLoop(loop.message_loop_proxy()),
+ Return(aosw.samples_per_packet())))
+ .WillRepeatedly(Return(aosw.samples_per_packet()));
+
+ aos->Start(&source);
+ loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
+ TestTimeouts::action_timeout());
+ loop.Run();
+ aos->Stop();
+ aos->Close();
+}
+
+// Verify that we can open and start the output stream in exclusive mode at
+// the lowest possible delay at 44.1kHz.
+TEST(WASAPIAudioOutputStreamTest, ExclusiveModeMinBufferSizeAt44kHz) {
+ if (!ExclusiveModeIsEnabled())
+ return;
+
+ scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+ if (!CanRunAudioTests(audio_manager.get()))
+ return;
+
+ base::MessageLoopForUI loop;
+ MockAudioSourceCallback source;
+
+ // Create exclusive-mode WASAPI output stream which plays out in stereo
+ // using the minimum buffer size at 44.1kHz sample rate.
+ AudioOutputStreamWrapper aosw(audio_manager.get());
+ AudioOutputStream* aos = aosw.Create(44100, 160);
+ EXPECT_TRUE(aos->Open());
+
+ // Derive the expected size in bytes of each packet.
+ uint32 bytes_per_packet = aosw.channels() * aosw.samples_per_packet() *
+ (aosw.bits_per_sample() / 8);
+
+ // Set up expected minimum delay estimation.
+ AudioBuffersState state(0, bytes_per_packet);
+
+ // Wait for the first callback and verify its parameters.
+ EXPECT_CALL(source, OnMoreData(NotNull(), HasValidDelay(state)))
+ .WillOnce(DoAll(
+ QuitLoop(loop.message_loop_proxy()),
+ Return(aosw.samples_per_packet())))
+ .WillRepeatedly(Return(aosw.samples_per_packet()));
+
+ aos->Start(&source);
+ loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
+ TestTimeouts::action_timeout());
+ loop.Run();
+ aos->Stop();
+ aos->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_manager_win.cc b/chromium/media/audio/win/audio_manager_win.cc
new file mode 100644
index 00000000000..a753e554cb4
--- /dev/null
+++ b/chromium/media/audio/win/audio_manager_win.cc
@@ -0,0 +1,455 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/audio_io.h"
+
+#include <windows.h>
+#include <objbase.h> // This has to be before initguid.h
+#include <initguid.h>
+#include <mmsystem.h>
+#include <setupapi.h>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
+#include "base/process/launch.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/win/audio_device_listener_win.h"
+#include "media/audio/win/audio_low_latency_input_win.h"
+#include "media/audio/win/audio_low_latency_output_win.h"
+#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/audio_unified_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+#include "media/audio/win/device_enumeration_win.h"
+#include "media/audio/win/wavein_input_win.h"
+#include "media/audio/win/waveout_output_win.h"
+#include "media/base/bind_to_loop.h"
+#include "media/base/channel_layout.h"
+#include "media/base/limits.h"
+#include "media/base/media_switches.h"
+
+// Libraries required for the SetupAPI and Wbem APIs used here.
+#pragma comment(lib, "setupapi.lib")
+
+// The following are defined in various DDK headers, and we (re)define them here
+// to avoid adding the DDK as a chrome dependency.
+#define DRV_QUERYDEVICEINTERFACE 0x80c
+#define DRVM_MAPPER_PREFERRED_GET 0x2015
+#define DRV_QUERYDEVICEINTERFACESIZE 0x80d
+DEFINE_GUID(AM_KSCATEGORY_AUDIO, 0x6994ad04, 0x93ef, 0x11d0,
+ 0xa3, 0xcc, 0x00, 0xa0, 0xc9, 0x22, 0x31, 0x96);
+
+namespace media {
+
+// Maximum number of output streams that can be open simultaneously.
+static const int kMaxOutputStreams = 50;
+
+// Up to 8 channels can be passed to the driver. This should work, given the
+// right drivers, but graceful error handling is needed.
+static const int kWinMaxChannels = 8;
+
+// We use 3 buffers for recording audio so that if a recording callback takes
+// some time to return we won't lose audio. More buffers while recording are
+// ok because they don't introduce any delay in recording, unlike in playback
+// where you first need to fill in that number of buffers before starting to
+// play.
+static const int kNumInputBuffers = 3;
+
+// Buffer size to use for input and output stream when a proper size can't be
+// determined from the system
+static const int kFallbackBufferSize = 2048;
+
+static int GetVersionPartAsInt(DWORDLONG num) {
+ return static_cast<int>(num & 0xffff);
+}
+
+// Returns a string containing the given device's description and installed
+// driver version.
+static string16 GetDeviceAndDriverInfo(HDEVINFO device_info,
+ SP_DEVINFO_DATA* device_data) {
+ // Save the old install params setting and set a flag for the
+ // SetupDiBuildDriverInfoList below to return only the installed drivers.
+ SP_DEVINSTALL_PARAMS old_device_install_params;
+ old_device_install_params.cbSize = sizeof(old_device_install_params);
+ SetupDiGetDeviceInstallParams(device_info, device_data,
+ &old_device_install_params);
+ SP_DEVINSTALL_PARAMS device_install_params = old_device_install_params;
+ device_install_params.FlagsEx |= DI_FLAGSEX_INSTALLEDDRIVER;
+ SetupDiSetDeviceInstallParams(device_info, device_data,
+ &device_install_params);
+
+ SP_DRVINFO_DATA driver_data;
+ driver_data.cbSize = sizeof(driver_data);
+ string16 device_and_driver_info;
+ if (SetupDiBuildDriverInfoList(device_info, device_data,
+ SPDIT_COMPATDRIVER)) {
+ if (SetupDiEnumDriverInfo(device_info, device_data, SPDIT_COMPATDRIVER, 0,
+ &driver_data)) {
+ DWORDLONG version = driver_data.DriverVersion;
+ device_and_driver_info = string16(driver_data.Description) + L" v" +
+ base::IntToString16(GetVersionPartAsInt((version >> 48))) + L"." +
+ base::IntToString16(GetVersionPartAsInt((version >> 32))) + L"." +
+ base::IntToString16(GetVersionPartAsInt((version >> 16))) + L"." +
+ base::IntToString16(GetVersionPartAsInt(version));
+ }
+ SetupDiDestroyDriverInfoList(device_info, device_data, SPDIT_COMPATDRIVER);
+ }
+
+ SetupDiSetDeviceInstallParams(device_info, device_data,
+ &old_device_install_params);
+
+ return device_and_driver_info;
+}
+
+AudioManagerWin::AudioManagerWin() {
+ if (!CoreAudioUtil::IsSupported()) {
+ // Use the Wave API for device enumeration if XP or lower.
+ enumeration_type_ = kWaveEnumeration;
+ } else {
+ // Use the MMDevice API for device enumeration if Vista or higher.
+ enumeration_type_ = kMMDeviceEnumeration;
+ }
+
+ SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+
+ // Task must be posted last to avoid races from handing out "this" to the
+ // audio thread.
+ GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerWin::CreateDeviceListener, base::Unretained(this)));
+}
+
+AudioManagerWin::~AudioManagerWin() {
+ // It's safe to post a task here since Shutdown() will wait for all tasks to
+ // complete before returning.
+ GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerWin::DestroyDeviceListener, base::Unretained(this)));
+ Shutdown();
+}
+
+bool AudioManagerWin::HasAudioOutputDevices() {
+ return (::waveOutGetNumDevs() != 0);
+}
+
+bool AudioManagerWin::HasAudioInputDevices() {
+ return (::waveInGetNumDevs() != 0);
+}
+
+void AudioManagerWin::CreateDeviceListener() {
+ // AudioDeviceListenerWin must be initialized on a COM thread and should only
+ // be used if WASAPI / Core Audio is supported.
+ if (CoreAudioUtil::IsSupported()) {
+ output_device_listener_.reset(new AudioDeviceListenerWin(BindToLoop(
+ GetMessageLoop(), base::Bind(
+ &AudioManagerWin::NotifyAllOutputDeviceChangeListeners,
+ base::Unretained(this)))));
+ }
+}
+
+void AudioManagerWin::DestroyDeviceListener() {
+ output_device_listener_.reset();
+}
+
+string16 AudioManagerWin::GetAudioInputDeviceModel() {
+ // Get the default audio capture device and its device interface name.
+ DWORD device_id = 0;
+ waveInMessage(reinterpret_cast<HWAVEIN>(WAVE_MAPPER),
+ DRVM_MAPPER_PREFERRED_GET,
+ reinterpret_cast<DWORD_PTR>(&device_id), NULL);
+ ULONG device_interface_name_size = 0;
+ waveInMessage(reinterpret_cast<HWAVEIN>(device_id),
+ DRV_QUERYDEVICEINTERFACESIZE,
+ reinterpret_cast<DWORD_PTR>(&device_interface_name_size), 0);
+ size_t bytes_in_char16 = sizeof(string16::value_type);
+ DCHECK_EQ(0u, device_interface_name_size % bytes_in_char16);
+ if (device_interface_name_size <= bytes_in_char16)
+ return string16(); // No audio capture device.
+
+ string16 device_interface_name;
+ string16::value_type* name_ptr = WriteInto(&device_interface_name,
+ device_interface_name_size / bytes_in_char16);
+ waveInMessage(reinterpret_cast<HWAVEIN>(device_id),
+ DRV_QUERYDEVICEINTERFACE,
+ reinterpret_cast<DWORD_PTR>(name_ptr),
+ static_cast<DWORD_PTR>(device_interface_name_size));
+
+ // Enumerate all audio devices and find the one matching the above device
+ // interface name.
+ HDEVINFO device_info = SetupDiGetClassDevs(
+ &AM_KSCATEGORY_AUDIO, 0, 0, DIGCF_DEVICEINTERFACE | DIGCF_PRESENT);
+ if (device_info == INVALID_HANDLE_VALUE)
+ return string16();
+
+ DWORD interface_index = 0;
+ SP_DEVICE_INTERFACE_DATA interface_data;
+ interface_data.cbSize = sizeof(interface_data);
+ while (SetupDiEnumDeviceInterfaces(device_info, 0, &AM_KSCATEGORY_AUDIO,
+ interface_index++, &interface_data)) {
+ // Query the size of the struct, allocate it and then query the data.
+ SP_DEVINFO_DATA device_data;
+ device_data.cbSize = sizeof(device_data);
+ DWORD interface_detail_size = 0;
+ SetupDiGetDeviceInterfaceDetail(device_info, &interface_data, 0, 0,
+ &interface_detail_size, &device_data);
+ if (!interface_detail_size)
+ continue;
+
+ scoped_ptr<char[]> interface_detail_buffer(new char[interface_detail_size]);
+ SP_DEVICE_INTERFACE_DETAIL_DATA* interface_detail =
+ reinterpret_cast<SP_DEVICE_INTERFACE_DETAIL_DATA*>(
+ interface_detail_buffer.get());
+ interface_detail->cbSize = interface_detail_size;
+ if (!SetupDiGetDeviceInterfaceDetail(device_info, &interface_data,
+ interface_detail,
+ interface_detail_size, NULL,
+ &device_data))
+ return string16();
+
+ bool device_found = (device_interface_name == interface_detail->DevicePath);
+
+ if (device_found)
+ return GetDeviceAndDriverInfo(device_info, &device_data);
+ }
+
+ return string16();
+}
+
+void AudioManagerWin::ShowAudioInputSettings() {
+ std::wstring program;
+ std::string argument;
+ if (!CoreAudioUtil::IsSupported()) {
+ program = L"sndvol32.exe";
+ argument = "-R";
+ } else {
+ program = L"control.exe";
+ argument = "mmsys.cpl,,1";
+ }
+
+ base::FilePath path;
+ PathService::Get(base::DIR_SYSTEM, &path);
+ path = path.Append(program);
+ CommandLine command_line(path);
+ command_line.AppendArg(argument);
+ base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
+}
+
+void AudioManagerWin::GetAudioInputDeviceNames(
+ media::AudioDeviceNames* device_names) {
+ DCHECK(enumeration_type() != kUninitializedEnumeration);
+ // Enumerate all active audio-endpoint capture devices.
+ if (enumeration_type() == kWaveEnumeration) {
+ // Utilize the Wave API for Windows XP.
+ media::GetInputDeviceNamesWinXP(device_names);
+ } else {
+ // Utilize the MMDevice API (part of Core Audio) for Vista and higher.
+ media::GetInputDeviceNamesWin(device_names);
+ }
+
+ // Always add default device parameters as first element.
+ if (!device_names->empty()) {
+ media::AudioDeviceName name;
+ name.device_name = AudioManagerBase::kDefaultDeviceName;
+ name.unique_id = AudioManagerBase::kDefaultDeviceId;
+ device_names->push_front(name);
+ }
+}
+
+AudioParameters AudioManagerWin::GetInputStreamParameters(
+ const std::string& device_id) {
+ int sample_rate = 48000;
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ if (CoreAudioUtil::IsSupported()) {
+ int hw_sample_rate = WASAPIAudioInputStream::HardwareSampleRate(device_id);
+ if (hw_sample_rate)
+ sample_rate = hw_sample_rate;
+ channel_layout =
+ WASAPIAudioInputStream::HardwareChannelCount(device_id) == 1 ?
+ CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+ }
+
+  // TODO(henrika): improve the default buffer size value for input stream.
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ sample_rate, 16, kFallbackBufferSize);
+}
+
+// Factory for the implementations of AudioOutputStream for AUDIO_PCM_LINEAR
+// mode.
+// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
+AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
+ const AudioParameters& params) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ if (params.channels() > kWinMaxChannels)
+ return NULL;
+
+ return new PCMWaveOutAudioOutputStream(this,
+ params,
+ media::NumberOfWaveOutBuffers(),
+ WAVE_MAPPER);
+}
+
+// Factory for the implementations of AudioOutputStream for
+// AUDIO_PCM_LOW_LATENCY mode. Two implementations should suffice most
+// windows user's needs.
+// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
+// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API.
+AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
+ const AudioParameters& params, const std::string& input_device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ if (params.channels() > kWinMaxChannels)
+ return NULL;
+
+ if (!CoreAudioUtil::IsSupported()) {
+ // Fall back to Windows Wave implementation on Windows XP or lower.
+ DVLOG(1) << "Using WaveOut since WASAPI requires at least Vista.";
+ return new PCMWaveOutAudioOutputStream(
+ this, params, media::NumberOfWaveOutBuffers(), WAVE_MAPPER);
+ }
+
+ // TODO(crogers): support more than stereo input.
+ if (params.input_channels() > 0) {
+ DVLOG(1) << "WASAPIUnifiedStream is created.";
+ return new WASAPIUnifiedStream(this, params, input_device_id);
+ }
+
+ return new WASAPIAudioOutputStream(this, params, eConsole);
+}
+
+// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
+// mode.
+AudioInputStream* AudioManagerWin::MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
+ return CreatePCMWaveInAudioInputStream(params, device_id);
+}
+
+// Factory for the implementations of AudioInputStream for
+// AUDIO_PCM_LOW_LATENCY mode.
+AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) {
+ DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ AudioInputStream* stream = NULL;
+ if (!CoreAudioUtil::IsSupported()) {
+ // Fall back to Windows Wave implementation on Windows XP or lower.
+ DVLOG(1) << "Using WaveIn since WASAPI requires at least Vista.";
+ stream = CreatePCMWaveInAudioInputStream(params, device_id);
+ } else {
+ stream = new WASAPIAudioInputStream(this, params, device_id);
+ }
+
+ return stream;
+}
+
+AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+ int sample_rate = 48000;
+ int buffer_size = kFallbackBufferSize;
+ int bits_per_sample = 16;
+ int input_channels = 0;
+ bool use_input_params = !CoreAudioUtil::IsSupported();
+ if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) {
+ // TODO(crogers): tune these values for best possible WebAudio performance.
+ // WebRTC works well at 48kHz and a buffer size of 480 samples will be used
+ // for this case. Note that exclusive mode is experimental.
+ // This sample rate will be combined with a buffer size of 256 samples,
+ // which corresponds to an output delay of ~5.33ms.
+ sample_rate = 48000;
+ buffer_size = 256;
+ if (input_params.IsValid())
+ channel_layout = input_params.channel_layout();
+ } else if (!use_input_params) {
+ // Hardware sample-rate on Windows can be configured, so we must query.
+ // TODO(henrika): improve possibility to specify an audio endpoint.
+ // Use the default device (same as for Wave) for now to be compatible.
+ int hw_sample_rate = WASAPIAudioOutputStream::HardwareSampleRate();
+
+ AudioParameters params;
+ HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(eRender, eConsole,
+ &params);
+ int hw_buffer_size =
+ FAILED(hr) ? kFallbackBufferSize : params.frames_per_buffer();
+ channel_layout = WASAPIAudioOutputStream::HardwareChannelLayout();
+
+ // TODO(henrika): Figure out the right thing to do here.
+ if (hw_sample_rate && hw_buffer_size) {
+ sample_rate = hw_sample_rate;
+ buffer_size = hw_buffer_size;
+ } else {
+ use_input_params = true;
+ }
+ }
+
+ if (input_params.IsValid()) {
+ if (cmd_line->HasSwitch(switches::kTrySupportedChannelLayouts) &&
+ CoreAudioUtil::IsSupported()) {
+ // Check if it is possible to open up at the specified input channel
+ // layout but avoid checking if the specified layout is the same as the
+ // hardware (preferred) layout. We do this extra check to avoid the
+ // CoreAudioUtil::IsChannelLayoutSupported() overhead in most cases.
+ if (input_params.channel_layout() != channel_layout) {
+ if (CoreAudioUtil::IsChannelLayoutSupported(
+ eRender, eConsole, input_params.channel_layout())) {
+ // Open up using the same channel layout as the source if it is
+ // supported by the hardware.
+ channel_layout = input_params.channel_layout();
+ VLOG(1) << "Hardware channel layout is not used; using same layout"
+ << " as the source instead (" << channel_layout << ")";
+ }
+ }
+ }
+ input_channels = input_params.input_channels();
+ if (use_input_params) {
+ // If WASAPI isn't supported we'll fallback to WaveOut, which will take
+ // care of resampling and bits per sample changes. By setting these
+ // equal to the input values, AudioOutputResampler will skip resampling
+ // and bit per sample differences (since the input parameters will match
+ // the output parameters).
+ sample_rate = input_params.sample_rate();
+ bits_per_sample = input_params.bits_per_sample();
+ channel_layout = input_params.channel_layout();
+ buffer_size = input_params.frames_per_buffer();
+ }
+ }
+
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ buffer_size = user_buffer_size;
+
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
+ sample_rate, bits_per_sample, buffer_size);
+}
+
+AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
+ const AudioParameters& params,
+ const std::string& device_id) {
+ std::string xp_device_id = device_id;
+ if (device_id != AudioManagerBase::kDefaultDeviceId &&
+ enumeration_type_ == kMMDeviceEnumeration) {
+ xp_device_id = media::ConvertToWinXPDeviceId(device_id);
+ if (xp_device_id.empty()) {
+ DLOG(ERROR) << "Cannot find a waveIn device which matches the device ID "
+ << device_id;
+ return NULL;
+ }
+ }
+
+ return new PCMWaveInAudioInputStream(this, params, kNumInputBuffers,
+ xp_device_id);
+}
+
+// static
+AudioManager* CreateAudioManager() {
+ return new AudioManagerWin();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_manager_win.h b/chromium/media/audio/win/audio_manager_win.h
new file mode 100644
index 00000000000..65cc73bbd6e
--- /dev/null
+++ b/chromium/media/audio/win/audio_manager_win.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_WIN_AUDIO_MANAGER_WIN_H_
+#define MEDIA_AUDIO_WIN_AUDIO_MANAGER_WIN_H_
+
+#include <string>
+
+#include "media/audio/audio_manager_base.h"
+
+namespace media {
+
+class AudioDeviceListenerWin;
+
+// Windows implementation of the AudioManager singleton. This class is internal
+// to the audio output and only internal users can call methods not exposed by
+// the AudioManager class.
+class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
+ public:
+ AudioManagerWin();
+
+ // Implementation of AudioManager.
+ virtual bool HasAudioOutputDevices() OVERRIDE;
+ virtual bool HasAudioInputDevices() OVERRIDE;
+ virtual string16 GetAudioInputDeviceModel() OVERRIDE;
+ virtual void ShowAudioInputSettings() OVERRIDE;
+ virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
+ OVERRIDE;
+ virtual AudioParameters GetInputStreamParameters(
+ const std::string& device_id) OVERRIDE;
+
+ // Implementation of AudioManagerBase.
+ virtual AudioOutputStream* MakeLinearOutputStream(
+ const AudioParameters& params) OVERRIDE;
+ virtual AudioOutputStream* MakeLowLatencyOutputStream(
+ const AudioParameters& params,
+ const std::string& input_device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLinearInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ virtual AudioInputStream* MakeLowLatencyInputStream(
+ const AudioParameters& params, const std::string& device_id) OVERRIDE;
+
+ protected:
+ virtual ~AudioManagerWin();
+
+ virtual AudioParameters GetPreferredOutputStreamParameters(
+ const AudioParameters& input_params) OVERRIDE;
+
+ private:
+ enum EnumerationType {
+ kUninitializedEnumeration = 0,
+ kMMDeviceEnumeration,
+ kWaveEnumeration,
+ };
+
+ // Allow unit test to modify the utilized enumeration API.
+ friend class AudioInputDeviceTest;
+
+ EnumerationType enumeration_type_;
+ EnumerationType enumeration_type() { return enumeration_type_; }
+ void SetEnumerationType(EnumerationType type) {
+ enumeration_type_ = type;
+ }
+
+ // Returns a PCMWaveInAudioInputStream instance or NULL on failure.
+ // This method converts MMDevice-style device ID to WaveIn-style device ID if
+ // necessary.
+ // (Please see device_enumeration_win.h for more info about the two kinds of
+ // device IDs.)
+ AudioInputStream* CreatePCMWaveInAudioInputStream(
+ const AudioParameters& params,
+ const std::string& device_id);
+
+ // Helper methods for constructing AudioDeviceListenerWin on the audio thread.
+ void CreateDeviceListener();
+ void DestroyDeviceListener();
+
+ // Listen for output device changes.
+ scoped_ptr<AudioDeviceListenerWin> output_device_listener_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioManagerWin);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_AUDIO_MANAGER_WIN_H_
diff --git a/chromium/media/audio/win/audio_output_win_unittest.cc b/chromium/media/audio/win/audio_output_win_unittest.cc
new file mode 100644
index 00000000000..4e13d84f3d6
--- /dev/null
+++ b/chromium/media/audio/win/audio_output_win_unittest.cc
@@ -0,0 +1,713 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <mmsystem.h>
+
+#include "base/basictypes.h"
+#include "base/base_paths.h"
+#include "base/memory/aligned_memory.h"
+#include "base/path_service.h"
+#include "base/sync_socket.h"
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/windows_version.h"
+#include "media/base/limits.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/simple_sources.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::DoAll;
+using ::testing::Field;
+using ::testing::Invoke;
+using ::testing::InSequence;
+using ::testing::NiceMock;
+using ::testing::NotNull;
+using ::testing::Return;
+
+using base::win::ScopedCOMInitializer;
+
+namespace media {
+
+static const wchar_t kAudioFile1_16b_m_16K[]
+ = L"media\\test\\data\\sweep02_16b_mono_16KHz.raw";
+
+// This class allows us to find out if the callbacks are occurring as
+// expected and if any error has been reported.
+class TestSourceBasic : public AudioOutputStream::AudioSourceCallback {
+ public:
+ explicit TestSourceBasic()
+ : callback_count_(0),
+ had_error_(0) {
+ }
+ // AudioSourceCallback::OnMoreData implementation:
+ virtual int OnMoreData(AudioBus* audio_bus,
+ AudioBuffersState buffers_state) {
+ ++callback_count_;
+ // Touch the channel memory value to make sure memory is good.
+ audio_bus->Zero();
+ return audio_bus->frames();
+ }
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) {
+ NOTREACHED();
+ return 0;
+ }
+ // AudioSourceCallback::OnError implementation:
+ virtual void OnError(AudioOutputStream* stream) {
+ ++had_error_;
+ }
+ // Returns how many times OnMoreData() has been called.
+ int callback_count() const {
+ return callback_count_;
+ }
+ // Returns how many times the OnError callback was called.
+ int had_error() const {
+ return had_error_;
+ }
+
+ void set_error(bool error) {
+ had_error_ += error ? 1 : 0;
+ }
+
+ private:
+ int callback_count_;
+ int had_error_;
+};
+
+const int kMaxNumBuffers = 3;
+// Specializes TestSourceBasic to simulate a source that blocks for some time
+// in the OnMoreData callback.
+class TestSourceLaggy : public TestSourceBasic {
+ public:
+ TestSourceLaggy(int laggy_after_buffer, int lag_in_ms)
+ : laggy_after_buffer_(laggy_after_buffer), lag_in_ms_(lag_in_ms) {
+ }
+ virtual int OnMoreData(AudioBus* audio_bus,
+ AudioBuffersState buffers_state) {
+ // Call the base, which increments the callback_count_.
+ TestSourceBasic::OnMoreData(audio_bus, buffers_state);
+ if (callback_count() > kMaxNumBuffers) {
+ ::Sleep(lag_in_ms_);
+ }
+ return audio_bus->frames();
+ }
+ private:
+ int laggy_after_buffer_;
+ int lag_in_ms_;
+};
+
+class MockAudioSource : public AudioOutputStream::AudioSourceCallback {
+ public:
+ MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+
+ static int ClearData(AudioBus* audio_bus, AudioBuffersState buffers_state) {
+ audio_bus->Zero();
+ return audio_bus->frames();
+ }
+};
+
+// Helper class to memory map an entire file. The mapping is read-only. Don't
+// use for gigabyte-sized files. Attempts to write to this memory generate
+// memory access violations.
+class ReadOnlyMappedFile {
+ public:
+ explicit ReadOnlyMappedFile(const wchar_t* file_name)
+ : fmap_(NULL), start_(NULL), size_(0) {
+ HANDLE file = ::CreateFileW(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
+ OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (INVALID_HANDLE_VALUE == file)
+ return;
+ fmap_ = ::CreateFileMappingW(file, NULL, PAGE_READONLY, 0, 0, NULL);
+ ::CloseHandle(file);
+ if (!fmap_)
+ return;
+ start_ = reinterpret_cast<char*>(::MapViewOfFile(fmap_, FILE_MAP_READ,
+ 0, 0, 0));
+ if (!start_)
+ return;
+ MEMORY_BASIC_INFORMATION mbi = {0};
+ ::VirtualQuery(start_, &mbi, sizeof(mbi));
+ size_ = mbi.RegionSize;
+ }
+ ~ReadOnlyMappedFile() {
+ if (start_) {
+ ::UnmapViewOfFile(start_);
+ ::CloseHandle(fmap_);
+ }
+ }
+ // Returns true if the file was successfully mapped.
+ bool is_valid() const {
+ return ((start_ > 0) && (size_ > 0));
+ }
+ // Returns the size in bytes of the mapped memory.
+ uint32 size() const {
+ return size_;
+ }
+ // Returns the memory backing the file.
+ const void* GetChunkAt(uint32 offset) {
+ return &start_[offset];
+ }
+
+ private:
+ HANDLE fmap_;
+ char* start_;
+ uint32 size_;
+};
+
+// ===========================================================================
+// Validation of AudioManager::AUDIO_PCM_LINEAR
+//
+// NOTE:
+// The tests can fail on the build bots when somebody connects to them via
+// remote-desktop and the rdp client installs an audio device that fails to open
+// at some point, possibly when the connection goes idle.
+
+// Test that it can be created and closed.
+TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 8000, 16, 256),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+ oas->Close();
+}
+
+// Test that it cannot be created with invalid parameters.
+TEST(WinAudioTest, SanityOnMakeParams) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
+ EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
+ std::string()));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16, 256),
+ std::string()));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80, 256),
+ std::string()));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
+ std::string()));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16, 256),
+ std::string()));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, -100),
+ std::string()));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, 0),
+ std::string()));
+ EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
+ AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16,
+ media::limits::kMaxSamplesPerPacket + 1),
+ std::string()));
+}
+
+// Test that it can be opened and closed.
+TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 8000, 16, 256),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+ EXPECT_TRUE(oas->Open());
+ oas->Close();
+}
+
+// Test that it has a maximum packet size.
+TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
+ 8000, 16, 1024 * 1024 * 1024),
+ std::string());
+ EXPECT_TRUE(NULL == oas);
+ if (oas)
+ oas->Close();
+}
+
+// Test potential deadlock situation if the source is slow or blocks for some
+// time. The actual EXPECT_GT are mostly meaningless and the real test is that
+// the test completes in reasonable time.
+TEST(WinAudioTest, PCMWaveSlowSource) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ 16000, 16, 256),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+ TestSourceLaggy test_laggy(2, 90);
+ EXPECT_TRUE(oas->Open());
+ // The test parameters cause a callback every 32 ms and the source is
+ // sleeping for 90 ms, so it is guaranteed that we run out of ready buffers.
+ oas->Start(&test_laggy);
+ ::Sleep(500);
+ EXPECT_GT(test_laggy.callback_count(), 2);
+ EXPECT_FALSE(test_laggy.had_error());
+ oas->Stop();
+ ::Sleep(500);
+ oas->Close();
+}
+
+// Test another potential deadlock situation if the thread that calls Start()
+// gets paused. This test is best when run over RDP with audio enabled. See
+// bug 19276 for more details.
+TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+
+ SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
+
+ EXPECT_TRUE(oas->Open());
+ oas->SetVolume(1.0);
+
+ for (int ix = 0; ix != 5; ++ix) {
+ oas->Start(&source);
+ ::Sleep(10);
+ oas->Stop();
+ }
+ oas->Close();
+}
+
+
+// This test produces actual audio for .5 seconds on the default wave
+// device at 44.1K s/sec. Parameters have been chosen carefully so you should
+// not hear pops or noises while the sound is playing.
+TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+
+ SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
+
+ EXPECT_TRUE(oas->Open());
+ oas->SetVolume(1.0);
+ oas->Start(&source);
+ ::Sleep(500);
+ oas->Stop();
+ oas->Close();
+}
+
+// This test produces actual audio for .5 seconds on the default wave
+// device at 22K s/sec. Parameters have been chosen carefully so you should
+// not hear pops or noises while the sound is playing. The audio also should
+// sound with a lower volume than PCMWaveStreamPlay200HzTone44Kss.
+TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 20;
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ AudioParameters::kAudioCDSampleRate / 2, 16,
+ samples_100_ms),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+
+ SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate/2);
+
+ EXPECT_TRUE(oas->Open());
+
+ oas->SetVolume(0.5);
+ oas->Start(&source);
+ ::Sleep(500);
+
+ // Test that the volume is within the set limits.
+ double volume = 0.0;
+ oas->GetVolume(&volume);
+ EXPECT_LT(volume, 0.51);
+ EXPECT_GT(volume, 0.49);
+ oas->Stop();
+ oas->Close();
+}
+
+// Uses a restricted source to play ~2 seconds of audio for about 5 seconds. We
+// try hard to generate situation where the two threads are accessing the
+// object roughly at the same time.
+TEST(WinAudioTest, PushSourceFile16KHz) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ static const int kSampleRate = 16000;
+ SineWaveAudioSource source(1, 200.0, kSampleRate);
+ // Compute buffer size for 100ms of audio.
+ const uint32 kSamples100ms = (kSampleRate / 1000) * 100;
+ // Restrict SineWaveAudioSource to 100ms of samples.
+ source.CapSamples(kSamples100ms);
+
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ kSampleRate, 16, kSamples100ms),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+
+ EXPECT_TRUE(oas->Open());
+
+ oas->SetVolume(1.0);
+ oas->Start(&source);
+
+ // We buffer and play at the same time, buffering happens every ~10ms and the
+ // consuming of the buffer happens every ~100ms. We do 100 buffers which
+ // effectively wrap around the file more than once.
+ for (uint32 ix = 0; ix != 100; ++ix) {
+ ::Sleep(10);
+ source.Reset();
+ }
+
+ // Play a little bit more of the file.
+ ::Sleep(500);
+
+ oas->Stop();
+ oas->Close();
+}
+
+// This test is to make sure an AudioOutputStream can be started after it was
+// stopped. You will hear two .5 second wave signals separated by 0.5 seconds
+// of silence.
+TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+
+ SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
+ EXPECT_TRUE(oas->Open());
+ oas->SetVolume(1.0);
+
+ // Play the wave for .5 seconds.
+ oas->Start(&source);
+ ::Sleep(500);
+ oas->Stop();
+
+ // Sleep to give silence after stopping the AudioOutputStream.
+ ::Sleep(250);
+
+ // Start again and play for .5 seconds.
+ oas->Start(&source);
+ ::Sleep(500);
+ oas->Stop();
+
+ oas->Close();
+}
+
+// With the low latency mode, WASAPI is utilized by default for Vista and
+// higher and Wave is used for XP and lower. It is possible to utilize a
+// smaller buffer size for WASAPI than for Wave.
+TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ // The WASAPI API requires a correct COM environment.
+ ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
+
+ // Use 10 ms buffer size for WASAPI and 50 ms buffer size for Wave.
+ // Take the existing native sample rate into account.
+ const AudioParameters params = audio_man->GetDefaultOutputStreamParameters();
+ int sample_rate = params.sample_rate();
+ uint32 samples_10_ms = sample_rate / 100;
+ int n = 1;
+ (base::win::GetVersion() <= base::win::VERSION_XP) ? n = 5 : n = 1;
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_MONO, sample_rate,
+ 16, n * samples_10_ms),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+
+ SineWaveAudioSource source(1, 200, sample_rate);
+
+ bool opened = oas->Open();
+ if (!opened) {
+ // It was not possible to open this audio device in mono.
+ // No point in continuing the test so let's break here.
+ LOG(WARNING) << "Mono is not supported. Skipping test.";
+ oas->Close();
+ return;
+ }
+ oas->SetVolume(1.0);
+
+ // Play the wave for .8 seconds.
+ oas->Start(&source);
+ ::Sleep(800);
+ oas->Stop();
+ oas->Close();
+}
+
+// Check that the pending bytes value is correct when the stream starts.
+TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
+ AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+
+ NiceMock<MockAudioSource> source;
+ EXPECT_TRUE(oas->Open());
+
+ uint32 bytes_100_ms = samples_100_ms * 2;
+
+ // Audio output stream has either a double or triple buffer scheme.
+// We expect the amount of pending bytes to reach up to 2 times
+ // |bytes_100_ms| depending on number of buffers used.
+ // From that it would decrease as we are playing the data but not providing
+ // new one. And then we will try to provide zero data so the amount of
+// pending bytes will go down and eventually reach zero.
+ InSequence s;
+
+ EXPECT_CALL(source, OnMoreData(NotNull(),
+ Field(&AudioBuffersState::pending_bytes, 0)))
+ .WillOnce(Invoke(MockAudioSource::ClearData));
+ switch (NumberOfWaveOutBuffers()) {
+ case 2:
+ break; // Calls are the same as at end of 3-buffer scheme.
+ case 3:
+ EXPECT_CALL(source, OnMoreData(NotNull(),
+ Field(&AudioBuffersState::pending_bytes,
+ bytes_100_ms)))
+ .WillOnce(Invoke(MockAudioSource::ClearData));
+ EXPECT_CALL(source, OnMoreData(NotNull(),
+ Field(&AudioBuffersState::pending_bytes,
+ 2 * bytes_100_ms)))
+ .WillOnce(Invoke(MockAudioSource::ClearData));
+ EXPECT_CALL(source, OnMoreData(NotNull(),
+ Field(&AudioBuffersState::pending_bytes,
+ 2 * bytes_100_ms)))
+ .Times(AnyNumber())
+ .WillRepeatedly(Return(0));
+ break;
+ default:
+ ASSERT_TRUE(false)
+ << "Unexpected number of buffers: " << NumberOfWaveOutBuffers();
+ }
+ EXPECT_CALL(source, OnMoreData(NotNull(),
+ Field(&AudioBuffersState::pending_bytes,
+ bytes_100_ms)))
+ .Times(AnyNumber())
+ .WillRepeatedly(Return(0));
+ EXPECT_CALL(source, OnMoreData(NotNull(),
+ Field(&AudioBuffersState::pending_bytes, 0)))
+ .Times(AnyNumber())
+ .WillRepeatedly(Return(0));
+
+ oas->Start(&source);
+ ::Sleep(500);
+ oas->Stop();
+ oas->Close();
+}
+
+// Simple source that uses a SyncSocket to retrieve the audio data
+// from a potentially remote thread.
+class SyncSocketSource : public AudioOutputStream::AudioSourceCallback {
+ public:
+ SyncSocketSource(base::SyncSocket* socket, const AudioParameters& params)
+ : socket_(socket) {
+ // Setup AudioBus wrapping data we'll receive over the sync socket.
+ data_size_ = AudioBus::CalculateMemorySize(params);
+ data_.reset(static_cast<float*>(
+ base::AlignedAlloc(data_size_, AudioBus::kChannelAlignment)));
+ audio_bus_ = AudioBus::WrapMemory(params, data_.get());
+ }
+ ~SyncSocketSource() {}
+
+ // AudioSourceCallback::OnMoreData implementation:
+ virtual int OnMoreData(AudioBus* audio_bus,
+ AudioBuffersState buffers_state) {
+ socket_->Send(&buffers_state, sizeof(buffers_state));
+ uint32 size = socket_->Receive(data_.get(), data_size_);
+ DCHECK_EQ(static_cast<size_t>(size) % sizeof(*audio_bus_->channel(0)), 0U);
+ audio_bus_->CopyTo(audio_bus);
+ return audio_bus_->frames();
+ }
+ virtual int OnMoreIOData(AudioBus* source,
+ AudioBus* dest,
+ AudioBuffersState buffers_state) {
+ NOTREACHED();
+ return 0;
+ }
+ // AudioSourceCallback::OnError implementation:
+ virtual void OnError(AudioOutputStream* stream) {
+ }
+
+ private:
+ base::SyncSocket* socket_;
+ int data_size_;
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
+ scoped_ptr<AudioBus> audio_bus_;
+};
+
+struct SyncThreadContext {
+ base::SyncSocket* socket;
+ int sample_rate;
+ int channels;
+ int frames;
+ double sine_freq;
+ uint32 packet_size_bytes;
+};
+
+// This thread provides the data that the SyncSocketSource above needs
+// using the other end of a SyncSocket. The protocol is as follows:
+//
+// SyncSocketSource ---send 4 bytes ------------> SyncSocketThread
+// <--- audio packet ----------
+//
+DWORD __stdcall SyncSocketThread(void* context) {
+ SyncThreadContext& ctx = *(reinterpret_cast<SyncThreadContext*>(context));
+
+ // Setup AudioBus wrapping data we'll pass over the sync socket.
+ scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(static_cast<float*>(
+ base::AlignedAlloc(ctx.packet_size_bytes, AudioBus::kChannelAlignment)));
+ scoped_ptr<AudioBus> audio_bus = AudioBus::WrapMemory(
+ ctx.channels, ctx.frames, data.get());
+
+ SineWaveAudioSource sine(1, ctx.sine_freq, ctx.sample_rate);
+ const int kTwoSecFrames = ctx.sample_rate * 2;
+
+ AudioBuffersState buffers_state;
+ int times = 0;
+ for (int ix = 0; ix < kTwoSecFrames; ix += ctx.frames) {
+ if (ctx.socket->Receive(&buffers_state, sizeof(buffers_state)) == 0)
+ break;
+ if ((times > 0) && (buffers_state.pending_bytes < 1000)) __debugbreak();
+ sine.OnMoreData(audio_bus.get(), buffers_state);
+ ctx.socket->Send(data.get(), ctx.packet_size_bytes);
+ ++times;
+ }
+
+ return 0;
+}
+
+// Test the basic operation of AudioOutputStream used with a SyncSocket.
+// The emphasis is to verify that it is possible to feed data to the audio
+// layer using a source based on SyncSocket. In a real situation we would
+// go for the low-latency version in combination with SyncSocket, but to keep
+// the test more simple, AUDIO_PCM_LINEAR is utilized instead. The main
+// principle of the test still remains and we avoid the additional complexity
+// related to the two different audio-layers for AUDIO_PCM_LOW_LATENCY.
+// In this test you should hear a continuous 200Hz tone for 2 seconds.
+TEST(WinAudioTest, SyncSocketBasic) {
+ scoped_ptr<AudioManager> audio_man(AudioManager::Create());
+ if (!audio_man->HasAudioOutputDevices()) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ static const int sample_rate = AudioParameters::kAudioCDSampleRate;
+ static const uint32 kSamples20ms = sample_rate / 50;
+ AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_MONO, sample_rate, 16, kSamples20ms);
+
+
+ AudioOutputStream* oas = audio_man->MakeAudioOutputStream(params,
+ std::string());
+ ASSERT_TRUE(NULL != oas);
+
+ ASSERT_TRUE(oas->Open());
+
+ base::SyncSocket sockets[2];
+ ASSERT_TRUE(base::SyncSocket::CreatePair(&sockets[0], &sockets[1]));
+
+ SyncSocketSource source(&sockets[0], params);
+
+ SyncThreadContext thread_context;
+ thread_context.sample_rate = params.sample_rate();
+ thread_context.sine_freq = 200.0;
+ thread_context.packet_size_bytes = AudioBus::CalculateMemorySize(params);
+ thread_context.frames = params.frames_per_buffer();
+ thread_context.channels = params.channels();
+ thread_context.socket = &sockets[1];
+
+ HANDLE thread = ::CreateThread(NULL, 0, SyncSocketThread,
+ &thread_context, 0, NULL);
+
+ oas->Start(&source);
+
+ ::WaitForSingleObject(thread, INFINITE);
+ ::CloseHandle(thread);
+
+ oas->Stop();
+ oas->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_unified_win.cc b/chromium/media/audio/win/audio_unified_win.cc
new file mode 100644
index 00000000000..5c1594ef8f8
--- /dev/null
+++ b/chromium/media/audio/win/audio_unified_win.cc
@@ -0,0 +1,1000 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/win/audio_unified_win.h"
+
+#include <Functiondiscoverykeys_devpkey.h>
+
+#include "base/debug/trace_event.h"
+#ifndef NDEBUG
+#include "base/file_util.h"
+#include "base/path_service.h"
+#endif
+#include "base/time/time.h"
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/avrt_wrapper_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+
+using base::win::ScopedComPtr;
+using base::win::ScopedCOMInitializer;
+using base::win::ScopedCoMem;
+
+// Smoothing factor in exponential smoothing filter where 0 < alpha < 1.
+// Larger values of alpha reduce the level of smoothing.
+// See http://en.wikipedia.org/wiki/Exponential_smoothing for details.
+static const double kAlpha = 0.1;
+
+// Compute a rate compensation which always attracts us back to a specified
+// target level over a period of |kCorrectionTimeSeconds|.
+static const double kCorrectionTimeSeconds = 0.1;
+
+#ifndef NDEBUG
+// Max number of columns in the output text file |kUnifiedAudioDebugFileName|.
+// See LogElementNames enumerator for details on what each column represents.
+static const size_t kMaxNumSampleTypes = 4;
+
+static const size_t kMaxNumParams = 2;
+
+// Max number of rows in the output file |kUnifiedAudioDebugFileName|.
+// Each row corresponds to one set of sample values for (approximately) the
+// same time instant (stored in the first column).
+static const size_t kMaxFileSamples = 10000;
+
+// Name of output debug file used for off-line analysis of measurements which
+// can be utilized for performance tuning of this class.
+static const char kUnifiedAudioDebugFileName[] = "unified_win_debug.txt";
+
+// Name of output debug file used for off-line analysis of measurements.
+// This file will contain a list of audio parameters.
+static const char kUnifiedAudioParamsFileName[] = "unified_win_params.txt";
+#endif
+
+typedef uint32 ChannelConfig;
+
+// Retrieves an integer mask which corresponds to the channel layout the
+// audio engine uses for its internal processing/mixing of shared-mode
+// streams. This mask indicates which channels are present in the multi-
+// channel stream. The least significant bit corresponds with the Front Left
+// speaker, the next least significant bit corresponds to the Front Right
+// speaker, and so on, continuing in the order defined in KsMedia.h.
+// See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
+// for more details.
+static ChannelConfig GetChannelConfig(EDataFlow data_flow) {
+ WAVEFORMATPCMEX format;
+ return SUCCEEDED(media::CoreAudioUtil::GetDefaultSharedModeMixFormat(
+ data_flow, eConsole, &format)) ?
+ static_cast<int>(format.dwChannelMask) : 0;
+}
+
+// Use the acquired IAudioClock interface to derive a time stamp of the audio
+// sample which is currently playing through the speakers.
+static double SpeakerStreamPosInMilliseconds(IAudioClock* clock) {
+ UINT64 device_frequency = 0, position = 0;
+ if (FAILED(clock->GetFrequency(&device_frequency)) ||
+ FAILED(clock->GetPosition(&position, NULL))) {
+ return 0.0;
+ }
+ return base::Time::kMillisecondsPerSecond *
+ (static_cast<double>(position) / device_frequency);
+}
+
+// Get a time stamp in milliseconds given number of audio frames in |num_frames|
+// using the current sample rate |fs| as scale factor.
+// Example: |num_frames| = 960 and |fs| = 48000 => 20 [ms].
+static double CurrentStreamPosInMilliseconds(UINT64 num_frames, DWORD fs) {
+ return base::Time::kMillisecondsPerSecond *
+ (static_cast<double>(num_frames) / fs);
+}
+
+// Convert a timestamp in milliseconds to byte units given the audio format
+// in |format|.
+// Example: |ts_milliseconds| equals 10, sample rate is 48000 and frame size
+// is 4 bytes per audio frame => 480 * 4 = 1920 [bytes].
+static int MillisecondsToBytes(double ts_milliseconds,
+ const WAVEFORMATPCMEX& format) {
+ double seconds = ts_milliseconds / base::Time::kMillisecondsPerSecond;
+ return static_cast<int>(seconds * format.Format.nSamplesPerSec *
+ format.Format.nBlockAlign + 0.5);
+}
+
+// Convert frame count to milliseconds given the audio format in |format|.
+static double FrameCountToMilliseconds(int num_frames,
+ const WAVEFORMATPCMEX& format) {
+ return (base::Time::kMillisecondsPerSecond * num_frames) /
+ static_cast<double>(format.Format.nSamplesPerSec);
+}
+
+namespace media {
+
+WASAPIUnifiedStream::WASAPIUnifiedStream(AudioManagerWin* manager,
+ const AudioParameters& params,
+ const std::string& input_device_id)
+ : creating_thread_id_(base::PlatformThread::CurrentId()),
+ manager_(manager),
+ params_(params),
+ input_channels_(params.input_channels()),
+ output_channels_(params.channels()),
+ input_device_id_(input_device_id),
+ share_mode_(CoreAudioUtil::GetShareMode()),
+ opened_(false),
+ volume_(1.0),
+ output_buffer_size_frames_(0),
+ input_buffer_size_frames_(0),
+ endpoint_render_buffer_size_frames_(0),
+ endpoint_capture_buffer_size_frames_(0),
+ num_written_frames_(0),
+ total_delay_ms_(0.0),
+ total_delay_bytes_(0),
+ source_(NULL),
+ input_callback_received_(false),
+ io_sample_rate_ratio_(1),
+ target_fifo_frames_(0),
+ average_delta_(0),
+ fifo_rate_compensation_(1),
+ update_output_delay_(false),
+ capture_delay_ms_(0) {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::WASAPIUnifiedStream");
+ VLOG(1) << "WASAPIUnifiedStream::WASAPIUnifiedStream()";
+ DCHECK(manager_);
+
+ VLOG(1) << "Input channels : " << input_channels_;
+ VLOG(1) << "Output channels: " << output_channels_;
+ VLOG(1) << "Sample rate : " << params_.sample_rate();
+ VLOG(1) << "Buffer size : " << params.frames_per_buffer();
+
+#ifndef NDEBUG
+ input_time_stamps_.reset(new int64[kMaxFileSamples]);
+ num_frames_in_fifo_.reset(new int[kMaxFileSamples]);
+ resampler_margin_.reset(new int[kMaxFileSamples]);
+ fifo_rate_comps_.reset(new double[kMaxFileSamples]);
+ num_elements_.reset(new int[kMaxNumSampleTypes]);
+ std::fill(num_elements_.get(), num_elements_.get() + kMaxNumSampleTypes, 0);
+ input_params_.reset(new int[kMaxNumParams]);
+ output_params_.reset(new int[kMaxNumParams]);
+#endif
+
+ DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
+ << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
+
+ // Load the Avrt DLL if not already loaded. Required to support MMCSS.
+ bool avrt_init = avrt::Initialize();
+ DCHECK(avrt_init) << "Failed to load the avrt.dll";
+
+ // All events are auto-reset events and non-signaled initially.
+
+ // Create the event which the audio engine will signal each time a buffer
+ // has been recorded.
+ capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
+
+  // Create the event which will be set in Stop() when streaming shall stop.
+ stop_streaming_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
+}
+
+WASAPIUnifiedStream::~WASAPIUnifiedStream() {
+ VLOG(1) << "WASAPIUnifiedStream::~WASAPIUnifiedStream()";
+#ifndef NDEBUG
+ base::FilePath data_file_name;
+ PathService::Get(base::DIR_EXE, &data_file_name);
+ data_file_name = data_file_name.AppendASCII(kUnifiedAudioDebugFileName);
+ data_file_ = file_util::OpenFile(data_file_name, "wt");
+ DVLOG(1) << ">> Output file " << data_file_name.value() << " is created.";
+
+ size_t n = 0;
+ size_t elements_to_write = *std::min_element(
+ num_elements_.get(), num_elements_.get() + kMaxNumSampleTypes);
+ while (n < elements_to_write) {
+ fprintf(data_file_, "%I64d %d %d %10.9f\n",
+ input_time_stamps_[n],
+ num_frames_in_fifo_[n],
+ resampler_margin_[n],
+ fifo_rate_comps_[n]);
+ ++n;
+ }
+ file_util::CloseFile(data_file_);
+
+ base::FilePath param_file_name;
+ PathService::Get(base::DIR_EXE, &param_file_name);
+ param_file_name = param_file_name.AppendASCII(kUnifiedAudioParamsFileName);
+ param_file_ = file_util::OpenFile(param_file_name, "wt");
+ DVLOG(1) << ">> Output file " << param_file_name.value() << " is created.";
+ fprintf(param_file_, "%d %d\n", input_params_[0], input_params_[1]);
+ fprintf(param_file_, "%d %d\n", output_params_[0], output_params_[1]);
+ file_util::CloseFile(param_file_);
+#endif
+}
+
// Opens the unified stream. Queries the hardware's preferred parameters for
// both the render and the capture side, validates that the client-supplied
// parameters are compatible, builds the WAVEFORMATPCMEX structures and audio
// buses, optionally sets up varispeed (FIFO + resampler), and initializes all
// WASAPI COM interfaces. Returns true on success. Must be called on the
// thread that created the stream. Idempotent: returns true if already open.
bool WASAPIUnifiedStream::Open() {
  TRACE_EVENT0("audio", "WASAPIUnifiedStream::Open");
  DVLOG(1) << "WASAPIUnifiedStream::Open()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  if (opened_)
    return true;

  AudioParameters hw_output_params;
  HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
      eRender, eConsole, &hw_output_params);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get preferred output audio parameters.";
    return false;
  }

  AudioParameters hw_input_params;
  if (input_device_id_ == AudioManagerBase::kDefaultDeviceId) {
    // Query native parameters for the default capture device.
    hr = CoreAudioUtil::GetPreferredAudioParameters(
        eCapture, eConsole, &hw_input_params);
  } else {
    // Query native parameters for the capture device given by
    // |input_device_id_|.
    hr = CoreAudioUtil::GetPreferredAudioParameters(
        input_device_id_, &hw_input_params);
  }
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get preferred input audio parameters.";
    return false;
  }

  // It is currently only possible to open up the output audio device using
  // the native number of channels.
  if (output_channels_ != hw_output_params.channels()) {
    LOG(ERROR) << "Audio device does not support requested output channels.";
    return false;
  }

  // It is currently only possible to open up the input audio device using
  // the native number of channels. If the client asks for a higher channel
  // count, we will do channel upmixing in this class. The most typical
  // example is that the client provides stereo but the hardware can only be
  // opened in mono mode. We will do mono to stereo conversion in this case.
  if (input_channels_ < hw_input_params.channels()) {
    LOG(ERROR) << "Audio device does not support requested input channels.";
    return false;
  } else if (input_channels_ > hw_input_params.channels()) {
    ChannelLayout input_layout =
        GuessChannelLayout(hw_input_params.channels());
    ChannelLayout output_layout = GuessChannelLayout(input_channels_);
    channel_mixer_.reset(new ChannelMixer(input_layout, output_layout));
    DVLOG(1) << "Remixing input channel layout from " << input_layout
             << " to " << output_layout << "; from "
             << hw_input_params.channels() << " channels to "
             << input_channels_;
  }

  // The output side must run at the hardware's native sample rate; resampling
  // (if any) only happens on the input side.
  if (hw_output_params.sample_rate() != params_.sample_rate()) {
    LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
               << " must match the hardware sample-rate: "
               << hw_output_params.sample_rate();
    return false;
  }

  if (hw_output_params.frames_per_buffer() != params_.frames_per_buffer()) {
    LOG(ERROR) << "Requested buffer size: " << params_.frames_per_buffer()
               << " must match the hardware buffer size: "
               << hw_output_params.frames_per_buffer();
    return false;
  }

  // Set up WAVEFORMATPCMEX structures for input and output given the specified
  // audio parameters.
  SetIOFormats(hw_input_params, params_);

  // Create the input and output busses.
  input_bus_ = AudioBus::Create(
      hw_input_params.channels(), input_buffer_size_frames_);
  output_bus_ = AudioBus::Create(params_);

  // One extra bus is needed for the input channel mixing case.
  if (channel_mixer_) {
    DCHECK_LT(hw_input_params.channels(), input_channels_);
    // The size of the |channel_bus_| must be the same as the size of the
    // output bus to ensure that the channel manager can deal with both
    // resampled and non-resampled data as input.
    channel_bus_ = AudioBus::Create(
        input_channels_, params_.frames_per_buffer());
  }

  // Check if FIFO and resampling is required to match the input rate to the
  // output rate. If so, a special thread loop, optimized for this case, will
  // be used. This mode is also called varispeed mode.
  // Note that we can also use this mode when input and output rates are the
  // same but native buffer sizes differ (can happen if two different audio
  // devices are used). For this case, the resampler uses a target ratio of
  // 1.0 but SetRatio is called to compensate for clock-drift. The FIFO is
  // required to compensate for the difference in buffer sizes.
  // TODO(henrika): we could perhaps improve the performance for the second
  // case here by only using the FIFO and avoid resampling. Not sure how much
  // that would give and we risk not compensation for clock drift.
  if (hw_input_params.sample_rate() != params_.sample_rate() ||
      hw_input_params.frames_per_buffer() != params_.frames_per_buffer()) {
    DoVarispeedInitialization(hw_input_params, params_);
  }

  // Render side (event driven only in varispeed mode):

  ScopedComPtr<IAudioClient> audio_output_client =
      CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
  if (!audio_output_client)
    return false;

  if (!CoreAudioUtil::IsFormatSupported(audio_output_client,
                                        share_mode_,
                                        &output_format_)) {
    return false;
  }

  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    // The |render_event_| will be NULL unless varispeed mode is utilized.
    hr = CoreAudioUtil::SharedModeInitialize(
        audio_output_client, &output_format_, render_event_.Get(),
        &endpoint_render_buffer_size_frames_);
  } else {
    // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
  }
  // NOTE(review): in the exclusive-mode branch |hr| still holds the result of
  // the earlier GetPreferredAudioParameters() call, so this check and the
  // final SUCCEEDED(hr) pass vacuously — confirm this is intended.
  if (FAILED(hr))
    return false;

  ScopedComPtr<IAudioRenderClient> audio_render_client =
      CoreAudioUtil::CreateRenderClient(audio_output_client);
  if (!audio_render_client)
    return false;

  // Capture side (always event driven but format depends on varispeed or not):

  ScopedComPtr<IAudioClient> audio_input_client;
  if (input_device_id_ == AudioManagerBase::kDefaultDeviceId) {
    audio_input_client = CoreAudioUtil::CreateDefaultClient(eCapture, eConsole);
  } else {
    ScopedComPtr<IMMDevice> audio_input_device(
        CoreAudioUtil::CreateDevice(input_device_id_));
    audio_input_client = CoreAudioUtil::CreateClient(audio_input_device);
  }
  if (!audio_input_client)
    return false;

  if (!CoreAudioUtil::IsFormatSupported(audio_input_client,
                                        share_mode_,
                                        &input_format_)) {
    return false;
  }

  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    // Include valid event handle for event-driven initialization.
    // The input side is always event driven independent of if varispeed is
    // used or not.
    hr = CoreAudioUtil::SharedModeInitialize(
        audio_input_client, &input_format_, capture_event_.Get(),
        &endpoint_capture_buffer_size_frames_);
  } else {
    // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
  }
  if (FAILED(hr))
    return false;

  ScopedComPtr<IAudioCaptureClient> audio_capture_client =
      CoreAudioUtil::CreateCaptureClient(audio_input_client);
  if (!audio_capture_client)
    return false;

  // Varispeed mode requires additional preparations.
  if (VarispeedMode())
    ResetVarispeed();

  // Store all valid COM interfaces.
  audio_output_client_ = audio_output_client;
  audio_render_client_ = audio_render_client;
  audio_input_client_ = audio_input_client;
  audio_capture_client_ = audio_capture_client;

  opened_ = true;
  return SUCCEEDED(hr);
}
+
// Starts audio streaming. Spawns the dedicated I/O thread that waits on the
// capture (and, in varispeed mode, render) events, then starts the input and
// output audio clients. |callback| supplies/receives audio via OnMoreIOData().
// Must be called on the creating thread after a successful Open(). Calling
// Start() twice with the same callback while already started is a no-op.
void WASAPIUnifiedStream::Start(AudioSourceCallback* callback) {
  TRACE_EVENT0("audio", "WASAPIUnifiedStream::Start");
  DVLOG(1) << "WASAPIUnifiedStream::Start()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
  CHECK(callback);
  CHECK(opened_);

  // Already started: only verify that the same callback is used.
  if (audio_io_thread_) {
    CHECK_EQ(callback, source_);
    return;
  }

  source_ = callback;

  // Reset all varispeed state so a restarted stream begins from a clean
  // FIFO/resampler and fresh delay estimates.
  if (VarispeedMode()) {
    ResetVarispeed();
    fifo_rate_compensation_ = 1.0;
    average_delta_ = 0.0;
    input_callback_received_ = false;
    update_output_delay_ = false;
  }

  // Create and start the thread that will listen for capture events.
  // We will also listen on render events on the same thread if varispeed
  // mode is utilized.
  audio_io_thread_.reset(
      new base::DelegateSimpleThread(this, "wasapi_io_thread"));
  audio_io_thread_->Start();
  if (!audio_io_thread_->HasBeenStarted()) {
    DLOG(ERROR) << "Failed to start WASAPI IO thread.";
    return;
  }

  // Start input streaming data between the endpoint buffer and the audio
  // engine.
  HRESULT hr = audio_input_client_->Start();
  if (FAILED(hr)) {
    StopAndJoinThread(hr);
    return;
  }

  // Ensure that the endpoint buffer is prepared with silence.
  // NOTE(review): on failure this returns with the input client already
  // started and the I/O thread running — confirm that leaving the stream in
  // this half-started state is intended.
  if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
    if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
            audio_output_client_, audio_render_client_)) {
      DLOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
      return;
    }
  }
  // The pre-filled silence counts as already-written frames for the delay
  // bookkeeping in ProcessOutputAudio().
  num_written_frames_ = endpoint_render_buffer_size_frames_;

  // Start output streaming data between the endpoint buffer and the audio
  // engine.
  hr = audio_output_client_->Start();
  if (FAILED(hr)) {
    StopAndJoinThread(hr);
    return;
  }
}
+
+void WASAPIUnifiedStream::Stop() {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::Stop");
+ DVLOG(1) << "WASAPIUnifiedStream::Stop()";
+ DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
+ if (!audio_io_thread_)
+ return;
+
+ // Stop input audio streaming.
+ HRESULT hr = audio_input_client_->Stop();
+ if (FAILED(hr)) {
+ DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ << "Failed to stop input streaming: " << std::hex << hr;
+ }
+
+ // Stop output audio streaming.
+ hr = audio_output_client_->Stop();
+ if (FAILED(hr)) {
+ DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ << "Failed to stop output streaming: " << std::hex << hr;
+ }
+
+ // Wait until the thread completes and perform cleanup.
+ SetEvent(stop_streaming_event_.Get());
+ audio_io_thread_->Join();
+ audio_io_thread_.reset();
+
+ // Ensure that we don't quit the main thread loop immediately next
+ // time Start() is called.
+ ResetEvent(stop_streaming_event_.Get());
+
+ // Clear source callback, it'll be set again on the next Start() call.
+ source_ = NULL;
+
+ // Flush all pending data and reset the audio clock stream position to 0.
+ hr = audio_output_client_->Reset();
+ if (FAILED(hr)) {
+ DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ << "Failed to reset output streaming: " << std::hex << hr;
+ }
+
+ audio_input_client_->Reset();
+ if (FAILED(hr)) {
+ DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
+ << "Failed to reset input streaming: " << std::hex << hr;
+ }
+
+ // Extra safety check to ensure that the buffers are cleared.
+ // If the buffers are not cleared correctly, the next call to Start()
+ // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
+ // TODO(henrika): this check is is only needed for shared-mode streams.
+ UINT32 num_queued_frames = 0;
+ audio_output_client_->GetCurrentPadding(&num_queued_frames);
+ DCHECK_EQ(0u, num_queued_frames);
+}
+
// Closes the stream and hands it back to the audio manager, which deletes
// this object — do not touch |this| after the ReleaseOutputStream() call.
// Must be called on the creating thread.
void WASAPIUnifiedStream::Close() {
  TRACE_EVENT0("audio", "WASAPIUnifiedStream::Close");
  DVLOG(1) << "WASAPIUnifiedStream::Close()";
  DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);

  // It is valid to call Close() before calling open or Start().
  // It is also valid to call Close() after Start() has been called.
  Stop();

  // Inform the audio manager that we have been closed. This will cause our
  // destruction.
  manager_->ReleaseOutputStream(this);
}
+
+void WASAPIUnifiedStream::SetVolume(double volume) {
+ DVLOG(1) << "SetVolume(volume=" << volume << ")";
+ if (volume < 0 || volume > 1)
+ return;
+ volume_ = volume;
+}
+
+void WASAPIUnifiedStream::GetVolume(double* volume) {
+ DVLOG(1) << "GetVolume()";
+ *volume = static_cast<double>(volume_);
+}
+
+
+void WASAPIUnifiedStream::ProvideInput(int frame_delay, AudioBus* audio_bus) {
+ // TODO(henrika): utilize frame_delay?
+ // A non-zero framed delay means multiple callbacks were necessary to
+ // fulfill the requested number of frames.
+ if (frame_delay > 0)
+ DVLOG(3) << "frame_delay: " << frame_delay;
+
+#ifndef NDEBUG
+ resampler_margin_[num_elements_[RESAMPLER_MARGIN]] =
+ fifo_->frames() - audio_bus->frames();
+ num_elements_[RESAMPLER_MARGIN]++;
+#endif
+
+ if (fifo_->frames() < audio_bus->frames()) {
+ DVLOG(ERROR) << "Not enough data in the FIFO ("
+ << fifo_->frames() << " < " << audio_bus->frames() << ")";
+ audio_bus->Zero();
+ return;
+ }
+
+ fifo_->Consume(audio_bus, 0, audio_bus->frames());
+}
+
+void WASAPIUnifiedStream::SetIOFormats(const AudioParameters& input_params,
+ const AudioParameters& output_params) {
+ for (int n = 0; n < 2; ++n) {
+ const AudioParameters& params = (n == 0) ? input_params : output_params;
+ WAVEFORMATPCMEX* xformat = (n == 0) ? &input_format_ : &output_format_;
+ WAVEFORMATEX* format = &xformat->Format;
+
+ // Begin with the WAVEFORMATEX structure that specifies the basic format.
+ format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ format->nChannels = params.channels();
+ format->nSamplesPerSec = params.sample_rate();
+ format->wBitsPerSample = params.bits_per_sample();
+ format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
+ format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
+ format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+
+ // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
+ // Note that we always open up using the native channel layout.
+ (*xformat).Samples.wValidBitsPerSample = format->wBitsPerSample;
+ (*xformat).dwChannelMask = (n == 0) ?
+ GetChannelConfig(eCapture) : GetChannelConfig(eRender);
+ (*xformat).SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ }
+
+ input_buffer_size_frames_ = input_params.frames_per_buffer();
+ output_buffer_size_frames_ = output_params.frames_per_buffer();
+ VLOG(1) << "#audio frames per input buffer : " << input_buffer_size_frames_;
+ VLOG(1) << "#audio frames per output buffer: " << output_buffer_size_frames_;
+
+#ifndef NDEBUG
+ input_params_[0] = input_format_.Format.nSamplesPerSec;
+ input_params_[1] = input_buffer_size_frames_;
+ output_params_[0] = output_format_.Format.nSamplesPerSec;
+ output_params_[1] = output_buffer_size_frames_;
+#endif
+}
+
// Sets up the varispeed machinery used when the input and output sides run
// at different sample rates and/or native buffer sizes: an input-side FIFO,
// a rate-adjustable multi-channel resampler, the render event handle, and a
// temporary bus for resampled input. Called from Open() only when needed.
void WASAPIUnifiedStream::DoVarispeedInitialization(
    const AudioParameters& input_params, const AudioParameters& output_params) {
  DVLOG(1) << "WASAPIUnifiedStream::DoVarispeedInitialization()";

  // A FIFO is required in this mode for input to output buffering.
  // Note that it will add some latency.
  fifo_.reset(new AudioFifo(input_params.channels(), kFifoSize));
  VLOG(1) << "Using FIFO of size " << fifo_->max_frames()
          << " (#channels=" << input_params.channels() << ")";

  // Create the multi channel resampler using the initial sample rate ratio.
  // We will call MultiChannelResampler::SetRatio() during runtime to
  // allow arbitrary combinations of input and output devices running off
  // different clocks and using different drivers, with potentially
  // differing sample-rates. Note that the requested block size is given by
  // the native input buffer size |input_buffer_size_frames_|.
  io_sample_rate_ratio_ = input_params.sample_rate() /
      static_cast<double>(output_params.sample_rate());
  DVLOG(2) << "io_sample_rate_ratio: " << io_sample_rate_ratio_;
  resampler_.reset(new MultiChannelResampler(
      input_params.channels(), io_sample_rate_ratio_, input_buffer_size_frames_,
      base::Bind(&WASAPIUnifiedStream::ProvideInput, base::Unretained(this))));
  VLOG(1) << "Resampling from " << input_params.sample_rate() << " to "
          << output_params.sample_rate();

  // The optimal number of frames we'd like to keep in the FIFO at all times.
  // The actual size will vary but the goal is to ensure that the average size
  // is given by this value.
  target_fifo_frames_ = kTargetFifoSafetyFactor * input_buffer_size_frames_;
  VLOG(1) << "Target FIFO size: " << target_fifo_frames_;

  // Create the event which the audio engine will signal each time it
  // wants an audio buffer to render. Its existence is what makes
  // VarispeedMode() report true elsewhere in this file.
  render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));

  // Allocate memory for temporary audio bus used to store resampled input
  // audio.
  resampled_bus_ = AudioBus::Create(
      input_params.channels(), output_buffer_size_frames_);

  // Buffer initial silence corresponding to target I/O buffering.
  ResetVarispeed();
}
+
+void WASAPIUnifiedStream::ResetVarispeed() {
+ DCHECK(VarispeedMode());
+
+ // Buffer initial silence corresponding to target I/O buffering.
+ fifo_->Clear();
+ scoped_ptr<AudioBus> silence =
+ AudioBus::Create(input_format_.Format.nChannels,
+ target_fifo_frames_);
+ silence->Zero();
+ fifo_->Push(silence.get());
+ resampler_->Flush();
+}
+
// Entry point of the dedicated audio I/O thread (DelegateSimpleThread).
// Raises the thread to real-time priority (plus MMCSS "Pro Audio"), then
// loops on WaitForMultipleObjects() until |stop_streaming_event_| is set:
// capture events drive input processing (and output too, when not in
// varispeed mode), render events drive output processing in varispeed mode.
void WASAPIUnifiedStream::Run() {
  ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);

  // Increase the thread priority.
  audio_io_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);

  // Enable MMCSS to ensure that this thread receives prioritized access to
  // CPU resources.
  // TODO(henrika): investigate if it is possible to include these additional
  // settings in SetThreadPriority() as well.
  DWORD task_index = 0;
  HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
                                                      &task_index);
  bool mmcss_is_ok =
      (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
  if (!mmcss_is_ok) {
    // Failed to enable MMCSS on this thread. It is not fatal but can lead
    // to reduced QoS at high load.
    DWORD err = GetLastError();
    LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
  }

  // The IAudioClock interface enables us to monitor a stream's data
  // rate and the current position in the stream. Allocate it before we
  // start spinning.
  ScopedComPtr<IAudioClock> audio_output_clock;
  HRESULT hr = audio_output_client_->GetService(
      __uuidof(IAudioClock), audio_output_clock.ReceiveVoid());
  LOG_IF(WARNING, FAILED(hr)) << "Failed to create IAudioClock: "
                              << std::hex << hr;

  bool streaming = true;
  bool error = false;

  // Wait-array order defines priority: stop first, then capture, then
  // (varispeed only) render.
  HANDLE wait_array[3];
  size_t num_handles = 0;
  wait_array[num_handles++] = stop_streaming_event_;
  wait_array[num_handles++] = capture_event_;
  if (render_event_) {
    // One extra event handle is needed in varispeed mode.
    wait_array[num_handles++] = render_event_;
  }

  // Keep streaming audio until stop event is signaled.
  // Capture events are always used but render events are only active in
  // varispeed mode.
  while (streaming && !error) {
    // Wait for a close-down event, or a new capture event.
    DWORD wait_result = WaitForMultipleObjects(num_handles,
                                               wait_array,
                                               FALSE,
                                               INFINITE);
    switch (wait_result) {
      case WAIT_OBJECT_0 + 0:
        // |stop_streaming_event_| has been set.
        streaming = false;
        break;
      case WAIT_OBJECT_0 + 1:
        // |capture_event_| has been set
        if (VarispeedMode()) {
          ProcessInputAudio();
        } else {
          // Non-varispeed: the capture event drives both sides of the
          // stream, so render immediately after capturing.
          ProcessInputAudio();
          ProcessOutputAudio(audio_output_clock);
        }
        break;
      case WAIT_OBJECT_0 + 2:
        DCHECK(VarispeedMode());
        // |render_event_| has been set
        ProcessOutputAudio(audio_output_clock);
        break;
      default:
        // Wait failed or handle abandoned; leave the loop via |error|.
        error = true;
        break;
    }
  }

  if (streaming && error) {
    // Stop audio streaming since something has gone wrong in our main thread
    // loop. Note that, we are still in a "started" state, hence a Stop() call
    // is required to join the thread properly.
    audio_input_client_->Stop();
    audio_output_client_->Stop();
    PLOG(ERROR) << "WASAPI streaming failed.";
  }

  // Disable MMCSS.
  if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
    PLOG(WARNING) << "Failed to disable MMCSS";
  }
}
+
// Handles one capture event: drains the capture endpoint buffer into
// |input_bus_| (zero-filled on silence or empty packets), pushes the data
// into the FIFO in varispeed mode, and periodically (~1 Hz) refreshes the
// capture-delay estimate used by the render side. Runs on the I/O thread.
void WASAPIUnifiedStream::ProcessInputAudio() {
  TRACE_EVENT0("audio", "WASAPIUnifiedStream::ProcessInputAudio");

  BYTE* data_ptr = NULL;
  UINT32 num_captured_frames = 0;
  DWORD flags = 0;
  UINT64 device_position = 0;
  UINT64 capture_time_stamp = 0;

  const int bytes_per_sample = input_format_.Format.wBitsPerSample >> 3;

  base::TimeTicks now_tick = base::TimeTicks::HighResNow();

#ifndef NDEBUG
  if (VarispeedMode()) {
    input_time_stamps_[num_elements_[INPUT_TIME_STAMP]] =
        now_tick.ToInternalValue();
    num_elements_[INPUT_TIME_STAMP]++;
  }
#endif

  // Retrieve the amount of data in the capture endpoint buffer.
  // |endpoint_capture_time_stamp| is the value of the performance
  // counter at the time that the audio endpoint device recorded
  // the device position of the first audio frame in the data packet.
  HRESULT hr = audio_capture_client_->GetBuffer(&data_ptr,
                                                &num_captured_frames,
                                                &flags,
                                                &device_position,
                                                &capture_time_stamp);
  if (FAILED(hr)) {
    DLOG(ERROR) << "Failed to get data from the capture buffer";
    return;
  }

  if (hr == AUDCLNT_S_BUFFER_EMPTY) {
    // The return coded is a success code but a new packet is *not* available
    // and none of the output parameters in the GetBuffer() call contains valid
    // values. Best we can do is to deliver silence and avoid setting
    // |input_callback_received_| since this only seems to happen for the
    // initial event(s) on some devices.
    input_bus_->Zero();
  } else {
    // Valid data has been recorded and it is now OK to set the flag which
    // informs the render side that capturing has started.
    input_callback_received_ = true;
  }

  if (num_captured_frames != 0) {
    if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
      // Clear out the capture buffer since silence is reported.
      input_bus_->Zero();
    } else {
      // Store captured data in an audio bus after de-interleaving
      // the data to match the audio bus structure.
      input_bus_->FromInterleaved(
          data_ptr, num_captured_frames, bytes_per_sample);
    }
  }

  hr = audio_capture_client_->ReleaseBuffer(num_captured_frames);
  DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer";

  // Buffer input into FIFO if varispeed mode is used. The render event
  // will drive resampling of this data to match the output side.
  // Note: if the FIFO is full the captured packet is silently dropped.
  if (VarispeedMode()) {
    int available_frames = fifo_->max_frames() - fifo_->frames();
    if (input_bus_->frames() <= available_frames) {
      fifo_->Push(input_bus_.get());
    }
#ifndef NDEBUG
    num_frames_in_fifo_[num_elements_[NUM_FRAMES_IN_FIFO]] =
        fifo_->frames();
    num_elements_[NUM_FRAMES_IN_FIFO]++;
#endif
  }

  // Save resource by not asking for new delay estimates each time.
  // These estimates are fairly stable and it is perfectly safe to only
  // sample at a rate of ~1Hz.
  // TODO(henrika): we might have to increase the update rate in varispeed
  // mode since the delay variations are higher in this mode.
  if ((now_tick - last_delay_sample_time_).InMilliseconds() >
      kTimeDiffInMillisecondsBetweenDelayMeasurements &&
      input_callback_received_) {
    // Calculate the estimated capture delay, i.e., the latency between
    // the recording time and the time we when we are notified about
    // the recorded data. Note that the capture time stamp is given in
    // 100-nanosecond (0.1 microseconds) units.
    // NOTE(review): 0.1 * capture_time_stamp converts 100-ns units to
    // microseconds; the double result is truncated back to int64 by
    // FromInternalValue — presumably acceptable at µs resolution; confirm.
    base::TimeDelta diff =
        now_tick - base::TimeTicks::FromInternalValue(0.1 * capture_time_stamp);
    capture_delay_ms_ = diff.InMillisecondsF();

    last_delay_sample_time_ = now_tick;
    update_output_delay_ = true;
  }
}
+
+void WASAPIUnifiedStream::ProcessOutputAudio(IAudioClock* audio_output_clock) {
+ TRACE_EVENT0("audio", "WASAPIUnifiedStream::ProcessOutputAudio");
+
+ if (!input_callback_received_) {
+ if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
+ if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
+ audio_output_client_, audio_render_client_))
+ DLOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
+ }
+ return;
+ }
+
+ // Rate adjusted resampling is required in varispeed mode. It means that
+ // recorded audio samples will be read from the FIFO, resampled to match the
+ // output sample-rate and then stored in |resampled_bus_|.
+ if (VarispeedMode()) {
+ // Calculate a varispeed rate scalar factor to compensate for drift between
+ // input and output. We use the actual number of frames still in the FIFO
+ // compared with the ideal value of |target_fifo_frames_|.
+ int delta = fifo_->frames() - target_fifo_frames_;
+
+ // Average |delta| because it can jitter back/forth quite frequently
+ // by +/- the hardware buffer-size *if* the input and output callbacks are
+ // happening at almost exactly the same time. Also, if the input and output
+ // sample-rates are different then |delta| will jitter quite a bit due to
+ // the rate conversion happening in the varispeed, plus the jittering of
+ // the callbacks. The average value is what's important here.
+ // We use an exponential smoothing filter to reduce the variations.
+ average_delta_ += kAlpha * (delta - average_delta_);
+
+ // Compute a rate compensation which always attracts us back to the
+ // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
+ double correction_time_frames =
+ kCorrectionTimeSeconds * output_format_.Format.nSamplesPerSec;
+ fifo_rate_compensation_ =
+ (correction_time_frames + average_delta_) / correction_time_frames;
+
+#ifndef NDEBUG
+ fifo_rate_comps_[num_elements_[RATE_COMPENSATION]] =
+ fifo_rate_compensation_;
+ num_elements_[RATE_COMPENSATION]++;
+#endif
+
+ // Adjust for FIFO drift.
+ const double new_ratio = io_sample_rate_ratio_ * fifo_rate_compensation_;
+ resampler_->SetRatio(new_ratio);
+ // Get resampled input audio from FIFO where the size is given by the
+ // output side.
+ resampler_->Resample(resampled_bus_->frames(), resampled_bus_.get());
+ }
+
+ // Derive a new total delay estimate if the capture side has set the
+ // |update_output_delay_| flag.
+ if (update_output_delay_) {
+ // Calculate the estimated render delay, i.e., the time difference
+ // between the time when data is added to the endpoint buffer and
+ // when the data is played out on the actual speaker.
+ const double stream_pos = CurrentStreamPosInMilliseconds(
+ num_written_frames_ + output_buffer_size_frames_,
+ output_format_.Format.nSamplesPerSec);
+ const double speaker_pos =
+ SpeakerStreamPosInMilliseconds(audio_output_clock);
+ const double render_delay_ms = stream_pos - speaker_pos;
+ const double fifo_delay_ms = VarispeedMode() ?
+ FrameCountToMilliseconds(target_fifo_frames_, input_format_) : 0;
+
+ // Derive the total delay, i.e., the sum of the input and output
+ // delays. Also convert the value into byte units. An extra FIFO delay
+ // is added for varispeed usage cases.
+ total_delay_ms_ = VarispeedMode() ?
+ capture_delay_ms_ + render_delay_ms + fifo_delay_ms :
+ capture_delay_ms_ + render_delay_ms;
+ DVLOG(2) << "total_delay_ms : " << total_delay_ms_;
+ DVLOG(3) << " capture_delay_ms: " << capture_delay_ms_;
+ DVLOG(3) << " render_delay_ms : " << render_delay_ms;
+ DVLOG(3) << " fifo_delay_ms : " << fifo_delay_ms;
+ total_delay_bytes_ = MillisecondsToBytes(total_delay_ms_, output_format_);
+
+ // Wait for new signal from the capture side.
+ update_output_delay_ = false;
+ }
+
+ // Select source depending on if varispeed is utilized or not.
+ // Also, the source might be the output of a channel mixer if channel mixing
+ // is required to match the native input channels to the number of input
+ // channels used by the client (given by |input_channels_| in this case).
+ AudioBus* input_bus = VarispeedMode() ?
+ resampled_bus_.get() : input_bus_.get();
+ if (channel_mixer_) {
+ DCHECK_EQ(input_bus->frames(), channel_bus_->frames());
+ // Most common case is 1->2 channel upmixing.
+ channel_mixer_->Transform(input_bus, channel_bus_.get());
+ // Use the output from the channel mixer as new input bus.
+ input_bus = channel_bus_.get();
+ }
+
+ // Prepare for rendering by calling OnMoreIOData().
+ int frames_filled = source_->OnMoreIOData(
+ input_bus,
+ output_bus_.get(),
+ AudioBuffersState(0, total_delay_bytes_));
+ DCHECK_EQ(frames_filled, output_bus_->frames());
+
+ // Keep track of number of rendered frames since we need it for
+ // our delay calculations.
+ num_written_frames_ += frames_filled;
+
+ // Derive the the amount of available space in the endpoint buffer.
+ // Avoid render attempt if there is no room for a captured packet.
+ UINT32 num_queued_frames = 0;
+ audio_output_client_->GetCurrentPadding(&num_queued_frames);
+ if (endpoint_render_buffer_size_frames_ - num_queued_frames <
+ output_buffer_size_frames_)
+ return;
+
+ // Grab all available space in the rendering endpoint buffer
+ // into which the client can write a data packet.
+ uint8* audio_data = NULL;
+ HRESULT hr = audio_render_client_->GetBuffer(output_buffer_size_frames_,
+ &audio_data);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to access render buffer";
+ return;
+ }
+
+ const int bytes_per_sample = output_format_.Format.wBitsPerSample >> 3;
+
+ // Convert the audio bus content to interleaved integer data using
+ // |audio_data| as destination.
+ output_bus_->Scale(volume_);
+ output_bus_->ToInterleaved(
+ output_buffer_size_frames_, bytes_per_sample, audio_data);
+
+ // Release the buffer space acquired in the GetBuffer() call.
+ audio_render_client_->ReleaseBuffer(output_buffer_size_frames_, 0);
+ DLOG_IF(ERROR, FAILED(hr)) << "Failed to release render buffer";
+
+ return;
+}
+
+void WASAPIUnifiedStream::HandleError(HRESULT err) {
+ CHECK((started() && GetCurrentThreadId() == audio_io_thread_->tid()) ||
+ (!started() && GetCurrentThreadId() == creating_thread_id_));
+ NOTREACHED() << "Error code: " << std::hex << err;
+ if (source_)
+ source_->OnError(this);
+}
+
// Signals the I/O thread to stop, joins it, and then reports |err| to the
// client via HandleError(). Used by Start() when a WASAPI call fails after
// the I/O thread has already been launched. Creating thread only.
void WASAPIUnifiedStream::StopAndJoinThread(HRESULT err) {
  CHECK(GetCurrentThreadId() == creating_thread_id_);
  DCHECK(audio_io_thread_.get());
  // Wake the Run() loop so it can exit before we join.
  SetEvent(stop_streaming_event_.Get());
  audio_io_thread_->Join();
  audio_io_thread_.reset();
  HandleError(err);
}
+
+} // namespace media
diff --git a/chromium/media/audio/win/audio_unified_win.h b/chromium/media/audio/win/audio_unified_win.h
new file mode 100644
index 00000000000..76c53297b51
--- /dev/null
+++ b/chromium/media/audio/win/audio_unified_win.h
@@ -0,0 +1,352 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
+#define MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
+
+#include <Audioclient.h>
+#include <MMDeviceAPI.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_comptr.h"
+#include "base/win/scoped_handle.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_fifo.h"
+#include "media/base/channel_mixer.h"
+#include "media/base/media_export.h"
+#include "media/base/multi_channel_resampler.h"
+
+namespace media {
+
+class AudioManagerWin;
+
+// Implementation of AudioOutputStream for Windows using the Core Audio API
+// where both capturing and rendering takes place on the same thread to enable
+// audio I/O. This class allows arbitrary combinations of input and output
+// devices running off different clocks and using different drivers, with
+// potentially differing sample-rates.
+//
+// It is required to first acquire the native sample rate of the selected
+// output device and then use the same rate when creating this object.
+// The inner operation depends on the input sample rate which is determined
+// during construction. Three different main modes are supported:
+//
+// 1) input rate == output rate => input side drives output side directly.
+// 2) input rate != output rate => both sides are driven independently by
+// events and a FIFO plus a resampling unit is used to compensate for
+// differences in sample rates between the two sides.
+// 3) input rate == output rate but native buffer sizes are not identical =>
+// same inner functionality as in (2) to compensate for the differences
+// in buffer sizes and also compensate for any potential clock drift
+// between the two devices.
+//
+// Mode detection is done at construction and using mode (1) will lead to
+// best performance (lower delay and no "varispeed distortion"), i.e., it is
+// recommended to use same sample rates for input and output. Mode (2) uses a
+// resampler which supports rate adjustments to fine tune for things like
+// clock drift and differences in sample rates between different devices.
+// Mode (2) - which uses a FIFO and an adjustable multi-channel resampler -
+// is also called the varispeed mode and it is used for case (3) as well to
+// compensate for the difference in buffer sizes mainly.
+// Mode (3) can happen if two different audio devices are used.
+// As an example: some devices need a buffer size of 441 @ 44.1kHz and others
+// 448 @ 44.1kHz. This is a rare case and will only happen for sample rates
+// which are even multiples of 11025 Hz (11025, 22050, 44100, 88200 etc.).
+//
+// Implementation notes:
+//
+// - Open() can fail if the input and output parameters do not fulfill
+// certain conditions. See source for Open() for more details.
+// - Channel mixing will be performed if the clients asks for a larger
+// number of channels than the native audio layer provides.
+// Example: client wants stereo but audio layer provides mono. In this case
+// upmixing from mono to stereo (1->2) will be done.
+//
+// TODO(henrika):
+//
+// - Add support for exclusive mode.
+// - Add support for KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, i.e., 32-bit float
+// as internal sample-value representation.
+// - Perform fine-tuning for non-matching sample rates to reduce latency.
+//
+class MEDIA_EXPORT WASAPIUnifiedStream
+    : public AudioOutputStream,
+      public base::DelegateSimpleThread::Delegate {
+ public:
+  // The ctor takes all the usual parameters, plus |manager| which is the
+  // audio manager that is creating this object.
+  WASAPIUnifiedStream(AudioManagerWin* manager,
+                      const AudioParameters& params,
+                      const std::string& input_device_id);
+
+  // The dtor is typically called by the AudioManager only and it is usually
+  // triggered by calling AudioOutputStream::Close().
+  virtual ~WASAPIUnifiedStream();
+
+  // Implementation of AudioOutputStream.
+  virtual bool Open() OVERRIDE;
+  virtual void Start(AudioSourceCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual void GetVolume(double* volume) OVERRIDE;
+
+  // True while the audio IO thread exists, i.e., between Start() and Stop().
+  bool started() const {
+    return audio_io_thread_.get() != NULL;
+  }
+
+  // Returns true if input sample rate differs from the output sample rate.
+  // A FIFO and an adjustable multi-channel resampler are utilized in this
+  // mode.
+  bool VarispeedMode() const { return (fifo_ && resampler_); }
+
+ private:
+  enum {
+    // Time in milliseconds between two successive delay measurements.
+    // We save resources by not updating the delay estimates for each capture
+    // event (typically 100Hz rate).
+    kTimeDiffInMillisecondsBetweenDelayMeasurements = 1000,
+
+    // Max possible FIFO size.
+    kFifoSize = 16384,
+
+    // This value was determined empirically for minimum latency while still
+    // guarding against FIFO under-runs. The actual target size will be equal
+    // to kTargetFifoSafetyFactor * (native input buffer size).
+    // TODO(henrika): tune this value for lowest possible latency for all
+    // possible sample rate combinations.
+    kTargetFifoSafetyFactor = 2
+  };
+
+  // Additional initialization required when input and output sample rate
+  // differs. Allocates resources for |fifo_|, |resampler_|, |render_event_|,
+  // and the |capture_bus_| and configures the |input_format_| structure
+  // given the provided input and output audio parameters.
+  void DoVarispeedInitialization(const AudioParameters& input_params,
+                                 const AudioParameters& output_params);
+
+  // Clears varispeed related components such as the FIFO and the resampler.
+  void ResetVarispeed();
+
+  // Builds WAVEFORMATEX structures for input and output based on input and
+  // output audio parameters.
+  void SetIOFormats(const AudioParameters& input_params,
+                    const AudioParameters& output_params);
+
+  // DelegateSimpleThread::Delegate implementation.
+  virtual void Run() OVERRIDE;
+
+  // MultiChannelResampler::MultiChannelAudioSourceProvider implementation.
+  // Callback for providing more data into the resampler.
+  // Only used in varispeed mode, i.e., when input rate != output rate.
+  virtual void ProvideInput(int frame_delay, AudioBus* audio_bus);
+
+  // Issues the OnError() callback to the |source_|.
+  void HandleError(HRESULT err);
+
+  // Stops and joins the audio thread in case of an error.
+  void StopAndJoinThread(HRESULT err);
+
+  // Converts unique endpoint ID to user-friendly device name.
+  std::string GetDeviceName(LPCWSTR device_id) const;
+
+  // Called on the audio IO thread for each capture event.
+  // Buffers captured audio into a FIFO if varispeed is used or into an audio
+  // bus if input and output sample rates are identical.
+  void ProcessInputAudio();
+
+  // Called on the audio IO thread for each render event when varispeed is
+  // active or for each capture event when varispeed is not used.
+  // In varispeed mode, it triggers a resampling callback, which reads from the
+  // FIFO, and calls AudioSourceCallback::OnMoreIOData using the resampled
+  // input signal and at the same time asks for data to play out.
+  // If input and output rates are the same - instead of reading from the FIFO
+  // and do resampling - we read directly from the audio bus used to store
+  // captured data in ProcessInputAudio.
+  void ProcessOutputAudio(IAudioClock* audio_output_clock);
+
+  // Contains the thread ID of the creating thread.
+  base::PlatformThreadId creating_thread_id_;
+
+  // Our creator, the audio manager needs to be notified when we close.
+  AudioManagerWin* manager_;
+
+  // Contains the audio parameter structure provided at construction.
+  AudioParameters params_;
+  // For convenience, same as in params_.
+  int input_channels_;
+  int output_channels_;
+
+  // Unique ID of the input device to be opened.
+  const std::string input_device_id_;
+
+  // The sharing mode for the streams.
+  // Valid values are AUDCLNT_SHAREMODE_SHARED and AUDCLNT_SHAREMODE_EXCLUSIVE
+  // where AUDCLNT_SHAREMODE_SHARED is the default.
+  AUDCLNT_SHAREMODE share_mode_;
+
+  // Rendering and capturing is driven by this thread (no message loop).
+  // All OnMoreIOData() callbacks will be called from this thread.
+  scoped_ptr<base::DelegateSimpleThread> audio_io_thread_;
+
+  // Contains the desired audio output format which is set up at construction.
+  // It is required to first acquire the native sample rate of the selected
+  // output device and then use the same rate when creating this object.
+  WAVEFORMATPCMEX output_format_;
+
+  // Contains the native audio input format which is set up at construction
+  // if varispeed mode is utilized.
+  WAVEFORMATPCMEX input_format_;
+
+  // True when successfully opened.
+  bool opened_;
+
+  // Volume level from 0 to 1 used for output scaling.
+  double volume_;
+
+  // Size in audio frames of each audio packet where an audio packet
+  // is defined as the block of data which the destination is expected to
+  // receive in each OnMoreIOData() callback.
+  size_t output_buffer_size_frames_;
+
+  // Size in audio frames of each audio packet where an audio packet
+  // is defined as the block of data which the source is expected to
+  // deliver in each OnMoreIOData() callback.
+  size_t input_buffer_size_frames_;
+
+  // Length of the audio endpoint buffer.
+  uint32 endpoint_render_buffer_size_frames_;
+  uint32 endpoint_capture_buffer_size_frames_;
+
+  // Counts the number of audio frames written to the endpoint buffer.
+  uint64 num_written_frames_;
+
+  // Time stamp for last delay measurement.
+  base::TimeTicks last_delay_sample_time_;
+
+  // Contains the total (sum of render and capture) delay in milliseconds.
+  double total_delay_ms_;
+
+  // Contains the total (sum of render and capture and possibly FIFO) delay
+  // in bytes. The update frequency is set by a constant called
+  // |kTimeDiffInMillisecondsBetweenDelayMeasurements|.
+  int total_delay_bytes_;
+
+  // Pointer to the client that will deliver audio samples to be played out.
+  AudioSourceCallback* source_;
+
+  // IMMDevice interfaces which represents audio endpoint devices.
+  base::win::ScopedComPtr<IMMDevice> endpoint_render_device_;
+  base::win::ScopedComPtr<IMMDevice> endpoint_capture_device_;
+
+  // IAudioClient interfaces which enables a client to create and initialize
+  // an audio stream between an audio application and the audio engine.
+  base::win::ScopedComPtr<IAudioClient> audio_output_client_;
+  base::win::ScopedComPtr<IAudioClient> audio_input_client_;
+
+  // IAudioRenderClient interfaces enables a client to write output
+  // data to a rendering endpoint buffer.
+  base::win::ScopedComPtr<IAudioRenderClient> audio_render_client_;
+
+  // IAudioCaptureClient interfaces enables a client to read input
+  // data from a capturing endpoint buffer.
+  base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_;
+
+  // The audio engine will signal this event each time a buffer has been
+  // recorded.
+  base::win::ScopedHandle capture_event_;
+
+  // The audio engine will signal this event each time it needs a new
+  // audio buffer to play out.
+  // Only utilized in varispeed mode.
+  base::win::ScopedHandle render_event_;
+
+  // This event will be signaled when streaming shall stop.
+  base::win::ScopedHandle stop_streaming_event_;
+
+  // Container for retrieving data from AudioSourceCallback::OnMoreIOData().
+  scoped_ptr<AudioBus> output_bus_;
+
+  // Container for sending data to AudioSourceCallback::OnMoreIOData().
+  scoped_ptr<AudioBus> input_bus_;
+
+  // Container for storing output from the channel mixer.
+  scoped_ptr<AudioBus> channel_bus_;
+
+  // All members below are only allocated, or used, in varispeed mode:
+
+  // Temporary storage of resampled input audio data.
+  scoped_ptr<AudioBus> resampled_bus_;
+
+  // Set to true first time a capture event has been received in varispeed
+  // mode.
+  bool input_callback_received_;
+
+  // MultiChannelResampler is a multi channel wrapper for SincResampler;
+  // allowing high quality sample rate conversion of multiple channels at once.
+  scoped_ptr<MultiChannelResampler> resampler_;
+
+  // Resampler I/O ratio.
+  double io_sample_rate_ratio_;
+
+  // Used for input to output buffering.
+  scoped_ptr<AudioFifo> fifo_;
+
+  // The channel mixer is only created and utilized if number of input channels
+  // is larger than the native number of input channels (e.g client wants
+  // stereo but the audio device only supports mono).
+  scoped_ptr<ChannelMixer> channel_mixer_;
+
+  // The optimal number of frames we'd like to keep in the FIFO at all times.
+  int target_fifo_frames_;
+
+  // A running average of the measured delta between actual number of frames
+  // in the FIFO versus |target_fifo_frames_|.
+  double average_delta_;
+
+  // A varispeed rate scalar which is calculated based on FIFO drift.
+  double fifo_rate_compensation_;
+
+  // Set to true when input side signals output side that a new delay
+  // estimate is needed.
+  bool update_output_delay_;
+
+  // Capture side stores its delay estimate so the sum can be derived in
+  // the render side.
+  double capture_delay_ms_;
+
+  // TODO(henrika): possibly remove these members once the performance is
+  // properly tuned. Only used for off-line debugging.
+#ifndef NDEBUG
+  enum LogElementNames {
+    INPUT_TIME_STAMP,
+    NUM_FRAMES_IN_FIFO,
+    RESAMPLER_MARGIN,
+    RATE_COMPENSATION
+  };
+
+  scoped_ptr<int64[]> input_time_stamps_;
+  scoped_ptr<int[]> num_frames_in_fifo_;
+  scoped_ptr<int[]> resampler_margin_;
+  scoped_ptr<double[]> fifo_rate_comps_;
+  scoped_ptr<int[]> num_elements_;
+  scoped_ptr<int[]> input_params_;
+  scoped_ptr<int[]> output_params_;
+
+  FILE* data_file_;
+  FILE* param_file_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(WASAPIUnifiedStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
diff --git a/chromium/media/audio/win/audio_unified_win_unittest.cc b/chromium/media/audio/win/audio_unified_win_unittest.cc
new file mode 100644
index 00000000000..cfd17aea14f
--- /dev/null
+++ b/chromium/media/audio/win/audio_unified_win_unittest.cc
@@ -0,0 +1,366 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/basictypes.h"
+#include "base/command_line.h"
+#include "base/file_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
+#include "base/test/test_timeouts.h"
+#include "base/time/time.h"
+#include "base/win/scoped_com_initializer.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_manager.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/win/audio_unified_win.h"
+#include "media/audio/win/core_audio_util_win.h"
+#include "media/base/channel_mixer.h"
+#include "media/base/media_switches.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Between;
+using ::testing::DoAll;
+using ::testing::NotNull;
+using ::testing::Return;
+using base::win::ScopedCOMInitializer;
+
+namespace media {
+
+static const size_t kMaxDeltaSamples = 1000;
+static const char kDeltaTimeMsFileName[] = "unified_delta_times_ms.txt";
+
+// Verify that the delay estimate in the OnMoreIOData() callback is larger
+// than an expected minimum value.
+MATCHER_P(DelayGreaterThan, value, "") {
+  return (arg.hardware_delay_bytes > value.hardware_delay_bytes);
+}
+
+// Used to terminate a loop from a different thread than the loop belongs to.
+// |loop| should be a MessageLoopProxy.
+ACTION_P(QuitLoop, loop) {
+  // Posting the quit closure (rather than quitting directly) allows this
+  // action to run safely from the audio IO thread.
+  loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+}
+
+// Mocked AudioSourceCallback used to verify callback sequencing and delay
+// estimates without producing real audio data.
+class MockUnifiedSourceCallback
+    : public AudioOutputStream::AudioSourceCallback {
+ public:
+  MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
+                               AudioBuffersState buffers_state));
+  MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
+                                 AudioBus* dest,
+                                 AudioBuffersState buffers_state));
+  MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+};
+
+// AudioOutputStream::AudioSourceCallback implementation which enables audio
+// play-through. It also creates a text file that contains times between two
+// successive callbacks. Units are in milliseconds. This file can be used for
+// off-line analysis of the callback sequence.
+class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+  // NOTE(review): |explicit| on a zero-argument constructor has no effect.
+  explicit UnifiedSourceCallback()
+      : previous_call_time_(base::TimeTicks::Now()),
+        text_file_(NULL),
+        elements_to_write_(0) {
+    delta_times_.reset(new int[kMaxDeltaSamples]);
+  }
+
+  // Dumps the recorded inter-callback delta times (ms) to a text file in the
+  // executable's directory on destruction.
+  virtual ~UnifiedSourceCallback() {
+    base::FilePath file_name;
+    EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
+    file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
+
+    EXPECT_TRUE(!text_file_);
+    text_file_ = file_util::OpenFile(file_name, "wt");
+    DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
+    LOG(INFO) << ">> Output file " << file_name.value() << " has been created.";
+
+    // Write the array which contains delta times to a text file.
+    // NOTE(review): if OpenFile() failed, |text_file_| is NULL and the
+    // fprintf() below dereferences it — consider guarding the loop.
+    size_t elements_written = 0;
+    while (elements_written < elements_to_write_) {
+      fprintf(text_file_, "%d\n", delta_times_[elements_written]);
+      ++elements_written;
+    }
+    file_util::CloseFile(text_file_);
+  }
+
+  // Render-only callback; never expected in unified (I/O) mode.
+  virtual int OnMoreData(AudioBus* dest,
+                         AudioBuffersState buffers_state) {
+    NOTREACHED();
+    return 0;
+  };
+
+  // Echoes captured audio to the output, remixing channels when the input
+  // and output channel counts differ, and records the callback interval.
+  virtual int OnMoreIOData(AudioBus* source,
+                           AudioBus* dest,
+                           AudioBuffersState buffers_state) {
+    // Store time between this callback and the previous callback.
+    const base::TimeTicks now_time = base::TimeTicks::Now();
+    const int diff = (now_time - previous_call_time_).InMilliseconds();
+    previous_call_time_ = now_time;
+    if (elements_to_write_ < kMaxDeltaSamples) {
+      delta_times_[elements_to_write_] = diff;
+      ++elements_to_write_;
+    }
+
+    // Play out the recorded audio samples in loop back. Perform channel mixing
+    // if required using a channel mixer which is created only if needed.
+    if (source->channels() == dest->channels()) {
+      source->CopyTo(dest);
+    } else {
+      // A channel mixer is required for converting audio between two different
+      // channel layouts.
+      if (!channel_mixer_) {
+        // Guessing the channel layout will work OK for this unit test.
+        // Main thing is that the number of channels is correct.
+        ChannelLayout input_layout = GuessChannelLayout(source->channels());
+        ChannelLayout output_layout = GuessChannelLayout(dest->channels());
+        channel_mixer_.reset(new ChannelMixer(input_layout, output_layout));
+        DVLOG(1) << "Remixing channel layout from " << input_layout
+                 << " to " << output_layout << "; from "
+                 << source->channels() << " channels to "
+                 << dest->channels() << " channels.";
+      }
+      if (channel_mixer_)
+        channel_mixer_->Transform(source, dest);
+    }
+    return source->frames();
+  };
+
+  virtual void OnError(AudioOutputStream* stream) {
+    NOTREACHED();
+  }
+
+ private:
+  base::TimeTicks previous_call_time_;  // Time of the previous callback.
+  scoped_ptr<int[]> delta_times_;       // Ring of inter-callback deltas (ms).
+  FILE* text_file_;                     // Opened only in the destructor.
+  size_t elements_to_write_;            // Number of valid entries recorded.
+  scoped_ptr<ChannelMixer> channel_mixer_;  // Lazily created on mismatch.
+};
+
+// Convenience method which ensures that we fulfill all required conditions
+// to run unified audio tests on Windows: Core Audio support (Vista+) and at
+// least one output and one input device present.
+static bool CanRunUnifiedAudioTests(AudioManager* audio_man) {
+  if (!CoreAudioUtil::IsSupported()) {
+    // NOTE(review): log message contains a typo ("This tests requires").
+    LOG(WARNING) << "This tests requires Windows Vista or higher.";
+    return false;
+  }
+
+  if (!audio_man->HasAudioOutputDevices()) {
+    LOG(WARNING) << "No output devices detected.";
+    return false;
+  }
+
+  if (!audio_man->HasAudioInputDevices()) {
+    LOG(WARNING) << "No input devices detected.";
+    return false;
+  }
+
+  return true;
+}
+
+// Convenience class which simplifies creation of a unified AudioOutputStream
+// object.
+class AudioUnifiedStreamWrapper {
+ public:
+  explicit AudioUnifiedStreamWrapper(AudioManager* audio_manager)
+      : com_init_(ScopedCOMInitializer::kMTA),
+        audio_man_(audio_manager) {
+    // We open up both sides (input and output) using the preferred
+    // set of audio parameters. These parameters correspond to the mix format
+    // that the audio engine uses internally for processing of shared-mode
+    // output streams.
+    AudioParameters out_params;
+    EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
+        eRender, eConsole, &out_params)));
+
+    // WebAudio is the only real user of unified audio and it always asks
+    // for stereo.
+    // TODO(henrika): extend support to other input channel layouts as well.
+    const int kInputChannels = 2;
+
+    params_.Reset(out_params.format(),
+                  out_params.channel_layout(),
+                  out_params.channels(),
+                  kInputChannels,
+                  out_params.sample_rate(),
+                  out_params.bits_per_sample(),
+                  out_params.frames_per_buffer());
+  }
+
+  ~AudioUnifiedStreamWrapper() {}
+
+  // Creates an AudioOutputStream object using default parameters.
+  WASAPIUnifiedStream* Create() {
+    return static_cast<WASAPIUnifiedStream*> (CreateOutputStream());
+  }
+
+  // Creates an AudioOutputStream object using default parameters but a
+  // specified input device.
+  // NOTE(review): |device_id| is passed by value; const reference would
+  // avoid a string copy (cf. the private overload below).
+  WASAPIUnifiedStream* Create(const std::string device_id) {
+    return static_cast<WASAPIUnifiedStream*> (CreateOutputStream(device_id));
+  }
+
+  // Accessors for the audio parameters used by the streams created here.
+  AudioParameters::Format format() const { return params_.format(); }
+  int channels() const { return params_.channels(); }
+  int bits_per_sample() const { return params_.bits_per_sample(); }
+  int sample_rate() const { return params_.sample_rate(); }
+  int frames_per_buffer() const { return params_.frames_per_buffer(); }
+  int bytes_per_buffer() const { return params_.GetBytesPerBuffer(); }
+  int input_channels() const { return params_.input_channels(); }
+
+ private:
+  AudioOutputStream* CreateOutputStream() {
+    // Get the unique device ID of the default capture device instead of using
+    // AudioManagerBase::kDefaultDeviceId since it provides slightly better
+    // test coverage and will utilize the same code path as if a non default
+    // input device was used.
+    ScopedComPtr<IMMDevice> audio_device =
+        CoreAudioUtil::CreateDefaultDevice(eCapture, eConsole);
+    AudioDeviceName name;
+    EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device, &name)));
+    const std::string& device_id = name.unique_id;
+    EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole, device_id));
+
+    // Create the unified audio I/O stream using the default input device.
+    AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
+        device_id);
+    EXPECT_TRUE(aos);
+    return aos;
+  }
+
+  AudioOutputStream* CreateOutputStream(const std::string& device_id) {
+    // Create the unified audio I/O stream using the specified input device.
+    AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
+        device_id);
+    EXPECT_TRUE(aos);
+    return aos;
+  }
+
+  ScopedCOMInitializer com_init_;  // Keeps COM (MTA) alive for the wrapper.
+  AudioManager* audio_man_;        // Not owned.
+  AudioParameters params_;         // Unified I/O parameters for new streams.
+};
+
+// Convenience method which creates a default WASAPIUnifiedStream object.
+// The caller takes ownership and must call Close() on the returned stream.
+static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
+    AudioManager* audio_manager) {
+  AudioUnifiedStreamWrapper aosw(audio_manager);
+  return aosw.Create();
+}
+
+// Convenience method which creates a default WASAPIUnifiedStream object but
+// with a specified audio input device. The caller takes ownership and must
+// call Close() on the returned stream.
+static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
+    AudioManager* audio_manager, const std::string& device_id) {
+  AudioUnifiedStreamWrapper aosw(audio_manager);
+  return aosw.Create(device_id);
+}
+
+// Test Open(), Close() calling sequence.
+TEST(WASAPIUnifiedStreamTest, OpenAndClose) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  // Skip (not fail) when the host cannot run unified audio tests.
+  if (!CanRunUnifiedAudioTests(audio_manager.get()))
+    return;
+
+  WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
+  EXPECT_TRUE(wus->Open());
+  wus->Close();
+}
+
+// Test Open(), Close() calling sequence for all available capture devices.
+TEST(WASAPIUnifiedStreamTest, OpenAndCloseForAllInputDevices) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunUnifiedAudioTests(audio_manager.get()))
+    return;
+
+  // Enumerate every capture device and open/close a stream against each.
+  AudioDeviceNames device_names;
+  audio_manager->GetAudioInputDeviceNames(&device_names);
+  for (AudioDeviceNames::iterator i = device_names.begin();
+       i != device_names.end(); ++i) {
+    WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(
+        audio_manager.get(), i->unique_id);
+    EXPECT_TRUE(wus->Open());
+    wus->Close();
+  }
+}
+
+// Test Open(), Start(), Close() calling sequence.
+TEST(WASAPIUnifiedStreamTest, OpenStartAndClose) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunUnifiedAudioTests(audio_manager.get()))
+    return;
+
+  MockUnifiedSourceCallback source;
+  AudioUnifiedStreamWrapper ausw(audio_manager.get());
+  WASAPIUnifiedStream* wus = ausw.Create();
+
+  EXPECT_TRUE(wus->Open());
+  EXPECT_CALL(source, OnError(wus))
+      .Times(0);
+  // Close() may come before any callback has fired, hence Between(0, 1).
+  EXPECT_CALL(source, OnMoreIOData(NotNull(), NotNull(), _))
+      .Times(Between(0, 1))
+      .WillOnce(Return(ausw.frames_per_buffer()));
+  wus->Start(&source);
+  wus->Close();
+}
+
+// Verify that IO callbacks starts as they should.
+TEST(WASAPIUnifiedStreamTest, StartLoopbackAudio) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunUnifiedAudioTests(audio_manager.get()))
+    return;
+
+  base::MessageLoopForUI loop;
+  MockUnifiedSourceCallback source;
+  AudioUnifiedStreamWrapper ausw(audio_manager.get());
+  WASAPIUnifiedStream* wus = ausw.Create();
+
+  // Set up expected minimum delay estimation where we use a minimum delay
+  // which is equal to the sum of render and capture sizes. We can never
+  // reach a delay lower than this value.
+  AudioBuffersState min_total_audio_delay(0, 2 * ausw.bytes_per_buffer());
+
+  EXPECT_TRUE(wus->Open());
+  EXPECT_CALL(source, OnError(wus))
+      .Times(0);
+  // The second callback quits the loop; the delayed task below is only a
+  // safety timeout in case the callbacks never arrive.
+  EXPECT_CALL(source, OnMoreIOData(
+      NotNull(), NotNull(), DelayGreaterThan(min_total_audio_delay)))
+      .Times(AtLeast(2))
+      .WillOnce(Return(ausw.frames_per_buffer()))
+      .WillOnce(DoAll(
+          QuitLoop(loop.message_loop_proxy()),
+          Return(ausw.frames_per_buffer())));
+  wus->Start(&source);
+  loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
+      TestTimeouts::action_timeout());
+  loop.Run();
+  wus->Stop();
+  wus->Close();
+}
+
+// Perform a real-time test in loopback where the recorded audio is echoed
+// back to the speaker. This test allows the user to verify that the audio
+// sounds OK. A text file with name |kDeltaTimeMsFileName| is also generated.
+// Disabled by default; run manually for interactive verification.
+TEST(WASAPIUnifiedStreamTest, DISABLED_RealTimePlayThrough) {
+  scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
+  if (!CanRunUnifiedAudioTests(audio_manager.get()))
+    return;
+
+  base::MessageLoopForUI loop;
+  UnifiedSourceCallback source;
+  WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
+
+  EXPECT_TRUE(wus->Open());
+  wus->Start(&source);
+  // Echo audio for 10 seconds, then quit the loop and close the stream.
+  loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
+      base::TimeDelta::FromMilliseconds(10000));
+  loop.Run();
+  wus->Close();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/avrt_wrapper_win.cc b/chromium/media/audio/win/avrt_wrapper_win.cc
new file mode 100644
index 00000000000..c9f15991743
--- /dev/null
+++ b/chromium/media/audio/win/avrt_wrapper_win.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/win/avrt_wrapper_win.h"
+
+#include "base/logging.h"
+
+namespace avrt {
+
+// Function pointers matching the avrt.dll exports wrapped below.
+typedef BOOL (WINAPI *AvRevertMmThreadCharacteristicsFn)(HANDLE);
+typedef HANDLE (WINAPI *AvSetMmThreadCharacteristicsFn)(LPCWSTR, LPDWORD);
+typedef BOOL (WINAPI *AvSetMmThreadPriorityFn)(HANDLE, AVRT_PRIORITY);
+
+// Module handle and resolved entry points; populated by Initialize().
+HMODULE g_avrt = NULL;
+AvRevertMmThreadCharacteristicsFn g_revert_mm_thread_characteristics = NULL;
+AvSetMmThreadCharacteristicsFn g_set_mm_thread_characteristics = NULL;
+AvSetMmThreadPriorityFn g_set_mm_thread_priority = NULL;
+
+// Lazily loads avrt.dll and resolves the three wrapped functions.
+// Returns true only if the DLL and all entry points were found.
+// NOTE(review): the lazy initialization of the globals above is not
+// synchronized — confirm callers do not race Initialize().
+bool Initialize() {
+  if (!g_set_mm_thread_priority) {
+    // The avrt.dll is available on Windows Vista and later.
+    wchar_t path[MAX_PATH] = {0};
+    ExpandEnvironmentStrings(L"%WINDIR%\\system32\\avrt.dll", path,
+                             arraysize(path));
+    g_avrt = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+    if (!g_avrt)
+      return false;
+
+    g_revert_mm_thread_characteristics =
+        reinterpret_cast<AvRevertMmThreadCharacteristicsFn>(
+            GetProcAddress(g_avrt, "AvRevertMmThreadCharacteristics"));
+    g_set_mm_thread_characteristics =
+        reinterpret_cast<AvSetMmThreadCharacteristicsFn>(
+            GetProcAddress(g_avrt, "AvSetMmThreadCharacteristicsW"));
+    g_set_mm_thread_priority = reinterpret_cast<AvSetMmThreadPriorityFn>(
+        GetProcAddress(g_avrt, "AvSetMmThreadPriority"));
+  }
+
+  return (g_avrt && g_revert_mm_thread_characteristics &&
+          g_set_mm_thread_characteristics && g_set_mm_thread_priority);
+}
+
+// Thin wrapper; returns false if the entry point was never resolved.
+bool AvRevertMmThreadCharacteristics(HANDLE avrt_handle) {
+  DCHECK(g_revert_mm_thread_characteristics);
+  return (g_revert_mm_thread_characteristics &&
+          g_revert_mm_thread_characteristics(avrt_handle));
+}
+
+// Thin wrapper; returns NULL if the entry point was never resolved.
+HANDLE AvSetMmThreadCharacteristics(const wchar_t* task_name,
+                                    DWORD* task_index) {
+  DCHECK(g_set_mm_thread_characteristics);
+  return (g_set_mm_thread_characteristics ?
+      g_set_mm_thread_characteristics(task_name, task_index) : NULL);
+}
+
+// Thin wrapper; returns false if the entry point was never resolved.
+bool AvSetMmThreadPriority(HANDLE avrt_handle, AVRT_PRIORITY priority) {
+  DCHECK(g_set_mm_thread_priority);
+  return (g_set_mm_thread_priority &&
+          g_set_mm_thread_priority(avrt_handle, priority));
+}
+
+}  // namespace avrt
diff --git a/chromium/media/audio/win/avrt_wrapper_win.h b/chromium/media/audio/win/avrt_wrapper_win.h
new file mode 100644
index 00000000000..8127b6bff6f
--- /dev/null
+++ b/chromium/media/audio/win/avrt_wrapper_win.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// The avrt namespace encapsulates the details needed to support MMCSS.
+//
+// The Multimedia Class Scheduler service (MMCSS) enables multimedia
+// applications to ensure that their time-sensitive processing receives
+// prioritized access to CPU resources. This service enables multimedia
+// applications to utilize as much of the CPU as possible without denying
+// CPU resources to lower-priority applications.
+// MMCSS requires Windows Vista or higher and that the Avrt DLL is loaded.
+//
+// TODO(henrika): refactor and merge into existing thread implementation
+// for Windows to ensure that MMCSS can be enabled for all threads.
+//
+#ifndef MEDIA_AUDIO_WIN_AVRT_WRAPPER_WIN_H_
+#define MEDIA_AUDIO_WIN_AVRT_WRAPPER_WIN_H_
+
+#include <windows.h>
+#include <avrt.h>
+
+#include "base/basictypes.h"
+
+namespace avrt {
+
+// Loads the Avrt.dll which is available on Windows Vista and later.
+// Must return true before the wrappers below can succeed.
+bool Initialize();
+
+// Function wrappers for the underlying MMCSS functions. Each forwards to
+// the dynamically resolved avrt.dll export of the same name.
+bool AvRevertMmThreadCharacteristics(HANDLE avrt_handle);
+HANDLE AvSetMmThreadCharacteristics(const wchar_t* task_name,
+                                    DWORD* task_index);
+bool AvSetMmThreadPriority(HANDLE avrt_handle, AVRT_PRIORITY priority);
+
+}  // namespace avrt
+
+#endif // MEDIA_AUDIO_WIN_AVRT_WRAPPER_WIN_H_
+
diff --git a/chromium/media/audio/win/core_audio_util_win.cc b/chromium/media/audio/win/core_audio_util_win.cc
new file mode 100644
index 00000000000..392184b7a01
--- /dev/null
+++ b/chromium/media/audio/win/core_audio_util_win.cc
@@ -0,0 +1,718 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/win/core_audio_util_win.h"
+
+#include <Audioclient.h>
+#include <Functiondiscoverykeys_devpkey.h>
+
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/scoped_propvariant.h"
+#include "base/win/windows_version.h"
+#include "media/base/media_switches.h"
+
+using base::win::ScopedCoMem;
+using base::win::ScopedHandle;
+
+namespace media {
+
+enum { KSAUDIO_SPEAKER_UNSUPPORTED = 0 };
+
+typedef uint32 ChannelConfig;
+
+// Converts Microsoft's channel configuration to ChannelLayout.
+// This mapping is not perfect but the best we can do given the current
+// ChannelLayout enumerator and the Windows-specific speaker configurations
+// defined in ksmedia.h. Don't assume that the channel ordering in
+// ChannelLayout is exactly the same as the Windows specific configuration.
+// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
+// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
+// speakers are different in these two definitions.
+static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
+ switch (config) {
+ case KSAUDIO_SPEAKER_DIRECTOUT:
+ DVLOG(2) << "KSAUDIO_SPEAKER_DIRECTOUT=>CHANNEL_LAYOUT_NONE";
+ return CHANNEL_LAYOUT_NONE;
+ case KSAUDIO_SPEAKER_MONO:
+ DVLOG(2) << "KSAUDIO_SPEAKER_MONO=>CHANNEL_LAYOUT_MONO";
+ return CHANNEL_LAYOUT_MONO;
+ case KSAUDIO_SPEAKER_STEREO:
+ DVLOG(2) << "KSAUDIO_SPEAKER_STEREO=>CHANNEL_LAYOUT_STEREO";
+ return CHANNEL_LAYOUT_STEREO;
+ case KSAUDIO_SPEAKER_QUAD:
+ DVLOG(2) << "KSAUDIO_SPEAKER_QUAD=>CHANNEL_LAYOUT_QUAD";
+ return CHANNEL_LAYOUT_QUAD;
+ case KSAUDIO_SPEAKER_SURROUND:
+ DVLOG(2) << "KSAUDIO_SPEAKER_SURROUND=>CHANNEL_LAYOUT_4_0";
+ return CHANNEL_LAYOUT_4_0;
+ case KSAUDIO_SPEAKER_5POINT1:
+ DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1=>CHANNEL_LAYOUT_5_1_BACK";
+ return CHANNEL_LAYOUT_5_1_BACK;
+ case KSAUDIO_SPEAKER_5POINT1_SURROUND:
+ DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1_SURROUND=>CHANNEL_LAYOUT_5_1";
+ return CHANNEL_LAYOUT_5_1;
+ case KSAUDIO_SPEAKER_7POINT1:
+ DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1=>CHANNEL_LAYOUT_7_1_WIDE";
+ return CHANNEL_LAYOUT_7_1_WIDE;
+ case KSAUDIO_SPEAKER_7POINT1_SURROUND:
+ DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1_SURROUND=>CHANNEL_LAYOUT_7_1";
+ return CHANNEL_LAYOUT_7_1;
+ default:
+ DVLOG(2) << "Unsupported channel configuration: " << config;
+ return CHANNEL_LAYOUT_UNSUPPORTED;
+ }
+}
+
+// TODO(henrika): add mapping for all types in the ChannelLayout enumerator.
+static ChannelConfig ChannelLayoutToChannelConfig(ChannelLayout layout) {
+ switch (layout) {
+ case CHANNEL_LAYOUT_NONE:
+ DVLOG(2) << "CHANNEL_LAYOUT_NONE=>KSAUDIO_SPEAKER_UNSUPPORTED";
+ return KSAUDIO_SPEAKER_UNSUPPORTED;
+ case CHANNEL_LAYOUT_UNSUPPORTED:
+ DVLOG(2) << "CHANNEL_LAYOUT_UNSUPPORTED=>KSAUDIO_SPEAKER_UNSUPPORTED";
+ return KSAUDIO_SPEAKER_UNSUPPORTED;
+ case CHANNEL_LAYOUT_MONO:
+ DVLOG(2) << "CHANNEL_LAYOUT_MONO=>KSAUDIO_SPEAKER_MONO";
+ return KSAUDIO_SPEAKER_MONO;
+ case CHANNEL_LAYOUT_STEREO:
+ DVLOG(2) << "CHANNEL_LAYOUT_STEREO=>KSAUDIO_SPEAKER_STEREO";
+ return KSAUDIO_SPEAKER_STEREO;
+ case CHANNEL_LAYOUT_QUAD:
+ DVLOG(2) << "CHANNEL_LAYOUT_QUAD=>KSAUDIO_SPEAKER_QUAD";
+ return KSAUDIO_SPEAKER_QUAD;
+ case CHANNEL_LAYOUT_4_0:
+ DVLOG(2) << "CHANNEL_LAYOUT_4_0=>KSAUDIO_SPEAKER_SURROUND";
+ return KSAUDIO_SPEAKER_SURROUND;
+ case CHANNEL_LAYOUT_5_1_BACK:
+ DVLOG(2) << "CHANNEL_LAYOUT_5_1_BACK=>KSAUDIO_SPEAKER_5POINT1";
+ return KSAUDIO_SPEAKER_5POINT1;
+ case CHANNEL_LAYOUT_5_1:
+ DVLOG(2) << "CHANNEL_LAYOUT_5_1=>KSAUDIO_SPEAKER_5POINT1_SURROUND";
+ return KSAUDIO_SPEAKER_5POINT1_SURROUND;
+ case CHANNEL_LAYOUT_7_1_WIDE:
+ DVLOG(2) << "CHANNEL_LAYOUT_7_1_WIDE=>KSAUDIO_SPEAKER_7POINT1";
+ return KSAUDIO_SPEAKER_7POINT1;
+ case CHANNEL_LAYOUT_7_1:
+ DVLOG(2) << "CHANNEL_LAYOUT_7_1=>KSAUDIO_SPEAKER_7POINT1_SURROUND";
+ return KSAUDIO_SPEAKER_7POINT1_SURROUND;
+ default:
+ DVLOG(2) << "Unsupported channel layout: " << layout;
+ return KSAUDIO_SPEAKER_UNSUPPORTED;
+ }
+}
+
+static std::ostream& operator<<(std::ostream& os,
+ const WAVEFORMATPCMEX& format) {
+ os << "wFormatTag: 0x" << std::hex << format.Format.wFormatTag
+ << ", nChannels: " << std::dec << format.Format.nChannels
+ << ", nSamplesPerSec: " << format.Format.nSamplesPerSec
+ << ", nAvgBytesPerSec: " << format.Format.nAvgBytesPerSec
+ << ", nBlockAlign: " << format.Format.nBlockAlign
+ << ", wBitsPerSample: " << format.Format.wBitsPerSample
+ << ", cbSize: " << format.Format.cbSize
+ << ", wValidBitsPerSample: " << format.Samples.wValidBitsPerSample
+ << ", dwChannelMask: 0x" << std::hex << format.dwChannelMask;
+ return os;
+}
+
+bool LoadAudiosesDll() {
+ static const wchar_t* const kAudiosesDLL =
+ L"%WINDIR%\\system32\\audioses.dll";
+
+ wchar_t path[MAX_PATH] = {0};
+ ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
+ return (LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH) != NULL);
+}
+
+bool CanCreateDeviceEnumerator() {
+ ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
+ HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
+ NULL, CLSCTX_INPROC_SERVER);
+
+ // If we hit CO_E_NOTINITIALIZED, CoInitialize has not been called and it
+ // must be called at least once for each thread that uses the COM library.
+ CHECK_NE(hr, CO_E_NOTINITIALIZED);
+
+ return SUCCEEDED(hr);
+}
+
+bool CoreAudioUtil::IsSupported() {
+ // It is possible to force usage of WaveXxx APIs by using a command line flag.
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ if (cmd_line->HasSwitch(switches::kForceWaveAudio)) {
+ LOG(WARNING) << "Forcing usage of Windows WaveXxx APIs";
+ return false;
+ }
+
+ // Microsoft does not plan to make the Core Audio APIs available for use
+ // with earlier versions of Windows, including Microsoft Windows Server 2003,
+ // Windows XP, Windows Millennium Edition, Windows 2000, and Windows 98.
+ if (base::win::GetVersion() < base::win::VERSION_VISTA)
+ return false;
+
+ // The audio core APIs are implemented in the Mmdevapi.dll and Audioses.dll
+ // system components.
+ // Dependency Walker shows that it is enough to verify possibility to load
+ // the Audioses DLL since it depends on Mmdevapi.dll.
+ // See http://crbug.com/166397 why this extra step is required to guarantee
+ // Core Audio support.
+ static bool g_audioses_dll_available = LoadAudiosesDll();
+ if (!g_audioses_dll_available)
+ return false;
+
+ // Being able to load the Audioses.dll does not seem to be sufficient for
+ // all devices to guarantee Core Audio support. To be 100%, we also verify
+  // that it is possible to create the IMMDeviceEnumerator interface. If this
+ // works as well we should be home free.
+ static bool g_can_create_device_enumerator = CanCreateDeviceEnumerator();
+ LOG_IF(ERROR, !g_can_create_device_enumerator)
+ << "Failed to create Core Audio device enumerator on thread with ID "
+ << GetCurrentThreadId();
+ return g_can_create_device_enumerator;
+}
+
+base::TimeDelta CoreAudioUtil::RefererenceTimeToTimeDelta(REFERENCE_TIME time) {
+ // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
+ return base::TimeDelta::FromMicroseconds(0.1 * time + 0.5);
+}
+
+AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
+ return AUDCLNT_SHAREMODE_EXCLUSIVE;
+ return AUDCLNT_SHAREMODE_SHARED;
+}
+
+int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
+ DCHECK(IsSupported());
+ // Create the IMMDeviceEnumerator interface.
+ ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
+ CreateDeviceEnumerator();
+ if (!device_enumerator)
+ return 0;
+
+ // Generate a collection of active (present and not disabled) audio endpoint
+ // devices for the specified data-flow direction.
+ // This method will succeed even if all devices are disabled.
+ ScopedComPtr<IMMDeviceCollection> collection;
+ HRESULT hr = device_enumerator->EnumAudioEndpoints(data_flow,
+ DEVICE_STATE_ACTIVE,
+ collection.Receive());
+ if (FAILED(hr)) {
+ LOG(ERROR) << "IMMDeviceCollection::EnumAudioEndpoints: " << std::hex << hr;
+ return 0;
+ }
+
+ // Retrieve the number of active audio devices for the specified direction
+ UINT number_of_active_devices = 0;
+ collection->GetCount(&number_of_active_devices);
+ DVLOG(2) << ((data_flow == eCapture) ? "[in ] " : "[out] ")
+ << "number of devices: " << number_of_active_devices;
+ return static_cast<int>(number_of_active_devices);
+}
+
+ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
+ HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
+ NULL, CLSCTX_INPROC_SERVER);
+ CHECK(SUCCEEDED(hr));
+ return device_enumerator;
+}
+
+ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
+ ERole role) {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMDevice> endpoint_device;
+
+ // Create the IMMDeviceEnumerator interface.
+ ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
+ CreateDeviceEnumerator();
+ if (!device_enumerator)
+ return endpoint_device;
+
+ // Retrieve the default audio endpoint for the specified data-flow
+ // direction and role.
+ HRESULT hr = device_enumerator->GetDefaultAudioEndpoint(
+ data_flow, role, endpoint_device.Receive());
+
+ if (FAILED(hr)) {
+ DVLOG(1) << "IMMDeviceEnumerator::GetDefaultAudioEndpoint: "
+ << std::hex << hr;
+ return endpoint_device;
+ }
+
+ // Verify that the audio endpoint device is active, i.e., that the audio
+ // adapter that connects to the endpoint device is present and enabled.
+ DWORD state = DEVICE_STATE_DISABLED;
+ hr = endpoint_device->GetState(&state);
+ if (SUCCEEDED(hr)) {
+ if (!(state & DEVICE_STATE_ACTIVE)) {
+ DVLOG(1) << "Selected endpoint device is not active";
+ endpoint_device.Release();
+ }
+ }
+ return endpoint_device;
+}
+
+ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
+ const std::string& device_id) {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMDevice> endpoint_device;
+
+ // Create the IMMDeviceEnumerator interface.
+ ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
+ CreateDeviceEnumerator();
+ if (!device_enumerator)
+ return endpoint_device;
+
+ // Retrieve an audio device specified by an endpoint device-identification
+ // string.
+ HRESULT hr = device_enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
+ endpoint_device.Receive());
+ DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: "
+ << std::hex << hr;
+ return endpoint_device;
+}
+
+HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
+ DCHECK(IsSupported());
+
+ // Retrieve unique name of endpoint device.
+ // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
+ AudioDeviceName device_name;
+ ScopedCoMem<WCHAR> endpoint_device_id;
+ HRESULT hr = device->GetId(&endpoint_device_id);
+ if (FAILED(hr))
+ return hr;
+ WideToUTF8(endpoint_device_id, wcslen(endpoint_device_id),
+ &device_name.unique_id);
+
+ // Retrieve user-friendly name of endpoint device.
+ // Example: "Microphone (Realtek High Definition Audio)".
+ ScopedComPtr<IPropertyStore> properties;
+ hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
+ if (FAILED(hr))
+ return hr;
+ base::win::ScopedPropVariant friendly_name;
+ hr = properties->GetValue(PKEY_Device_FriendlyName, friendly_name.Receive());
+ if (FAILED(hr))
+ return hr;
+ if (friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
+ WideToUTF8(friendly_name.get().pwszVal,
+ wcslen(friendly_name.get().pwszVal),
+ &device_name.device_name);
+ }
+
+ *name = device_name;
+ DVLOG(2) << "friendly name: " << device_name.device_name;
+ DVLOG(2) << "unique id : " << device_name.unique_id;
+ return hr;
+}
+
+std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
+ if (!audio_device)
+ return std::string();
+
+ AudioDeviceName device_name;
+ HRESULT hr = GetDeviceName(audio_device, &device_name);
+ if (FAILED(hr))
+ return std::string();
+
+ return device_name.device_name;
+}
+
+bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
+ ERole role,
+ const std::string& device_id) {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMDevice> device = CreateDefaultDevice(flow, role);
+ if (!device)
+ return false;
+
+ ScopedCoMem<WCHAR> default_device_id;
+ HRESULT hr = device->GetId(&default_device_id);
+ if (FAILED(hr))
+ return false;
+
+ std::string str_default;
+ WideToUTF8(default_device_id, wcslen(default_device_id), &str_default);
+ if (device_id.compare(str_default) != 0)
+ return false;
+ return true;
+}
+
+EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMEndpoint> endpoint;
+ HRESULT hr = device->QueryInterface(endpoint.Receive());
+ if (FAILED(hr)) {
+ DVLOG(1) << "IMMDevice::QueryInterface: " << std::hex << hr;
+ return eAll;
+ }
+
+ EDataFlow data_flow;
+ hr = endpoint->GetDataFlow(&data_flow);
+ if (FAILED(hr)) {
+ DVLOG(1) << "IMMEndpoint::GetDataFlow: " << std::hex << hr;
+ return eAll;
+ }
+ return data_flow;
+}
+
+ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
+ IMMDevice* audio_device) {
+ DCHECK(IsSupported());
+
+ // Creates and activates an IAudioClient COM object given the selected
+ // endpoint device.
+ ScopedComPtr<IAudioClient> audio_client;
+ HRESULT hr = audio_device->Activate(__uuidof(IAudioClient),
+ CLSCTX_INPROC_SERVER,
+ NULL,
+ audio_client.ReceiveVoid());
+ DVLOG_IF(1, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr;
+ return audio_client;
+}
+
+ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
+ EDataFlow data_flow, ERole role) {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role));
+ return (default_device ? CreateClient(default_device) :
+ ScopedComPtr<IAudioClient>());
+}
+
+HRESULT CoreAudioUtil::GetSharedModeMixFormat(
+ IAudioClient* client, WAVEFORMATPCMEX* format) {
+ DCHECK(IsSupported());
+ ScopedCoMem<WAVEFORMATPCMEX> format_pcmex;
+ HRESULT hr = client->GetMixFormat(
+ reinterpret_cast<WAVEFORMATEX**>(&format_pcmex));
+ if (FAILED(hr))
+ return hr;
+
+ size_t bytes = sizeof(WAVEFORMATEX) + format_pcmex->Format.cbSize;
+ DCHECK_EQ(bytes, sizeof(WAVEFORMATPCMEX));
+
+ memcpy(format, format_pcmex, bytes);
+ DVLOG(2) << *format;
+
+ return hr;
+}
+
+HRESULT CoreAudioUtil::GetDefaultSharedModeMixFormat(
+ EDataFlow data_flow, ERole role, WAVEFORMATPCMEX* format) {
+ DCHECK(IsSupported());
+ ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
+ if (!client) {
+ // Map NULL-pointer to new error code which can be different from the
+ // actual error code. The exact value is not important here.
+ return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
+ }
+ return CoreAudioUtil::GetSharedModeMixFormat(client, format);
+}
+
+bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ const WAVEFORMATPCMEX* format) {
+ DCHECK(IsSupported());
+ ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
+ HRESULT hr = client->IsFormatSupported(
+ share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
+ reinterpret_cast<WAVEFORMATEX**>(&closest_match));
+
+ // This log can only be triggered for shared mode.
+ DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
+ << "but a closest match exists.";
+ // This log can be triggered both for shared and exclusive modes.
+ DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
+ if (hr == S_FALSE) {
+ DVLOG(2) << *closest_match;
+ }
+
+ return (hr == S_OK);
+}
+
+bool CoreAudioUtil::IsChannelLayoutSupported(EDataFlow data_flow, ERole role,
+ ChannelLayout channel_layout) {
+ DCHECK(IsSupported());
+
+ // First, get the preferred mixing format for shared mode streams.
+
+ ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
+ if (!client)
+ return false;
+
+ WAVEFORMATPCMEX format;
+ HRESULT hr = CoreAudioUtil::GetSharedModeMixFormat(client, &format);
+ if (FAILED(hr))
+ return false;
+
+ // Next, check if it is possible to use an alternative format where the
+ // channel layout (and possibly number of channels) is modified.
+
+ // Convert generic channel layout into Windows-specific channel configuration.
+ ChannelConfig new_config = ChannelLayoutToChannelConfig(channel_layout);
+ if (new_config == KSAUDIO_SPEAKER_UNSUPPORTED) {
+ return false;
+ }
+ format.dwChannelMask = new_config;
+
+ // Modify the format if the new channel layout has changed the number of
+ // utilized channels.
+ const int channels = ChannelLayoutToChannelCount(channel_layout);
+ if (channels != format.Format.nChannels) {
+ format.Format.nChannels = channels;
+ format.Format.nBlockAlign = (format.Format.wBitsPerSample / 8) * channels;
+ format.Format.nAvgBytesPerSec = format.Format.nSamplesPerSec *
+ format.Format.nBlockAlign;
+ }
+ DVLOG(2) << format;
+
+ // Some devices can initialize a shared-mode stream with a format that is
+ // not identical to the mix format obtained from the GetMixFormat() method.
+ // However, chances of succeeding increases if we use the same number of
+ // channels and the same sample rate as the mix format. I.e, this call will
+ // return true only in those cases where the audio engine is able to support
+ // an even wider range of shared-mode formats where the installation package
+ // for the audio device includes a local effects (LFX) audio processing
+ // object (APO) that can handle format conversions.
+ return CoreAudioUtil::IsFormatSupported(client, AUDCLNT_SHAREMODE_SHARED,
+ &format);
+}
+
+HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ REFERENCE_TIME* device_period) {
+ DCHECK(IsSupported());
+
+ // Get the period of the engine thread.
+ REFERENCE_TIME default_period = 0;
+ REFERENCE_TIME minimum_period = 0;
+ HRESULT hr = client->GetDevicePeriod(&default_period, &minimum_period);
+ if (FAILED(hr))
+ return hr;
+
+ *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period :
+ minimum_period;
+ DVLOG(2) << "device_period: "
+ << RefererenceTimeToTimeDelta(*device_period).InMillisecondsF()
+ << " [ms]";
+ return hr;
+}
+
+HRESULT CoreAudioUtil::GetPreferredAudioParameters(
+ IAudioClient* client, AudioParameters* params) {
+ DCHECK(IsSupported());
+ WAVEFORMATPCMEX mix_format;
+ HRESULT hr = GetSharedModeMixFormat(client, &mix_format);
+ if (FAILED(hr))
+ return hr;
+
+ REFERENCE_TIME default_period = 0;
+ hr = GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED, &default_period);
+ if (FAILED(hr))
+ return hr;
+
+ // Get the integer mask which corresponds to the channel layout the
+ // audio engine uses for its internal processing/mixing of shared-mode
+ // streams. This mask indicates which channels are present in the multi-
+ // channel stream. The least significant bit corresponds with the Front Left
+ // speaker, the next least significant bit corresponds to the Front Right
+ // speaker, and so on, continuing in the order defined in KsMedia.h.
+ // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083.aspx
+ // for more details.
+ ChannelConfig channel_config = mix_format.dwChannelMask;
+
+  // Convert Microsoft's channel configuration to generic ChannelLayout.
+ ChannelLayout channel_layout = ChannelConfigToChannelLayout(channel_config);
+
+ // Preferred sample rate.
+ int sample_rate = mix_format.Format.nSamplesPerSec;
+
+ // TODO(henrika): possibly use format.Format.wBitsPerSample here instead.
+ // We use a hard-coded value of 16 bits per sample today even if most audio
+ // engines does the actual mixing in 32 bits per sample.
+ int bits_per_sample = 16;
+
+ // We are using the native device period to derive the smallest possible
+ // buffer size in shared mode. Note that the actual endpoint buffer will be
+ // larger than this size but it will be possible to fill it up in two calls.
+ // TODO(henrika): ensure that this scheme works for capturing as well.
+ int frames_per_buffer = static_cast<int>(sample_rate *
+ RefererenceTimeToTimeDelta(default_period).InSecondsF() + 0.5);
+
+ DVLOG(1) << "channel_layout : " << channel_layout;
+ DVLOG(1) << "sample_rate : " << sample_rate;
+ DVLOG(1) << "bits_per_sample : " << bits_per_sample;
+ DVLOG(1) << "frames_per_buffer: " << frames_per_buffer;
+
+ AudioParameters audio_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ channel_layout,
+ sample_rate,
+ bits_per_sample,
+ frames_per_buffer);
+
+ *params = audio_params;
+ return hr;
+}
+
+HRESULT CoreAudioUtil::GetPreferredAudioParameters(
+ EDataFlow data_flow, ERole role, AudioParameters* params) {
+ DCHECK(IsSupported());
+ ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
+ if (!client) {
+ // Map NULL-pointer to new error code which can be different from the
+ // actual error code. The exact value is not important here.
+ return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
+ }
+ return GetPreferredAudioParameters(client, params);
+}
+
+HRESULT CoreAudioUtil::GetPreferredAudioParameters(
+ const std::string& device_id, AudioParameters* params) {
+ DCHECK(IsSupported());
+ ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
+ if (!device) {
+ // Map NULL-pointer to new error code which can be different from the
+ // actual error code. The exact value is not important here.
+ return AUDCLNT_E_DEVICE_INVALIDATED;
+ }
+
+ ScopedComPtr<IAudioClient> client(CreateClient(device));
+ if (!client) {
+ // Map NULL-pointer to new error code which can be different from the
+ // actual error code. The exact value is not important here.
+ return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
+ }
+ return GetPreferredAudioParameters(client, params);
+}
+
+HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
+ const WAVEFORMATPCMEX* format,
+ HANDLE event_handle,
+ uint32* endpoint_buffer_size) {
+ DCHECK(IsSupported());
+
+ // Use default flags (i.e, dont set AUDCLNT_STREAMFLAGS_NOPERSIST) to
+ // ensure that the volume level and muting state for a rendering session
+ // are persistent across system restarts. The volume level and muting
+ // state for a capture session are never persistent.
+ DWORD stream_flags = 0;
+
+ // Enable event-driven streaming if a valid event handle is provided.
+ // After the stream starts, the audio engine will signal the event handle
+ // to notify the client each time a buffer becomes ready to process.
+ // Event-driven buffering is supported for both rendering and capturing.
+ // Both shared-mode and exclusive-mode streams can use event-driven buffering.
+ bool use_event = (event_handle != NULL &&
+ event_handle != INVALID_HANDLE_VALUE);
+ if (use_event)
+ stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;
+
+ // Initialize the shared mode client for minimal delay.
+ HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED,
+ stream_flags,
+ 0,
+ 0,
+ reinterpret_cast<const WAVEFORMATEX*>(format),
+ NULL);
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::Initialize: " << std::hex << hr;
+ return hr;
+ }
+
+ if (use_event) {
+ hr = client->SetEventHandle(event_handle);
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
+ return hr;
+ }
+ }
+
+ UINT32 buffer_size_in_frames = 0;
+ hr = client->GetBufferSize(&buffer_size_in_frames);
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
+ return hr;
+ }
+
+ *endpoint_buffer_size = buffer_size_in_frames;
+ DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
+
+ // TODO(henrika): utilize when delay measurements are added.
+ REFERENCE_TIME latency = 0;
+ hr = client->GetStreamLatency(&latency);
+ DVLOG(2) << "stream latency: "
+ << RefererenceTimeToTimeDelta(latency).InMillisecondsF() << " [ms]";
+ return hr;
+}
+
+ScopedComPtr<IAudioRenderClient> CoreAudioUtil::CreateRenderClient(
+ IAudioClient* client) {
+ DCHECK(IsSupported());
+
+ // Get access to the IAudioRenderClient interface. This interface
+ // enables us to write output data to a rendering endpoint buffer.
+ ScopedComPtr<IAudioRenderClient> audio_render_client;
+ HRESULT hr = client->GetService(__uuidof(IAudioRenderClient),
+ audio_render_client.ReceiveVoid());
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
+ return ScopedComPtr<IAudioRenderClient>();
+ }
+ return audio_render_client;
+}
+
+ScopedComPtr<IAudioCaptureClient> CoreAudioUtil::CreateCaptureClient(
+ IAudioClient* client) {
+ DCHECK(IsSupported());
+
+ // Get access to the IAudioCaptureClient interface. This interface
+ // enables us to read input data from a capturing endpoint buffer.
+ ScopedComPtr<IAudioCaptureClient> audio_capture_client;
+ HRESULT hr = client->GetService(__uuidof(IAudioCaptureClient),
+ audio_capture_client.ReceiveVoid());
+ if (FAILED(hr)) {
+ DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
+ return ScopedComPtr<IAudioCaptureClient>();
+ }
+ return audio_capture_client;
+}
+
+bool CoreAudioUtil::FillRenderEndpointBufferWithSilence(
+ IAudioClient* client, IAudioRenderClient* render_client) {
+ DCHECK(IsSupported());
+
+ UINT32 endpoint_buffer_size = 0;
+ if (FAILED(client->GetBufferSize(&endpoint_buffer_size)))
+ return false;
+
+ UINT32 num_queued_frames = 0;
+ if (FAILED(client->GetCurrentPadding(&num_queued_frames)))
+ return false;
+
+ BYTE* data = NULL;
+ int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
+ if (FAILED(render_client->GetBuffer(num_frames_to_fill, &data)))
+ return false;
+
+ // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
+ // explicitly write silence data to the rendering buffer.
+ DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence";
+ return SUCCEEDED(render_client->ReleaseBuffer(num_frames_to_fill,
+ AUDCLNT_BUFFERFLAGS_SILENT));
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/core_audio_util_win.h b/chromium/media/audio/win/core_audio_util_win.h
new file mode 100644
index 00000000000..3b2734570d0
--- /dev/null
+++ b/chromium/media/audio/win/core_audio_util_win.h
@@ -0,0 +1,191 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Utility methods for the Core Audio API on Windows.
+// Always ensure that Core Audio is supported before using these methods.
+// Use media::CoreAudioIsSupported() for this purpose.
+// Also, all methods must be called on a valid COM thread. This can be done
+// by using the base::win::ScopedCOMInitializer helper class.
+
+#ifndef MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
+#define MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
+
+#include <audioclient.h>
+#include <mmdeviceapi.h>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/time/time.h"
+#include "base/win/scoped_comptr.h"
+#include "media/audio/audio_device_name.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/media_export.h"
+
+using base::win::ScopedComPtr;
+
+namespace media {
+
+class MEDIA_EXPORT CoreAudioUtil {
+ public:
+ // Returns true if Windows Core Audio is supported.
+ // Always verify that this method returns true before using any of the
+ // methods in this class.
+ static bool IsSupported();
+
+  // Converts reference time to base::TimeDelta.
+ // One reference-time unit is 100 nanoseconds.
+ // Example: double s = RefererenceTimeToTimeDelta(t).InMillisecondsF();
+ static base::TimeDelta RefererenceTimeToTimeDelta(REFERENCE_TIME time);
+
+ // Returns AUDCLNT_SHAREMODE_EXCLUSIVE if --enable-exclusive-mode is used
+ // as command-line flag and AUDCLNT_SHAREMODE_SHARED otherwise (default).
+ static AUDCLNT_SHAREMODE GetShareMode();
+
+ // The Windows Multimedia Device (MMDevice) API enables audio clients to
+ // discover audio endpoint devices and determine their capabilities.
+
+  // Number of active audio devices in the specified data-flow direction.
+ // Set |data_flow| to eAll to retrieve the total number of active audio
+ // devices.
+ static int NumberOfActiveDevices(EDataFlow data_flow);
+
+ // Creates an IMMDeviceEnumerator interface which provides methods for
+ // enumerating audio endpoint devices.
+ static ScopedComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator();
+
+ // Creates a default endpoint device that is specified by a data-flow
+ // direction and role, e.g. default render device.
+ static ScopedComPtr<IMMDevice> CreateDefaultDevice(
+ EDataFlow data_flow, ERole role);
+
+ // Creates an endpoint device that is specified by a unique endpoint device-
+ // identification string.
+ static ScopedComPtr<IMMDevice> CreateDevice(const std::string& device_id);
+
+ // Returns the unique ID and user-friendly name of a given endpoint device.
+ // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}", and
+ // "Microphone (Realtek High Definition Audio)".
+ static HRESULT GetDeviceName(IMMDevice* device, AudioDeviceName* name);
+
+ // Gets the user-friendly name of the endpoint device which is represented
+ // by a unique id in |device_id|.
+ static std::string GetFriendlyName(const std::string& device_id);
+
+ // Returns true if the provided unique |device_id| corresponds to the current
+ // default device for the specified by a data-flow direction and role.
+ static bool DeviceIsDefault(
+ EDataFlow flow, ERole role, const std::string& device_id);
+
+ // Query if the audio device is a rendering device or a capture device.
+ static EDataFlow GetDataFlow(IMMDevice* device);
+
+ // The Windows Audio Session API (WASAPI) enables client applications to
+ // manage the flow of audio data between the application and an audio endpoint
+ // device.
+
+ // Create an IAudioClient interface for the default IMMDevice where
+  // flow direction and role are defined by |data_flow| and |role|.
+ // The IAudioClient interface enables a client to create and initialize an
+ // audio stream between an audio application and the audio engine (for a
+ // shared-mode stream) or the hardware buffer of an audio endpoint device
+ // (for an exclusive-mode stream).
+ static ScopedComPtr<IAudioClient> CreateDefaultClient(EDataFlow data_flow,
+ ERole role);
+
+ // Create an IAudioClient interface for an existing IMMDevice given by
+  // |audio_device|. Flow direction and role are defined by the |audio_device|.
+ static ScopedComPtr<IAudioClient> CreateClient(IMMDevice* audio_device);
+
+ // Get the mix format that the audio engine uses internally for processing
+ // of shared-mode streams. This format is not necessarily a format that the
+ // audio endpoint device supports. Thus, the caller might not succeed in
+ // creating an exclusive-mode stream with a format obtained by this method.
+ static HRESULT GetSharedModeMixFormat(IAudioClient* client,
+ WAVEFORMATPCMEX* format);
+
+ // Get the mix format that the audio engine uses internally for processing
+ // of shared-mode streams using the default IMMDevice where flow direction
+  // and role are defined by |data_flow| and |role|.
+ static HRESULT GetDefaultSharedModeMixFormat(EDataFlow data_flow,
+ ERole role,
+ WAVEFORMATPCMEX* format);
+
+ // Returns true if the specified |client| supports the format in |format|
+ // for the given |share_mode| (shared or exclusive).
+ static bool IsFormatSupported(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ const WAVEFORMATPCMEX* format);
+
+ // Returns true if the specified |channel_layout| is supported for the
+  // default IMMDevice where flow direction and role are defined by |data_flow|
+ // and |role|. If this method returns true for a certain channel layout, it
+ // means that SharedModeInitialize() will succeed using a format based on
+ // the preferred format where the channel layout has been modified.
+ static bool IsChannelLayoutSupported(EDataFlow data_flow, ERole role,
+ ChannelLayout channel_layout);
+
+ // For a shared-mode stream, the audio engine periodically processes the
+ // data in the endpoint buffer at the period obtained in |device_period|.
+ // For an exclusive mode stream, |device_period| corresponds to the minimum
+ // time interval between successive processing by the endpoint device.
+ // This period plus the stream latency between the buffer and endpoint device
+ // represents the minimum possible latency that an audio application can
+ // achieve. The time in |device_period| is expressed in 100-nanosecond units.
+ static HRESULT GetDevicePeriod(IAudioClient* client,
+ AUDCLNT_SHAREMODE share_mode,
+ REFERENCE_TIME* device_period);
+
+ // Get the preferred audio parameters for the specified |client| or the
+  // given direction and role are defined by |data_flow| and |role|, or the
+ // unique device id given by |device_id|.
+  // The acquired values should only be utilized for shared mode streams since
+ // there are no preferred settings for an exclusive mode stream.
+ static HRESULT GetPreferredAudioParameters(IAudioClient* client,
+ AudioParameters* params);
+ static HRESULT GetPreferredAudioParameters(EDataFlow data_flow, ERole role,
+ AudioParameters* params);
+ static HRESULT GetPreferredAudioParameters(const std::string& device_id,
+ AudioParameters* params);
+
+ // After activating an IAudioClient interface on an audio endpoint device,
+ // the client must initialize it once, and only once, to initialize the audio
+ // stream between the client and the device. In shared mode, the client
+ // connects indirectly through the audio engine which does the mixing.
+ // In exclusive mode, the client connects directly to the audio hardware.
+ // If a valid event is provided in |event_handle|, the client will be
+ // initialized for event-driven buffer handling. If |event_handle| is set to
+ // NULL, event-driven buffer handling is not utilized.
+ static HRESULT SharedModeInitialize(IAudioClient* client,
+ const WAVEFORMATPCMEX* format,
+ HANDLE event_handle,
+ uint32* endpoint_buffer_size);
+ // TODO(henrika): add ExclusiveModeInitialize(...)
+
+ // Create an IAudioRenderClient client for an existing IAudioClient given by
+ // |client|. The IAudioRenderClient interface enables a client to write
+ // output data to a rendering endpoint buffer.
+ static ScopedComPtr<IAudioRenderClient> CreateRenderClient(
+ IAudioClient* client);
+
+ // Create an IAudioCaptureClient client for an existing IAudioClient given by
+ // |client|. The IAudioCaptureClient interface enables a client to read
+ // input data from a capture endpoint buffer.
+ static ScopedComPtr<IAudioCaptureClient> CreateCaptureClient(
+ IAudioClient* client);
+
+ // Fills up the endpoint rendering buffer with silence for an existing
+ // IAudioClient given by |client| and a corresponding IAudioRenderClient
+ // given by |render_client|.
+ static bool FillRenderEndpointBufferWithSilence(
+ IAudioClient* client, IAudioRenderClient* render_client);
+
+ private:
+ CoreAudioUtil() {}
+ ~CoreAudioUtil() {}
+ DISALLOW_COPY_AND_ASSIGN(CoreAudioUtil);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
diff --git a/chromium/media/audio/win/core_audio_util_win_unittest.cc b/chromium/media/audio/win/core_audio_util_win_unittest.cc
new file mode 100644
index 00000000000..6d3e1fcf093
--- /dev/null
+++ b/chromium/media/audio/win/core_audio_util_win_unittest.cc
@@ -0,0 +1,453 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/scoped_handle.h"
+#include "media/audio/win/core_audio_util_win.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::win::ScopedCOMInitializer;
+
+namespace media {
+
+class CoreAudioUtilWinTest : public ::testing::Test {
+ protected:
+  // The test runs on a COM thread in the multithreaded apartment (MTA).
+  // If we don't initialize the COM library on a thread before using COM,
+  // all function calls will return CO_E_NOTINITIALIZED.
+  CoreAudioUtilWinTest()
+      : com_init_(ScopedCOMInitializer::kMTA) {
+    DCHECK(com_init_.succeeded());
+  }
+  virtual ~CoreAudioUtilWinTest() {}
+
+  // Returns true only if Core Audio is supported on this machine and at
+  // least one active capture device AND one active render device exist.
+  // Each test early-outs when this returns false.
+  bool CanRunAudioTest() {
+    bool core_audio = CoreAudioUtil::IsSupported();
+    if (!core_audio)
+      return false;
+    int capture_devices = CoreAudioUtil::NumberOfActiveDevices(eCapture);
+    int render_devices = CoreAudioUtil::NumberOfActiveDevices(eRender);
+    return ((capture_devices > 0) && (render_devices > 0));
+  }
+
+  ScopedCOMInitializer com_init_;
+};
+
+// Verifies that the device count for eAll equals the sum of the active
+// render and capture device counts.
+TEST_F(CoreAudioUtilWinTest, NumberOfActiveDevices) {
+  if (!CanRunAudioTest())
+    return;
+
+  int render_devices = CoreAudioUtil::NumberOfActiveDevices(eRender);
+  EXPECT_GT(render_devices, 0);
+  int capture_devices = CoreAudioUtil::NumberOfActiveDevices(eCapture);
+  EXPECT_GT(capture_devices, 0);
+  int total_devices = CoreAudioUtil::NumberOfActiveDevices(eAll);
+  EXPECT_EQ(total_devices, render_devices + capture_devices);
+}
+
+// Verifies that a valid IMMDeviceEnumerator can be created.
+TEST_F(CoreAudioUtilWinTest, CreateDeviceEnumerator) {
+  if (!CanRunAudioTest())
+    return;
+
+  ScopedComPtr<IMMDeviceEnumerator> enumerator =
+      CoreAudioUtil::CreateDeviceEnumerator();
+  EXPECT_TRUE(enumerator);
+}
+
+// Verifies that default devices can be created for every valid flow/role
+// combination and that eAll is rejected as a flow parameter.
+TEST_F(CoreAudioUtilWinTest, CreateDefaultDevice) {
+  if (!CanRunAudioTest())
+    return;
+
+  struct {
+    EDataFlow flow;
+    ERole role;
+  } data[] = {
+    {eRender, eConsole},
+    {eRender, eCommunications},
+    {eRender, eMultimedia},
+    {eCapture, eConsole},
+    {eCapture, eCommunications},
+    {eCapture, eMultimedia}
+  };
+
+  // Create default devices for all flow/role combinations above.
+  ScopedComPtr<IMMDevice> audio_device;
+  for (int i = 0; i < arraysize(data); ++i) {
+    audio_device =
+        CoreAudioUtil::CreateDefaultDevice(data[i].flow, data[i].role);
+    EXPECT_TRUE(audio_device);
+    EXPECT_EQ(data[i].flow, CoreAudioUtil::GetDataFlow(audio_device));
+  }
+
+  // Only eRender and eCapture are allowed as flow parameter.
+  audio_device = CoreAudioUtil::CreateDefaultDevice(eAll, eConsole);
+  EXPECT_FALSE(audio_device);
+}
+
+// Verifies that CreateDevice() given a unique endpoint ID returns the same
+// endpoint as the default render device the ID was taken from.
+TEST_F(CoreAudioUtilWinTest, CreateDevice) {
+  if (!CanRunAudioTest())
+    return;
+
+  // Get name and ID of default device used for playback.
+  ScopedComPtr<IMMDevice> default_render_device =
+      CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
+  AudioDeviceName default_render_name;
+  EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(default_render_device,
+                                                     &default_render_name)));
+
+  // Use the unique ID as input to CreateDevice() and create a corresponding
+  // IMMDevice.
+  ScopedComPtr<IMMDevice> audio_device =
+      CoreAudioUtil::CreateDevice(default_render_name.unique_id);
+  EXPECT_TRUE(audio_device);
+
+  // Verify that the two IMMDevice interfaces represents the same endpoint
+  // by comparing their unique IDs.
+  AudioDeviceName device_name;
+  EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device,
+                                                     &device_name)));
+  EXPECT_EQ(default_render_name.unique_id, device_name.unique_id);
+}
+
+// Verifies that every default device reports a non-empty friendly name and
+// a non-empty unique ID.
+TEST_F(CoreAudioUtilWinTest, GetDefaultDeviceName) {
+  if (!CanRunAudioTest())
+    return;
+
+  struct {
+    EDataFlow flow;
+    ERole role;
+  } data[] = {
+    {eRender, eConsole},
+    {eRender, eCommunications},
+    {eCapture, eConsole},
+    {eCapture, eCommunications}
+  };
+
+  // Get name and ID of default devices for all flow/role combinations above.
+  ScopedComPtr<IMMDevice> audio_device;
+  AudioDeviceName device_name;
+  for (int i = 0; i < arraysize(data); ++i) {
+    audio_device =
+        CoreAudioUtil::CreateDefaultDevice(data[i].flow, data[i].role);
+    EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device,
+                                                       &device_name)));
+    EXPECT_FALSE(device_name.device_name.empty());
+    EXPECT_FALSE(device_name.unique_id.empty());
+  }
+}
+
+// Verifies that GetFriendlyName(unique_id) agrees with the friendly name
+// returned by GetDeviceName() for both capture and render defaults.
+TEST_F(CoreAudioUtilWinTest, GetFriendlyName) {
+  if (!CanRunAudioTest())
+    return;
+
+  // Get name and ID of default device used for recording.
+  ScopedComPtr<IMMDevice> audio_device =
+      CoreAudioUtil::CreateDefaultDevice(eCapture, eConsole);
+  AudioDeviceName device_name;
+  HRESULT hr = CoreAudioUtil::GetDeviceName(audio_device, &device_name);
+  EXPECT_TRUE(SUCCEEDED(hr));
+
+  // Use unique ID as input to GetFriendlyName() and compare the result
+  // with the already obtained friendly name for the default capture device.
+  std::string friendly_name = CoreAudioUtil::GetFriendlyName(
+      device_name.unique_id);
+  EXPECT_EQ(friendly_name, device_name.device_name);
+
+  // Same test as above but for playback.
+  audio_device = CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
+  hr = CoreAudioUtil::GetDeviceName(audio_device, &device_name);
+  EXPECT_TRUE(SUCCEEDED(hr));
+  friendly_name = CoreAudioUtil::GetFriendlyName(device_name.unique_id);
+  EXPECT_EQ(friendly_name, device_name.device_name);
+}
+
+// Verifies that the default render device is reported as default for
+// eRender but not for eCapture.
+TEST_F(CoreAudioUtilWinTest, DeviceIsDefault) {
+  if (!CanRunAudioTest())
+    return;
+
+  // Verify that the default render device is correctly identified as a
+  // default device.
+  ScopedComPtr<IMMDevice> audio_device =
+      CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
+  AudioDeviceName name;
+  EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device, &name)));
+  const std::string id = name.unique_id;
+  EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eRender, eConsole, id));
+  EXPECT_FALSE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole, id));
+}
+
+// Verifies that an IAudioClient can be created for the default render and
+// capture devices.
+TEST_F(CoreAudioUtilWinTest, CreateDefaultClient) {
+  if (!CanRunAudioTest())
+    return;
+
+  EDataFlow data[] = {eRender, eCapture};
+
+  for (int i = 0; i < arraysize(data); ++i) {
+    ScopedComPtr<IAudioClient> client;
+    client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
+    EXPECT_TRUE(client);
+  }
+}
+
+// Verifies that an IAudioClient can be created from an explicitly created
+// default IMMDevice for both data-flow directions.
+TEST_F(CoreAudioUtilWinTest, CreateClient) {
+  if (!CanRunAudioTest())
+    return;
+
+  EDataFlow data[] = {eRender, eCapture};
+
+  for (int i = 0; i < arraysize(data); ++i) {
+    ScopedComPtr<IMMDevice> device;
+    ScopedComPtr<IAudioClient> client;
+    device = CoreAudioUtil::CreateDefaultDevice(data[i], eConsole);
+    EXPECT_TRUE(device);
+    EXPECT_EQ(data[i], CoreAudioUtil::GetDataFlow(device));
+    client = CoreAudioUtil::CreateClient(device);
+    EXPECT_TRUE(client);
+  }
+}
+
+// Verifies that the shared-mode mixing format can be retrieved and looks
+// sane (extensible PCM, at least mono/8kHz/16-bit).
+TEST_F(CoreAudioUtilWinTest, GetSharedModeMixFormat) {
+  if (!CanRunAudioTest())
+    return;
+
+  ScopedComPtr<IMMDevice> device;
+  ScopedComPtr<IAudioClient> client;
+  device = CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
+  EXPECT_TRUE(device);
+  client = CoreAudioUtil::CreateClient(device);
+  EXPECT_TRUE(client);
+
+  // Perform a simple sanity test of the acquired format structure.
+  WAVEFORMATPCMEX format;
+  EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
+                                                              &format)));
+  EXPECT_GE(format.Format.nChannels, 1);
+  EXPECT_GE(format.Format.nSamplesPerSec, 8000u);
+  EXPECT_GE(format.Format.wBitsPerSample, 16);
+  EXPECT_GE(format.Samples.wValidBitsPerSample, 16);
+  EXPECT_EQ(format.Format.wFormatTag, WAVE_FORMAT_EXTENSIBLE);
+}
+
+// Verifies that the preferred channel layout is reported as supported.
+TEST_F(CoreAudioUtilWinTest, IsChannelLayoutSupported) {
+  if (!CanRunAudioTest())
+    return;
+
+  // The preferred channel layout should always be supported. Being supported
+  // means that it is possible to initialize a shared mode stream with the
+  // particular channel layout.
+  AudioParameters mix_params;
+  HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(eRender, eConsole,
+                                                          &mix_params);
+  EXPECT_TRUE(SUCCEEDED(hr));
+  EXPECT_TRUE(mix_params.IsValid());
+  EXPECT_TRUE(CoreAudioUtil::IsChannelLayoutSupported(
+      eRender, eConsole, mix_params.channel_layout()));
+
+  // Check if it is possible to modify the channel layout to stereo for a
+  // device which reports that it prefers to be opened up in another
+  // channel configuration.
+  if (mix_params.channel_layout() != CHANNEL_LAYOUT_STEREO) {
+    ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
+    // TODO(henrika): it might be too pessimistic to assume false as return
+    // value here.
+    EXPECT_FALSE(CoreAudioUtil::IsChannelLayoutSupported(
+        eRender, eConsole, channel_layout));
+  }
+}
+
+// Verifies that shared- and exclusive-mode device periods are positive and
+// that the exclusive-mode period never exceeds the shared-mode period.
+TEST_F(CoreAudioUtilWinTest, GetDevicePeriod) {
+  if (!CanRunAudioTest())
+    return;
+
+  EDataFlow data[] = {eRender, eCapture};
+
+  // Verify that the device periods are valid for the default render and
+  // capture devices.
+  for (int i = 0; i < arraysize(data); ++i) {
+    ScopedComPtr<IAudioClient> client;
+    REFERENCE_TIME shared_time_period = 0;
+    REFERENCE_TIME exclusive_time_period = 0;
+    client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
+    EXPECT_TRUE(client);
+    EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDevicePeriod(
+        client, AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
+    EXPECT_GT(shared_time_period, 0);
+    EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDevicePeriod(
+        client, AUDCLNT_SHAREMODE_EXCLUSIVE, &exclusive_time_period)));
+    EXPECT_GT(exclusive_time_period, 0);
+    EXPECT_LE(exclusive_time_period, shared_time_period);
+  }
+}
+
+// Verifies that valid preferred audio parameters are returned for the
+// default render and capture clients.
+TEST_F(CoreAudioUtilWinTest, GetPreferredAudioParameters) {
+  if (!CanRunAudioTest())
+    return;
+
+  EDataFlow data[] = {eRender, eCapture};
+
+  // Verify that the preferred audio parameters are OK for the default render
+  // and capture devices.
+  for (int i = 0; i < arraysize(data); ++i) {
+    ScopedComPtr<IAudioClient> client;
+    AudioParameters params;
+    client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
+    EXPECT_TRUE(client);
+    EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(client,
+                                                                     &params)));
+    EXPECT_TRUE(params.IsValid());
+  }
+}
+
+// Exercises SharedModeInitialize(): successful init, double-init failure,
+// re-init after release, rejection of an unsupported format, and
+// event-driven initialization.
+TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
+  if (!CanRunAudioTest())
+    return;
+
+  ScopedComPtr<IAudioClient> client;
+  client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+  EXPECT_TRUE(client);
+
+  WAVEFORMATPCMEX format;
+  EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
+                                                              &format)));
+
+  // Perform a shared-mode initialization without event-driven buffer handling.
+  uint32 endpoint_buffer_size = 0;
+  HRESULT hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+                                                   &endpoint_buffer_size);
+  EXPECT_TRUE(SUCCEEDED(hr));
+  EXPECT_GT(endpoint_buffer_size, 0u);
+
+  // It is only possible to create a client once.
+  hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+                                           &endpoint_buffer_size);
+  EXPECT_FALSE(SUCCEEDED(hr));
+  EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
+
+  // Verify that it is possible to reinitialize the client after releasing it.
+  client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+  EXPECT_TRUE(client);
+  hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+                                           &endpoint_buffer_size);
+  EXPECT_TRUE(SUCCEEDED(hr));
+  EXPECT_GT(endpoint_buffer_size, 0u);
+
+  // Use a non-supported format and verify that initialization fails.
+  // A simple way to emulate an invalid format is to use the shared-mode
+  // mixing format and modify the preferred sample rate.
+  client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+  EXPECT_TRUE(client);
+  format.Format.nSamplesPerSec = format.Format.nSamplesPerSec + 1;
+  EXPECT_FALSE(CoreAudioUtil::IsFormatSupported(
+      client, AUDCLNT_SHAREMODE_SHARED, &format));
+  hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+                                           &endpoint_buffer_size);
+  EXPECT_TRUE(FAILED(hr));
+  EXPECT_EQ(hr, E_INVALIDARG);
+
+  // Finally, perform a shared-mode initialization using event-driven buffer
+  // handling. The event handle will be signaled when an audio buffer is ready
+  // to be processed by the client (not verified here).
+  // The event handle should be in the nonsignaled state.
+  base::win::ScopedHandle event_handle(::CreateEvent(NULL, TRUE, FALSE, NULL));
+  client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
+  EXPECT_TRUE(client);
+  EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
+                                                              &format)));
+  EXPECT_TRUE(CoreAudioUtil::IsFormatSupported(
+      client, AUDCLNT_SHAREMODE_SHARED, &format));
+  hr = CoreAudioUtil::SharedModeInitialize(client, &format, event_handle.Get(),
+                                           &endpoint_buffer_size);
+  EXPECT_TRUE(SUCCEEDED(hr));
+  EXPECT_GT(endpoint_buffer_size, 0u);
+}
+
+// Verifies that render/capture clients can only be created after the
+// IAudioClient has been initialized.
+TEST_F(CoreAudioUtilWinTest, CreateRenderAndCaptureClients) {
+  if (!CanRunAudioTest())
+    return;
+
+  EDataFlow data[] = {eRender, eCapture};
+
+  WAVEFORMATPCMEX format;
+  uint32 endpoint_buffer_size = 0;
+
+  for (int i = 0; i < arraysize(data); ++i) {
+    ScopedComPtr<IAudioClient> client;
+    ScopedComPtr<IAudioRenderClient> render_client;
+    ScopedComPtr<IAudioCaptureClient> capture_client;
+
+    client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
+    EXPECT_TRUE(client);
+    EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
+                                                                &format)));
+    if (data[i] == eRender) {
+      // It is not possible to create a render client using an uninitialized
+      // client interface.
+      render_client = CoreAudioUtil::CreateRenderClient(client);
+      EXPECT_FALSE(render_client);
+
+      // Do a proper initialization and verify that it works this time.
+      CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+                                          &endpoint_buffer_size);
+      render_client = CoreAudioUtil::CreateRenderClient(client);
+      EXPECT_TRUE(render_client);
+      EXPECT_GT(endpoint_buffer_size, 0u);
+    } else if (data[i] == eCapture) {
+      // It is not possible to create a capture client using an uninitialized
+      // client interface.
+      capture_client = CoreAudioUtil::CreateCaptureClient(client);
+      EXPECT_FALSE(capture_client);
+
+      // Do a proper initialization and verify that it works this time.
+      CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+                                          &endpoint_buffer_size);
+      capture_client = CoreAudioUtil::CreateCaptureClient(client);
+      EXPECT_TRUE(capture_client);
+      EXPECT_GT(endpoint_buffer_size, 0u);
+    }
+  }
+}
+
+// Verifies that filling the render endpoint buffer with silence raises the
+// queued-frame count from zero to the full endpoint buffer size.
+TEST_F(CoreAudioUtilWinTest, FillRenderEndpointBufferWithSilence) {
+  if (!CanRunAudioTest())
+    return;
+
+  // Create default clients using the default mixing format for shared mode.
+  ScopedComPtr<IAudioClient> client(
+      CoreAudioUtil::CreateDefaultClient(eRender, eConsole));
+  EXPECT_TRUE(client);
+
+  WAVEFORMATPCMEX format;
+  uint32 endpoint_buffer_size = 0;
+  EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
+                                                              &format)));
+  CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
+                                      &endpoint_buffer_size);
+  EXPECT_GT(endpoint_buffer_size, 0u);
+
+  ScopedComPtr<IAudioRenderClient> render_client(
+      CoreAudioUtil::CreateRenderClient(client));
+  EXPECT_TRUE(render_client);
+
+  // The endpoint audio buffer should not be filled up by default after being
+  // created.
+  UINT32 num_queued_frames = 0;
+  client->GetCurrentPadding(&num_queued_frames);
+  EXPECT_EQ(num_queued_frames, 0u);
+
+  // Fill it up with zeros and verify that the buffer is full.
+  // It is not possible to verify that the actual data consists of zeros
+  // since we can't access data that has already been sent to the endpoint
+  // buffer.
+  EXPECT_TRUE(CoreAudioUtil::FillRenderEndpointBufferWithSilence(
+      client, render_client));
+  client->GetCurrentPadding(&num_queued_frames);
+  EXPECT_EQ(num_queued_frames, endpoint_buffer_size);
+}
+
+//
+
+} // namespace media
diff --git a/chromium/media/audio/win/device_enumeration_win.cc b/chromium/media/audio/win/device_enumeration_win.cc
new file mode 100644
index 00000000000..36ed2913ffe
--- /dev/null
+++ b/chromium/media/audio/win/device_enumeration_win.cc
@@ -0,0 +1,177 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <MMDeviceAPI.h>
+#include <mmsystem.h>
+#include <Functiondiscoverykeys_devpkey.h> // MMDeviceAPI.h must come first
+
+#include "media/audio/win/audio_manager_win.h"
+
+#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_comptr.h"
+#include "base/win/scoped_propvariant.h"
+
+using media::AudioDeviceNames;
+using base::win::ScopedComPtr;
+using base::win::ScopedCoMem;
+
+// Taken from Mmddk.h.
+#define DRV_RESERVED 0x0800
+#define DRV_QUERYFUNCTIONINSTANCEID (DRV_RESERVED + 17)
+#define DRV_QUERYFUNCTIONINSTANCEIDSIZE (DRV_RESERVED + 18)
+
+namespace media {
+
+// Enumerates all active audio capture endpoints using the MMDevice API
+// (Vista and later) and appends one {friendly name, endpoint ID} record per
+// device to |device_names|. Returns false only if the enumerator or the
+// endpoint collection cannot be created; an empty device list yields true.
+bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
+  // It is assumed that this method is called from a COM thread, i.e.,
+  // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
+  ScopedComPtr<IMMDeviceEnumerator> enumerator;
+  HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator), NULL,
+                                         CLSCTX_INPROC_SERVER);
+  DCHECK_NE(CO_E_NOTINITIALIZED, hr);
+  if (FAILED(hr)) {
+    LOG(WARNING) << "Failed to create IMMDeviceEnumerator: " << std::hex << hr;
+    return false;
+  }
+
+  // Generate a collection of active audio capture endpoint devices.
+  // This method will succeed even if all devices are disabled.
+  ScopedComPtr<IMMDeviceCollection> collection;
+  hr = enumerator->EnumAudioEndpoints(eCapture,
+                                      DEVICE_STATE_ACTIVE,
+                                      collection.Receive());
+  if (FAILED(hr))
+    return false;
+
+  // Retrieve the number of active capture devices.
+  UINT number_of_active_devices = 0;
+  collection->GetCount(&number_of_active_devices);
+  if (number_of_active_devices == 0)
+    return true;
+
+  media::AudioDeviceName device;
+
+  // Loop over all active capture devices and add friendly name and
+  // unique ID to the |device_names| list.
+  for (UINT i = 0; i < number_of_active_devices; ++i) {
+    // Retrieve unique name of endpoint device.
+    // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
+    ScopedComPtr<IMMDevice> audio_device;
+    hr = collection->Item(i, audio_device.Receive());
+    if (FAILED(hr))
+      continue;
+
+    // Store the unique name.
+    ScopedCoMem<WCHAR> endpoint_device_id;
+    audio_device->GetId(&endpoint_device_id);
+    device.unique_id = WideToUTF8(static_cast<WCHAR*>(endpoint_device_id));
+
+    // Retrieve user-friendly name of endpoint device.
+    // Example: "Microphone (Realtek High Definition Audio)".
+    ScopedComPtr<IPropertyStore> properties;
+    hr = audio_device->OpenPropertyStore(STGM_READ, properties.Receive());
+    if (SUCCEEDED(hr)) {
+      base::win::ScopedPropVariant friendly_name;
+      hr = properties->GetValue(PKEY_Device_FriendlyName,
+                                friendly_name.Receive());
+
+      // Store the user-friendly name.
+      if (SUCCEEDED(hr) &&
+          friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
+        device.device_name = WideToUTF8(friendly_name.get().pwszVal);
+      }
+    }
+
+    // Add combination of user-friendly and unique name to the output list.
+    device_names->push_back(device);
+  }
+
+  return true;
+}
+
+// Enumerates waveform-audio input devices using the legacy WaveIn API
+// (XP-compatible) and appends one record per device to |device_names|.
+// The unique ID equals the friendly name since WaveIn on XP exposes no
+// stable device identifier.
+bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
+  // Retrieve the number of active waveform input devices.
+  UINT number_of_active_devices = waveInGetNumDevs();
+  if (number_of_active_devices == 0)
+    return true;
+
+  media::AudioDeviceName device;
+  WAVEINCAPS capabilities;
+  MMRESULT err = MMSYSERR_NOERROR;
+
+  // Loop over all active capture devices and add friendly name and
+  // unique ID to the |device_names| list. Note that, for Wave on XP,
+  // the "unique" name will simply be a copy of the friendly name since
+  // there is no safe method to retrieve a unique device name on XP.
+  for (UINT i = 0; i < number_of_active_devices; ++i) {
+    // Retrieve the capabilities of the specified waveform-audio input device.
+    err = waveInGetDevCaps(i, &capabilities, sizeof(capabilities));
+    if (err != MMSYSERR_NOERROR)
+      continue;
+
+    // Store the user-friendly name. Max length is MAXPNAMELEN(=32)
+    // characters and the name can be truncated on XP.
+    // Example: "Microphone (Realtek High Defini".
+    device.device_name = WideToUTF8(capabilities.szPname);
+
+    // Store the "unique" name (we use same as friendly name on Windows XP).
+    device.unique_id = WideToUTF8(capabilities.szPname);
+
+    // Add combination of user-friendly and unique name to the output list.
+    device_names->push_back(device);
+  }
+
+  return true;
+}
+
+// Maps an MMDevice endpoint ID (|device_id|) to the WaveIn-style device
+// identifier (the possibly truncated friendly name) by scanning all waveIn
+// devices for a matching endpoint ID. Returns an empty string when no
+// waveIn device matches.
+std::string ConvertToWinXPDeviceId(const std::string& device_id) {
+  UINT number_of_active_devices = waveInGetNumDevs();
+  MMRESULT result = MMSYSERR_NOERROR;
+
+  UINT i = 0;
+  for (; i < number_of_active_devices; ++i) {
+    size_t size = 0;
+    // Get the size (including the terminating NULL) of the endpoint ID of the
+    // waveIn device.
+    result = waveInMessage(reinterpret_cast<HWAVEIN>(i),
+                           DRV_QUERYFUNCTIONINSTANCEIDSIZE,
+                           reinterpret_cast<DWORD_PTR>(&size), NULL);
+    if (result != MMSYSERR_NOERROR)
+      continue;
+
+    ScopedCoMem<WCHAR> id;
+    id.Reset(static_cast<WCHAR*>(CoTaskMemAlloc(size)));
+    if (!id)
+      continue;
+
+    // Get the endpoint ID string for this waveIn device.
+    result = waveInMessage(
+        reinterpret_cast<HWAVEIN>(i), DRV_QUERYFUNCTIONINSTANCEID,
+        reinterpret_cast<DWORD_PTR>(static_cast<WCHAR*>(id)), size);
+    if (result != MMSYSERR_NOERROR)
+      continue;
+
+    std::string utf8_id = WideToUTF8(static_cast<WCHAR*>(id));
+    // Check whether the endpoint ID string of this waveIn device matches that
+    // of the audio endpoint device.
+    if (device_id == utf8_id)
+      break;
+  }
+
+  // If a matching waveIn device was found, convert the unique endpoint ID
+  // string to a standard friendly name with max 32 characters.
+  if (i < number_of_active_devices) {
+    WAVEINCAPS capabilities;
+
+    result = waveInGetDevCaps(i, &capabilities, sizeof(capabilities));
+    if (result == MMSYSERR_NOERROR)
+      return WideToUTF8(capabilities.szPname);
+  }
+
+  return std::string();
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/device_enumeration_win.h b/chromium/media/audio/win/device_enumeration_win.h
new file mode 100644
index 00000000000..3d44670a6d3
--- /dev/null
+++ b/chromium/media/audio/win/device_enumeration_win.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_WIN_DEVICE_ENUMERATION_WIN_H_
+#define MEDIA_AUDIO_WIN_DEVICE_ENUMERATION_WIN_H_
+
+#include <string>
+
+#include "media/audio/audio_device_name.h"
+
+namespace media {
+
+// Returns a list of audio input device structures (name and unique device ID)
+// using the MMDevice API which is supported on Windows Vista and higher.
+// Example record in the output list:
+// - device_name: "Microphone (Realtek High Definition Audio)".
+// - unique_id: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}"
+// This method must be called from a COM thread using MTA.
+bool GetInputDeviceNamesWin(media::AudioDeviceNames* device_names);
+
+// Returns a list of audio input device structures (name and unique device ID)
+// using the WaveIn API which is supported on Windows XP and higher.
+// Example record in the output list:
+// - device_name: "Microphone (Realtek High Defini".
+// - unique_id: "Microphone (Realtek High Defini" (same as friendly name).
+bool GetInputDeviceNamesWinXP(media::AudioDeviceNames* device_names);
+
+// Converts a device ID generated by |GetInputDeviceNamesWin()| to the
+// corresponding ID by |GetInputDeviceNamesWinXP()|. Returns an empty string on
+// failure.
+// Example input and output:
+// - input ID: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}"
+// - output ID: "Microphone (Realtek High Defini"
+std::string ConvertToWinXPDeviceId(const std::string& device_id);
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_DEVICE_ENUMERATION_WIN_H_
+
diff --git a/chromium/media/audio/win/wavein_input_win.cc b/chromium/media/audio/win/wavein_input_win.cc
new file mode 100644
index 00000000000..3c4147738df
--- /dev/null
+++ b/chromium/media/audio/win/wavein_input_win.cc
@@ -0,0 +1,316 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/win/wavein_input_win.h"
+
+#pragma comment(lib, "winmm.lib")
+
+#include "base/logging.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_util.h"
+#include "media/audio/win/audio_manager_win.h"
+#include "media/audio/win/device_enumeration_win.h"
+
+namespace media {
+
+// Our sound buffers are allocated once and kept in a circular linked list
+// using the WAVEHDR::dwUser variable. SetupBuffers() links each header to
+// the previously created one and closes the ring by pointing the first
+// buffer at the last one.
+static WAVEHDR* GetNextBuffer(WAVEHDR* current) {
+  return reinterpret_cast<WAVEHDR*>(current->dwUser);
+}
+
+PCMWaveInAudioInputStream::PCMWaveInAudioInputStream(
+    AudioManagerWin* manager, const AudioParameters& params, int num_buffers,
+    const std::string& device_id)
+    : state_(kStateEmpty),
+      manager_(manager),
+      device_id_(device_id),
+      wavein_(NULL),
+      callback_(NULL),
+      num_buffers_(num_buffers),
+      buffer_(NULL),
+      channels_(params.channels()) {
+  DCHECK_GT(num_buffers_, 0);
+  format_.wFormatTag = WAVE_FORMAT_PCM;
+  // The wave format is capped at stereo; |channels_| keeps the requested
+  // channel count.
+  format_.nChannels = params.channels() > 2 ? 2 : params.channels();
+  format_.nSamplesPerSec = params.sample_rate();
+  format_.wBitsPerSample = params.bits_per_sample();
+  format_.cbSize = 0;
+  format_.nBlockAlign = (format_.nChannels * format_.wBitsPerSample) / 8;
+  format_.nAvgBytesPerSec = format_.nBlockAlign * format_.nSamplesPerSec;
+  buffer_size_ = params.frames_per_buffer() * format_.nBlockAlign;
+  // If we don't have a packet size we use 100ms.
+  if (!buffer_size_)
+    buffer_size_ = format_.nAvgBytesPerSec / 10;
+  // The event is auto-reset; it is signaled by the wave callback when the
+  // main thread is waiting in Stop().
+  stopped_event_.Set(::CreateEventW(NULL, FALSE, FALSE, NULL));
+}
+
+// Close() must have been called (releasing |wavein_|) before destruction.
+PCMWaveInAudioInputStream::~PCMWaveInAudioInputStream() {
+  DCHECK(NULL == wavein_);
+}
+
+// Opens the waveIn device selected by |device_id_| and allocates the
+// recording buffers. Returns false if the stream is not in the initial
+// state, the buffer count is out of range [2, 10], the device ID cannot be
+// resolved, or waveInOpen() fails.
+bool PCMWaveInAudioInputStream::Open() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (state_ != kStateEmpty)
+    return false;
+  if (num_buffers_ < 2 || num_buffers_ > 10)
+    return false;
+
+  // Convert the stored device id string into an unsigned integer
+  // corresponding to the selected device.
+  UINT device_id = WAVE_MAPPER;
+  if (!GetDeviceId(&device_id)) {
+    return false;
+  }
+
+  // Open the specified input device for recording.
+  MMRESULT result = MMSYSERR_NOERROR;
+  result = ::waveInOpen(&wavein_, device_id, &format_,
+                        reinterpret_cast<DWORD_PTR>(WaveCallback),
+                        reinterpret_cast<DWORD_PTR>(this),
+                        CALLBACK_FUNCTION);
+  if (result != MMSYSERR_NOERROR)
+    return false;
+
+  SetupBuffers();
+  state_ = kStateReady;
+  return true;
+}
+
+// Allocates |num_buffers_| WAVEHDRs, each immediately followed in memory by
+// its |buffer_size_| byte data area, prepares them with the driver, and
+// links them into a circular list via WAVEHDR::dwUser (each header points
+// to the previously created one; the first is finally pointed at the last).
+void PCMWaveInAudioInputStream::SetupBuffers() {
+  WAVEHDR* last = NULL;
+  WAVEHDR* first = NULL;
+  for (int ix = 0; ix != num_buffers_; ++ix) {
+    uint32 sz = sizeof(WAVEHDR) + buffer_size_;
+    buffer_ = reinterpret_cast<WAVEHDR*>(new char[sz]);
+    buffer_->lpData = reinterpret_cast<char*>(buffer_) + sizeof(WAVEHDR);
+    buffer_->dwBufferLength = buffer_size_;
+    buffer_->dwBytesRecorded = 0;
+    buffer_->dwUser = reinterpret_cast<DWORD_PTR>(last);
+    buffer_->dwFlags = WHDR_DONE;
+    buffer_->dwLoops = 0;
+    if (ix == 0)
+      first = buffer_;
+    last = buffer_;
+    ::waveInPrepareHeader(wavein_, buffer_, sizeof(WAVEHDR));
+  }
+  // Fix the first buffer to point to the last one.
+  first->dwUser = reinterpret_cast<DWORD_PTR>(last);
+}
+
+// Walks the circular buffer list once, unpreparing each header with the
+// driver and releasing its allocation.
+void PCMWaveInAudioInputStream::FreeBuffers() {
+  WAVEHDR* current = buffer_;
+  for (int ix = 0; ix != num_buffers_; ++ix) {
+    WAVEHDR* next = GetNextBuffer(current);
+    if (current->dwFlags & WHDR_PREPARED)
+      ::waveInUnprepareHeader(wavein_, current, sizeof(WAVEHDR));
+    delete[] reinterpret_cast<char*>(current);
+    current = next;
+  }
+  buffer_ = NULL;
+}
+
+// Queues all buffers with the driver and starts recording; |callback| will
+// receive data from the wave callback thread. Reverts to the ready state if
+// waveInStart() fails.
+void PCMWaveInAudioInputStream::Start(AudioInputCallback* callback) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (state_ != kStateReady)
+    return;
+
+  DCHECK(!callback_);
+  callback_ = callback;
+  state_ = kStateRecording;
+
+  WAVEHDR* buffer = buffer_;
+  for (int ix = 0; ix != num_buffers_; ++ix) {
+    QueueNextPacket(buffer);
+    buffer = GetNextBuffer(buffer);
+  }
+  buffer = buffer_;
+
+  MMRESULT result = ::waveInStart(wavein_);
+  if (result != MMSYSERR_NOERROR) {
+    HandleError(result);
+    state_ = kStateReady;
+    callback_ = NULL;
+  }
+}
+
+// Stopping is tricky. First, no buffer should be locked by the audio driver
+// or else the waveInReset() will deadlock and secondly, the callback should
+// not be inside the AudioInputCallback's OnData because waveInReset()
+// forcefully kills the callback thread.
+void PCMWaveInAudioInputStream::Stop() {
+  DVLOG(1) << "PCMWaveInAudioInputStream::Stop()";
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (state_ != kStateRecording)
+    return;
+
+  bool already_stopped = false;
+  {
+    // Tell the callback that we're stopping.
+    // As a result, |stopped_event_| will be signaled in callback method.
+    base::AutoLock auto_lock(lock_);
+    already_stopped = (callback_ == NULL);
+    callback_ = NULL;
+  }
+
+  if (already_stopped)
+    return;
+
+  // Wait for the callback to finish, it will signal us when ready to be reset.
+  DWORD wait = ::WaitForSingleObject(stopped_event_, INFINITE);
+  DCHECK_EQ(wait, WAIT_OBJECT_0);
+
+  // Stop input and reset the current position to zero for |wavein_|.
+  // All pending buffers are marked as done and returned to the application.
+  MMRESULT res = ::waveInReset(wavein_);
+  DCHECK_EQ(res, static_cast<MMRESULT>(MMSYSERR_NOERROR));
+
+  state_ = kStateReady;
+}
+
+// Releases the buffers and the waveIn handle, then notifies the manager.
+// After ReleaseInputStream() returns, |this| may already be destroyed.
+void PCMWaveInAudioInputStream::Close() {
+  DVLOG(1) << "PCMWaveInAudioInputStream::Close()";
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // We should not call Close() while recording. Catch it with DCHECK and
+  // implement auto-stop just in case.
+  DCHECK_NE(state_, kStateRecording);
+  Stop();
+
+  if (wavein_) {
+    FreeBuffers();
+
+    // waveInClose() generates a WIM_CLOSE callback. In case Start() was never
+    // called, force a reset to ensure close succeeds.
+    MMRESULT res = ::waveInReset(wavein_);
+    DCHECK_EQ(res, static_cast<MMRESULT>(MMSYSERR_NOERROR));
+    res = ::waveInClose(wavein_);
+    DCHECK_EQ(res, static_cast<MMRESULT>(MMSYSERR_NOERROR));
+    state_ = kStateClosed;
+    wavein_ = NULL;
+  }
+
+  // Tell the audio manager that we have been released. This can result in
+  // the manager destroying us in-place so this needs to be the last thing
+  // we do on this function.
+  manager_->ReleaseInputStream(this);
+}
+
+// Volume control is not implemented for the WaveIn backend; always 0.0.
+double PCMWaveInAudioInputStream::GetMaxVolume() {
+  // TODO(henrika): Add volume support using the Audio Mixer API.
+  return 0.0;
+}
+
+// Volume control is not implemented for the WaveIn backend; no-op.
+void PCMWaveInAudioInputStream::SetVolume(double volume) {
+  // TODO(henrika): Add volume support using the Audio Mixer API.
+}
+
+// Volume control is not implemented for the WaveIn backend; always 0.0.
+double PCMWaveInAudioInputStream::GetVolume() {
+  // TODO(henrika): Add volume support using the Audio Mixer API.
+  return 0.0;
+}
+
+// AGC is not implemented for the WaveIn backend; no-op.
+void PCMWaveInAudioInputStream::SetAutomaticGainControl(bool enabled) {
+  // TODO(henrika): Add AGC support when volume control has been added.
+  NOTIMPLEMENTED();
+}
+
+// AGC is not implemented for the WaveIn backend; always false.
+bool PCMWaveInAudioInputStream::GetAutomaticGainControl() {
+  // TODO(henrika): Add AGC support when volume control has been added.
+  NOTIMPLEMENTED();
+  return false;
+}
+
+// Logs the MMRESULT failure and forwards it to the client's OnError().
+// |callback_| must be non-NULL when this is invoked.
+void PCMWaveInAudioInputStream::HandleError(MMRESULT error) {
+  DLOG(WARNING) << "PCMWaveInAudio error " << error;
+  callback_->OnError(this);
+}
+
+// Hands |buffer| back to the driver so it can be filled with new input data.
+void PCMWaveInAudioInputStream::QueueNextPacket(WAVEHDR *buffer) {
+  MMRESULT res = ::waveInAddBuffer(wavein_, buffer, sizeof(WAVEHDR));
+  if (res != MMSYSERR_NOERROR)
+    HandleError(res);
+}
+
+// Translates |device_id_| into a waveIn device index. The default-device
+// sentinel maps to WAVE_MAPPER; otherwise the active-device list is scanned
+// for a matching unique ID. Returns false when no device matches.
+bool PCMWaveInAudioInputStream::GetDeviceId(UINT* device_index) {
+  // Deliver the default input device id (WAVE_MAPPER) if the default
+  // device has been selected.
+  if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
+    *device_index = WAVE_MAPPER;
+    return true;
+  }
+
+  // Get list of all available and active devices.
+  AudioDeviceNames device_names;
+  if (!media::GetInputDeviceNamesWinXP(&device_names))
+    return false;
+
+  if (device_names.empty())
+    return false;
+
+  // Search the full list of devices and compare with the specified
+  // device id which was specified in the constructor. Stop comparing
+  // when a match is found and return the corresponding index.
+  UINT index = 0;
+  bool found_device = false;
+  AudioDeviceNames::const_iterator it = device_names.begin();
+  while (it != device_names.end()) {
+    if (it->unique_id.compare(device_id_) == 0) {
+      *device_index = index;
+      found_device = true;
+      break;
+    }
+    ++index;
+    ++it;
+  }
+
+  return found_device;
+}
+
+// Windows calls us back in this function when some events happen. Most notably
+// when it has an audio buffer with recorded data. Runs on a driver-owned
+// callback thread, never on the creating thread.
+void PCMWaveInAudioInputStream::WaveCallback(HWAVEIN hwi, UINT msg,
+                                             DWORD_PTR instance,
+                                             DWORD_PTR param1, DWORD_PTR) {
+  PCMWaveInAudioInputStream* obj =
+      reinterpret_cast<PCMWaveInAudioInputStream*>(instance);
+
+  // The lock ensures that Stop() can't be called during a callback.
+  base::AutoLock auto_lock(obj->lock_);
+
+  if (msg == WIM_DATA) {
+    // The WIM_DATA message is sent when waveform-audio data is present in
+    // the input buffer and the buffer is being returned to the application.
+    // The message can be sent when the buffer is full or after the
+    // waveInReset function is called.
+    if (obj->callback_) {
+      // TODO(henrika): the |volume| parameter is always set to zero since
+      // there is currently no support for controlling the microphone volume
+      // level.
+      WAVEHDR* buffer = reinterpret_cast<WAVEHDR*>(param1);
+      obj->callback_->OnData(obj,
+                             reinterpret_cast<const uint8*>(buffer->lpData),
+                             buffer->dwBytesRecorded,
+                             buffer->dwBytesRecorded,
+                             0.0);
+
+      // Queue the finished buffer back with the audio driver. Since we are
+      // reusing the same buffers we can get away without calling
+      // waveInPrepareHeader.
+      obj->QueueNextPacket(buffer);
+    } else {
+      // Main thread has called Stop() and set |callback_| to NULL and is
+      // now waiting to issue waveInReset which will kill this thread.
+      // We should not call AudioSourceCallback code anymore.
+      ::SetEvent(obj->stopped_event_);
+    }
+  } else if (msg == WIM_CLOSE) {
+    // Intentionally no-op for now.
+  } else if (msg == WIM_OPEN) {
+    // Intentionally no-op for now.
+  }
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/wavein_input_win.h b/chromium/media/audio/win/wavein_input_win.h
new file mode 100644
index 00000000000..4b830e34805
--- /dev/null
+++ b/chromium/media/audio/win/wavein_input_win.h
@@ -0,0 +1,131 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_WIN_WAVEIN_INPUT_WIN_H_
+#define MEDIA_AUDIO_WIN_WAVEIN_INPUT_WIN_H_
+
+#include <string>
+
+#include <windows.h>
+#include <mmsystem.h>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "base/win/scoped_handle.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerWin;
+
+// Implements PCM audio capture on Windows using the waveIn API. The OS
+// invokes the static WaveCallback() on its own thread whenever a buffer of
+// recorded audio is ready; |lock_| serializes that callback against Stop().
+class PCMWaveInAudioInputStream : public AudioInputStream {
+ public:
+  // The ctor takes all the usual parameters, plus |manager| which is the
+  // audio manager who is creating this object and |device_id| which
+  // is provided by the operating system.
+  PCMWaveInAudioInputStream(AudioManagerWin* manager,
+                            const AudioParameters& params,
+                            int num_buffers,
+                            const std::string& device_id);
+  virtual ~PCMWaveInAudioInputStream();
+
+  // Implementation of AudioInputStream.
+  virtual bool Open() OVERRIDE;
+  virtual void Start(AudioInputCallback* callback) OVERRIDE;
+  virtual void Stop() OVERRIDE;
+  virtual void Close() OVERRIDE;
+  // TODO(henrika): Add volume support using the Audio Mixer API.
+  virtual double GetMaxVolume() OVERRIDE;
+  virtual void SetVolume(double volume) OVERRIDE;
+  virtual double GetVolume() OVERRIDE;
+  virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
+  virtual bool GetAutomaticGainControl() OVERRIDE;
+
+ private:
+  // Stream lifecycle; transitions are driven by the public methods above.
+  enum State {
+    kStateEmpty,      // Initial state.
+    kStateReady,      // Device obtained and ready to record.
+    kStateRecording,  // Recording audio.
+    kStateStopping,   // Trying to stop, waiting for callback to finish.
+    kStateStopped,    // Stopped. Device was reset.
+    kStateClosed      // Device has been released.
+  };
+
+  // Allow unit tests to query the device ID.
+  friend class AudioInputDeviceTest;
+
+  // Windows calls us back with the recorded audio data here. See msdn
+  // documentation for 'waveInProc' for details about the parameters.
+  static void CALLBACK WaveCallback(HWAVEIN hwi, UINT msg, DWORD_PTR instance,
+                                    DWORD_PTR param1, DWORD_PTR param2);
+
+  // If windows reports an error this function handles it and passes it to
+  // the attached AudioInputCallback::OnError().
+  void HandleError(MMRESULT error);
+
+  // Allocates and prepares the memory that will be used for recording.
+  void SetupBuffers();
+
+  // Deallocates the memory allocated in SetupBuffers.
+  void FreeBuffers();
+
+  // Sends a buffer to the audio driver for recording.
+  void QueueNextPacket(WAVEHDR* buffer);
+
+  // Converts the stored device id string into an unsigned integer which
+  // can be used by waveInOpen() to open the specified capture device.
+  bool GetDeviceId(UINT* device_index);
+
+  // Verifies that public methods run on the thread that created the stream.
+  base::ThreadChecker thread_checker_;
+
+  // Reader beware. Visual C has stronger guarantees on volatile vars than
+  // most people expect. In fact, it has release semantics on write and
+  // acquire semantics on reads. See the msdn documentation.
+  volatile State state_;
+
+  // The audio manager that created this input stream. We notify it when
+  // we close so it can release its own resources.
+  AudioManagerWin* manager_;
+
+  // We use the callback mostly to periodically give the recorded audio data.
+  // Set in Start(), cleared to NULL by Stop() under |lock_|.
+  AudioInputCallback* callback_;
+
+  // The number of buffers of size |buffer_size_| each to use.
+  const int num_buffers_;
+
+  // The size in bytes of each audio buffer.
+  uint32 buffer_size_;
+
+  // Channels, 1 or 2.
+  const int channels_;
+
+  // Contains the unique name of the selected endpoint device.
+  // Note that AudioManagerBase::kDefaultDeviceId represents the default
+  // device role and is not a valid ID as such.
+  std::string device_id_;
+
+  // Windows native structure to encode the format parameters.
+  WAVEFORMATEX format_;
+
+  // Handle to the instance of the wave device.
+  HWAVEIN wavein_;
+
+  // Pointer to the first allocated audio buffer. This object owns it.
+  WAVEHDR* buffer_;
+
+  // An event that is signaled when the callback thread is ready to stop.
+  base::win::ScopedHandle stopped_event_;
+
+  // Lock used to avoid conflicts when Stop() is called during a callback.
+  base::Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(PCMWaveInAudioInputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_WAVEIN_INPUT_WIN_H_
diff --git a/chromium/media/audio/win/waveout_output_win.cc b/chromium/media/audio/win/waveout_output_win.cc
new file mode 100644
index 00000000000..47d4fa65053
--- /dev/null
+++ b/chromium/media/audio/win/waveout_output_win.cc
@@ -0,0 +1,410 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/win/waveout_output_win.h"
+
+#include <windows.h>
+#include <mmsystem.h>
+#pragma comment(lib, "winmm.lib")
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+#include "base/debug/trace_event.h"
+#include "base/logging.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/win/audio_manager_win.h"
+
+namespace media {
+
+// Some general thoughts about the waveOut API which is badly documented :
+// - We use CALLBACK_EVENT mode in which XP signals events such as buffer
+// releases.
+// - We use RegisterWaitForSingleObject() so one of threads in thread pool
+// automatically calls our callback that feeds more data to Windows.
+// - Windows does not provide a way to query if the device is playing or paused
+// thus it forces you to maintain state, which naturally is not exactly
+// synchronized to the actual device state.
+
+// Sixty four MB is the maximum buffer size per AudioOutputStream: Open()
+// rejects configurations where BufferSize() * num_buffers_ exceeds this.
+static const uint32 kMaxOpenBufferSize = 1024 * 1024 * 64;
+
+// See Also
+// http://www.thx.com/consumer/home-entertainment/home-theater/surround-sound-speaker-set-up/
+// http://en.wikipedia.org/wiki/Surround_sound
+
+// Largest channel count that has a dedicated speaker mask below. The table
+// has kMaxChannelsToMask + 1 entries so it can be indexed directly by the
+// channel count; index 0 is an unused placeholder.
+static const int kMaxChannelsToMask = 8;
+static const unsigned int kChannelsToMask[kMaxChannelsToMask + 1] = {
+    0,
+    // 1 = Mono
+    SPEAKER_FRONT_CENTER,
+    // 2 = Stereo
+    SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT,
+    // 3 = Stereo + Center
+    SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER,
+    // 4 = Quad
+    SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
+    SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT,
+    // 5 = 5.0
+    SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER |
+    SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT,
+    // 6 = 5.1
+    SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
+    SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
+    SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT,
+    // 7 = 6.1
+    SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
+    SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
+    SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT |
+    SPEAKER_BACK_CENTER,
+    // 8 = 7.1
+    SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
+    SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
+    SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT |
+    SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT
+    // TODO(fbarchard): Add additional masks for 7.2 and beyond.
+};
+
+// Size in bytes of one allocation slot: the WAVEHDR header immediately
+// followed by |buffer_size_| bytes of audio, rounded up to a multiple of 16.
+inline size_t PCMWaveOutAudioOutputStream::BufferSize() const {
+  const size_t raw_size = sizeof(WAVEHDR) + buffer_size_;
+  // Round up to the next 16-byte boundary.
+  return ((raw_size + 15u) / 16u) * 16u;
+}
+
+// Returns a pointer to the n-th WAVEHDR slot inside the single contiguous
+// allocation that backs all buffers (see SetupBuffers()).
+inline WAVEHDR* PCMWaveOutAudioOutputStream::GetBuffer(int n) const {
+  DCHECK_GE(n, 0);
+  DCHECK_LT(n, num_buffers_);
+  char* slot = buffers_.get() + static_cast<size_t>(n) * BufferSize();
+  return reinterpret_cast<WAVEHDR*>(slot);
+}
+
+// Records the configuration and pre-computes the WAVEFORMATEXTENSIBLE
+// structure; no Windows resources are acquired until Open().
+PCMWaveOutAudioOutputStream::PCMWaveOutAudioOutputStream(
+    AudioManagerWin* manager, const AudioParameters& params, int num_buffers,
+    UINT device_id)
+    // Members are listed in declaration order (see the header) so the actual
+    // initialization order matches the textual one and -Wreorder is clean.
+    : state_(PCMA_BRAND_NEW),
+      manager_(manager),
+      callback_(NULL),
+      num_buffers_(num_buffers),
+      buffer_size_(params.GetBytesPerBuffer()),
+      volume_(1),
+      channels_(params.channels()),
+      pending_bytes_(0),
+      device_id_(device_id),
+      waveout_(NULL),
+      waiting_handle_(NULL),
+      audio_bus_(AudioBus::Create(params)) {
+  format_.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+  format_.Format.nChannels = params.channels();
+  format_.Format.nSamplesPerSec = params.sample_rate();
+  format_.Format.wBitsPerSample = params.bits_per_sample();
+  format_.Format.cbSize = sizeof(format_) - sizeof(WAVEFORMATEX);
+  // The next are computed from above.
+  format_.Format.nBlockAlign = (format_.Format.nChannels *
+      format_.Format.wBitsPerSample) / 8;
+  format_.Format.nAvgBytesPerSec = format_.Format.nBlockAlign *
+      format_.Format.nSamplesPerSec;
+  // Clamp the speaker mask for channel counts beyond the table.
+  if (params.channels() > kMaxChannelsToMask) {
+    format_.dwChannelMask = kChannelsToMask[kMaxChannelsToMask];
+  } else {
+    format_.dwChannelMask = kChannelsToMask[params.channels()];
+  }
+  format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+  format_.Samples.wValidBitsPerSample = params.bits_per_sample();
+}
+
+PCMWaveOutAudioOutputStream::~PCMWaveOutAudioOutputStream() {
+  // Close() must have run before destruction; it releases the device and
+  // sets |waveout_| back to NULL.
+  DCHECK(NULL == waveout_);
+}
+
+// Moves the stream from PCMA_BRAND_NEW to PCMA_READY: validates the buffer
+// configuration, creates the auto-reset notification event, opens the wave
+// device in CALLBACK_EVENT mode and prepares the audio buffers. Returns
+// false on any failure, leaving the stream unopened.
+bool PCMWaveOutAudioOutputStream::Open() {
+  if (state_ != PCMA_BRAND_NEW)
+    return false;
+  // Reject configurations whose total allocation would be excessive.
+  if (BufferSize() * num_buffers_ > kMaxOpenBufferSize)
+    return false;
+  if (num_buffers_ < 2 || num_buffers_ > 5)
+    return false;
+
+  // Create buffer event.
+  buffer_event_.Set(::CreateEvent(NULL,    // Security attributes.
+                                  FALSE,   // It will auto-reset.
+                                  FALSE,   // Initial state.
+                                  NULL));  // No name.
+  if (!buffer_event_.Get())
+    return false;
+
+  // Open the device.
+  // We'll be getting buffer_event_ events when it's time to refill the buffer.
+  MMRESULT result = ::waveOutOpen(
+      &waveout_,
+      device_id_,
+      reinterpret_cast<LPCWAVEFORMATEX>(&format_),
+      reinterpret_cast<DWORD_PTR>(buffer_event_.Get()),
+      NULL,
+      CALLBACK_EVENT);
+  if (result != MMSYSERR_NOERROR)
+    return false;
+
+  SetupBuffers();
+  state_ = PCMA_READY;
+  return true;
+}
+
+// Allocates one contiguous chunk holding |num_buffers_| slots, each a
+// WAVEHDR immediately followed by its audio payload, and registers every
+// header with the wave driver.
+void PCMWaveOutAudioOutputStream::SetupBuffers() {
+  buffers_.reset(new char[BufferSize() * num_buffers_]);
+  for (int i = 0; i < num_buffers_; ++i) {
+    WAVEHDR* header = GetBuffer(i);
+    // The payload lives right behind the header inside the same slot.
+    header->lpData = reinterpret_cast<char*>(header) + sizeof(WAVEHDR);
+    header->dwBufferLength = buffer_size_;
+    header->dwBytesRecorded = 0;
+    header->dwLoops = 0;
+    // Mark as done so the first pass of Start() treats the buffer as free.
+    header->dwFlags = WHDR_DONE;
+    // Tell windows sound drivers about our buffers. Not documented what
+    // this does but we can guess that causes the OS to keep a reference to
+    // the memory pages so the driver can use them without worries.
+    ::waveOutPrepareHeader(waveout_, header, sizeof(WAVEHDR));
+  }
+}
+
+// Undoes SetupBuffers(): unregisters every header from the wave driver and
+// releases the single backing allocation.
+void PCMWaveOutAudioOutputStream::FreeBuffers() {
+  for (int i = 0; i < num_buffers_; ++i)
+    ::waveOutUnprepareHeader(waveout_, GetBuffer(i), sizeof(WAVEHDR));
+  buffers_.reset();
+}
+
+// Initially we ask the source to fill up all audio buffers. If we don't do
+// this then we would always get the driver callback when it is about to run
+// samples and that would leave too little time to react.
+void PCMWaveOutAudioOutputStream::Start(AudioSourceCallback* callback) {
+ if (state_ != PCMA_READY)
+ return;
+ callback_ = callback;
+
+ // Reset buffer event, it can be left in the arbitrary state if we
+ // previously stopped the stream. Can happen because we are stopping
+ // callbacks before stopping playback itself.
+ if (!::ResetEvent(buffer_event_.Get())) {
+ HandleError(MMSYSERR_ERROR);
+ return;
+ }
+
+ // Start watching for buffer events.
+ if (!::RegisterWaitForSingleObject(&waiting_handle_,
+ buffer_event_.Get(),
+ &BufferCallback,
+ this,
+ INFINITE,
+ WT_EXECUTEDEFAULT)) {
+ HandleError(MMSYSERR_ERROR);
+ waiting_handle_ = NULL;
+ return;
+ }
+
+ state_ = PCMA_PLAYING;
+
+ // Queue the buffers.
+ pending_bytes_ = 0;
+ for (int ix = 0; ix != num_buffers_; ++ix) {
+ WAVEHDR* buffer = GetBuffer(ix);
+ QueueNextPacket(buffer); // Read more data.
+ pending_bytes_ += buffer->dwBufferLength;
+ }
+
+ // From now on |pending_bytes_| would be accessed by callback thread.
+ // Most likely waveOutPause() or waveOutRestart() has its own memory barrier,
+ // but issuing our own is safer.
+ base::subtle::MemoryBarrier();
+
+ MMRESULT result = ::waveOutPause(waveout_);
+ if (result != MMSYSERR_NOERROR) {
+ HandleError(result);
+ return;
+ }
+
+ // Send the buffers to the audio driver. Note that the device is paused
+ // so we avoid entering the callback method while still here.
+ for (int ix = 0; ix != num_buffers_; ++ix) {
+ result = ::waveOutWrite(waveout_, GetBuffer(ix), sizeof(WAVEHDR));
+ if (result != MMSYSERR_NOERROR) {
+ HandleError(result);
+ break;
+ }
+ }
+ result = ::waveOutRestart(waveout_);
+ if (result != MMSYSERR_NOERROR) {
+ HandleError(result);
+ return;
+ }
+}
+
+// Stopping is tricky if we want it be fast.
+// For now just do it synchronously and avoid all the complexities.
+// TODO(enal): if we want faster Stop() we can create singleton that keeps track
+// of all currently playing streams. Then you don't have to wait
+// till all callbacks are completed. Of course access to singleton
+// should be under its own lock, and checking the liveness and
+// acquiring the lock on stream should be done atomically.
+void PCMWaveOutAudioOutputStream::Stop() {
+  if (state_ != PCMA_PLAYING)
+    return;
+  state_ = PCMA_STOPPING;
+  // Publish the state change before touching the wait registration so
+  // in-flight callbacks observe PCMA_STOPPING.
+  base::subtle::MemoryBarrier();
+
+  // Stop watching for buffer event, wait till all the callbacks are complete.
+  // Should be done before ::waveOutReset() call to avoid race condition when
+  // callback that is currently active and already checked that stream is still
+  // being played calls ::waveOutWrite() after ::waveOutReset() returns, later
+  // causing ::waveOutClose() to fail with WAVERR_STILLPLAYING.
+  // TODO(enal): that delays actual stopping of playback. Alternative can be
+  //             to call ::waveOutReset() twice, once before
+  //             ::UnregisterWaitEx() and once after.
+  if (waiting_handle_) {
+    // INVALID_HANDLE_VALUE makes UnregisterWaitEx block until pending
+    // callbacks finish. On failure, revert to PCMA_PLAYING.
+    if (!::UnregisterWaitEx(waiting_handle_, INVALID_HANDLE_VALUE)) {
+      state_ = PCMA_PLAYING;
+      HandleError(MMSYSERR_ERROR);
+      return;
+    }
+    waiting_handle_ = NULL;
+  }
+
+  // Stop playback.
+  MMRESULT res = ::waveOutReset(waveout_);
+  if (res != MMSYSERR_NOERROR) {
+    state_ = PCMA_PLAYING;
+    HandleError(res);
+    return;
+  }
+
+  // Wait for lock to ensure all outstanding callbacks have completed.
+  base::AutoLock auto_lock(lock_);
+
+  // waveOutReset() leaves buffers in the unpredictable state, causing
+  // problems if we want to close, release, or reuse them. Fix the states.
+  for (int ix = 0; ix != num_buffers_; ++ix) {
+    GetBuffer(ix)->dwFlags = WHDR_PREPARED;
+  }
+
+  // Don't use callback after Stop().
+  callback_ = NULL;
+
+  state_ = PCMA_READY;
+}
+
+// We can Close in any state except that trying to close a stream that is
+// playing Windows generates an error. We cannot propagate it to the source,
+// as callback_ is set to NULL. Just print it and hope somebody somehow
+// will find it...
+void PCMWaveOutAudioOutputStream::Close() {
+  // Force Stop() to ensure it's safe to release buffers and free the stream.
+  Stop();
+
+  if (waveout_) {
+    FreeBuffers();
+
+    // waveOutClose() generates a WIM_CLOSE callback. In case Start() was never
+    // called, force a reset to ensure close succeeds.
+    MMRESULT res = ::waveOutReset(waveout_);
+    DCHECK_EQ(res, static_cast<MMRESULT>(MMSYSERR_NOERROR));
+    res = ::waveOutClose(waveout_);
+    DCHECK_EQ(res, static_cast<MMRESULT>(MMSYSERR_NOERROR));
+    state_ = PCMA_CLOSED;
+    // NULL signals the destructor that the device was properly released.
+    waveout_ = NULL;
+  }
+
+  // Tell the audio manager that we have been released. This can result in
+  // the manager destroying us in-place so this needs to be the last thing
+  // we do on this function.
+  manager_->ReleaseOutputStream(this);
+}
+
+// Stores the volume that QueueNextPacket() applies in software via
+// AudioBus::Scale(); the waveOut hardware volume is not touched. Does
+// nothing until Open() has created the device.
+void PCMWaveOutAudioOutputStream::SetVolume(double volume) {
+  if (waveout_ != NULL)
+    volume_ = static_cast<float>(volume);
+}
+
+// Reports the software volume last set via SetVolume(). Leaves |*volume|
+// untouched when the device has not been opened.
+void PCMWaveOutAudioOutputStream::GetVolume(double* volume) {
+  if (waveout_ != NULL)
+    *volume = volume_;
+}
+
+// Logs the MMRESULT and forwards the failure to the attached source
+// callback, if any.
+void PCMWaveOutAudioOutputStream::HandleError(MMRESULT error) {
+  DLOG(WARNING) << "PCMWaveOutAudio error " << error;
+  if (callback_ != NULL)
+    callback_->OnError(this);
+}
+
+// Refills |buffer| with fresh audio from the source callback, applies the
+// software volume and converts to interleaved PCM. If the source produced
+// more data than fits, the buffer is left untouched and the error reported.
+void PCMWaveOutAudioOutputStream::QueueNextPacket(WAVEHDR *buffer) {
+  DCHECK_EQ(channels_, format_.Format.nChannels);
+  // Call the source which will fill our buffer with pleasant sounds and
+  // return to us how many bytes were used.
+  // TODO(fbarchard): Handle used 0 by queueing more.
+
+  // TODO(sergeyu): Specify correct hardware delay for AudioBuffersState.
+  int frames_filled = callback_->OnMoreData(
+      audio_bus_.get(), AudioBuffersState(pending_bytes_, 0));
+  uint32 used = frames_filled * audio_bus_->channels() *
+      format_.Format.wBitsPerSample / 8;
+
+  if (used <= buffer_size_) {
+    // Note: If this ever changes to output raw float the data must be clipped
+    // and sanitized since it may come from an untrusted source such as NaCl.
+    audio_bus_->Scale(volume_);
+    audio_bus_->ToInterleaved(
+        frames_filled, format_.Format.wBitsPerSample / 8, buffer->lpData);
+
+    // Given the DCHECK above, nChannels == channels_, so this is just |used|.
+    buffer->dwBufferLength = used * format_.Format.nChannels / channels_;
+  } else {
+    HandleError(0);
+    return;
+  }
+  buffer->dwFlags = WHDR_PREPARED;
+}
+
+// One of the threads in our thread pool asynchronously calls this function when
+// buffer_event_ is signalled. Search through all the buffers looking for freed
+// ones, fills them with data, and "feed" the Windows.
+// Note: by searching through all the buffers we guarantee that we fill all the
+// buffers, even when "event loss" happens, i.e. if Windows signals event
+// when it did not flip into unsignaled state from the previous signal.
+void NTAPI PCMWaveOutAudioOutputStream::BufferCallback(PVOID lpParameter,
+                                                       BOOLEAN timer_fired) {
+  TRACE_EVENT0("audio", "PCMWaveOutAudioOutputStream::BufferCallback");
+
+  // The wait was registered with an INFINITE timeout, so only the event —
+  // never a timer — should bring us here.
+  DCHECK(!timer_fired);
+  PCMWaveOutAudioOutputStream* stream =
+      reinterpret_cast<PCMWaveOutAudioOutputStream*>(lpParameter);
+
+  // Lock the stream so callbacks do not interfere with each other.
+  // Several callbacks can be called simultaneously by different threads in the
+  // thread pool if some of the callbacks are slow, or system is very busy and
+  // scheduled callbacks are not called on time.
+  base::AutoLock auto_lock(stream->lock_);
+  if (stream->state_ != PCMA_PLAYING)
+    return;
+
+  // Scan every buffer; WHDR_DONE marks those the driver has finished with.
+  for (int ix = 0; ix != stream->num_buffers_; ++ix) {
+    WAVEHDR* buffer = stream->GetBuffer(ix);
+    if (buffer->dwFlags & WHDR_DONE) {
+      // Before we queue the next packet, we need to adjust the number of
+      // pending bytes since the last write to hardware.
+      stream->pending_bytes_ -= buffer->dwBufferLength;
+      stream->QueueNextPacket(buffer);
+
+      // QueueNextPacket() can take a long time, especially if several of them
+      // were called back-to-back. Check if we are stopping now.
+      if (stream->state_ != PCMA_PLAYING)
+        return;
+
+      // Time to send the buffer to the audio driver. Since we are reusing
+      // the same buffers we can get away without calling waveOutPrepareHeader.
+      MMRESULT result = ::waveOutWrite(stream->waveout_,
+                                       buffer,
+                                       sizeof(WAVEHDR));
+      if (result != MMSYSERR_NOERROR)
+        stream->HandleError(result);
+      stream->pending_bytes_ += buffer->dwBufferLength;
+    }
+  }
+}
+
+} // namespace media
diff --git a/chromium/media/audio/win/waveout_output_win.h b/chromium/media/audio/win/waveout_output_win.h
new file mode 100644
index 00000000000..5c7009d0971
--- /dev/null
+++ b/chromium/media/audio/win/waveout_output_win.h
@@ -0,0 +1,141 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_WIN_WAVEOUT_OUTPUT_WIN_H_
+#define MEDIA_AUDIO_WIN_WAVEOUT_OUTPUT_WIN_H_
+
+#include <windows.h>
+#include <mmsystem.h>
+#include <mmreg.h>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+#include "base/win/scoped_handle.h"
+#include "media/audio/audio_io.h"
+#include "media/audio/audio_parameters.h"
+
+namespace media {
+
+class AudioManagerWin;
+
+// Implements PCM audio output support for Windows using the WaveXXX API.
+// While not as nice as the DirectSound-based API, it should work in all target
+// operating systems regardless of DirectX version installed. It is known that
+// in some machines WaveXXX based audio is better while in others DirectSound
+// is better.
+//
+// Important: the OnXXXX functions in AudioSourceCallback are called by more
+// than one thread so it is important to have some form of synchronization if
+// you are keeping state in it.
+class PCMWaveOutAudioOutputStream : public AudioOutputStream {
+ public:
+  // The ctor takes all the usual parameters, plus |manager| which is the
+  // audio manager who is creating this object and |device_id| which is
+  // provided by the operating system.
+  PCMWaveOutAudioOutputStream(AudioManagerWin* manager,
+                              const AudioParameters& params,
+                              int num_buffers,
+                              UINT device_id);
+  virtual ~PCMWaveOutAudioOutputStream();
+
+  // Implementation of AudioOutputStream.
+  virtual bool Open();
+  virtual void Close();
+  virtual void Start(AudioSourceCallback* callback);
+  virtual void Stop();
+  virtual void SetVolume(double volume);
+  virtual void GetVolume(double* volume);
+
+  // Sends a buffer to the audio driver for playback.
+  void QueueNextPacket(WAVEHDR* buffer);
+
+ private:
+  // Stream lifecycle; transitions are driven by the public methods above.
+  enum State {
+    PCMA_BRAND_NEW,    // Initial state.
+    PCMA_READY,        // Device obtained and ready to play.
+    PCMA_PLAYING,      // Playing audio.
+    PCMA_STOPPING,     // Audio is stopping, do not "feed" data to Windows.
+    PCMA_CLOSED        // Device has been released.
+  };
+
+  // Returns pointer to the n-th buffer.
+  inline WAVEHDR* GetBuffer(int n) const;
+
+  // Size of one buffer in bytes, rounded up if necessary.
+  inline size_t BufferSize() const;
+
+  // Windows calls us back asking for more data when buffer_event_ signalled.
+  // See MSDN for help on RegisterWaitForSingleObject() and waveOutOpen().
+  static void NTAPI BufferCallback(PVOID lpParameter, BOOLEAN timer_fired);
+
+  // If windows reports an error this function handles it and passes it to
+  // the attached AudioSourceCallback::OnError().
+  void HandleError(MMRESULT error);
+
+  // Allocates and prepares the memory that will be used for playback.
+  void SetupBuffers();
+
+  // Deallocates the memory allocated in SetupBuffers.
+  void FreeBuffers();
+
+  // Reader beware. Visual C has stronger guarantees on volatile vars than
+  // most people expect. In fact, it has release semantics on write and
+  // acquire semantics on reads. See the msdn documentation.
+  volatile State state_;
+
+  // The audio manager that created this output stream. We notify it when
+  // we close so it can release its own resources.
+  AudioManagerWin* manager_;
+
+  // We use the callback mostly to periodically request more audio data.
+  // Set in Start(), cleared to NULL by Stop() under |lock_|.
+  AudioSourceCallback* callback_;
+
+  // The number of buffers of size |buffer_size_| each to use.
+  const int num_buffers_;
+
+  // The size in bytes of each audio buffer, we usually have two of these.
+  uint32 buffer_size_;
+
+  // Volume level from 0 to 1, applied in software in QueueNextPacket().
+  float volume_;
+
+  // Channels from 0 to 8.
+  const int channels_;
+
+  // Number of bytes yet to be played in the hardware buffer.
+  uint32 pending_bytes_;
+
+  // The id assigned by the operating system to the selected wave output
+  // hardware device. Usually this is just -1 which means 'default device'.
+  UINT device_id_;
+
+  // Windows native structure to encode the format parameters.
+  WAVEFORMATPCMEX format_;
+
+  // Handle to the instance of the wave device.
+  HWAVEOUT waveout_;
+
+  // Handle to the buffer event.
+  base::win::ScopedHandle buffer_event_;
+
+  // Handle returned by RegisterWaitForSingleObject().
+  HANDLE waiting_handle_;
+
+  // Pointer to the allocated audio buffers, we allocate all buffers in one big
+  // chunk. This object owns them.
+  scoped_ptr<char[]> buffers_;
+
+  // Lock used to avoid the conflict when callbacks are called simultaneously.
+  base::Lock lock_;
+
+  // Container for retrieving data from AudioSourceCallback::OnMoreData().
+  scoped_ptr<AudioBus> audio_bus_;
+
+  DISALLOW_COPY_AND_ASSIGN(PCMWaveOutAudioOutputStream);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_WIN_WAVEOUT_OUTPUT_WIN_H_