summaryrefslogtreecommitdiff
path: root/chromium/media/video
diff options
context:
space:
mode:
authorAndras Becsi <andras.becsi@digia.com>2014-03-18 13:16:26 +0100
committerFrederik Gladhorn <frederik.gladhorn@digia.com>2014-03-20 15:55:39 +0100
commit3f0f86b0caed75241fa71c95a5d73bc0164348c5 (patch)
tree92b9fb00f2e9e90b0be2262093876d4f43b6cd13 /chromium/media/video
parente90d7c4b152c56919d963987e2503f9909a666d2 (diff)
downloadqtwebengine-chromium-3f0f86b0caed75241fa71c95a5d73bc0164348c5.tar.gz
Update to new stable branch 1750
This also includes an updated ninja and chromium dependencies needed on Windows. Change-Id: Icd597d80ed3fa4425933c9f1334c3c2e31291c42 Reviewed-by: Zoltan Arvai <zarvai@inf.u-szeged.hu> Reviewed-by: Zeno Albisser <zeno.albisser@digia.com>
Diffstat (limited to 'chromium/media/video')
-rw-r--r--chromium/media/video/capture/android/video_capture_device_android.cc134
-rw-r--r--chromium/media/video/capture/android/video_capture_device_android.h24
-rw-r--r--chromium/media/video/capture/fake_video_capture_device.cc252
-rw-r--r--chromium/media/video/capture/fake_video_capture_device.h66
-rw-r--r--chromium/media/video/capture/file_video_capture_device.cc300
-rw-r--r--chromium/media/video/capture/file_video_capture_device.h79
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_linux.cc206
-rw-r--r--chromium/media/video/capture/linux/video_capture_device_linux.h28
-rw-r--r--chromium/media/video/capture/mac/avfoundation_glue.h158
-rw-r--r--chromium/media/video/capture/mac/avfoundation_glue.mm161
-rw-r--r--chromium/media/video/capture/mac/coremedia_glue.h46
-rw-r--r--chromium/media/video/capture/mac/coremedia_glue.mm70
-rw-r--r--chromium/media/video/capture/mac/platform_video_capturing_mac.h50
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h113
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm246
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.h28
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_mac.mm209
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h22
-rw-r--r--chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm65
-rw-r--r--chromium/media/video/capture/video_capture.h16
-rw-r--r--chromium/media/video/capture/video_capture_device.cc28
-rw-r--r--chromium/media/video/capture/video_capture_device.h179
-rw-r--r--chromium/media/video/capture/video_capture_device_unittest.cc354
-rw-r--r--chromium/media/video/capture/video_capture_proxy.cc22
-rw-r--r--chromium/media/video/capture/video_capture_proxy.h10
-rw-r--r--chromium/media/video/capture/video_capture_types.cc56
-rw-r--r--chromium/media/video/capture/video_capture_types.h56
-rw-r--r--chromium/media/video/capture/win/capability_list_win.cc10
-rw-r--r--chromium/media/video/capture/win/capability_list_win.h9
-rw-r--r--chromium/media/video/capture/win/sink_filter_win.cc9
-rw-r--r--chromium/media/video/capture/win/sink_filter_win.h7
-rw-r--r--chromium/media/video/capture/win/sink_input_pin_win.cc62
-rw-r--r--chromium/media/video/capture/win/sink_input_pin_win.h8
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.cc105
-rw-r--r--chromium/media/video/capture/win/video_capture_device_mf_win.h19
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.cc158
-rw-r--r--chromium/media/video/capture/win/video_capture_device_win.h16
-rw-r--r--chromium/media/video/video_decode_accelerator.h6
38 files changed, 2196 insertions, 1191 deletions
diff --git a/chromium/media/video/capture/android/video_capture_device_android.cc b/chromium/media/video/capture/android/video_capture_device_android.cc
index 141a5d0fae2..adfa9a3455c 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.cc
+++ b/chromium/media/video/capture/android/video_capture_device_android.cc
@@ -53,6 +53,12 @@ void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
}
}
+// static
+void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* formats) {
+ NOTIMPLEMENTED();
+}
+
const std::string VideoCaptureDevice::Name::GetModel() const {
// Android cameras are not typically USB devices, and this method is currently
// only used for USB model identifiers, so this implementation just indicates
@@ -80,15 +86,10 @@ bool VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(JNIEnv* env) {
}
VideoCaptureDeviceAndroid::VideoCaptureDeviceAndroid(const Name& device_name)
- : state_(kIdle),
- got_first_frame_(false),
- observer_(NULL),
- device_name_(device_name),
- current_settings_() {
-}
+ : state_(kIdle), got_first_frame_(false), device_name_(device_name) {}
VideoCaptureDeviceAndroid::~VideoCaptureDeviceAndroid() {
- DeAllocate();
+ StopAndDeAllocate();
}
bool VideoCaptureDeviceAndroid::Init() {
@@ -100,78 +101,60 @@ bool VideoCaptureDeviceAndroid::Init() {
j_capture_.Reset(Java_VideoCapture_createVideoCapture(
env, base::android::GetApplicationContext(), id,
- reinterpret_cast<jint>(this)));
+ reinterpret_cast<intptr_t>(this)));
return true;
}
-const VideoCaptureDevice::Name& VideoCaptureDeviceAndroid::device_name() {
- return device_name_;
-}
-
-void VideoCaptureDeviceAndroid::Allocate(
- const VideoCaptureCapability& capture_format,
- EventHandler* observer) {
+void VideoCaptureDeviceAndroid::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<Client> client) {
+ DVLOG(1) << "VideoCaptureDeviceAndroid::AllocateAndStart";
{
base::AutoLock lock(lock_);
if (state_ != kIdle)
return;
- observer_ = observer;
- state_ = kAllocated;
+ client_ = client.Pass();
+ got_first_frame_ = false;
}
JNIEnv* env = AttachCurrentThread();
- jboolean ret = Java_VideoCapture_allocate(env,
- j_capture_.obj(),
- capture_format.width,
- capture_format.height,
- capture_format.frame_rate);
+ jboolean ret =
+ Java_VideoCapture_allocate(env,
+ j_capture_.obj(),
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate);
if (!ret) {
SetErrorState("failed to allocate");
return;
}
// Store current width and height.
- current_settings_.width =
- Java_VideoCapture_queryWidth(env, j_capture_.obj());
- current_settings_.height =
- Java_VideoCapture_queryHeight(env, j_capture_.obj());
- current_settings_.frame_rate =
+ capture_format_.frame_size.SetSize(
+ Java_VideoCapture_queryWidth(env, j_capture_.obj()),
+ Java_VideoCapture_queryHeight(env, j_capture_.obj()));
+ capture_format_.frame_rate =
Java_VideoCapture_queryFrameRate(env, j_capture_.obj());
- current_settings_.color = GetColorspace();
- DCHECK_NE(current_settings_.color, media::PIXEL_FORMAT_UNKNOWN);
- CHECK(current_settings_.width > 0 && !(current_settings_.width % 2));
- CHECK(current_settings_.height > 0 && !(current_settings_.height % 2));
+ capture_format_.pixel_format = GetColorspace();
+ DCHECK_NE(capture_format_.pixel_format, media::PIXEL_FORMAT_UNKNOWN);
+ CHECK(capture_format_.frame_size.GetArea() > 0);
+ CHECK(!(capture_format_.frame_size.width() % 2));
+ CHECK(!(capture_format_.frame_size.height() % 2));
- if (capture_format.frame_rate > 0) {
+ if (capture_format_.frame_rate > 0) {
frame_interval_ = base::TimeDelta::FromMicroseconds(
- (base::Time::kMicrosecondsPerSecond + capture_format.frame_rate - 1) /
- capture_format.frame_rate);
+ (base::Time::kMicrosecondsPerSecond + capture_format_.frame_rate - 1) /
+ capture_format_.frame_rate);
}
- DVLOG(1) << "VideoCaptureDeviceAndroid::Allocate: queried width="
- << current_settings_.width
- << ", height="
- << current_settings_.height
- << ", frame_rate="
- << current_settings_.frame_rate;
- // Report the frame size to the observer.
- observer_->OnFrameInfo(current_settings_);
-}
+ DVLOG(1) << "VideoCaptureDeviceAndroid::Allocate: queried frame_size="
+ << capture_format_.frame_size.ToString()
+ << ", frame_rate=" << capture_format_.frame_rate;
-void VideoCaptureDeviceAndroid::Start() {
- DVLOG(1) << "VideoCaptureDeviceAndroid::Start";
- {
- base::AutoLock lock(lock_);
- got_first_frame_ = false;
- DCHECK_EQ(state_, kAllocated);
- }
-
- JNIEnv* env = AttachCurrentThread();
-
- jint ret = Java_VideoCapture_startCapture(env, j_capture_.obj());
- if (ret < 0) {
+ jint result = Java_VideoCapture_startCapture(env, j_capture_.obj());
+ if (result < 0) {
SetErrorState("failed to start capture");
return;
}
@@ -182,14 +165,12 @@ void VideoCaptureDeviceAndroid::Start() {
}
}
-void VideoCaptureDeviceAndroid::Stop() {
- DVLOG(1) << "VideoCaptureDeviceAndroid::Stop";
+void VideoCaptureDeviceAndroid::StopAndDeAllocate() {
+ DVLOG(1) << "VideoCaptureDeviceAndroid::StopAndDeAllocate";
{
base::AutoLock lock(lock_);
if (state_ != kCapturing && state_ != kError)
return;
- if (state_ == kCapturing)
- state_ = kAllocated;
}
JNIEnv* env = AttachCurrentThread();
@@ -199,28 +180,13 @@ void VideoCaptureDeviceAndroid::Stop() {
SetErrorState("failed to stop capture");
return;
}
-}
-void VideoCaptureDeviceAndroid::DeAllocate() {
- DVLOG(1) << "VideoCaptureDeviceAndroid::DeAllocate";
{
base::AutoLock lock(lock_);
- if (state_ == kIdle)
- return;
-
- if (state_ == kCapturing) {
- base::AutoUnlock unlock(lock_);
- Stop();
- }
-
- if (state_ == kAllocated)
- state_ = kIdle;
-
- observer_ = NULL;
+ state_ = kIdle;
+ client_.reset();
}
- JNIEnv* env = AttachCurrentThread();
-
Java_VideoCapture_deallocate(env, j_capture_.obj());
}
@@ -229,13 +195,11 @@ void VideoCaptureDeviceAndroid::OnFrameAvailable(
jobject obj,
jbyteArray data,
jint length,
- jint rotation,
- jboolean flip_vert,
- jboolean flip_horiz) {
+ jint rotation) {
DVLOG(3) << "VideoCaptureDeviceAndroid::OnFrameAvailable: length =" << length;
base::AutoLock lock(lock_);
- if (state_ != kCapturing || !observer_)
+ if (state_ != kCapturing || !client_.get())
return;
jbyte* buffer = env->GetByteArrayElements(data, NULL);
@@ -256,9 +220,11 @@ void VideoCaptureDeviceAndroid::OnFrameAvailable(
if (expected_next_frame_time_ <= current_time) {
expected_next_frame_time_ += frame_interval_;
- observer_->OnIncomingCapturedFrame(
- reinterpret_cast<uint8*>(buffer), length, base::Time::Now(),
- rotation, flip_vert, flip_horiz);
+ client_->OnIncomingCapturedFrame(reinterpret_cast<uint8*>(buffer),
+ length,
+ base::Time::Now(),
+ rotation,
+ capture_format_);
}
env->ReleaseByteArrayElements(data, buffer, JNI_ABORT);
@@ -291,7 +257,7 @@ void VideoCaptureDeviceAndroid::SetErrorState(const std::string& reason) {
base::AutoLock lock(lock_);
state_ = kError;
}
- observer_->OnError();
+ client_->OnError();
}
} // namespace media
diff --git a/chromium/media/video/capture/android/video_capture_device_android.h b/chromium/media/video/capture/android/video_capture_device_android.h
index de6955d9e8f..635417af572 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.h
+++ b/chromium/media/video/capture/android/video_capture_device_android.h
@@ -19,9 +19,9 @@ namespace media {
// VideoCaptureDevice on Android. The VideoCaptureDevice API's are called
// by VideoCaptureManager on its own thread, while OnFrameAvailable is called
-// on JAVA thread (i.e., UI thread). Both will access |state_| and |observer_|,
+// on JAVA thread (i.e., UI thread). Both will access |state_| and |client_|,
// but only VideoCaptureManager would change their value.
-class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice1 {
+class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
public:
virtual ~VideoCaptureDeviceAndroid();
@@ -29,12 +29,9 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice1 {
static bool RegisterVideoCaptureDevice(JNIEnv* env);
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
// Implement org.chromium.media.VideoCapture.nativeOnFrameAvailable.
void OnFrameAvailable(
@@ -42,14 +39,11 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice1 {
jobject obj,
jbyteArray data,
jint length,
- jint rotation,
- jboolean flip_vert,
- jboolean flip_horiz);
+ jint rotation);
private:
enum InternalState {
kIdle, // The device is opened but not in use.
- kAllocated, // All resouces have been allocated and camera can be started.
kCapturing, // Video is being captured.
kError // Hit error. User needs to recover by destroying the object.
};
@@ -66,17 +60,17 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice1 {
VideoPixelFormat GetColorspace();
void SetErrorState(const std::string& reason);
- // Prevent racing on accessing |state_| and |observer_| since both could be
+ // Prevent racing on accessing |state_| and |client_| since both could be
// accessed from different threads.
base::Lock lock_;
InternalState state_;
bool got_first_frame_;
base::TimeTicks expected_next_frame_time_;
base::TimeDelta frame_interval_;
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
Name device_name_;
- VideoCaptureCapability current_settings_;
+ VideoCaptureFormat capture_format_;
// Java VideoCaptureAndroid instance.
base::android::ScopedJavaGlobalRef<jobject> j_capture_;
diff --git a/chromium/media/video/capture/fake_video_capture_device.cc b/chromium/media/video/capture/fake_video_capture_device.cc
index 8434bc3ebbe..a87514d4347 100644
--- a/chromium/media/video/capture/fake_video_capture_device.cc
+++ b/chromium/media/video/capture/fake_video_capture_device.cc
@@ -22,157 +22,161 @@ static const int kFakeCaptureCapabilityChangePeriod = 30;
enum { kNumberOfFakeDevices = 2 };
bool FakeVideoCaptureDevice::fail_next_create_ = false;
+base::subtle::Atomic32 FakeVideoCaptureDevice::number_of_devices_ =
+ kNumberOfFakeDevices;
+// static
+size_t FakeVideoCaptureDevice::NumberOfFakeDevices(void) {
+ return number_of_devices_;
+}
+
+// static
void FakeVideoCaptureDevice::GetDeviceNames(Names* const device_names) {
// Empty the name list.
device_names->erase(device_names->begin(), device_names->end());
- for (int n = 0; n < kNumberOfFakeDevices; n++) {
+ int number_of_devices = base::subtle::NoBarrier_Load(&number_of_devices_);
+ for (int32 n = 0; n < number_of_devices; n++) {
Name name(base::StringPrintf("fake_device_%d", n),
base::StringPrintf("/dev/video%d", n));
device_names->push_back(name);
}
}
+// static
+void FakeVideoCaptureDevice::GetDeviceSupportedFormats(
+ const Name& device,
+ VideoCaptureFormats* supported_formats) {
+
+ supported_formats->clear();
+ VideoCaptureFormat capture_format_640x480;
+ capture_format_640x480.pixel_format = media::PIXEL_FORMAT_I420;
+ capture_format_640x480.frame_size.SetSize(640, 480);
+ capture_format_640x480.frame_rate = 1000 / kFakeCaptureTimeoutMs;
+ supported_formats->push_back(capture_format_640x480);
+ VideoCaptureFormat capture_format_320x240;
+ capture_format_320x240.pixel_format = media::PIXEL_FORMAT_I420;
+ capture_format_320x240.frame_size.SetSize(320, 240);
+ capture_format_320x240.frame_rate = 1000 / kFakeCaptureTimeoutMs;
+ supported_formats->push_back(capture_format_320x240);
+}
+
+// static
VideoCaptureDevice* FakeVideoCaptureDevice::Create(const Name& device_name) {
if (fail_next_create_) {
fail_next_create_ = false;
return NULL;
}
- for (int n = 0; n < kNumberOfFakeDevices; ++n) {
+ int number_of_devices = base::subtle::NoBarrier_Load(&number_of_devices_);
+ for (int32 n = 0; n < number_of_devices; ++n) {
std::string possible_id = base::StringPrintf("/dev/video%d", n);
if (device_name.id().compare(possible_id) == 0) {
- return new FakeVideoCaptureDevice(device_name);
+ return new FakeVideoCaptureDevice();
}
}
return NULL;
}
+// static
void FakeVideoCaptureDevice::SetFailNextCreate() {
fail_next_create_ = true;
}
-FakeVideoCaptureDevice::FakeVideoCaptureDevice(const Name& device_name)
- : device_name_(device_name),
- observer_(NULL),
- state_(kIdle),
- capture_thread_("CaptureThread"),
- frame_count_(0),
- capabilities_roster_index_(0) {
+// static
+void FakeVideoCaptureDevice::SetNumberOfFakeDevices(size_t number_of_devices) {
+ base::subtle::NoBarrier_AtomicExchange(&number_of_devices_,
+ number_of_devices);
}
+FakeVideoCaptureDevice::FakeVideoCaptureDevice()
+ : capture_thread_("CaptureThread"),
+ frame_count_(0),
+ format_roster_index_(0) {}
+
FakeVideoCaptureDevice::~FakeVideoCaptureDevice() {
- // Check if the thread is running.
- // This means that the device have not been DeAllocated properly.
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!capture_thread_.IsRunning());
}
-void FakeVideoCaptureDevice::Allocate(
- const VideoCaptureCapability& capture_format,
- EventHandler* observer) {
- capture_format_.frame_size_type = capture_format.frame_size_type;
- if (capture_format.frame_size_type == VariableResolutionVideoCaptureDevice)
- PopulateCapabilitiesRoster();
-
- if (state_ != kIdle) {
- return; // Wrong state.
- }
-
- observer_ = observer;
- capture_format_.color = PIXEL_FORMAT_I420;
- capture_format_.expected_capture_delay = 0;
- capture_format_.interlaced = false;
- if (capture_format.width > 320) { // VGA
- capture_format_.width = 640;
- capture_format_.height = 480;
- capture_format_.frame_rate = 30;
- } else { // QVGA
- capture_format_.width = 320;
- capture_format_.height = 240;
- capture_format_.frame_rate = 30;
- }
+void FakeVideoCaptureDevice::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!capture_thread_.IsRunning());
- const size_t fake_frame_size = VideoFrame::AllocationSize(
- VideoFrame::I420,
- gfx::Size(capture_format_.width, capture_format_.height));
- fake_frame_.reset(new uint8[fake_frame_size]);
+ capture_thread_.Start();
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FakeVideoCaptureDevice::OnAllocateAndStart,
+ base::Unretained(this),
+ params,
+ base::Passed(&client)));
+}
- state_ = kAllocated;
- observer_->OnFrameInfo(capture_format_);
+void FakeVideoCaptureDevice::StopAndDeAllocate() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(capture_thread_.IsRunning());
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FakeVideoCaptureDevice::OnStopAndDeAllocate,
+ base::Unretained(this)));
+ capture_thread_.Stop();
}
-void FakeVideoCaptureDevice::Reallocate() {
- DCHECK_EQ(state_, kCapturing);
- capture_format_ = capabilities_roster_.at(++capabilities_roster_index_ %
- capabilities_roster_.size());
- DCHECK_EQ(capture_format_.color, PIXEL_FORMAT_I420);
- DVLOG(3) << "Reallocating FakeVideoCaptureDevice, new capture resolution ("
- << capture_format_.width << "x" << capture_format_.height << ")";
-
- const size_t fake_frame_size = VideoFrame::AllocationSize(
- VideoFrame::I420,
- gfx::Size(capture_format_.width, capture_format_.height));
+void FakeVideoCaptureDevice::OnAllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ client_ = client.Pass();
+ capture_format_.pixel_format = PIXEL_FORMAT_I420;
+ capture_format_.frame_rate = 30;
+ if (params.requested_format.frame_size.width() > 320)
+ capture_format_.frame_size.SetSize(640, 480);
+ else
+ capture_format_.frame_size.SetSize(320, 240);
+ if (params.allow_resolution_change)
+ PopulateFormatRoster();
+ const size_t fake_frame_size =
+ VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
fake_frame_.reset(new uint8[fake_frame_size]);
- observer_->OnFrameInfoChanged(capture_format_);
-}
-
-void FakeVideoCaptureDevice::Start() {
- if (state_ != kAllocated) {
- return; // Wrong state.
- }
- state_ = kCapturing;
- capture_thread_.Start();
capture_thread_.message_loop()->PostTask(
FROM_HERE,
base::Bind(&FakeVideoCaptureDevice::OnCaptureTask,
base::Unretained(this)));
}
-void FakeVideoCaptureDevice::Stop() {
- if (state_ != kCapturing) {
- return; // Wrong state.
- }
- capture_thread_.Stop();
- state_ = kAllocated;
-}
-
-void FakeVideoCaptureDevice::DeAllocate() {
- if (state_ != kAllocated && state_ != kCapturing) {
- return; // Wrong state.
- }
- capture_thread_.Stop();
- state_ = kIdle;
-}
-
-const VideoCaptureDevice::Name& FakeVideoCaptureDevice::device_name() {
- return device_name_;
+void FakeVideoCaptureDevice::OnStopAndDeAllocate() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ client_.reset();
}
void FakeVideoCaptureDevice::OnCaptureTask() {
- if (state_ != kCapturing) {
+ if (!client_)
return;
- }
- const size_t frame_size = VideoFrame::AllocationSize(
- VideoFrame::I420,
- gfx::Size(capture_format_.width, capture_format_.height));
+ const size_t frame_size =
+ VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
memset(fake_frame_.get(), 0, frame_size);
SkBitmap bitmap;
bitmap.setConfig(SkBitmap::kA8_Config,
- capture_format_.width,
- capture_format_.height,
- capture_format_.width);
- bitmap.setPixels(fake_frame_.get());
+ capture_format_.frame_size.width(),
+ capture_format_.frame_size.height(),
+ capture_format_.frame_size.width()),
+ bitmap.setPixels(fake_frame_.get());
SkCanvas canvas(bitmap);
// Draw a sweeping circle to show an animation.
- int radius = std::min(capture_format_.width, capture_format_.height) / 4;
- SkRect rect = SkRect::MakeXYWH(
- capture_format_.width / 2 - radius, capture_format_.height / 2 - radius,
- 2 * radius, 2 * radius);
+ int radius = std::min(capture_format_.frame_size.width(),
+ capture_format_.frame_size.height()) /
+ 4;
+ SkRect rect =
+ SkRect::MakeXYWH(capture_format_.frame_size.width() / 2 - radius,
+ capture_format_.frame_size.height() / 2 - radius,
+ 2 * radius,
+ 2 * radius);
SkPaint paint;
paint.setStyle(SkPaint::kFill_Style);
@@ -209,12 +213,14 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
frame_count_++;
- // Give the captured frame to the observer.
- observer_->OnIncomingCapturedFrame(
- fake_frame_.get(), frame_size, base::Time::Now(), 0, false, false);
+ // Give the captured frame to the client.
+ client_->OnIncomingCapturedFrame(fake_frame_.get(),
+ frame_size,
+ base::Time::Now(),
+ 0,
+ capture_format_);
if (!(frame_count_ % kFakeCaptureCapabilityChangePeriod) &&
- (capture_format_.frame_size_type ==
- VariableResolutionVideoCaptureDevice)) {
+ format_roster_.size() > 0U) {
Reallocate();
}
// Reschedule next CaptureTask.
@@ -225,33 +231,29 @@ void FakeVideoCaptureDevice::OnCaptureTask() {
base::TimeDelta::FromMilliseconds(kFakeCaptureTimeoutMs));
}
-void FakeVideoCaptureDevice::PopulateCapabilitiesRoster() {
- capabilities_roster_.push_back(
- media::VideoCaptureCapability(320,
- 240,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- VariableResolutionVideoCaptureDevice));
- capabilities_roster_.push_back(
- media::VideoCaptureCapability(640,
- 480,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- VariableResolutionVideoCaptureDevice));
- capabilities_roster_.push_back(
- media::VideoCaptureCapability(800,
- 600,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- VariableResolutionVideoCaptureDevice));
-
- capabilities_roster_index_ = 0;
+void FakeVideoCaptureDevice::Reallocate() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ capture_format_ =
+ format_roster_.at(++format_roster_index_ % format_roster_.size());
+ DCHECK_EQ(capture_format_.pixel_format, PIXEL_FORMAT_I420);
+ DVLOG(3) << "Reallocating FakeVideoCaptureDevice, new capture resolution "
+ << capture_format_.frame_size.ToString();
+
+ const size_t fake_frame_size =
+ VideoFrame::AllocationSize(VideoFrame::I420, capture_format_.frame_size);
+ fake_frame_.reset(new uint8[fake_frame_size]);
+}
+
+void FakeVideoCaptureDevice::PopulateFormatRoster() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ format_roster_.push_back(
+ media::VideoCaptureFormat(gfx::Size(320, 240), 30, PIXEL_FORMAT_I420));
+ format_roster_.push_back(
+ media::VideoCaptureFormat(gfx::Size(640, 480), 30, PIXEL_FORMAT_I420));
+ format_roster_.push_back(
+ media::VideoCaptureFormat(gfx::Size(800, 600), 30, PIXEL_FORMAT_I420));
+
+ format_roster_index_ = 0;
}
} // namespace media
diff --git a/chromium/media/video/capture/fake_video_capture_device.h b/chromium/media/video/capture/fake_video_capture_device.h
index e8ab25567f7..399a68268fb 100644
--- a/chromium/media/video/capture/fake_video_capture_device.h
+++ b/chromium/media/video/capture/fake_video_capture_device.h
@@ -10,64 +10,70 @@
#include <string>
+#include "base/atomicops.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
#include "media/video/capture/video_capture_device.h"
namespace media {
-class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice1 {
+class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
public:
static VideoCaptureDevice* Create(const Name& device_name);
virtual ~FakeVideoCaptureDevice();
// Used for testing. This will make sure the next call to Create will
// return NULL;
static void SetFailNextCreate();
+ static void SetNumberOfFakeDevices(size_t number_of_devices);
+ static size_t NumberOfFakeDevices();
static void GetDeviceNames(Names* device_names);
+ static void GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* supported_formats);
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client)
+ OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
private:
- // Flag indicating the internal state.
- enum InternalState {
- kIdle,
- kAllocated,
- kCapturing,
- kError
- };
- explicit FakeVideoCaptureDevice(const Name& device_name);
-
- // Called on the capture_thread_.
- void OnCaptureTask();
+ FakeVideoCaptureDevice();
- // EXPERIMENTAL, similar to allocate, but changes resolution and calls
- // observer->OnFrameInfoChanged(VideoCaptureCapability&)
+ // Called on the |capture_thread_| only.
+ void OnAllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client);
+ void OnStopAndDeAllocate();
+ void OnCaptureTask();
void Reallocate();
- void PopulateCapabilitiesRoster();
+ void PopulateFormatRoster();
+
+ // |thread_checker_| is used to check that destructor, AllocateAndStart() and
+ // StopAndDeAllocate() are called in the correct thread that owns the object.
+ base::ThreadChecker thread_checker_;
- Name device_name_;
- VideoCaptureDevice::EventHandler* observer_;
- InternalState state_;
base::Thread capture_thread_;
+ // The following members are only used on the |capture_thread_|.
+ scoped_ptr<VideoCaptureDevice::Client> client_;
scoped_ptr<uint8[]> fake_frame_;
int frame_count_;
- VideoCaptureCapability capture_format_;
+ VideoCaptureFormat capture_format_;
- // When the device is configured as mutating video captures, this vector
- // holds the available ones which are used in sequence, restarting at the end.
- std::vector<VideoCaptureCapability> capabilities_roster_;
- int capabilities_roster_index_;
+ // When the device is allowed to change resolution, this vector holds the
+ // available ones which are used in sequence, restarting at the end. These
+ // two members belong to and are only used in |capture_thread_|.
+ std::vector<VideoCaptureFormat> format_roster_;
+ int format_roster_index_;
static bool fail_next_create_;
+ // |number_of_devices_| is atomic since tests can call SetNumberOfFakeDevices
+ // on the IO thread to set |number_of_devices_|. The variable can be
+ // read from a separate thread.
+ // TODO(perkj): Make tests independent of global state. crbug/323913
+ static base::subtle::Atomic32 number_of_devices_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FakeVideoCaptureDevice);
+ DISALLOW_COPY_AND_ASSIGN(FakeVideoCaptureDevice);
};
} // namespace media
diff --git a/chromium/media/video/capture/file_video_capture_device.cc b/chromium/media/video/capture/file_video_capture_device.cc
new file mode 100644
index 00000000000..6f118d29e38
--- /dev/null
+++ b/chromium/media/video/capture/file_video_capture_device.cc
@@ -0,0 +1,300 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/file_video_capture_device.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/sys_string_conversions.h"
+#include "media/base/media_switches.h"
+
+
+namespace media {
+static const char kFileVideoCaptureDeviceName[] =
+ "/dev/placeholder-for-file-backed-fake-capture-device";
+
+static const int kY4MHeaderMaxSize = 200;
+static const char kY4MSimpleFrameDelimiter[] = "FRAME";
+static const int kY4MSimpleFrameDelimiterSize = 6;
+
+int ParseY4MInt(const base::StringPiece& token) {
+ int temp_int;
+ CHECK(base::StringToInt(token, &temp_int));
+ return temp_int;
+}
+
+// Extract numerator and denominator out of a token that must have the aspect
+// numerator:denominator, both integer numbers.
+void ParseY4MRational(const base::StringPiece& token,
+ int* numerator,
+ int* denominator) {
+ size_t index_divider = token.find(':');
+ CHECK_NE(index_divider, token.npos);
+ *numerator = ParseY4MInt(token.substr(0, index_divider));
+ *denominator = ParseY4MInt(token.substr(index_divider + 1, token.length()));
+ CHECK(*denominator);
+}
+
+// This function parses the ASCII string |file_header| as belonging to a Y4M
+// file, returning the collected format in |video_format|. For a non-authoritative
+// explanation of the header format, check
+// http://wiki.multimedia.cx/index.php?title=YUV4MPEG2
+// Restrictions: Only the I420 pixel format is supported; interlacing (apart
+// from rejecting mixed mode) and pixel aspect ratio are ignored.
+// Implementation notes: Y4M header should end with an ASCII 0x20 (whitespace)
+// character, however all examples mentioned in the Y4M header description end
+// with a newline character instead. Also, some headers do _not_ specify pixel
+// format, in this case it means I420.
+// This code was inspired by third_party/libvpx/.../y4minput.* .
+void ParseY4MTags(const std::string& file_header,
+ media::VideoCaptureFormat* video_format) {
+ video_format->pixel_format = media::PIXEL_FORMAT_I420;
+ video_format->frame_size.set_width(0);
+ video_format->frame_size.set_height(0);
+ size_t index = 0;
+ size_t blank_position = 0;
+ base::StringPiece token;
+ while ((blank_position = file_header.find_first_of("\n ", index)) !=
+ std::string::npos) {
+ // Every token is supposed to have an identifier letter and a bunch of
+ // information immediately after, which we extract into a |token| here.
+ token =
+ base::StringPiece(&file_header[index + 1], blank_position - index - 1);
+ CHECK(!token.empty());
+ switch (file_header[index]) {
+ case 'W':
+ video_format->frame_size.set_width(ParseY4MInt(token));
+ break;
+ case 'H':
+ video_format->frame_size.set_height(ParseY4MInt(token));
+ break;
+ case 'F': {
+ // If the token is "FRAME", it means we have finished with the header.
+ if (token[0] == 'R')
+ break;
+ int fps_numerator, fps_denominator;
+ ParseY4MRational(token, &fps_numerator, &fps_denominator);
+ video_format->frame_rate = fps_numerator / fps_denominator;
+ break;
+ }
+ case 'I':
+ // Interlacing is ignored, but we don't like mixed modes.
+ CHECK_NE(token[0], 'm');
+ break;
+ case 'A':
+ // Pixel aspect ratio ignored.
+ break;
+ case 'C':
+ CHECK_EQ(ParseY4MInt(token), 420); // Only I420 supported.
+ break;
+ default:
+ break;
+ }
+ // We're done if we have found a newline character right after the token.
+ if (file_header[blank_position] == '\n')
+ break;
+ index = blank_position + 1;
+ }
+ // Last video format semantic correctness check before sending it back.
+ CHECK(video_format->IsValid());
+}
+
+// Reads and parses the header of a Y4M |file|, returning the collected pixel
+// format in |video_format|. Returns the index of the first byte of the first
+// video frame.
+// Restrictions: Only trivial per-frame headers are supported.
+int64 ParseFileAndExtractVideoFormat(
+ const base::PlatformFile& file,
+ media::VideoCaptureFormat* video_format) {
+ std::string header(kY4MHeaderMaxSize, 0);
+ base::ReadPlatformFile(file, 0, &header[0], kY4MHeaderMaxSize - 1);
+
+ size_t header_end = header.find(kY4MSimpleFrameDelimiter);
+ CHECK_NE(header_end, header.npos);
+
+ ParseY4MTags(header, video_format);
+ return header_end + kY4MSimpleFrameDelimiterSize;
+}
+
+// Opens a given file for reading, and returns the file to the caller, who is
+// responsible for closing it.
+base::PlatformFile OpenFileForRead(const base::FilePath& file_path) {
+ base::PlatformFileError file_error;
+ base::PlatformFile file = base::CreatePlatformFile(
+ file_path,
+ base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ,
+ NULL,
+ &file_error);
+ CHECK_EQ(file_error, base::PLATFORM_FILE_OK);
+ return file;
+}
+
+// Inspects the command line and retrieves the file path parameter.
+base::FilePath GetFilePathFromCommandLine() {
+ base::FilePath command_line_file_path =
+ CommandLine::ForCurrentProcess()->GetSwitchValuePath(
+ switches::kUseFileForFakeVideoCapture);
+ CHECK(!command_line_file_path.empty());
+ return command_line_file_path;
+}
+
+void FileVideoCaptureDevice::GetDeviceNames(Names* const device_names) {
+ DCHECK(device_names->empty());
+ base::FilePath command_line_file_path = GetFilePathFromCommandLine();
+#if defined(OS_WIN)
+ device_names->push_back(
+ Name(base::SysWideToUTF8(command_line_file_path.value()),
+ kFileVideoCaptureDeviceName));
+#else
+ device_names->push_back(Name(command_line_file_path.value(),
+ kFileVideoCaptureDeviceName));
+#endif // OS_WIN
+}
+
+void FileVideoCaptureDevice::GetDeviceSupportedFormats(
+ const Name& device,
+ VideoCaptureFormats* supported_formats) {
+ base::PlatformFile file = OpenFileForRead(GetFilePathFromCommandLine());
+ VideoCaptureFormat capture_format;
+ ParseFileAndExtractVideoFormat(file, &capture_format);
+ supported_formats->push_back(capture_format);
+
+ CHECK(base::ClosePlatformFile(file));
+}
+
+VideoCaptureDevice* FileVideoCaptureDevice::Create(const Name& device_name) {
+#if defined(OS_WIN)
+ return new FileVideoCaptureDevice(
+ base::FilePath(base::SysUTF8ToWide(device_name.name())));
+#else
+ return new FileVideoCaptureDevice(base::FilePath(device_name.name()));
+#endif // OS_WIN
+}
+
+FileVideoCaptureDevice::FileVideoCaptureDevice(const base::FilePath& file_path)
+ : capture_thread_("CaptureThread"),
+ file_path_(file_path),
+ file_(base::kInvalidPlatformFileValue),
+ frame_size_(0),
+ current_byte_index_(0),
+ first_frame_byte_index_(0) {}
+
+FileVideoCaptureDevice::~FileVideoCaptureDevice() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Check if the thread is running.
+ // This means that the device has not been DeAllocated properly.
+ CHECK(!capture_thread_.IsRunning());
+}
+
+void FileVideoCaptureDevice::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ CHECK(!capture_thread_.IsRunning());
+
+ capture_thread_.Start();
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnAllocateAndStart,
+ base::Unretained(this),
+ params,
+ base::Passed(&client)));
+}
+
+void FileVideoCaptureDevice::StopAndDeAllocate() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ CHECK(capture_thread_.IsRunning());
+
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnStopAndDeAllocate,
+ base::Unretained(this)));
+ capture_thread_.Stop();
+}
+
+int FileVideoCaptureDevice::CalculateFrameSize() {
+ DCHECK_EQ(capture_format_.pixel_format, PIXEL_FORMAT_I420);
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ return capture_format_.frame_size.GetArea() * 12 / 8;
+}
+
+void FileVideoCaptureDevice::OnAllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+
+ client_ = client.Pass();
+
+ // Open the file and parse the header. Get frame size and format.
+ DCHECK_EQ(file_, base::kInvalidPlatformFileValue);
+ file_ = OpenFileForRead(file_path_);
+ first_frame_byte_index_ =
+ ParseFileAndExtractVideoFormat(file_, &capture_format_);
+ current_byte_index_ = first_frame_byte_index_;
+ DVLOG(1) << "Opened video file " << capture_format_.frame_size.ToString()
+ << ", fps: " << capture_format_.frame_rate;
+
+ frame_size_ = CalculateFrameSize();
+ video_frame_.reset(new uint8[frame_size_]);
+
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
+ base::Unretained(this)));
+}
+
+void FileVideoCaptureDevice::OnStopAndDeAllocate() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ CHECK(base::ClosePlatformFile(file_));
+ client_.reset();
+ current_byte_index_ = 0;
+ first_frame_byte_index_ = 0;
+ frame_size_ = 0;
+ video_frame_.reset();
+}
+
+void FileVideoCaptureDevice::OnCaptureTask() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ if (!client_)
+ return;
+ int result =
+ base::ReadPlatformFile(file_,
+ current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()),
+ frame_size_);
+
+ // If the read offset is at or past EOF, ReadPlatformFile returns 0 bytes
+ // read. In that case, rewind to the first frame and read again.
+ if (result != frame_size_) {
+ CHECK_EQ(result, 0);
+ current_byte_index_ = first_frame_byte_index_;
+ CHECK_EQ(base::ReadPlatformFile(file_,
+ current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()),
+ frame_size_),
+ frame_size_);
+ } else {
+ current_byte_index_ += frame_size_ + kY4MSimpleFrameDelimiterSize;
+ }
+
+ // Give the captured frame to the client.
+ client_->OnIncomingCapturedFrame(video_frame_.get(),
+ frame_size_,
+ base::Time::Now(),
+ 0,
+ capture_format_);
+ // Reschedule next CaptureTask.
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
+ base::Unretained(this)),
+ base::TimeDelta::FromSeconds(1) / capture_format_.frame_rate);
+}
+
+} // namespace media
diff --git a/chromium/media/video/capture/file_video_capture_device.h b/chromium/media/video/capture/file_video_capture_device.h
new file mode 100644
index 00000000000..06e6033254d
--- /dev/null
+++ b/chromium/media/video/capture/file_video_capture_device.h
@@ -0,0 +1,79 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_H_
+#define MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_H_
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/platform_file.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "media/video/capture/video_capture_device.h"
+
+namespace media {
+
+// Implementation of a VideoCaptureDevice class that reads from a file. Used for
+// testing the video capture pipeline when no real hardware is available. The
+// only supported file format is YUV4MPEG2 (a.k.a. Y4M), a minimal container
+// with a series of uncompressed video only frames, see the link
+// http://wiki.multimedia.cx/index.php?title=YUV4MPEG2 for more information
+// on the file format. Several restrictions and notes apply, see the
+// implementation file.
+// Example videos can be found in http://media.xiph.org/video/derf.
+class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
+ public:
+ // VideoCaptureDevice implementation, static methods. Create() returns a
+ // pointer to the object, fully owned by the caller.
+ // TODO(mcasas): Create() should return a scoped_ptr<> http://crbug.com/321613
+ static VideoCaptureDevice* Create(const Name& device_name);
+ static void GetDeviceNames(Names* device_names);
+ static void GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* supported_formats);
+
+ // VideoCaptureDevice implementation, class methods.
+ virtual ~FileVideoCaptureDevice();
+ virtual void AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
+
+ private:
+ // Constructor of the class, with a fully qualified file path as input, which
+ // represents the Y4M video file to stream repeatedly.
+ explicit FileVideoCaptureDevice(const base::FilePath& file_path);
+ // Returns size in bytes of an I420 frame, not including possible paddings,
+ // defined by |capture_format_|.
+ int CalculateFrameSize();
+
+ // Called on the |capture_thread_|.
+ void OnAllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client);
+ void OnStopAndDeAllocate();
+ void OnCaptureTask();
+
+ // |thread_checker_| is used to check that destructor, AllocateAndStart() and
+ // StopAndDeAllocate() are called in the correct thread that owns the object.
+ base::ThreadChecker thread_checker_;
+
+ // |capture_thread_| is used for internal operations via posting tasks to it.
+ // It is active between OnAllocateAndStart() and OnStopAndDeAllocate().
+ base::Thread capture_thread_;
+ // The following members belong to |capture_thread_|.
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+ const base::FilePath file_path_;
+ base::PlatformFile file_;
+ scoped_ptr<uint8[]> video_frame_;
+ VideoCaptureFormat capture_format_;
+ int frame_size_;
+ int64 current_byte_index_;
+ int64 first_frame_byte_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileVideoCaptureDevice);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_H_
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.cc b/chromium/media/video/capture/linux/video_capture_device_linux.cc
index fdd52772cb1..21f57ee132a 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.cc
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.cc
@@ -45,7 +45,7 @@ static const int32 kV4l2RawFmts[] = {
V4L2_PIX_FMT_YUYV
};
-// USB VID and PID are both 4 bytes long
+// USB VID and PID are both 4 bytes long.
static const size_t kVidPidSize = 4;
// /sys/class/video4linux/video{N}/device is a symlink to the corresponding
@@ -55,6 +55,8 @@ static const char kVidPathTemplate[] =
static const char kPidPathTemplate[] =
"/sys/class/video4linux/%s/device/../idProduct";
+// This function translates Video4Linux pixel formats to Chromium pixel formats,
+// and should only support those listed in GetListOfUsableFourCCs.
static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(
int32 v4l2_fourcc) {
VideoPixelFormat result = PIXEL_FORMAT_UNKNOWN;
@@ -69,8 +71,9 @@ static VideoPixelFormat V4l2ColorToVideoCaptureColorFormat(
case V4L2_PIX_FMT_JPEG:
result = PIXEL_FORMAT_MJPEG;
break;
+ default:
+ DVLOG(1) << "Unsupported pixel format " << std::hex << v4l2_fourcc;
}
- DCHECK_NE(result, PIXEL_FORMAT_UNKNOWN);
return result;
}
@@ -141,6 +144,76 @@ void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
}
}
+void VideoCaptureDevice::GetDeviceSupportedFormats(
+ const Name& device,
+ VideoCaptureFormats* supported_formats) {
+ if (device.id().empty())
+ return;
+ int fd;
+ if ((fd = open(device.id().c_str(), O_RDONLY)) < 0)
+ return;
+
+ supported_formats->clear();
+ // Retrieve the caps one by one, first get pixel format, then sizes, then
+ // frame rates. See http://linuxtv.org/downloads/v4l-dvb-apis for reference.
+ v4l2_fmtdesc pixel_format = {};
+ pixel_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ while (ioctl(fd, VIDIOC_ENUM_FMT, &pixel_format) == 0) {
+ VideoCaptureFormat supported_format;
+ supported_format.pixel_format =
+ V4l2ColorToVideoCaptureColorFormat((int32)pixel_format.pixelformat);
+ if (supported_format.pixel_format == PIXEL_FORMAT_UNKNOWN) {
+ ++pixel_format.index;
+ continue;
+ }
+
+ v4l2_frmsizeenum frame_size = {};
+ frame_size.pixel_format = pixel_format.pixelformat;
+ while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frame_size) == 0) {
+ if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+ supported_format.frame_size.SetSize(
+ frame_size.discrete.width, frame_size.discrete.height);
+ } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ } else if (frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ }
+ v4l2_frmivalenum frame_interval = {};
+ frame_interval.pixel_format = pixel_format.pixelformat;
+ frame_interval.width = frame_size.discrete.width;
+ frame_interval.height = frame_size.discrete.height;
+ while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frame_interval) == 0) {
+ if (frame_interval.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
+ if (frame_interval.discrete.numerator != 0) {
+ supported_format.frame_rate =
+ static_cast<float>(frame_interval.discrete.denominator) /
+ static_cast<float>(frame_interval.discrete.numerator);
+ } else {
+ supported_format.frame_rate = 0;
+ }
+ } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_CONTINUOUS) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ break;
+ } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_STEPWISE) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ break;
+ }
+ supported_formats->push_back(supported_format);
+ ++frame_interval.index;
+ }
+ ++frame_size.index;
+ }
+ ++pixel_format.index;
+ }
+
+ close(fd);
+ return;
+}
+
static bool ReadIdFile(const std::string path, std::string* id) {
char id_buf[kVidPidSize];
FILE* file = fopen(path.c_str(), "rb");
@@ -196,14 +269,12 @@ VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
VideoCaptureDeviceLinux::VideoCaptureDeviceLinux(const Name& device_name)
: state_(kIdle),
- observer_(NULL),
device_name_(device_name),
device_fd_(-1),
v4l2_thread_("V4L2Thread"),
buffer_pool_(NULL),
buffer_pool_size_(0),
- timeout_count_(0) {
-}
+ timeout_count_(0) {}
VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
state_ = kIdle;
@@ -217,68 +288,45 @@ VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
}
}
-void VideoCaptureDeviceLinux::Allocate(
- const VideoCaptureCapability& capture_format,
- EventHandler* observer) {
+void VideoCaptureDeviceLinux::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
if (v4l2_thread_.IsRunning()) {
return; // Wrong state.
}
v4l2_thread_.Start();
- v4l2_thread_.message_loop()
- ->PostTask(FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnAllocate,
- base::Unretained(this),
- capture_format.width,
- capture_format.height,
- capture_format.frame_rate,
- observer));
-}
-
-void VideoCaptureDeviceLinux::Start() {
- if (!v4l2_thread_.IsRunning()) {
- return; // Wrong state.
- }
- v4l2_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnStart, base::Unretained(this)));
-}
-
-void VideoCaptureDeviceLinux::Stop() {
- if (!v4l2_thread_.IsRunning()) {
- return; // Wrong state.
- }
v4l2_thread_.message_loop()->PostTask(
FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnStop, base::Unretained(this)));
+ base::Bind(&VideoCaptureDeviceLinux::OnAllocateAndStart,
+ base::Unretained(this),
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate,
+ base::Passed(&client)));
}
-void VideoCaptureDeviceLinux::DeAllocate() {
+void VideoCaptureDeviceLinux::StopAndDeAllocate() {
if (!v4l2_thread_.IsRunning()) {
return; // Wrong state.
}
v4l2_thread_.message_loop()->PostTask(
FROM_HERE,
- base::Bind(&VideoCaptureDeviceLinux::OnDeAllocate,
+ base::Bind(&VideoCaptureDeviceLinux::OnStopAndDeAllocate,
base::Unretained(this)));
v4l2_thread_.Stop();
-
// Make sure no buffers are still allocated.
// This can happen (theoretically) if an error occurs when trying to stop
// the camera.
DeAllocateVideoBuffers();
}
-const VideoCaptureDevice::Name& VideoCaptureDeviceLinux::device_name() {
- return device_name_;
-}
-
-void VideoCaptureDeviceLinux::OnAllocate(int width,
- int height,
- int frame_rate,
- EventHandler* observer) {
+void VideoCaptureDeviceLinux::OnAllocateAndStart(int width,
+ int height,
+ int frame_rate,
+ scoped_ptr<Client> client) {
DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
- observer_ = observer;
+ client_ = client.Pass();
// Need to open camera with O_RDWR after Linux kernel 3.3.
if ((device_fd_ = open(device_name_.id().c_str(), O_RDWR)) < 0) {
@@ -359,46 +407,13 @@ void VideoCaptureDeviceLinux::OnAllocate(int width,
// framerate configuration, or the actual one is different from the desired?
// Store our current width and height.
- VideoCaptureCapability current_settings;
- current_settings.color = V4l2ColorToVideoCaptureColorFormat(
- video_fmt.fmt.pix.pixelformat);
- current_settings.width = video_fmt.fmt.pix.width;
- current_settings.height = video_fmt.fmt.pix.height;
- current_settings.frame_rate = frame_rate;
- current_settings.expected_capture_delay = 0;
- current_settings.interlaced = false;
-
- state_ = kAllocated;
- // Report the resulting frame size to the observer.
- observer_->OnFrameInfo(current_settings);
-}
-
-void VideoCaptureDeviceLinux::OnDeAllocate() {
- DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
-
- // If we are in error state or capturing
- // try to stop the camera.
- if (state_ == kCapturing) {
- OnStop();
- }
- if (state_ == kAllocated) {
- state_ = kIdle;
- }
-
- // We need to close and open the device if we want to change the settings
- // Otherwise VIDIOC_S_FMT will return error
- // Sad but true.
- close(device_fd_);
- device_fd_ = -1;
-}
-
-void VideoCaptureDeviceLinux::OnStart() {
- DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
-
- if (state_ != kAllocated) {
- return;
- }
+ capture_format_.frame_size.SetSize(video_fmt.fmt.pix.width,
+ video_fmt.fmt.pix.height);
+ capture_format_.frame_rate = frame_rate;
+ capture_format_.pixel_format =
+ V4l2ColorToVideoCaptureColorFormat(video_fmt.fmt.pix.pixelformat);
+ // Start capturing.
if (!AllocateVideoBuffers()) {
// Error, We can not recover.
SetErrorState("Allocate buffer failed");
@@ -420,11 +435,9 @@ void VideoCaptureDeviceLinux::OnStart() {
base::Unretained(this)));
}
-void VideoCaptureDeviceLinux::OnStop() {
+void VideoCaptureDeviceLinux::OnStopAndDeAllocate() {
DCHECK_EQ(v4l2_thread_.message_loop(), base::MessageLoop::current());
- state_ = kAllocated;
-
v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(device_fd_, VIDIOC_STREAMOFF, &type) < 0) {
SetErrorState("VIDIOC_STREAMOFF failed");
@@ -433,6 +446,14 @@ void VideoCaptureDeviceLinux::OnStop() {
// We don't dare to deallocate the buffers if we can't stop
// the capture device.
DeAllocateVideoBuffers();
+
+ // We need to close and open the device if we want to change the settings
+ // Otherwise VIDIOC_S_FMT will return an error.
+ // Sad but true.
+ close(device_fd_);
+ device_fd_ = -1;
+ state_ = kIdle;
+ client_.reset();
}
void VideoCaptureDeviceLinux::OnCaptureTask() {
@@ -488,9 +509,12 @@ void VideoCaptureDeviceLinux::OnCaptureTask() {
buffer.memory = V4L2_MEMORY_MMAP;
// Dequeue a buffer.
if (ioctl(device_fd_, VIDIOC_DQBUF, &buffer) == 0) {
- observer_->OnIncomingCapturedFrame(
- static_cast<uint8*> (buffer_pool_[buffer.index].start),
- buffer.bytesused, base::Time::Now(), 0, false, false);
+ client_->OnIncomingCapturedFrame(
+ static_cast<uint8*>(buffer_pool_[buffer.index].start),
+ buffer.bytesused,
+ base::Time::Now(),
+ 0,
+ capture_format_);
// Enqueue the buffer again.
if (ioctl(device_fd_, VIDIOC_QBUF, &buffer) == -1) {
@@ -581,9 +605,11 @@ void VideoCaptureDeviceLinux::DeAllocateVideoBuffers() {
}
void VideoCaptureDeviceLinux::SetErrorState(const std::string& reason) {
+ DCHECK(!v4l2_thread_.IsRunning() ||
+ v4l2_thread_.message_loop() == base::MessageLoop::current());
DVLOG(1) << reason;
state_ = kError;
- observer_->OnError();
+ client_->OnError();
}
} // namespace media
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.h b/chromium/media/video/capture/linux/video_capture_device_linux.h
index aab61aed77b..a5917b71f12 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.h
+++ b/chromium/media/video/capture/linux/video_capture_device_linux.h
@@ -18,23 +18,20 @@
namespace media {
-class VideoCaptureDeviceLinux : public VideoCaptureDevice1 {
+class VideoCaptureDeviceLinux : public VideoCaptureDevice {
public:
explicit VideoCaptureDeviceLinux(const Name& device_name);
virtual ~VideoCaptureDeviceLinux();
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) OVERRIDE;
+
+ virtual void StopAndDeAllocate() OVERRIDE;
private:
enum InternalState {
kIdle, // The device driver is opened but camera is not in use.
- kAllocated, // The camera has been allocated and can be started.
kCapturing, // Video is being captured.
kError // Error accessing HW functions.
// User needs to recover by destroying the object.
@@ -48,13 +45,11 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice1 {
};
// Called on the v4l2_thread_.
- void OnAllocate(int width,
- int height,
- int frame_rate,
- EventHandler* observer);
- void OnStart();
- void OnStop();
- void OnDeAllocate();
+ void OnAllocateAndStart(int width,
+ int height,
+ int frame_rate,
+ scoped_ptr<Client> client);
+ void OnStopAndDeAllocate();
void OnCaptureTask();
bool AllocateVideoBuffers();
@@ -62,13 +57,14 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice1 {
void SetErrorState(const std::string& reason);
InternalState state_;
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
Name device_name_;
int device_fd_; // File descriptor for the opened camera device.
base::Thread v4l2_thread_; // Thread used for reading data from the device.
Buffer* buffer_pool_;
int buffer_pool_size_; // Number of allocated buffers.
int timeout_count_;
+ VideoCaptureFormat capture_format_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceLinux);
};
diff --git a/chromium/media/video/capture/mac/avfoundation_glue.h b/chromium/media/video/capture/mac/avfoundation_glue.h
new file mode 100644
index 00000000000..f9b23a2c240
--- /dev/null
+++ b/chromium/media/video/capture/mac/avfoundation_glue.h
@@ -0,0 +1,158 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AVFoundation API is only introduced in Mac OS X > 10.6, and there is only one
+// build of Chromium, so the (potential) linking with AVFoundation has to happen
+// in runtime. For this to be clean, an AVFoundationGlue class is defined to try
+// and load these AVFoundation system libraries. If it succeeds, subsequent
+// clients can use AVFoundation via the rest of the classes declared in this
+// file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_AVFOUNDATION_GLUE_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_AVFOUNDATION_GLUE_H_
+
+#import <Foundation/Foundation.h>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+#import "media/video/capture/mac/coremedia_glue.h"
+
+class MEDIA_EXPORT AVFoundationGlue {
+ public:
+ // This method returns true if the OS version supports AVFoundation and the
+ // AVFoundation bundle could be loaded correctly, or false otherwise.
+ static bool IsAVFoundationSupported();
+
+ static NSBundle const* AVFoundationBundle();
+
+ static void* AVFoundationLibraryHandle();
+
+ // Originally coming from AVCaptureDevice.h but in global namespace.
+ static NSString* AVCaptureDeviceWasConnectedNotification();
+ static NSString* AVCaptureDeviceWasDisconnectedNotification();
+
+ // Originally coming from AVMediaFormat.h but in global namespace.
+ static NSString* AVMediaTypeVideo();
+ static NSString* AVMediaTypeAudio();
+ static NSString* AVMediaTypeMuxed();
+
+ // Originally from AVCaptureSession.h but in global namespace.
+ static NSString* AVCaptureSessionRuntimeErrorNotification();
+ static NSString* AVCaptureSessionDidStopRunningNotification();
+ static NSString* AVCaptureSessionErrorKey();
+ static NSString* AVCaptureSessionPreset320x240();
+ static NSString* AVCaptureSessionPreset640x480();
+ static NSString* AVCaptureSessionPreset1280x720();
+
+ // Originally from AVVideoSettings.h but in global namespace.
+ static NSString* AVVideoScalingModeKey();
+ static NSString* AVVideoScalingModeResizeAspect();
+
+ static Class AVCaptureSessionClass();
+ static Class AVCaptureVideoDataOutputClass();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AVFoundationGlue);
+};
+
+// Originally AVCaptureDevice and coming from AVCaptureDevice.h
+MEDIA_EXPORT
+@interface CrAVCaptureDevice : NSObject
+
+- (BOOL)hasMediaType:(NSString*)mediaType;
+- (NSString*)uniqueID;
+- (NSString*)localizedName;
+- (BOOL)supportsAVCaptureSessionPreset:(NSString*)preset;
+
+@end
+
+MEDIA_EXPORT
+@interface CrAVCaptureInput // Originally from AVCaptureInput.h.
+@end
+
+MEDIA_EXPORT
+@interface CrAVCaptureOutput // Originally from AVCaptureOutput.h.
+@end
+
+// Originally AVCaptureSession and coming from AVCaptureSession.h.
+MEDIA_EXPORT
+@interface CrAVCaptureSession : NSObject
+
+- (void)release;
+- (BOOL)canSetSessionPreset:(NSString*)preset;
+- (void)setSessionPreset:(NSString*)preset;
+- (NSString*)sessionPreset;
+- (void)addInput:(CrAVCaptureInput*)input;
+- (void)removeInput:(CrAVCaptureInput*)input;
+- (void)addOutput:(CrAVCaptureOutput*)output;
+- (void)removeOutput:(CrAVCaptureOutput*)output;
+- (BOOL)isRunning;
+- (void)startRunning;
+- (void)stopRunning;
+
+@end
+
+// Originally AVCaptureConnection and coming from AVCaptureSession.h.
+MEDIA_EXPORT
+@interface CrAVCaptureConnection : NSObject
+
+- (BOOL)isVideoMinFrameDurationSupported;
+- (void)setVideoMinFrameDuration:(CoreMediaGlue::CMTime)minFrameRate;
+- (BOOL)isVideoMaxFrameDurationSupported;
+- (void)setVideoMaxFrameDuration:(CoreMediaGlue::CMTime)maxFrameRate;
+
+@end
+
+// Originally AVCaptureDeviceInput and coming from AVCaptureInput.h.
+MEDIA_EXPORT
+@interface CrAVCaptureDeviceInput : CrAVCaptureInput
+
+@end
+
+// Originally AVCaptureVideoDataOutputSampleBufferDelegate from
+// AVCaptureOutput.h.
+@protocol CrAVCaptureVideoDataOutputSampleBufferDelegate <NSObject>
+
+@optional
+
+- (void)captureOutput:(CrAVCaptureOutput*)captureOutput
+didOutputSampleBuffer:(CoreMediaGlue::CMSampleBufferRef)sampleBuffer
+ fromConnection:(CrAVCaptureConnection*)connection;
+
+@end
+
+// Originally AVCaptureVideoDataOutput and coming from AVCaptureOutput.h.
+MEDIA_EXPORT
+@interface CrAVCaptureVideoDataOutput : CrAVCaptureOutput
+
+- (oneway void)release;
+- (void)setSampleBufferDelegate:(id)sampleBufferDelegate
+ queue:(dispatch_queue_t)sampleBufferCallbackQueue;
+
+- (void)setVideoSettings:(NSDictionary*)videoSettings;
+- (NSDictionary*)videoSettings;
+- (CrAVCaptureConnection*)connectionWithMediaType:(NSString*)mediaType;
+
+@end
+
+// Class to provide access to class methods of AVCaptureDevice.
+MEDIA_EXPORT
+@interface AVCaptureDeviceGlue : NSObject
+
++ (NSArray*)devices;
+
++ (CrAVCaptureDevice*)deviceWithUniqueID:(NSString*)deviceUniqueID;
+
+@end
+
+// Class to provide access to class methods of AVCaptureDeviceInput.
+MEDIA_EXPORT
+@interface AVCaptureDeviceInputGlue : NSObject
+
++ (CrAVCaptureDeviceInput*)deviceInputWithDevice:(CrAVCaptureDevice*)device
+ error:(NSError**)outError;
+
+@end
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_AVFOUNDATION_GLUE_H_
diff --git a/chromium/media/video/capture/mac/avfoundation_glue.mm b/chromium/media/video/capture/mac/avfoundation_glue.mm
new file mode 100644
index 00000000000..1610d0f104a
--- /dev/null
+++ b/chromium/media/video/capture/mac/avfoundation_glue.mm
@@ -0,0 +1,161 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "media/video/capture/mac/avfoundation_glue.h"
+
+#include <dlfcn.h>
+
+#include "base/command_line.h"
+#include "base/lazy_instance.h"
+#include "base/mac/mac_util.h"
+#include "media/base/media_switches.h"
+
+namespace {
+
+// This class is used to retrieve AVFoundation NSBundle and library handle. It
+// must be used as a LazyInstance so that it is initialised once and in a
+// thread-safe way. Normally no work is done in constructors: LazyInstance is
+// an exception.
+class AVFoundationInternal {
+ public:
+ AVFoundationInternal() {
+ bundle_ = [NSBundle
+ bundleWithPath:@"/System/Library/Frameworks/AVFoundation.framework"];
+
+ const char* path = [[bundle_ executablePath] fileSystemRepresentation];
+ CHECK(path);
+ library_handle_ = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
+ CHECK(library_handle_) << dlerror();
+ }
+ NSBundle* bundle() const { return bundle_; }
+ void* library_handle() const { return library_handle_; }
+
+ private:
+ NSBundle* bundle_;
+ void* library_handle_;
+
+ DISALLOW_COPY_AND_ASSIGN(AVFoundationInternal);
+};
+
+} // namespace
+
+static base::LazyInstance<AVFoundationInternal> g_avfoundation_handle =
+ LAZY_INSTANCE_INITIALIZER;
+
+namespace media {
+
+// TODO(mcasas):http://crbug.com/323536 cache the string pointers.
+static NSString* ReadNSStringPtr(const char* symbol) {
+ NSString** string_pointer = reinterpret_cast<NSString**>(
+ dlsym(AVFoundationGlue::AVFoundationLibraryHandle(), symbol));
+ DCHECK(string_pointer) << dlerror();
+ return *string_pointer;
+}
+
+} // namespace media
+
+bool AVFoundationGlue::IsAVFoundationSupported() {
+ const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ return cmd_line->HasSwitch(switches::kEnableAVFoundation) &&
+ base::mac::IsOSLionOrLater() && [AVFoundationBundle() load];
+}
+
+NSBundle const* AVFoundationGlue::AVFoundationBundle() {
+ return g_avfoundation_handle.Get().bundle();
+}
+
+void* AVFoundationGlue::AVFoundationLibraryHandle() {
+ return g_avfoundation_handle.Get().library_handle();
+}
+
+NSString* AVFoundationGlue::AVCaptureDeviceWasConnectedNotification() {
+ return media::ReadNSStringPtr("AVCaptureDeviceWasConnectedNotification");
+}
+
+NSString* AVFoundationGlue::AVCaptureDeviceWasDisconnectedNotification() {
+ return media::ReadNSStringPtr("AVCaptureDeviceWasDisconnectedNotification");
+}
+
+NSString* AVFoundationGlue::AVMediaTypeVideo() {
+ return media::ReadNSStringPtr("AVMediaTypeVideo");
+}
+
+NSString* AVFoundationGlue::AVMediaTypeAudio() {
+ return media::ReadNSStringPtr("AVMediaTypeAudio");
+}
+
+NSString* AVFoundationGlue::AVMediaTypeMuxed() {
+ return media::ReadNSStringPtr("AVMediaTypeMuxed");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionRuntimeErrorNotification() {
+ return media::ReadNSStringPtr("AVCaptureSessionRuntimeErrorNotification");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionDidStopRunningNotification() {
+ return media::ReadNSStringPtr("AVCaptureSessionDidStopRunningNotification");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionErrorKey() {
+ return media::ReadNSStringPtr("AVCaptureSessionErrorKey");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionPreset320x240() {
+ return media::ReadNSStringPtr("AVCaptureSessionPreset320x240");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionPreset640x480() {
+ return media::ReadNSStringPtr("AVCaptureSessionPreset640x480");
+}
+
+NSString* AVFoundationGlue::AVCaptureSessionPreset1280x720() {
+ return media::ReadNSStringPtr("AVCaptureSessionPreset1280x720");
+}
+
+NSString* AVFoundationGlue::AVVideoScalingModeKey() {
+ return media::ReadNSStringPtr("AVVideoScalingModeKey");
+}
+
+NSString* AVFoundationGlue::AVVideoScalingModeResizeAspect() {
+ return media::ReadNSStringPtr("AVVideoScalingModeResizeAspect");
+}
+
+Class AVFoundationGlue::AVCaptureSessionClass() {
+ return [AVFoundationBundle() classNamed:@"AVCaptureSession"];
+}
+
+Class AVFoundationGlue::AVCaptureVideoDataOutputClass() {
+ return [AVFoundationBundle() classNamed:@"AVCaptureVideoDataOutput"];
+}
+
+@implementation AVCaptureDeviceGlue
+
++ (NSArray*)devices {
+ Class avcClass =
+ [AVFoundationGlue::AVFoundationBundle() classNamed:@"AVCaptureDevice"];
+ if ([avcClass respondsToSelector:@selector(devices)]) {
+ return [avcClass performSelector:@selector(devices)];
+ }
+ return nil;
+}
+
++ (CrAVCaptureDevice*)deviceWithUniqueID:(NSString*)deviceUniqueID {
+ Class avcClass =
+ [AVFoundationGlue::AVFoundationBundle() classNamed:@"AVCaptureDevice"];
+ return [avcClass performSelector:@selector(deviceWithUniqueID:)
+ withObject:deviceUniqueID];
+}
+
+@end // @implementation AVCaptureDeviceGlue
+
+@implementation AVCaptureDeviceInputGlue
+
++ (CrAVCaptureDeviceInput*)deviceInputWithDevice:(CrAVCaptureDevice*)device
+ error:(NSError**)outError {
+ return [[AVFoundationGlue::AVFoundationBundle()
+ classNamed:@"AVCaptureDeviceInput"] deviceInputWithDevice:device
+ error:outError];
+}
+
+@end // @implementation AVCaptureDeviceInputGlue
diff --git a/chromium/media/video/capture/mac/coremedia_glue.h b/chromium/media/video/capture/mac/coremedia_glue.h
new file mode 100644
index 00000000000..a1f21eb1480
--- /dev/null
+++ b/chromium/media/video/capture/mac/coremedia_glue.h
@@ -0,0 +1,46 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_COREMEDIA_GLUE_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_COREMEDIA_GLUE_H_
+
+#import <CoreVideo/CoreVideo.h>
+#import <Foundation/Foundation.h>
+
+#include "base/basictypes.h"
+#include "media/base/media_export.h"
+
+// CoreMedia API is only introduced in Mac OS X > 10.6, the (potential) linking
+// with it has to happen in runtime. If it succeeds, subsequent clients can use
+// CoreMedia via the class declared in this file, where the original naming has
+// been kept as much as possible.
+class MEDIA_EXPORT CoreMediaGlue {
+ public:
+ // Originally from CMTime.h
+ typedef int64_t CMTimeValue;
+ typedef int32_t CMTimeScale;
+ typedef int64_t CMTimeEpoch;
+ typedef uint32_t CMTimeFlags;
+ typedef struct {
+ CMTimeValue value;
+ CMTimeScale timescale;
+ CMTimeFlags flags;
+ CMTimeEpoch epoch;
+ } CMTime;
+
+ // Originally from CMSampleBuffer.h.
+ typedef struct OpaqueCMSampleBuffer* CMSampleBufferRef;
+
+ // Originally from CMTime.h.
+ static CMTime CMTimeMake(int64_t value, int32_t timescale);
+
+ // Originally from CMSampleBuffer.h.
+ static CVImageBufferRef CMSampleBufferGetImageBuffer(
+ CMSampleBufferRef buffer);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CoreMediaGlue);
+};
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_COREMEDIA_GLUE_H_
diff --git a/chromium/media/video/capture/mac/coremedia_glue.mm b/chromium/media/video/capture/mac/coremedia_glue.mm
new file mode 100644
index 00000000000..f94256b6c92
--- /dev/null
+++ b/chromium/media/video/capture/mac/coremedia_glue.mm
@@ -0,0 +1,70 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/capture/mac/coremedia_glue.h"
+
+#include <dlfcn.h>
+
+#include "base/logging.h"
+#include "base/lazy_instance.h"
+
+namespace {
+
+// This class is used to retrieve some CoreMedia library functions. It must be
+// used as a LazyInstance so that it is initialised once and in a thread-safe
+// way. Normally no work is done in constructors: LazyInstance is an exception.
+class CoreMediaLibraryInternal {
+ public:
+ typedef CoreMediaGlue::CMTime (*CMTimeMakeMethod)(int64_t, int32_t);
+ typedef CVImageBufferRef (*CMSampleBufferGetImageBufferMethod)(
+ CoreMediaGlue::CMSampleBufferRef);
+
+ CoreMediaLibraryInternal() {
+ NSBundle* bundle = [NSBundle
+ bundleWithPath:@"/System/Library/Frameworks/CoreMedia.framework"];
+
+ const char* path = [[bundle executablePath] fileSystemRepresentation];
+ CHECK(path);
+ void* library_handle = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
+ CHECK(library_handle) << dlerror();
+
+ // Now extract the methods.
+ cm_time_make_ = reinterpret_cast<CMTimeMakeMethod>(
+ dlsym(library_handle, "CMTimeMake"));
+ CHECK(cm_time_make_) << dlerror();
+
+ cm_sample_buffer_get_image_buffer_method_ =
+ reinterpret_cast<CMSampleBufferGetImageBufferMethod>(
+ dlsym(library_handle, "CMSampleBufferGetImageBuffer"));
+ CHECK(cm_sample_buffer_get_image_buffer_method_) << dlerror();
+ }
+
+ const CMTimeMakeMethod& cm_time_make() const { return cm_time_make_; }
+ const CMSampleBufferGetImageBufferMethod&
+ cm_sample_buffer_get_image_buffer_method() const {
+ return cm_sample_buffer_get_image_buffer_method_;
+ }
+
+ private:
+ CMTimeMakeMethod cm_time_make_;
+ CMSampleBufferGetImageBufferMethod cm_sample_buffer_get_image_buffer_method_;
+
+ DISALLOW_COPY_AND_ASSIGN(CoreMediaLibraryInternal);
+};
+
+} // namespace
+
+static base::LazyInstance<CoreMediaLibraryInternal> g_coremedia_handle =
+ LAZY_INSTANCE_INITIALIZER;
+
+CoreMediaGlue::CMTime CoreMediaGlue::CMTimeMake(int64_t value,
+ int32_t timescale) {
+ return g_coremedia_handle.Get().cm_time_make()(value, timescale);
+}
+
+CVImageBufferRef CoreMediaGlue::CMSampleBufferGetImageBuffer(
+ CMSampleBufferRef buffer) {
+ return g_coremedia_handle.Get().cm_sample_buffer_get_image_buffer_method()(
+ buffer);
+}
diff --git a/chromium/media/video/capture/mac/platform_video_capturing_mac.h b/chromium/media/video/capture/mac/platform_video_capturing_mac.h
new file mode 100644
index 00000000000..466ae1bc8fd
--- /dev/null
+++ b/chromium/media/video/capture/mac/platform_video_capturing_mac.h
@@ -0,0 +1,50 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_PLATFORM_VIDEO_CAPTURING_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_PLATFORM_VIDEO_CAPTURING_MAC_H_
+
+#import <Foundation/Foundation.h>
+
+namespace media {
+class VideoCaptureDeviceMac;
+}
+
+// Protocol representing platform-dependent video capture on Mac, implemented
+// by both QTKit and AVFoundation APIs.
+@protocol PlatformVideoCapturingMac <NSObject>
+
+// This method initializes the instance by calling NSObject |init| and registers
+// internally a frame receiver at the same time. The frame receiver is supposed
+// to be initialised before and outlive the VideoCapturingDeviceMac
+// implementation.
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Set the frame receiver. This method executes the registration in mutual
+// exclusion.
+// TODO(mcasas): This method and stopCapture() are always called in sequence and
+// this one is only used to clear the frameReceiver, investigate if both can be
+// merged.
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets which capture device to use by name passed as deviceId argument. The
+// device names are usually obtained via VideoCaptureDevice::GetDeviceNames()
+// method. This method will also configure all device properties except those in
+// setCaptureHeight:width:frameRate. If |deviceId| is nil, all potential
+// configuration is torn down. Returns YES on success, NO otherwise.
+- (BOOL)setCaptureDevice:(NSString*)deviceId;
+
+// Configures the capture properties.
+- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate;
+
+// Start video capturing, register observers. Returns YES on success, NO
+// otherwise.
+- (BOOL)startCapture;
+
+// Stops video capturing, unregisters observers.
+- (void)stopCapture;
+
+@end
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_PLATFORM_VIDEO_CAPTURING_MAC_H_
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
new file mode 100644
index 00000000000..0e617e90cda
--- /dev/null
+++ b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
@@ -0,0 +1,113 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
+
+#import <Foundation/Foundation.h>
+
+#import "base/mac/scoped_nsobject.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#import "media/video/capture/mac/avfoundation_glue.h"
+#import "media/video/capture/mac/platform_video_capturing_mac.h"
+
+namespace media {
+class VideoCaptureDeviceMac;
+}
+
+@class CrAVCaptureDevice;
+@class CrAVCaptureSession;
+@class CrAVCaptureVideoDataOutput;
+
+// Class used by VideoCaptureDeviceMac (VCDM) for video capture using
+// AVFoundation API. This class lives inside the thread created by its owner
+// VCDM.
+//
+// * Clients (VCDM) should call +deviceNames to fetch the list of devices
+// available in the system; this method returns the list of device names that
+// have to be used with -setCaptureDevice:.
+// * Prior to any use, clients (VCDM) must call -initWithFrameReceiver: to
+// initialise an object of this class and register a |frameReceiver_|.
+// * Frame receiver registration or removal can also happen via explicit call
+// to -setFrameReceiver:. Re-registrations are safe and allowed, even during
+// capture using this method.
+// * Method -setCaptureDevice: must be called at least once with a device
+// identifier from +deviceNames. Creates all the necessary AVFoundation
+// objects on first call; it connects them ready for capture every time.
+// This method should not be called during capture (i.e. between
+// -startCapture and -stopCapture).
+// * -setCaptureWidth:height:frameRate: is called if a resolution or frame rate
+//   different from the default one set by -setCaptureDevice: is needed.
+// This method should not be called during capture. This method must be
+// called after -setCaptureDevice:.
+// * -startCapture registers the notification listeners and starts the
+//   capture. The capture can be stopped using -stopCapture. The capture can be
+//   restarted and stopped multiple times, reconfiguring or not the device in
+// between.
+// * -setCaptureDevice can be called with a |nil| value, case in which it stops
+// the capture and disconnects the library objects. This step is not
+// necessary.
+// * Deallocation of the library objects happens gracefully on destruction of
+// the VideoCaptureDeviceAVFoundation object.
+//
+//
+@interface VideoCaptureDeviceAVFoundation
+ : NSObject<CrAVCaptureVideoDataOutputSampleBufferDelegate,
+ PlatformVideoCapturingMac> {
+ @private
+ // The following attributes are set via -setCaptureHeight:width:frameRate:.
+ int frameWidth_;
+ int frameHeight_;
+ int frameRate_;
+
+ base::Lock lock_; // Protects concurrent setting and using of frameReceiver_.
+ media::VideoCaptureDeviceMac* frameReceiver_; // weak.
+
+ base::scoped_nsobject<CrAVCaptureSession> captureSession_;
+
+ // |captureDevice_| is an object coming from AVFoundation, used only to be
+ // plugged in |captureDeviceInput_| and to query for session preset support.
+ CrAVCaptureDevice* captureDevice_;
+ // |captureDeviceInput_| is owned by |captureSession_|.
+ CrAVCaptureDeviceInput* captureDeviceInput_;
+ base::scoped_nsobject<CrAVCaptureVideoDataOutput> captureVideoDataOutput_;
+
+ base::ThreadChecker thread_checker_;
+}
+
+// Returns a dictionary of capture devices with friendly name and unique id.
++ (NSDictionary*)deviceNames;
+
+// Initializes the instance and the underlying capture session and registers the
+// frame receiver.
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets the frame receiver.
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets which capture device to use by name, retrieved via |deviceNames|. Once
+// the deviceId is known, the library objects are created if needed and
+// connected for the capture, and a default resolution is set. If deviceId is
+// nil, then the eventual capture is stopped and library objects are
+// disconnected. Returns YES on success, NO otherwise. This method should not be
+// called during capture.
+- (BOOL)setCaptureDevice:(NSString*)deviceId;
+
+// Configures the capture properties for the capture session and the video data
+// output; this means it MUST be called after setCaptureDevice:. Return YES on
+// success, else NO.
+- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate;
+
+// Starts video capturing and register the notification listeners. Must be
+// called after setCaptureDevice:, and, eventually, also after
+// setCaptureHeight:width:frameRate:. Returns YES on success, NO otherwise.
+- (BOOL)startCapture;
+
+// Stops video capturing and stops listening to notifications.
+- (void)stopCapture;
+
+@end
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
new file mode 100644
index 00000000000..a6bf920a2c2
--- /dev/null
+++ b/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
@@ -0,0 +1,246 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
+
+#import <CoreVideo/CoreVideo.h>
+
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "media/video/capture/mac/video_capture_device_mac.h"
+#include "ui/gfx/size.h"
+
+@implementation VideoCaptureDeviceAVFoundation
+
+#pragma mark Class methods
+
++ (void)getDeviceNames:(NSMutableDictionary*)deviceNames {
+ // At this stage we already know that AVFoundation is supported and the whole
+ // library is loaded and initialised, by the device monitoring.
+ NSArray* devices = [AVCaptureDeviceGlue devices];
+ for (CrAVCaptureDevice* device in devices) {
+ if ([device hasMediaType:AVFoundationGlue::AVMediaTypeVideo()] ||
+ [device hasMediaType:AVFoundationGlue::AVMediaTypeMuxed()]) {
+ [deviceNames setObject:[device localizedName]
+ forKey:[device uniqueID]];
+ }
+ }
+}
+
++ (NSDictionary*)deviceNames {
+ NSMutableDictionary* deviceNames =
+ [[[NSMutableDictionary alloc] init] autorelease];
+ // The device name retrieval is not going to happen in the main thread, and
+ // this might cause instabilities (it did in QTKit), so keep an eye here.
+ [self getDeviceNames:deviceNames];
+ return deviceNames;
+}
+
+#pragma mark Public methods
+
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
+ if ((self = [super init])) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(frameReceiver);
+ [self setFrameReceiver:frameReceiver];
+ captureSession_.reset(
+ [[AVFoundationGlue::AVCaptureSessionClass() alloc] init]);
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [self stopCapture];
+ [super dealloc];
+}
+
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
+ base::AutoLock lock(lock_);
+ frameReceiver_ = frameReceiver;
+}
+
+- (BOOL)setCaptureDevice:(NSString*)deviceId {
+ DCHECK(captureSession_);
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (!deviceId) {
+ // First stop the capture session, if it's running.
+ [self stopCapture];
+ // Now remove the input and output from the capture session.
+ [captureSession_ removeOutput:captureVideoDataOutput_];
+ if (captureDeviceInput_) {
+ [captureSession_ removeInput:captureDeviceInput_];
+ // No need to release |captureDeviceInput_|, is owned by the session.
+ captureDeviceInput_ = nil;
+ }
+ return YES;
+ }
+
+ // Look for input device with requested name.
+ captureDevice_ = [AVCaptureDeviceGlue deviceWithUniqueID:deviceId];
+ if (!captureDevice_) {
+ DLOG(ERROR) << "Could not open video capture device.";
+ return NO;
+ }
+
+ // Create the capture input associated with the device. Easy peasy.
+ NSError* error = nil;
+ captureDeviceInput_ = [AVCaptureDeviceInputGlue
+ deviceInputWithDevice:captureDevice_
+ error:&error];
+ if (!captureDeviceInput_) {
+ captureDevice_ = nil;
+ DLOG(ERROR) << "Could not create video capture input: "
+ << [[error localizedDescription] UTF8String];
+ return NO;
+ }
+ [captureSession_ addInput:captureDeviceInput_];
+
+ // Create a new data output for video. The data output is configured to
+ // discard late frames by default.
+ captureVideoDataOutput_.reset(
+ [[AVFoundationGlue::AVCaptureVideoDataOutputClass() alloc] init]);
+ if (!captureVideoDataOutput_) {
+ [captureSession_ removeInput:captureDeviceInput_];
+ DLOG(ERROR) << "Could not create video data output.";
+ return NO;
+ }
+ [captureVideoDataOutput_
+ setSampleBufferDelegate:self
+ queue:dispatch_get_global_queue(
+ DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)];
+ [captureSession_ addOutput:captureVideoDataOutput_];
+ return YES;
+}
+
+- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ frameWidth_ = width;
+ frameHeight_ = height;
+ frameRate_ = frameRate;
+
+ // Identify the sessionPreset that corresponds to the desired resolution.
+ NSString* sessionPreset;
+ if (width == 1280 && height == 720 && [captureSession_ canSetSessionPreset:
+ AVFoundationGlue::AVCaptureSessionPreset1280x720()]) {
+ sessionPreset = AVFoundationGlue::AVCaptureSessionPreset1280x720();
+ } else if (width == 640 && height == 480 && [captureSession_
+ canSetSessionPreset:
+ AVFoundationGlue::AVCaptureSessionPreset640x480()]) {
+ sessionPreset = AVFoundationGlue::AVCaptureSessionPreset640x480();
+ } else if (width == 320 && height == 240 && [captureSession_
+ canSetSessionPreset:
+ AVFoundationGlue::AVCaptureSessionPreset320x240()]) {
+ sessionPreset = AVFoundationGlue::AVCaptureSessionPreset320x240();
+ } else {
+ DLOG(ERROR) << "Unsupported resolution (" << width << "x" << height << ")";
+ return NO;
+ }
+ [captureSession_ setSessionPreset:sessionPreset];
+
+ // Check that our capture Device can be used with the current preset.
+ if (![captureDevice_ supportsAVCaptureSessionPreset:
+ [captureSession_ sessionPreset]]){
+ DLOG(ERROR) << "Video capture device does not support current preset";
+ return NO;
+ }
+
+ // Despite all Mac documentation detailing that setting the sessionPreset is
+ // enough, that is not the case for, at least, the MacBook Air built-in
+ // FaceTime HD Camera, and the capture output has to be configured as well.
+ // The reason for this mismatch is probably because most of the AVFoundation
+ // docs are written for iOS and not for MacOsX.
+ // AVVideoScalingModeKey() refers to letterboxing yes/no and preserve aspect
+ // ratio yes/no when scaling. Currently we set letterbox and preservation.
+ NSDictionary* videoSettingsDictionary = @{
+ (id)kCVPixelBufferWidthKey : @(width),
+ (id)kCVPixelBufferHeightKey : @(height),
+ (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_422YpCbCr8),
+ AVFoundationGlue::AVVideoScalingModeKey() :
+ AVFoundationGlue::AVVideoScalingModeResizeAspect()
+ };
+ [captureVideoDataOutput_ setVideoSettings:videoSettingsDictionary];
+
+ CrAVCaptureConnection* captureConnection = [captureVideoDataOutput_
+ connectionWithMediaType:AVFoundationGlue::AVMediaTypeVideo()];
+ // TODO(mcasas): Check selector existence, related to bugs
+ // http://crbug.com/327532 and http://crbug.com/328096.
+ if ([captureConnection
+ respondsToSelector:@selector(isVideoMinFrameDurationSupported)] &&
+ [captureConnection isVideoMinFrameDurationSupported]) {
+ [captureConnection setVideoMinFrameDuration:
+ CoreMediaGlue::CMTimeMake(1, frameRate)];
+ }
+ if ([captureConnection
+ respondsToSelector:@selector(isVideoMaxFrameDurationSupported)] &&
+ [captureConnection isVideoMaxFrameDurationSupported]) {
+ [captureConnection setVideoMaxFrameDuration:
+ CoreMediaGlue::CMTimeMake(1, frameRate)];
+ }
+ return YES;
+}
+
+- (BOOL)startCapture {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!captureSession_) {
+ DLOG(ERROR) << "Video capture session not initialized.";
+ return NO;
+ }
+ // Connect the notifications.
+ NSNotificationCenter* nc = [NSNotificationCenter defaultCenter];
+ [nc addObserver:self
+ selector:@selector(onVideoError:)
+ name:AVFoundationGlue::AVCaptureSessionRuntimeErrorNotification()
+ object:captureSession_];
+ [captureSession_ startRunning];
+ return YES;
+}
+
+- (void)stopCapture {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if ([captureSession_ isRunning])
+ [captureSession_ stopRunning]; // Synchronous.
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
+}
+
+#pragma mark Private methods
+
+// |captureOutput| is called by the capture device to deliver a new frame.
+- (void)captureOutput:(CrAVCaptureOutput*)captureOutput
+ didOutputSampleBuffer:(CoreMediaGlue::CMSampleBufferRef)sampleBuffer
+ fromConnection:(CrAVCaptureConnection*)connection {
+ CVImageBufferRef videoFrame =
+ CoreMediaGlue::CMSampleBufferGetImageBuffer(sampleBuffer);
+ // Lock the frame and calculate frame size.
+ const int kLockFlags = 0;
+ if (CVPixelBufferLockBaseAddress(videoFrame, kLockFlags) ==
+ kCVReturnSuccess) {
+ void* baseAddress = CVPixelBufferGetBaseAddress(videoFrame);
+ size_t bytesPerRow = CVPixelBufferGetBytesPerRow(videoFrame);
+ size_t frameWidth = CVPixelBufferGetWidth(videoFrame);
+ size_t frameHeight = CVPixelBufferGetHeight(videoFrame);
+ size_t frameSize = bytesPerRow * frameHeight;
+ UInt8* addressToPass = reinterpret_cast<UInt8*>(baseAddress);
+
+ media::VideoCaptureFormat captureFormat(
+ gfx::Size(frameWidth, frameHeight),
+ frameRate_,
+ media::PIXEL_FORMAT_UYVY);
+ base::AutoLock lock(lock_);
+ if (!frameReceiver_)
+ return;
+ frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureFormat, 0, 0);
+ CVPixelBufferUnlockBaseAddress(videoFrame, kLockFlags);
+ }
+}
+
+- (void)onVideoError:(NSNotification*)errorNotification {
+ NSError* error = base::mac::ObjCCast<NSError>([[errorNotification userInfo]
+ objectForKey:AVFoundationGlue::AVCaptureSessionErrorKey()]);
+ base::AutoLock lock(lock_);
+ if (frameReceiver_)
+ frameReceiver_->ReceiveError([[error localizedDescription] UTF8String]);
+}
+
+@end
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.h b/chromium/media/video/capture/mac/video_capture_device_mac.h
index e600459e2c9..474e7e1bf45 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.h
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// OS X implementation of VideoCaptureDevice, using QTKit as native capture API.
+// MacOSX implementation of generic VideoCaptureDevice, using either QTKit or
+// AVFoundation as native capture API. QTKit is used in OSX versions 10.6 and
+// previous, and AVFoundation is used in the rest.
#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
@@ -16,31 +18,29 @@
#include "media/video/capture/video_capture_device.h"
#include "media/video/capture/video_capture_types.h"
-@class VideoCaptureDeviceQTKit;
+@protocol PlatformVideoCapturingMac;
namespace media {
// Called by VideoCaptureManager to open, close and start, stop video capture
// devices.
-class VideoCaptureDeviceMac : public VideoCaptureDevice1 {
+class VideoCaptureDeviceMac : public VideoCaptureDevice {
public:
explicit VideoCaptureDeviceMac(const Name& device_name);
virtual ~VideoCaptureDeviceMac();
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client)
+ OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
bool Init();
// Called to deliver captured video frames.
void ReceiveFrame(const uint8* video_frame,
int video_frame_length,
- const VideoCaptureCapability& frame_info,
+ const VideoCaptureFormat& frame_format,
int aspect_numerator,
int aspect_denominator);
@@ -54,16 +54,16 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice1 {
enum InternalState {
kNotInitialized,
kIdle,
- kAllocated,
kCapturing,
kError
};
Name device_name_;
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
- VideoCaptureCapability current_settings_;
+ VideoCaptureFormat capture_format_;
bool sent_frame_info_;
+ bool tried_to_square_pixels_;
// Only read and write state_ from inside this loop.
const scoped_refptr<base::MessageLoopProxy> loop_proxy_;
@@ -74,7 +74,7 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice1 {
base::WeakPtrFactory<VideoCaptureDeviceMac> weak_factory_;
base::WeakPtr<VideoCaptureDeviceMac> weak_this_;
- VideoCaptureDeviceQTKit* capture_device_;
+ id<PlatformVideoCapturingMac> capture_device_;
DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceMac);
};
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.mm b/chromium/media/video/capture/mac/video_capture_device_mac.mm
index eea861481fe..dba4fa1c6fb 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_mac.mm
@@ -4,15 +4,16 @@
#include "media/video/capture/mac/video_capture_device_mac.h"
-#import <QTKit/QTKit.h>
-
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/time/time.h"
-#include "media/video/capture/mac/video_capture_device_qtkit_mac.h"
+#import "media/video/capture/mac/avfoundation_glue.h"
+#import "media/video/capture/mac/platform_video_capturing_mac.h"
+#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
+#import "media/video/capture/mac/video_capture_device_qtkit_mac.h"
-namespace {
+namespace media {
const int kMinFrameRate = 1;
const int kMaxFrameRate = 30;
@@ -35,6 +36,12 @@ const Resolution* const kWellSupportedResolutions[] = {
&kHD,
};
+// Rescaling the image to fix the pixel aspect ratio runs the risk of making
+// the aspect ratio worse, if QTKit selects a new source mode with a different
+// shape. This constant ensures that we don't take this risk if the current
+// aspect ratio is tolerable.
+const float kMaxPixelAspectRatio = 1.15;
+
// TODO(ronghuawu): Replace this with CapabilityList::GetBestMatchedCapability.
void GetBestMatchSupportedResolution(int* width, int* height) {
int min_diff = kint32max;
@@ -55,15 +62,18 @@ void GetBestMatchSupportedResolution(int* width, int* height) {
*height = matched_height;
}
-}
-
-namespace media {
-
void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
// Loop through all available devices and add to |device_names|.
device_names->clear();
- NSDictionary* capture_devices = [VideoCaptureDeviceQTKit deviceNames];
+ NSDictionary* capture_devices;
+ if (AVFoundationGlue::IsAVFoundationSupported()) {
+ DVLOG(1) << "Enumerating video capture devices using AVFoundation";
+ capture_devices = [VideoCaptureDeviceAVFoundation deviceNames];
+ } else {
+ DVLOG(1) << "Enumerating video capture devices using QTKit";
+ capture_devices = [VideoCaptureDeviceQTKit deviceNames];
+ }
for (NSString* key in capture_devices) {
Name name([[capture_devices valueForKey:key] UTF8String],
[key UTF8String]);
@@ -71,6 +81,12 @@ void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
}
}
+// static
+void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* formats) {
+ NOTIMPLEMENTED();
+}
+
const std::string VideoCaptureDevice::Name::GetModel() const {
// Both PID and VID are 4 characters.
if (unique_id_.size() < 2 * kVidPidSize) {
@@ -99,8 +115,8 @@ VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
VideoCaptureDeviceMac::VideoCaptureDeviceMac(const Name& device_name)
: device_name_(device_name),
- observer_(NULL),
sent_frame_info_(false),
+ tried_to_square_pixels_(false),
loop_proxy_(base::MessageLoopProxy::current()),
state_(kNotInitialized),
weak_factory_(this),
@@ -113,24 +129,23 @@ VideoCaptureDeviceMac::~VideoCaptureDeviceMac() {
[capture_device_ release];
}
-void VideoCaptureDeviceMac::Allocate(
- const VideoCaptureCapability& capture_format,
- EventHandler* observer) {
+void VideoCaptureDeviceMac::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
if (state_ != kIdle) {
return;
}
- int width = capture_format.width;
- int height = capture_format.height;
- int frame_rate = capture_format.frame_rate;
+ int width = params.requested_format.frame_size.width();
+ int height = params.requested_format.frame_size.height();
+ int frame_rate = params.requested_format.frame_rate;
- // QTKit can scale captured frame to any size requested, which would lead to
- // undesired aspect ratio change. Tries to open the camera with a natively
- // supported format and let the client to crop/pad the captured frames.
- GetBestMatchSupportedResolution(&width,
- &height);
+ // The OS API can scale captured frame to any size requested, which would lead
+ // to undesired aspect ratio change. Try to open the camera with a natively
+ // supported format and let the client crop/pad the captured frames.
+ GetBestMatchSupportedResolution(&width, &height);
- observer_ = observer;
+ client_ = client.Pass();
NSString* deviceId =
[NSString stringWithUTF8String:device_name_.id().c_str()];
@@ -145,23 +160,18 @@ void VideoCaptureDeviceMac::Allocate(
else if (frame_rate > kMaxFrameRate)
frame_rate = kMaxFrameRate;
- current_settings_.color = PIXEL_FORMAT_UYVY;
- current_settings_.width = width;
- current_settings_.height = height;
- current_settings_.frame_rate = frame_rate;
- current_settings_.expected_capture_delay = 0;
- current_settings_.interlaced = false;
+ capture_format_.frame_size.SetSize(width, height);
+ capture_format_.frame_rate = frame_rate;
+ capture_format_.pixel_format = PIXEL_FORMAT_UYVY;
- if (width != kHD.width || height != kHD.height) {
+ if (width <= kVGA.width || height <= kVGA.height) {
// If the resolution is VGA or QVGA, set the capture resolution to the
- // target size. For most cameras (though not all), at these resolutions
- // QTKit produces frames with square pixels.
+ // target size. Essentially all supported cameras offer at least VGA.
if (!UpdateCaptureResolution())
return;
-
- sent_frame_info_ = true;
- observer_->OnFrameInfo(current_settings_);
}
+ // For higher resolutions, we first open at the default resolution to find
+ // out if the request is larger than the camera's native resolution.
// If the resolution is HD, start capturing without setting a resolution.
// QTKit will produce frames at the native resolution, allowing us to
@@ -174,56 +184,46 @@ void VideoCaptureDeviceMac::Allocate(
return;
}
- state_ = kAllocated;
-}
-
-void VideoCaptureDeviceMac::Start() {
- DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
- DCHECK_EQ(state_, kAllocated);
state_ = kCapturing;
-
- // This method no longer has any effect. Capturing is triggered by
- // the call to Allocate.
- // TODO(bemasc, ncarter): Remove this method.
}
-void VideoCaptureDeviceMac::Stop() {
+void VideoCaptureDeviceMac::StopAndDeAllocate() {
DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
DCHECK(state_ == kCapturing || state_ == kError) << state_;
[capture_device_ stopCapture];
- state_ = kAllocated;
-}
-void VideoCaptureDeviceMac::DeAllocate() {
- DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
- if (state_ != kAllocated && state_ != kCapturing) {
- return;
- }
- if (state_ == kCapturing) {
- [capture_device_ stopCapture];
- }
[capture_device_ setCaptureDevice:nil];
[capture_device_ setFrameReceiver:nil];
-
+ client_.reset();
state_ = kIdle;
-}
-
-const VideoCaptureDevice::Name& VideoCaptureDeviceMac::device_name() {
- return device_name_;
+ tried_to_square_pixels_ = false;
}
bool VideoCaptureDeviceMac::Init() {
DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
DCHECK_EQ(state_, kNotInitialized);
+ // TODO(mcasas): The following check might not be necessary; if the device has
+ // disappeared after enumeration and before coming here, opening would just
+ // fail but not necessarily produce a crash.
Names device_names;
GetDeviceNames(&device_names);
- Name* found = device_names.FindById(device_name_.id());
- if (!found)
+ Names::iterator it = device_names.begin();
+ for (; it != device_names.end(); ++it) {
+ if (it->id() == device_name_.id())
+ break;
+ }
+ if (it == device_names.end())
return false;
- capture_device_ =
- [[VideoCaptureDeviceQTKit alloc] initWithFrameReceiver:this];
+ if (AVFoundationGlue::IsAVFoundationSupported()) {
+ capture_device_ =
+ [[VideoCaptureDeviceAVFoundation alloc] initWithFrameReceiver:this];
+ } else {
+ capture_device_ =
+ [[VideoCaptureDeviceQTKit alloc] initWithFrameReceiver:this];
+ }
+
if (!capture_device_)
return false;
@@ -234,54 +234,81 @@ bool VideoCaptureDeviceMac::Init() {
void VideoCaptureDeviceMac::ReceiveFrame(
const uint8* video_frame,
int video_frame_length,
- const VideoCaptureCapability& frame_info,
+ const VideoCaptureFormat& frame_format,
int aspect_numerator,
int aspect_denominator) {
// This method is safe to call from a device capture thread,
// i.e. any thread controlled by QTKit.
if (!sent_frame_info_) {
- if (current_settings_.width == kHD.width &&
- current_settings_.height == kHD.height) {
- bool changeToVga = false;
- if (frame_info.width < kHD.width || frame_info.height < kHD.height) {
+ // Final resolution has not yet been selected.
+ if (capture_format_.frame_size.width() > kVGA.width ||
+ capture_format_.frame_size.height() > kVGA.height) {
+ // We are requesting HD. Make sure that the picture is good, otherwise
+ // drop down to VGA.
+ bool change_to_vga = false;
+ if (frame_format.frame_size.width() <
+ capture_format_.frame_size.width() ||
+ frame_format.frame_size.height() <
+ capture_format_.frame_size.height()) {
// These are the default capture settings, not yet configured to match
- // |current_settings_|.
- DCHECK(frame_info.frame_rate == 0);
+ // |capture_format_|.
+ DCHECK(frame_format.frame_rate == 0);
DVLOG(1) << "Switching to VGA because the default resolution is " <<
- frame_info.width << "x" << frame_info.height;
- changeToVga = true;
+ frame_format.frame_size.ToString();
+ change_to_vga = true;
}
- if (frame_info.width == kHD.width && frame_info.height == kHD.height &&
+
+ if (capture_format_.frame_size == frame_format.frame_size &&
aspect_numerator != aspect_denominator) {
DVLOG(1) << "Switching to VGA because HD has nonsquare pixel " <<
"aspect ratio " << aspect_numerator << ":" << aspect_denominator;
- changeToVga = true;
+ change_to_vga = true;
}
- if (changeToVga) {
- current_settings_.width = kVGA.width;
- current_settings_.height = kVGA.height;
+ if (change_to_vga) {
+ capture_format_.frame_size.SetSize(kVGA.width, kVGA.height);
+ }
+ }
+
+ if (capture_format_.frame_size == frame_format.frame_size &&
+ !tried_to_square_pixels_ &&
+ (aspect_numerator > kMaxPixelAspectRatio * aspect_denominator ||
+ aspect_denominator > kMaxPixelAspectRatio * aspect_numerator)) {
+ // The requested size results in non-square PAR.
+ // Shrink the frame to 1:1 PAR (assuming QTKit selects the same input
+ // mode, which is not guaranteed).
+ int new_width = capture_format_.frame_size.width();
+ int new_height = capture_format_.frame_size.height();
+ if (aspect_numerator < aspect_denominator) {
+ new_width = (new_width * aspect_numerator) / aspect_denominator;
+ } else {
+ new_height = (new_height * aspect_denominator) / aspect_numerator;
}
+ capture_format_.frame_size.SetSize(new_width, new_height);
+ tried_to_square_pixels_ = true;
}
- if (current_settings_.width == frame_info.width &&
- current_settings_.height == frame_info.height) {
+ if (capture_format_.frame_size == frame_format.frame_size) {
sent_frame_info_ = true;
- observer_->OnFrameInfo(current_settings_);
} else {
UpdateCaptureResolution();
- // The current frame does not have the right width and height, so it
- // must not be passed to |observer_|.
+ // OnFrameInfo has not yet been called. OnIncomingCapturedFrame must
+ // not be called until after OnFrameInfo, so we return early.
return;
}
}
- DCHECK(current_settings_.width == frame_info.width &&
- current_settings_.height == frame_info.height);
+ DCHECK_EQ(capture_format_.frame_size.width(),
+ frame_format.frame_size.width());
+ DCHECK_EQ(capture_format_.frame_size.height(),
+ frame_format.frame_size.height());
- observer_->OnIncomingCapturedFrame(
- video_frame, video_frame_length, base::Time::Now(), 0, false, false);
+ client_->OnIncomingCapturedFrame(video_frame,
+ video_frame_length,
+ base::Time::Now(),
+ 0,
+ capture_format_);
}
void VideoCaptureDeviceMac::ReceiveError(const std::string& reason) {
@@ -294,13 +321,13 @@ void VideoCaptureDeviceMac::SetErrorState(const std::string& reason) {
DCHECK_EQ(loop_proxy_, base::MessageLoopProxy::current());
DLOG(ERROR) << reason;
state_ = kError;
- observer_->OnError();
+ client_->OnError();
}
bool VideoCaptureDeviceMac::UpdateCaptureResolution() {
- if (![capture_device_ setCaptureHeight:current_settings_.height
- width:current_settings_.width
- frameRate:current_settings_.frame_rate]) {
+ if (![capture_device_ setCaptureHeight:capture_format_.frame_size.height()
+ width:capture_format_.frame_size.width()
+ frameRate:capture_format_.frame_rate]) {
ReceiveError("Could not configure capture device.");
return false;
}
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
index d032ef0481f..1eba8a12ea2 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
+++ b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
@@ -5,21 +5,23 @@
// VideoCaptureDeviceQTKit implements all QTKit related code for
// communicating with a QTKit capture device.
-#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_QTKIT_H_
-#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_QTKIT_H_
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_QTKIT_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_QTKIT_MAC_H_
#import <Foundation/Foundation.h>
#include <vector>
+#import "media/video/capture/mac/platform_video_capturing_mac.h"
+
namespace media {
- class VideoCaptureDeviceMac;
+class VideoCaptureDeviceMac;
}
@class QTCaptureDeviceInput;
@class QTCaptureSession;
-@interface VideoCaptureDeviceQTKit : NSObject {
+@interface VideoCaptureDeviceQTKit : NSObject<PlatformVideoCapturingMac> {
@private
// Settings.
int frameRate_;
@@ -38,16 +40,16 @@ namespace media {
}
// Returns a dictionary of capture devices with friendly name and unique id.
-+ (NSDictionary *)deviceNames;
++ (NSDictionary*)deviceNames;
// Initializes the instance and registers the frame receiver.
-- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac *)frameReceiver;
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
// Set the frame receiver.
-- (void)setFrameReceiver:(media::VideoCaptureDeviceMac *)frameReceiver;
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
// Sets which capture device to use. Returns YES on sucess, NO otherwise.
-- (BOOL)setCaptureDevice:(NSString *)deviceId;
+- (BOOL)setCaptureDevice:(NSString*)deviceId;
// Configures the capture properties.
- (BOOL)setCaptureHeight:(int)height width:(int)width frameRate:(int)frameRate;
@@ -59,8 +61,8 @@ namespace media {
- (void)stopCapture;
// Handle any QTCaptureSessionRuntimeErrorNotifications.
-- (void)handleNotification:(NSNotification *)errorNotification;
+- (void)handleNotification:(NSNotification*)errorNotification;
@end
-#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_QTKIT_H_
+#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_QTKIT_MAC_H_
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
index 2b7e28e4e70..cd9c6d333e9 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
+++ b/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
@@ -12,6 +12,7 @@
#include "media/video/capture/mac/video_capture_device_mac.h"
#include "media/video/capture/video_capture_device.h"
#include "media/video/capture/video_capture_types.h"
+#include "ui/gfx/size.h"
@implementation VideoCaptureDeviceQTKit
@@ -28,8 +29,9 @@
});
for (QTCaptureDevice* device in captureDevices) {
- [deviceNames setObject:[device localizedDisplayName]
- forKey:[device uniqueID]];
+ if (![[device attributeForKey:QTCaptureDeviceSuspendedAttribute] boolValue])
+ [deviceNames setObject:[device localizedDisplayName]
+ forKey:[device uniqueID]];
}
}
@@ -47,7 +49,7 @@
#pragma mark Public methods
-- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac *)frameReceiver {
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
self = [super init];
if (self) {
frameReceiver_ = frameReceiver;
@@ -62,13 +64,13 @@
[super dealloc];
}
-- (void)setFrameReceiver:(media::VideoCaptureDeviceMac *)frameReceiver {
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
[lock_ lock];
frameReceiver_ = frameReceiver;
[lock_ unlock];
}
-- (BOOL)setCaptureDevice:(NSString *)deviceId {
+- (BOOL)setCaptureDevice:(NSString*)deviceId {
if (deviceId) {
// Set the capture device.
if (captureDeviceInput_) {
@@ -86,6 +88,11 @@
return NO;
}
QTCaptureDevice *device = [captureDevices objectAtIndex:index];
+ if ([[device attributeForKey:QTCaptureDeviceSuspendedAttribute]
+ boolValue]) {
+ DLOG(ERROR) << "Cannot open suspended video capture device.";
+ return NO;
+ }
NSError *error;
if (![device open:&error]) {
DLOG(ERROR) << "Could not open video capture device."
@@ -172,22 +179,15 @@
QTCaptureDecompressedVideoOutput *output =
[[captureSession_ outputs] objectAtIndex:0];
- // The old capture dictionary is used to retrieve the initial pixel
- // format, which must be maintained.
- NSDictionary *oldCaptureDictionary = [output pixelBufferAttributes];
-
- // Set up desired output properties.
- NSDictionary *captureDictionary =
- [NSDictionary dictionaryWithObjectsAndKeys:
- [NSNumber numberWithDouble:width],
- (id)kCVPixelBufferWidthKey,
- [NSNumber numberWithDouble:height],
- (id)kCVPixelBufferHeightKey,
- [oldCaptureDictionary
- valueForKey:(id)kCVPixelBufferPixelFormatTypeKey],
- (id)kCVPixelBufferPixelFormatTypeKey,
- nil];
- [output setPixelBufferAttributes:captureDictionary];
+ // Set up desired output properties. The old capture dictionary is used to
+ // retrieve the initial pixel format, which must be maintained.
+ NSDictionary* videoSettingsDictionary = @{
+ (id)kCVPixelBufferWidthKey : @(width),
+ (id)kCVPixelBufferHeightKey : @(height),
+ (id)kCVPixelBufferPixelFormatTypeKey : [[output pixelBufferAttributes]
+ valueForKey:(id)kCVPixelBufferPixelFormatTypeKey]
+ };
+ [output setPixelBufferAttributes:videoSettingsDictionary];
[output setMinimumVideoFrameInterval:(NSTimeInterval)1/(float)frameRate];
return YES;
@@ -227,10 +227,10 @@
}
// |captureOutput| is called by the capture device to deliver a new frame.
-- (void)captureOutput:(QTCaptureOutput *)captureOutput
+- (void)captureOutput:(QTCaptureOutput*)captureOutput
didOutputVideoFrame:(CVImageBufferRef)videoFrame
- withSampleBuffer:(QTSampleBuffer *)sampleBuffer
- fromConnection:(QTCaptureConnection *)connection {
+ withSampleBuffer:(QTSampleBuffer*)sampleBuffer
+ fromConnection:(QTCaptureConnection*)connection {
[lock_ lock];
if(!frameReceiver_) {
[lock_ unlock];
@@ -275,13 +275,10 @@
addressToPass = adjustedAddress;
frameSize = frameHeight * expectedBytesPerRow;
}
- media::VideoCaptureCapability captureCapability;
- captureCapability.width = frameWidth;
- captureCapability.height = frameHeight;
- captureCapability.frame_rate = frameRate_;
- captureCapability.color = media::PIXEL_FORMAT_UYVY;
- captureCapability.expected_capture_delay = 0;
- captureCapability.interlaced = false;
+
+ media::VideoCaptureFormat captureFormat(gfx::Size(frameWidth, frameHeight),
+ frameRate_,
+ media::PIXEL_FORMAT_UYVY);
// The aspect ratio dictionary is often missing, in which case we report
// a pixel aspect ratio of 0:0.
@@ -301,7 +298,7 @@
}
// Deliver the captured video frame.
- frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureCapability,
+ frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureFormat,
aspectNumerator, aspectDenominator);
CVPixelBufferUnlockBaseAddress(videoFrame, kLockFlags);
@@ -309,8 +306,8 @@
[lock_ unlock];
}
-- (void)handleNotification:(NSNotification *)errorNotification {
- NSError * error = (NSError *)[[errorNotification userInfo]
+- (void)handleNotification:(NSNotification*)errorNotification {
+ NSError * error = (NSError*)[[errorNotification userInfo]
objectForKey:QTCaptureSessionErrorKey];
frameReceiver_->ReceiveError([[error localizedDescription] UTF8String]);
}
diff --git a/chromium/media/video/capture/video_capture.h b/chromium/media/video/capture/video_capture.h
index 3a4eb0e2d32..9a0e94378bb 100644
--- a/chromium/media/video/capture/video_capture.h
+++ b/chromium/media/video/capture/video_capture.h
@@ -45,35 +45,23 @@ class MEDIA_EXPORT VideoCapture {
VideoCapture* capture,
const scoped_refptr<media::VideoFrame>& frame) = 0;
- // Notify client about device info.
- virtual void OnDeviceInfoReceived(
- VideoCapture* capture,
- const VideoCaptureParams& device_info) = 0;
-
- // Notify client about the newly changed device info.
- virtual void OnDeviceInfoChanged(
- VideoCapture* capture,
- const VideoCaptureParams& device_info) {};
-
protected:
virtual ~EventHandler() {}
};
VideoCapture() {}
- // Request video capture to start capturing with |capability|.
+ // Request video capture to start capturing with |params|.
// Also register |handler| with video capture for event handling.
// |handler| must remain valid until it has received |OnRemoved()|.
virtual void StartCapture(EventHandler* handler,
- const VideoCaptureCapability& capability) = 0;
+ const VideoCaptureParams& params) = 0;
// Request video capture to stop capturing for client |handler|.
// |handler| must remain valid until it has received |OnRemoved()|.
virtual void StopCapture(EventHandler* handler) = 0;
virtual bool CaptureStarted() = 0;
- virtual int CaptureWidth() = 0;
- virtual int CaptureHeight() = 0;
virtual int CaptureFrameRate() = 0;
protected:
diff --git a/chromium/media/video/capture/video_capture_device.cc b/chromium/media/video/capture/video_capture_device.cc
index 4175412138f..c370d092c93 100644
--- a/chromium/media/video/capture/video_capture_device.cc
+++ b/chromium/media/video/capture/video_capture_device.cc
@@ -17,34 +17,6 @@ const std::string VideoCaptureDevice::Name::GetNameAndModel() const {
return device_name_ + suffix;
}
-VideoCaptureDevice::Name*
-VideoCaptureDevice::Names::FindById(const std::string& id) {
- for (iterator it = begin(); it != end(); ++it) {
- if (it->id() == id)
- return &(*it);
- }
- return NULL;
-}
-
VideoCaptureDevice::~VideoCaptureDevice() {}
-VideoCaptureDevice1::VideoCaptureDevice1() {}
-
-VideoCaptureDevice1::~VideoCaptureDevice1() {}
-
-void VideoCaptureDevice1::AllocateAndStart(
- const VideoCaptureCapability& capture_format,
- scoped_ptr<EventHandler> client) {
- client_ = client.Pass();
- Allocate(capture_format, client_.get());
- Start();
-}
-
-void VideoCaptureDevice1::StopAndDeAllocate() {
- Stop();
- DeAllocate();
- client_.reset();
-};
-
-
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device.h b/chromium/media/video/capture/video_capture_device.h
index e7340841cee..295401c3686 100644
--- a/chromium/media/video/capture/video_capture_device.h
+++ b/chromium/media/video/capture/video_capture_device.h
@@ -16,8 +16,11 @@
#include <string>
#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
+#include "media/base/video_frame.h"
#include "media/video/capture/video_capture_types.h"
namespace media {
@@ -109,32 +112,44 @@ class MEDIA_EXPORT VideoCaptureDevice {
};
// Manages a list of Name entries.
- class MEDIA_EXPORT Names
- : public NON_EXPORTED_BASE(std::list<Name>) {
+ typedef std::list<Name> Names;
+
+ class MEDIA_EXPORT Client {
public:
- // Returns NULL if no entry was found by that ID.
- Name* FindById(const std::string& id);
+ // Memory buffer returned by Client::ReserveOutputBuffer().
+ class Buffer : public base::RefCountedThreadSafe<Buffer> {
+ public:
+ int id() const { return id_; }
+ void* data() const { return data_; }
+ size_t size() const { return size_; }
- // Allow generated copy constructor and assignment.
- };
+ protected:
+ friend class base::RefCountedThreadSafe<Buffer>;
- class MEDIA_EXPORT EventHandler {
- public:
- virtual ~EventHandler() {}
+ Buffer(int id, void* data, size_t size)
+ : id_(id), data_(data), size_(size) {}
+ virtual ~Buffer() {}
- // Reserve an output buffer into which a video frame can be captured
- // directly. If all buffers are currently busy, returns NULL.
- //
- // The returned VideoFrames will always be allocated with a YV12 format. The
- // size will match that specified by an earlier call to OnFrameInfo. It is
- // the VideoCaptureDevice's responsibility to obey whatever stride and
- // memory layout are indicated on the returned VideoFrame object.
+ const int id_;
+ void* const data_;
+ const size_t size_;
+ };
+
+ virtual ~Client() {}
+
+ // Reserve an output buffer into which contents can be captured directly.
+ // The returned Buffer will always be allocated with a memory size suitable
+ // for holding a packed video frame of |format| format, of |dimensions|
+ // dimensions. It is permissible for |dimensions| to be zero; in which
+ // case the returned Buffer does not guarantee memory backing, but functions
+ // as a reservation for external input for the purposes of buffer
+ // throttling.
//
- // The output buffer stays reserved for use by the calling
- // VideoCaptureDevice until either the last reference to the VideoFrame is
- // released, or until the buffer is passed back to the EventHandler's
- // OnIncomingCapturedFrame() method.
- virtual scoped_refptr<media::VideoFrame> ReserveOutputBuffer() = 0;
+ // The output buffer stays reserved for use until the Buffer object is
+ // destroyed.
+ virtual scoped_refptr<Buffer> ReserveOutputBuffer(
+ media::VideoFrame::Format format,
+ const gfx::Size& dimensions) = 0;
// Captured a new video frame as a raw buffer. The size, color format, and
// layout are taken from the parameters specified by an earlier call to
@@ -144,44 +159,31 @@ class MEDIA_EXPORT VideoCaptureDevice {
// This method will try to reserve an output buffer and copy from |data|
// into the output buffer. If no output buffer is available, the frame will
// be silently dropped.
- virtual void OnIncomingCapturedFrame(const uint8* data,
- int length,
- base::Time timestamp,
- int rotation, // Clockwise.
- bool flip_vert,
- bool flip_horiz) = 0;
-
- // Captured a new video frame, held in a VideoFrame container.
- //
- // If |frame| was created via the ReserveOutputBuffer() mechanism, then the
- // frame delivery is guaranteed (it will not be silently dropped), and
- // delivery will require no additional copies in the browser process. For
- // such frames, the VideoCaptureDevice's reservation on the output buffer
- // ends immediately. The VideoCaptureDevice may not read or write the
- // underlying memory afterwards, and it should release its references to
- // |frame| as soon as possible, to allow buffer reuse.
+ virtual void OnIncomingCapturedFrame(
+ const uint8* data,
+ int length,
+ base::Time timestamp,
+ int rotation, // Clockwise.
+ const VideoCaptureFormat& frame_format) = 0;
+
+ // Captured a new video frame, held in |buffer|.
//
- // If |frame| was NOT created via ReserveOutputBuffer(), then this method
- // will try to reserve an output buffer and copy from |frame| into the
- // output buffer. If no output buffer is available, the frame will be
- // silently dropped. |frame| must be allocated as RGB32, YV12 or I420, and
- // the size must match that specified by an earlier call to OnFrameInfo().
- virtual void OnIncomingCapturedVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame,
- base::Time timestamp) = 0;
+ // As the frame is backed by a reservation returned by
+ // ReserveOutputBuffer(), delivery is guaranteed and will require no
+ // additional copies in the browser process. |dimensions| indicates the
+ // frame width and height of the buffer contents; this is assumed to be of
+ // |format| format and tightly packed.
+ virtual void OnIncomingCapturedBuffer(const scoped_refptr<Buffer>& buffer,
+ media::VideoFrame::Format format,
+ const gfx::Size& dimensions,
+ base::Time timestamp,
+ int frame_rate) = 0;
// An error has occurred that cannot be handled and VideoCaptureDevice must
// be StopAndDeAllocate()-ed.
virtual void OnError() = 0;
-
- // Called when VideoCaptureDevice::AllocateAndStart() has been called to
- // inform of the resulting frame size.
- virtual void OnFrameInfo(const VideoCaptureCapability& info) = 0;
-
- // Called when the native resolution of VideoCaptureDevice has been changed
- // and it needs to inform its client of the new frame size.
- virtual void OnFrameInfoChanged(const VideoCaptureCapability& info) {};
};
+
// Creates a VideoCaptureDevice object.
// Return NULL if the hardware is not available.
static VideoCaptureDevice* Create(const Name& device_name);
@@ -190,13 +192,18 @@ class MEDIA_EXPORT VideoCaptureDevice {
// Gets the names of all video capture devices connected to this computer.
static void GetDeviceNames(Names* device_names);
- // Prepare the camera for use. After this function has been called no other
- // applications can use the camera. On completion EventHandler::OnFrameInfo()
- // is called informing of the resulting resolution and frame rate.
- // StopAndDeAllocate() must be called before the object is deleted.
- virtual void AllocateAndStart(
- const VideoCaptureCapability& capture_format,
- scoped_ptr<EventHandler> client) = 0;
+ // Gets the supported formats of a particular device attached to the system.
+ // This method should be called before allocating or starting a device. In
+ // case format enumeration is not supported, or there was a problem, the
+ // formats array will be empty.
+ static void GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* supported_formats);
+
+ // Prepares the camera for use. After this function has been called no other
+ // applications can use the camera. StopAndDeAllocate() must be called before
+ // the object is deleted.
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) = 0;
// Deallocates the camera, possibly asynchronously.
//
@@ -212,58 +219,6 @@ class MEDIA_EXPORT VideoCaptureDevice {
virtual void StopAndDeAllocate() = 0;
};
-// VideoCaptureDevice1 is a bridge to an older API against which
-// VideoCaptureDevices were implemented. Differences between VideoCaptureDevice
-// (new style) and VideoCaptureDevice1 (old style) are as follows:
-//
-// [1] The Stop+DeAllocate calls are merged in the new style.
-// [2] The Allocate+Start calls are merged in the new style.
-// [3] New style devices own their EventHandler* pointers, allowing handlers to
-// remain valid even after the device is stopped. Whereas old style devices
-// may not dereference their handlers after DeAllocate().
-// [4] device_name() is eliminated from the new-style interface.
-//
-// TODO(nick): Remove this bridge class. It exists to enable incremental
-// migration to an alternative VideoCaptureDevice API.
-class MEDIA_EXPORT VideoCaptureDevice1 : public VideoCaptureDevice {
- public:
- VideoCaptureDevice1();
- virtual ~VideoCaptureDevice1();
-
- // VideoCaptureDevice implementation.
- virtual void AllocateAndStart(
- const VideoCaptureCapability& capture_format,
- scoped_ptr<EventHandler> client) OVERRIDE;
- virtual void StopAndDeAllocate() OVERRIDE;
-
- // Prepare the camera for use. After this function has been called no other
- // applications can use the camera. On completion EventHandler::OnFrameInfo()
- // is called informing of the resulting resolution and frame rate.
- // DeAllocate() must be called before this function can be called again and
- // before the object is deleted.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- EventHandler* client) = 0;
-
- // Start capturing video frames. Allocate must be called before this function.
- virtual void Start() = 0;
-
- // Stop capturing video frames.
- virtual void Stop() = 0;
-
- // Deallocates the camera. This means other applications can use it. After
- // this function has been called the capture device is reset to the state it
- // was when created. After DeAllocate() is called, the VideoCaptureDevice is
- // not permitted to make any additional calls to its EventHandler.
- virtual void DeAllocate() = 0;
-
- // Get the name of the capture device.
- virtual const Name& device_name() = 0;
-
- private:
- // The device client which proxies device events to the controller.
- scoped_ptr<EventHandler> client_;
-};
-
} // namespace media
#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_H_
diff --git a/chromium/media/video/capture/video_capture_device_unittest.cc b/chromium/media/video/capture/video_capture_device_unittest.cc
index 586060f169f..5e05ad4b4b2 100644
--- a/chromium/media/video/capture/video_capture_device_unittest.cc
+++ b/chromium/media/video/capture/video_capture_device_unittest.cc
@@ -58,70 +58,87 @@ using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Return;
using ::testing::AtLeast;
-using ::testing::SaveArg;
namespace media {
-class MockFrameObserver : public media::VideoCaptureDevice::EventHandler {
+class MockClient : public media::VideoCaptureDevice::Client {
public:
- MOCK_METHOD0(ReserveOutputBuffer, scoped_refptr<media::VideoFrame>());
+ MOCK_METHOD2(ReserveOutputBuffer,
+ scoped_refptr<Buffer>(media::VideoFrame::Format format,
+ const gfx::Size& dimensions));
MOCK_METHOD0(OnErr, void());
- MOCK_METHOD1(OnFrameInfo, void(const VideoCaptureCapability&));
- MOCK_METHOD1(OnFrameInfoChanged, void(const VideoCaptureCapability&));
- explicit MockFrameObserver(
- base::Closure frame_cb)
- : main_thread_(base::MessageLoopProxy::current()),
- frame_cb_(frame_cb) {}
+ explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
+ : main_thread_(base::MessageLoopProxy::current()), frame_cb_(frame_cb) {}
virtual void OnError() OVERRIDE {
OnErr();
}
- virtual void OnIncomingCapturedFrame(
- const uint8* data,
- int length,
- base::Time timestamp,
- int rotation,
- bool flip_vert,
- bool flip_horiz) OVERRIDE {
- main_thread_->PostTask(FROM_HERE, frame_cb_);
+ virtual void OnIncomingCapturedFrame(const uint8* data,
+ int length,
+ base::Time timestamp,
+ int rotation,
+ const VideoCaptureFormat& format)
+ OVERRIDE {
+ main_thread_->PostTask(FROM_HERE, base::Bind(frame_cb_, format));
}
- virtual void OnIncomingCapturedVideoFrame(
- const scoped_refptr<media::VideoFrame>& frame,
- base::Time timestamp) OVERRIDE {
- main_thread_->PostTask(FROM_HERE, frame_cb_);
+ virtual void OnIncomingCapturedBuffer(const scoped_refptr<Buffer>& buffer,
+ media::VideoFrame::Format format,
+ const gfx::Size& dimensions,
+ base::Time timestamp,
+ int frame_rate) OVERRIDE {
+ NOTREACHED();
}
private:
scoped_refptr<base::MessageLoopProxy> main_thread_;
- base::Closure frame_cb_;
+ base::Callback<void(const VideoCaptureFormat&)> frame_cb_;
};
class VideoCaptureDeviceTest : public testing::Test {
protected:
- typedef media::VideoCaptureDevice::EventHandler EventHandler;
+ typedef media::VideoCaptureDevice::Client Client;
+
+ VideoCaptureDeviceTest()
+ : loop_(new base::MessageLoop()),
+ client_(
+ new MockClient(base::Bind(&VideoCaptureDeviceTest::OnFrameCaptured,
+ base::Unretained(this)))) {}
virtual void SetUp() {
- loop_.reset(new base::MessageLoopForUI());
- frame_observer_.reset(new MockFrameObserver(loop_->QuitClosure()));
#if defined(OS_ANDROID)
media::VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(
base::android::AttachCurrentThread());
#endif
}
+ void ResetWithNewClient() {
+ client_.reset(new MockClient(base::Bind(
+ &VideoCaptureDeviceTest::OnFrameCaptured, base::Unretained(this))));
+ }
+
+ void OnFrameCaptured(const VideoCaptureFormat& format) {
+ last_format_ = format;
+ run_loop_->QuitClosure().Run();
+ }
+
void WaitForCapturedFrame() {
- loop_->Run();
+ run_loop_.reset(new base::RunLoop());
+ run_loop_->Run();
}
+ const VideoCaptureFormat& last_format() const { return last_format_; }
+
#if defined(OS_WIN)
base::win::ScopedCOMInitializer initialize_com_;
#endif
- scoped_ptr<MockFrameObserver> frame_observer_;
VideoCaptureDevice::Names names_;
scoped_ptr<base::MessageLoop> loop_;
+ scoped_ptr<base::RunLoop> run_loop_;
+ scoped_ptr<MockClient> client_;
+ VideoCaptureFormat last_format_;
};
TEST_F(VideoCaptureDeviceTest, OpenInvalidDevice) {
@@ -149,27 +166,20 @@ TEST_F(VideoCaptureDeviceTest, CaptureVGA) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_FALSE(device.get() == NULL);
DVLOG(1) << names_.front().id();
- // Get info about the new resolution.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(1).WillOnce(SaveArg<0>(&rx_capability));
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- VideoCaptureCapability capture_format(640,
- 480,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
// Get captured video frames.
- loop_->Run();
- EXPECT_EQ(rx_capability.width, 640);
- EXPECT_EQ(rx_capability.height, 480);
+ WaitForCapturedFrame();
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
device->StopAndDeAllocate();
}
@@ -184,24 +194,15 @@ TEST_F(VideoCaptureDeviceTest, Capture720p) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_FALSE(device.get() == NULL);
- // Get info about the new resolution.
- // We don't care about the resulting resolution or frame rate as it might
- // be different from one machine to the next.
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(1);
-
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- VideoCaptureCapability capture_format(1280,
- 720,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(1280, 720);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
// Get captured video frames.
WaitForCapturedFrame();
device->StopAndDeAllocate();
@@ -217,26 +218,19 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_TRUE(device.get() != NULL);
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- // Get info about the new resolution.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(AtLeast(1)).WillOnce(SaveArg<0>(&rx_capability));
-
- VideoCaptureCapability capture_format(637,
- 472,
- 35,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(637, 472);
+ capture_params.requested_format.frame_rate = 35;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
+ WaitForCapturedFrame();
device->StopAndDeAllocate();
- EXPECT_EQ(rx_capability.width, 640);
- EXPECT_EQ(rx_capability.height, 480);
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
}
TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
@@ -248,8 +242,7 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
// First, do a number of very fast device start/stops.
for (int i = 0; i <= 5; i++) {
- scoped_ptr<MockFrameObserver> frame_observer(
- new MockFrameObserver(base::Bind(&base::DoNothing)));
+ ResetWithNewClient();
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
gfx::Size resolution;
@@ -258,54 +251,32 @@ TEST_F(VideoCaptureDeviceTest, ReAllocateCamera) {
} else {
resolution = gfx::Size(1280, 1024);
}
- VideoCaptureCapability requested_format(
- resolution.width(),
- resolution.height(),
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
-
- // The device (if it is an async implementation) may or may not get as far
- // as the OnFrameInfo() step; we're intentionally not going to wait for it
- // to get that far.
- ON_CALL(*frame_observer, OnFrameInfo(_));
- device->AllocateAndStart(requested_format,
- frame_observer.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size = resolution;
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
device->StopAndDeAllocate();
}
// Finally, do a device start and wait for it to finish.
- gfx::Size resolution;
- VideoCaptureCapability requested_format(
- 320,
- 240,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
-
- base::RunLoop run_loop;
- scoped_ptr<MockFrameObserver> frame_observer(
- new MockFrameObserver(base::Bind(run_loop.QuitClosure())));
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(320, 240);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+
+ ResetWithNewClient();
scoped_ptr<VideoCaptureDevice> device(
VideoCaptureDevice::Create(names_.front()));
- // The device (if it is an async implementation) may or may not get as far
- // as the OnFrameInfo() step; we're intentionally not going to wait for it
- // to get that far.
- VideoCaptureCapability final_format;
- EXPECT_CALL(*frame_observer, OnFrameInfo(_))
- .Times(1).WillOnce(SaveArg<0>(&final_format));
- device->AllocateAndStart(requested_format,
- frame_observer.PassAs<EventHandler>());
- run_loop.Run(); // Waits for a frame.
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
+ WaitForCapturedFrame();
device->StopAndDeAllocate();
device.reset();
- EXPECT_EQ(final_format.width, 320);
- EXPECT_EQ(final_format.height, 240);
+ EXPECT_EQ(last_format().frame_size.width(), 320);
+ EXPECT_EQ(last_format().frame_size.height(), 240);
}
TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
@@ -318,27 +289,20 @@ TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_TRUE(device.get() != NULL);
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- // Get info about the new resolution.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .WillOnce(SaveArg<0>(&rx_capability));
-
- VideoCaptureCapability capture_format(640,
- 480,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
// Get captured video frames.
WaitForCapturedFrame();
- EXPECT_EQ(rx_capability.width, 640);
- EXPECT_EQ(rx_capability.height, 480);
- EXPECT_EQ(rx_capability.frame_rate, 30);
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
+ EXPECT_EQ(last_format().frame_rate, 30);
device->StopAndDeAllocate();
}
@@ -353,27 +317,19 @@ TEST_F(VideoCaptureDeviceTest, FakeCapture) {
FakeVideoCaptureDevice::Create(names.front()));
ASSERT_TRUE(device.get() != NULL);
- // Get info about the new resolution.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(1).WillOnce(SaveArg<0>(&rx_capability));
-
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- VideoCaptureCapability capture_format(640,
- 480,
- 30,
- PIXEL_FORMAT_I420,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
WaitForCapturedFrame();
- EXPECT_EQ(rx_capability.width, 640);
- EXPECT_EQ(rx_capability.height, 480);
- EXPECT_EQ(rx_capability.frame_rate, 30);
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
+ EXPECT_EQ(last_format().frame_rate, 30);
device->StopAndDeAllocate();
}
@@ -388,38 +344,48 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
VideoCaptureDevice::Create(names_.front()));
ASSERT_TRUE(device.get() != NULL);
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
- // Verify we get MJPEG from the device. Not all devices can capture 1280x720
- // @ 30 fps, so we don't care about the exact resolution we get.
- VideoCaptureCapability rx_capability;
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .WillOnce(SaveArg<0>(&rx_capability));
-
- VideoCaptureCapability capture_format(1280,
- 720,
- 30,
- PIXEL_FORMAT_MJPEG,
- 0,
- false,
- ConstantResolutionVideoCaptureDevice);
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(1280, 720);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_MJPEG;
+ capture_params.allow_resolution_change = false;
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
// Get captured video frames.
WaitForCapturedFrame();
- EXPECT_EQ(rx_capability.color, PIXEL_FORMAT_MJPEG);
+ // Verify we get MJPEG from the device. Not all devices can capture 1280x720
+ // @ 30 fps, so we don't care about the exact resolution we get.
+ EXPECT_EQ(last_format().pixel_format, PIXEL_FORMAT_MJPEG);
device->StopAndDeAllocate();
}
+TEST_F(VideoCaptureDeviceTest, GetDeviceSupportedFormats) {
+ VideoCaptureDevice::GetDeviceNames(&names_);
+ if (!names_.size()) {
+ DVLOG(1) << "No camera available. Exiting test.";
+ return;
+ }
+ VideoCaptureFormats supported_formats;
+ VideoCaptureDevice::Names::iterator names_iterator;
+ for (names_iterator = names_.begin(); names_iterator != names_.end();
+ ++names_iterator) {
+ VideoCaptureDevice::GetDeviceSupportedFormats(*names_iterator,
+ &supported_formats);
+ // Nothing to test here since we cannot forecast the hardware capabilities.
+ }
+}
+
TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
VideoCaptureDevice::Names names;
FakeVideoCaptureDevice::GetDeviceNames(&names);
- media::VideoCaptureCapability capture_format;
- capture_format.width = 640;
- capture_format.height = 480;
- capture_format.frame_rate = 30;
- capture_format.frame_size_type = media::VariableResolutionVideoCaptureDevice;
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.allow_resolution_change = true;
ASSERT_GT(static_cast<int>(names.size()), 0);
@@ -427,21 +393,12 @@ TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
FakeVideoCaptureDevice::Create(names.front()));
ASSERT_TRUE(device.get() != NULL);
- // Get info about the new resolution.
- EXPECT_CALL(*frame_observer_, OnFrameInfo(_))
- .Times(1);
-
- EXPECT_CALL(*frame_observer_, OnErr())
+ EXPECT_CALL(*client_, OnErr())
.Times(0);
int action_count = 200;
- EXPECT_CALL(*frame_observer_, OnFrameInfoChanged(_))
- .Times(AtLeast(action_count / 30));
- device->AllocateAndStart(capture_format,
- frame_observer_.PassAs<EventHandler>());
+ device->AllocateAndStart(capture_params, client_.PassAs<Client>());
- // The amount of times the OnFrameInfoChanged gets called depends on how often
- // FakeDevice is supposed to change and what is its actual frame rate.
// We set TimeWait to 200 action timeouts and this should be enough for at
// least action_count/kFakeCaptureCapabilityChangePeriod calls.
for (int i = 0; i < action_count; ++i) {
@@ -450,4 +407,27 @@ TEST_F(VideoCaptureDeviceTest, FakeCaptureVariableResolution) {
device->StopAndDeAllocate();
}
+TEST_F(VideoCaptureDeviceTest, FakeGetDeviceSupportedFormats) {
+ VideoCaptureDevice::Names names;
+ FakeVideoCaptureDevice::GetDeviceNames(&names);
+
+ VideoCaptureFormats supported_formats;
+ VideoCaptureDevice::Names::iterator names_iterator;
+
+ for (names_iterator = names.begin(); names_iterator != names.end();
+ ++names_iterator) {
+ FakeVideoCaptureDevice::GetDeviceSupportedFormats(*names_iterator,
+ &supported_formats);
+ EXPECT_EQ(supported_formats.size(), 2u);
+ EXPECT_EQ(supported_formats[0].frame_size.width(), 640);
+ EXPECT_EQ(supported_formats[0].frame_size.height(), 480);
+ EXPECT_EQ(supported_formats[0].pixel_format, media::PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[0].frame_rate, 20);
+ EXPECT_EQ(supported_formats[1].frame_size.width(), 320);
+ EXPECT_EQ(supported_formats[1].frame_size.height(), 240);
+ EXPECT_EQ(supported_formats[1].pixel_format, media::PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[1].frame_rate, 20);
+ }
+}
+
}; // namespace media
diff --git a/chromium/media/video/capture/video_capture_proxy.cc b/chromium/media/video/capture/video_capture_proxy.cc
index 3adbb7ce3b2..d488c50fe02 100644
--- a/chromium/media/video/capture/video_capture_proxy.cc
+++ b/chromium/media/video/capture/video_capture_proxy.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
+#include "media/base/video_frame.h"
namespace {
@@ -16,8 +17,6 @@ media::VideoCaptureHandlerProxy::VideoCaptureState GetState(
media::VideoCapture* capture) {
media::VideoCaptureHandlerProxy::VideoCaptureState state;
state.started = capture->CaptureStarted();
- state.width = capture->CaptureWidth();
- state.height = capture->CaptureHeight();
state.frame_rate = capture->CaptureFrameRate();
return state;
}
@@ -89,17 +88,6 @@ void VideoCaptureHandlerProxy::OnFrameReady(
frame));
}
-void VideoCaptureHandlerProxy::OnDeviceInfoReceived(
- VideoCapture* capture,
- const VideoCaptureParams& device_info) {
- main_message_loop_->PostTask(FROM_HERE, base::Bind(
- &VideoCaptureHandlerProxy::OnDeviceInfoReceivedOnMainThread,
- base::Unretained(this),
- capture,
- GetState(capture),
- device_info));
-}
-
void VideoCaptureHandlerProxy::OnStartedOnMainThread(
VideoCapture* capture,
const VideoCaptureState& state) {
@@ -144,12 +132,4 @@ void VideoCaptureHandlerProxy::OnFrameReadyOnMainThread(
proxied_->OnFrameReady(capture, frame);
}
-void VideoCaptureHandlerProxy::OnDeviceInfoReceivedOnMainThread(
- VideoCapture* capture,
- const VideoCaptureState& state,
- const VideoCaptureParams& device_info) {
- state_ = state;
- proxied_->OnDeviceInfoReceived(capture, device_info);
-}
-
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_proxy.h b/chromium/media/video/capture/video_capture_proxy.h
index fbb75776abe..fca0a80add7 100644
--- a/chromium/media/video/capture/video_capture_proxy.h
+++ b/chromium/media/video/capture/video_capture_proxy.h
@@ -28,10 +28,8 @@ class MEDIA_EXPORT VideoCaptureHandlerProxy
: public VideoCapture::EventHandler {
public:
struct VideoCaptureState {
- VideoCaptureState() : started(false), width(0), height(0), frame_rate(0) {}
+ VideoCaptureState() : started(false), frame_rate(0) {}
bool started;
- int width;
- int height;
int frame_rate;
};
@@ -52,9 +50,6 @@ class MEDIA_EXPORT VideoCaptureHandlerProxy
virtual void OnRemoved(VideoCapture* capture) OVERRIDE;
virtual void OnFrameReady(VideoCapture* capture,
const scoped_refptr<VideoFrame>& frame) OVERRIDE;
- virtual void OnDeviceInfoReceived(
- VideoCapture* capture,
- const VideoCaptureParams& device_info) OVERRIDE;
private:
// Called on main thread.
@@ -77,9 +72,6 @@ class MEDIA_EXPORT VideoCaptureHandlerProxy
void OnFrameReadyOnMainThread(VideoCapture* capture,
const VideoCaptureState& state,
const scoped_refptr<VideoFrame>& frame);
- void OnDeviceInfoReceivedOnMainThread(VideoCapture* capture,
- const VideoCaptureState& state,
- const VideoCaptureParams& device_info);
// Only accessed from main thread.
VideoCapture::EventHandler* proxied_;
diff --git a/chromium/media/video/capture/video_capture_types.cc b/chromium/media/video/capture/video_capture_types.cc
index 5b8e2265360..aee3865a57b 100644
--- a/chromium/media/video/capture/video_capture_types.cc
+++ b/chromium/media/video/capture/video_capture_types.cc
@@ -9,52 +9,26 @@
namespace media {
VideoCaptureFormat::VideoCaptureFormat()
- : width(0),
- height(0),
- frame_rate(0),
- frame_size_type(ConstantResolutionVideoCaptureDevice) {}
-
-VideoCaptureFormat::VideoCaptureFormat(
- int width,
- int height,
- int frame_rate,
- VideoCaptureResolutionType frame_size_type)
- : width(width),
- height(height),
+ : frame_rate(0), pixel_format(PIXEL_FORMAT_UNKNOWN) {}
+
+VideoCaptureFormat::VideoCaptureFormat(const gfx::Size& frame_size,
+ int frame_rate,
+ VideoPixelFormat pixel_format)
+ : frame_size(frame_size),
frame_rate(frame_rate),
- frame_size_type(frame_size_type) {}
+ pixel_format(pixel_format) {}
bool VideoCaptureFormat::IsValid() const {
- return (width > 0) && (height > 0) && (frame_rate > 0) &&
+ return (frame_size.width() < media::limits::kMaxDimension) &&
+ (frame_size.height() < media::limits::kMaxDimension) &&
+ (frame_size.GetArea() > 0) &&
+ (frame_size.GetArea() < media::limits::kMaxCanvas) &&
+ (frame_rate > 0) &&
(frame_rate < media::limits::kMaxFramesPerSecond) &&
- (width < media::limits::kMaxDimension) &&
- (height < media::limits::kMaxDimension) &&
- (width * height < media::limits::kMaxCanvas) &&
- (frame_size_type >= 0) &&
- (frame_size_type < media::MaxVideoCaptureResolutionType);
+ (pixel_format >= PIXEL_FORMAT_UNKNOWN) &&
+ (pixel_format < PIXEL_FORMAT_MAX);
}
-VideoCaptureParams::VideoCaptureParams()
- : session_id(0) {}
-
-VideoCaptureCapability::VideoCaptureCapability()
- : color(PIXEL_FORMAT_UNKNOWN),
- expected_capture_delay(0),
- interlaced(false),
- session_id(0) {}
-
-VideoCaptureCapability::VideoCaptureCapability(
- int width,
- int height,
- int frame_rate,
- VideoPixelFormat color,
- int delay,
- bool interlaced,
- VideoCaptureResolutionType frame_size_type)
- : VideoCaptureFormat(width, height, frame_rate, frame_size_type),
- color(color),
- expected_capture_delay(delay),
- interlaced(interlaced),
- session_id(0) {}
+VideoCaptureParams::VideoCaptureParams() : allow_resolution_change(false) {}
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_types.h b/chromium/media/video/capture/video_capture_types.h
index 1a170aaf5e7..6a4f453280b 100644
--- a/chromium/media/video/capture/video_capture_types.h
+++ b/chromium/media/video/capture/video_capture_types.h
@@ -5,7 +5,10 @@
#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_TYPES_H_
#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_TYPES_H_
-#include "media/base/video_frame.h"
+#include <vector>
+
+#include "media/base/media_export.h"
+#include "ui/gfx/size.h"
namespace media {
@@ -13,12 +16,6 @@ namespace media {
// shared with device manager.
typedef int VideoCaptureSessionId;
-enum VideoCaptureResolutionType {
- ConstantResolutionVideoCaptureDevice = 0,
- VariableResolutionVideoCaptureDevice,
- MaxVideoCaptureResolutionType, // Must be last.
-};
-
// Color formats from camera.
enum VideoPixelFormat {
PIXEL_FORMAT_UNKNOWN, // Color format not set.
@@ -30,51 +27,44 @@ enum VideoPixelFormat {
PIXEL_FORMAT_MJPEG,
PIXEL_FORMAT_NV21,
PIXEL_FORMAT_YV12,
+ PIXEL_FORMAT_MAX,
};
// Video capture format specification.
+// This class is used by the video capture device to specify the format of every
+// frame captured and returned to a client. It is also used to specify a
+// supported capture format by a device.
class MEDIA_EXPORT VideoCaptureFormat {
public:
VideoCaptureFormat();
- VideoCaptureFormat(int width,
- int height,
+ VideoCaptureFormat(const gfx::Size& frame_size,
int frame_rate,
- VideoCaptureResolutionType frame_size_type);
+ VideoPixelFormat pixel_format);
// Checks that all values are in the expected range. All limits are specified
// in media::Limits.
bool IsValid() const;
- int width;
- int height;
+ gfx::Size frame_size;
int frame_rate;
- VideoCaptureResolutionType frame_size_type;
+ VideoPixelFormat pixel_format;
};
-// Parameters for starting video capture and device information.
-class MEDIA_EXPORT VideoCaptureParams : public VideoCaptureFormat {
+typedef std::vector<VideoCaptureFormat> VideoCaptureFormats;
+
+// Parameters for starting video capture.
+// This class is used by the client of a video capture device to specify the
+// format of frames in which the client would like to have captured frames
+// returned.
+class MEDIA_EXPORT VideoCaptureParams {
public:
VideoCaptureParams();
- VideoCaptureSessionId session_id;
-};
-
-// Capabilities describe the format a camera capture video in.
-class MEDIA_EXPORT VideoCaptureCapability : public VideoCaptureFormat {
- public:
- VideoCaptureCapability();
- VideoCaptureCapability(int width,
- int height,
- int frame_rate,
- VideoPixelFormat color,
- int delay,
- bool interlaced,
- VideoCaptureResolutionType frame_size_type);
+ // Requests a resolution and format at which the capture will occur.
+ VideoCaptureFormat requested_format;
- VideoPixelFormat color; // Desired video type.
- int expected_capture_delay; // Expected delay in millisecond.
- bool interlaced; // Need interlace format.
- VideoCaptureSessionId session_id;
+ // Allow mid-capture resolution change.
+ bool allow_resolution_change;
};
} // namespace media
diff --git a/chromium/media/video/capture/win/capability_list_win.cc b/chromium/media/video/capture/win/capability_list_win.cc
index 18325bb6398..bfa58edcc4b 100644
--- a/chromium/media/video/capture/win/capability_list_win.cc
+++ b/chromium/media/video/capture/win/capability_list_win.cc
@@ -33,7 +33,8 @@ bool CompareFrameRate(const ResolutionDiff& item1,
}
bool CompareColor(const ResolutionDiff& item1, const ResolutionDiff& item2) {
- return item1.capability->color < item2.capability->color;
+ return item1.capability->supported_format.pixel_format <
+ item2.capability->supported_format.pixel_format;
}
} // namespace.
@@ -50,7 +51,7 @@ void CapabilityList::Add(const VideoCaptureCapabilityWin& capability) {
capabilities_.push_back(capability);
}
-const VideoCaptureCapabilityWin& CapabilityList::GetBestMatchedCapability(
+const VideoCaptureCapabilityWin& CapabilityList::GetBestMatchedFormat(
int requested_width,
int requested_height,
int requested_frame_rate) const {
@@ -65,8 +66,9 @@ const VideoCaptureCapabilityWin& CapabilityList::GetBestMatchedCapability(
it != capabilities_.end(); ++it) {
ResolutionDiff diff;
diff.capability = &(*it);
- diff.diff_width = it->width - requested_width;
- diff.diff_height = it->height - requested_height;
+ diff.diff_width = it->supported_format.frame_size.width() - requested_width;
+ diff.diff_height =
+ it->supported_format.frame_size.height() - requested_height;
// The 1000 allows using integer arithmetic for f.i. 29.971 fps.
diff.diff_frame_rate =
1000 * ((static_cast<float>(it->frame_rate_numerator) /
diff --git a/chromium/media/video/capture/win/capability_list_win.h b/chromium/media/video/capture/win/capability_list_win.h
index c07b220b0d5..bf1e8d6ee89 100644
--- a/chromium/media/video/capture/win/capability_list_win.h
+++ b/chromium/media/video/capture/win/capability_list_win.h
@@ -11,12 +11,13 @@
#include <list>
+#include "base/basictypes.h"
#include "base/threading/non_thread_safe.h"
#include "media/video/capture/video_capture_types.h"
namespace media {
-struct VideoCaptureCapabilityWin : public VideoCaptureCapability {
+struct VideoCaptureCapabilityWin {
explicit VideoCaptureCapabilityWin(int index)
: stream_index(index),
frame_rate_numerator(0),
@@ -26,6 +27,7 @@ struct VideoCaptureCapabilityWin : public VideoCaptureCapability {
// so framerates can be properly represented, f.i. 29.971fps= 30000/1001.
int frame_rate_numerator;
int frame_rate_denominator;
+ VideoCaptureFormat supported_format;
};
class CapabilityList : public base::NonThreadSafe {
@@ -41,8 +43,9 @@ class CapabilityList : public base::NonThreadSafe {
// Loops through the list of capabilities and returns an index of the best
// matching capability. The algorithm prioritizes height, width, frame rate
// and color format in that order.
- const VideoCaptureCapabilityWin& GetBestMatchedCapability(
- int requested_width, int requested_height,
+ const VideoCaptureCapabilityWin& GetBestMatchedFormat(
+ int requested_width,
+ int requested_height,
int requested_frame_rate) const;
private:
diff --git a/chromium/media/video/capture/win/sink_filter_win.cc b/chromium/media/video/capture/win/sink_filter_win.cc
index c3fc410dd7f..e3bb0a58564 100644
--- a/chromium/media/video/capture/win/sink_filter_win.cc
+++ b/chromium/media/video/capture/win/sink_filter_win.cc
@@ -28,13 +28,12 @@ SinkFilter::~SinkFilter() {
input_pin_->SetOwner(NULL);
}
-void SinkFilter::SetRequestedMediaCapability(
- const VideoCaptureCapability& capability) {
- input_pin_->SetRequestedMediaCapability(capability);
+void SinkFilter::SetRequestedMediaFormat(const VideoCaptureFormat& format) {
+ input_pin_->SetRequestedMediaFormat(format);
}
-const VideoCaptureCapability& SinkFilter::ResultingCapability() {
- return input_pin_->ResultingCapability();
+const VideoCaptureFormat& SinkFilter::ResultingFormat() {
+ return input_pin_->ResultingFormat();
}
size_t SinkFilter::NoOfPins() {
diff --git a/chromium/media/video/capture/win/sink_filter_win.h b/chromium/media/video/capture/win/sink_filter_win.h
index 36bb124cc9d..e454f0b984b 100644
--- a/chromium/media/video/capture/win/sink_filter_win.h
+++ b/chromium/media/video/capture/win/sink_filter_win.h
@@ -32,11 +32,10 @@ class __declspec(uuid("88cdbbdc-a73b-4afa-acbf-15d5e2ce12c3"))
explicit SinkFilter(SinkFilterObserver* observer);
virtual ~SinkFilter();
- void SetRequestedMediaCapability(
- const VideoCaptureCapability& capability);
- // Returns the capability that is negotiated when this
+ void SetRequestedMediaFormat(const VideoCaptureFormat& format);
+ // Returns the format that is negotiated when this
// filter is connected to a media filter.
- const VideoCaptureCapability& ResultingCapability();
+ const VideoCaptureFormat& ResultingFormat();
// Implement FilterBase.
virtual size_t NoOfPins();
diff --git a/chromium/media/video/capture/win/sink_input_pin_win.cc b/chromium/media/video/capture/win/sink_input_pin_win.cc
index 1de1ea1671a..0126e13db8f 100644
--- a/chromium/media/video/capture/win/sink_input_pin_win.cc
+++ b/chromium/media/video/capture/win/sink_input_pin_win.cc
@@ -20,8 +20,6 @@ SinkInputPin::SinkInputPin(IBaseFilter* filter,
SinkFilterObserver* observer)
: observer_(observer),
PinBase(filter) {
- memset(&requested_capability_, 0, sizeof(requested_capability_));
- memset(&resulting_capability_, 0, sizeof(resulting_capability_));
}
SinkInputPin::~SinkInputPin() {}
@@ -38,9 +36,9 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
pvi->bmiHeader.biPlanes = 1;
pvi->bmiHeader.biClrImportant = 0;
pvi->bmiHeader.biClrUsed = 0;
- if (requested_capability_.frame_rate > 0) {
- pvi->AvgTimePerFrame = kSecondsToReferenceTime /
- requested_capability_.frame_rate;
+ if (requested_format_.frame_rate > 0) {
+ pvi->AvgTimePerFrame =
+ kSecondsToReferenceTime / requested_format_.frame_rate;
}
media_type->majortype = MEDIATYPE_Video;
@@ -51,30 +49,28 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
case 0: {
pvi->bmiHeader.biCompression = MAKEFOURCC('I', '4', '2', '0');
pvi->bmiHeader.biBitCount = 12; // bit per pixel
- pvi->bmiHeader.biWidth = requested_capability_.width;
- pvi->bmiHeader.biHeight = requested_capability_.height;
- pvi->bmiHeader.biSizeImage = 3 * requested_capability_.height *
- requested_capability_.width / 2;
+ pvi->bmiHeader.biWidth = requested_format_.frame_size.width();
+ pvi->bmiHeader.biHeight = requested_format_.frame_size.height();
+ pvi->bmiHeader.biSizeImage =
+ requested_format_.frame_size.GetArea() * 3 / 2;
media_type->subtype = kMediaSubTypeI420;
break;
}
case 1: {
pvi->bmiHeader.biCompression = MAKEFOURCC('Y', 'U', 'Y', '2');
pvi->bmiHeader.biBitCount = 16;
- pvi->bmiHeader.biWidth = requested_capability_.width;
- pvi->bmiHeader.biHeight = requested_capability_.height;
- pvi->bmiHeader.biSizeImage = 2 * requested_capability_.width *
- requested_capability_.height;
+ pvi->bmiHeader.biWidth = requested_format_.frame_size.width();
+ pvi->bmiHeader.biHeight = requested_format_.frame_size.height();
+ pvi->bmiHeader.biSizeImage = requested_format_.frame_size.GetArea() * 2;
media_type->subtype = MEDIASUBTYPE_YUY2;
break;
}
case 2: {
pvi->bmiHeader.biCompression = BI_RGB;
pvi->bmiHeader.biBitCount = 24;
- pvi->bmiHeader.biWidth = requested_capability_.width;
- pvi->bmiHeader.biHeight = requested_capability_.height;
- pvi->bmiHeader.biSizeImage = 3 * requested_capability_.height *
- requested_capability_.width;
+ pvi->bmiHeader.biWidth = requested_format_.frame_size.width();
+ pvi->bmiHeader.biHeight = requested_format_.frame_size.height();
+ pvi->bmiHeader.biSizeImage = requested_format_.frame_size.GetArea() * 3;
media_type->subtype = MEDIASUBTYPE_RGB24;
break;
}
@@ -104,27 +100,27 @@ bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
return false;
// Store the incoming width and height.
- resulting_capability_.width = pvi->bmiHeader.biWidth;
- resulting_capability_.height = abs(pvi->bmiHeader.biHeight);
+ resulting_format_.frame_size.SetSize(pvi->bmiHeader.biWidth,
+ abs(pvi->bmiHeader.biHeight));
if (pvi->AvgTimePerFrame > 0) {
- resulting_capability_.frame_rate =
+ resulting_format_.frame_rate =
static_cast<int>(kSecondsToReferenceTime / pvi->AvgTimePerFrame);
} else {
- resulting_capability_.frame_rate = requested_capability_.frame_rate;
+ resulting_format_.frame_rate = requested_format_.frame_rate;
}
if (sub_type == kMediaSubTypeI420 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
- resulting_capability_.color = PIXEL_FORMAT_I420;
+ resulting_format_.pixel_format = PIXEL_FORMAT_I420;
return true; // This format is acceptable.
}
if (sub_type == MEDIASUBTYPE_YUY2 &&
pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
- resulting_capability_.color = PIXEL_FORMAT_YUY2;
+ resulting_format_.pixel_format = PIXEL_FORMAT_YUY2;
return true; // This format is acceptable.
}
if (sub_type == MEDIASUBTYPE_RGB24 &&
pvi->bmiHeader.biCompression == BI_RGB) {
- resulting_capability_.color = PIXEL_FORMAT_RGB24;
+ resulting_format_.pixel_format = PIXEL_FORMAT_RGB24;
return true; // This format is acceptable.
}
return false;
@@ -140,19 +136,15 @@ HRESULT SinkInputPin::Receive(IMediaSample* sample) {
return S_OK;
}
-void SinkInputPin::SetRequestedMediaCapability(
- const VideoCaptureCapability& capability) {
- requested_capability_ = capability;
- resulting_capability_.width = 0;
- resulting_capability_.height = 0;
- resulting_capability_.frame_rate = 0;
- resulting_capability_.color = PIXEL_FORMAT_UNKNOWN;
- resulting_capability_.expected_capture_delay = 0;
- resulting_capability_.interlaced = false;
+void SinkInputPin::SetRequestedMediaFormat(const VideoCaptureFormat& format) {
+ requested_format_ = format;
+ resulting_format_.frame_size.SetSize(0, 0);
+ resulting_format_.frame_rate = 0;
+ resulting_format_.pixel_format = PIXEL_FORMAT_UNKNOWN;
}
-const VideoCaptureCapability& SinkInputPin::ResultingCapability() {
- return resulting_capability_;
+const VideoCaptureFormat& SinkInputPin::ResultingFormat() {
+ return resulting_format_;
}
} // namespace media
diff --git a/chromium/media/video/capture/win/sink_input_pin_win.h b/chromium/media/video/capture/win/sink_input_pin_win.h
index 16168a39a1e..f14ca33073c 100644
--- a/chromium/media/video/capture/win/sink_input_pin_win.h
+++ b/chromium/media/video/capture/win/sink_input_pin_win.h
@@ -24,10 +24,10 @@ class SinkInputPin : public PinBase {
SinkInputPin(IBaseFilter* filter, SinkFilterObserver* observer);
virtual ~SinkInputPin();
- void SetRequestedMediaCapability(const VideoCaptureCapability& capability);
+ void SetRequestedMediaFormat(const VideoCaptureFormat& format);
// Returns the capability that is negotiated when this
// pin is connected to a media filter.
- const VideoCaptureCapability& ResultingCapability();
+ const VideoCaptureFormat& ResultingFormat();
// Implement PinBase.
virtual bool IsMediaTypeValid(const AM_MEDIA_TYPE* media_type);
@@ -36,8 +36,8 @@ class SinkInputPin : public PinBase {
STDMETHOD(Receive)(IMediaSample* media_sample);
private:
- VideoCaptureCapability requested_capability_;
- VideoCaptureCapability resulting_capability_;
+ VideoCaptureFormat requested_format_;
+ VideoCaptureFormat resulting_format_;
SinkFilterObserver* observer_;
DISALLOW_IMPLICIT_CONSTRUCTORS(SinkInputPin);
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.cc b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
index 874408fb2cd..cc1e7505dbe 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.cc
@@ -94,12 +94,11 @@ bool FormatFromGuid(const GUID& guid, VideoPixelFormat* format) {
return false;
}
-bool GetFrameSize(IMFMediaType* type, int* width, int* height) {
+bool GetFrameSize(IMFMediaType* type, gfx::Size* frame_size) {
UINT32 width32, height32;
if (FAILED(MFGetAttributeSize(type, MF_MT_FRAME_SIZE, &width32, &height32)))
return false;
- *width = width32;
- *height = height32;
+ frame_size->SetSize(width32, height32);
return true;
}
@@ -121,20 +120,17 @@ bool FillCapabilitiesFromType(IMFMediaType* type,
VideoCaptureCapabilityWin* capability) {
GUID type_guid;
if (FAILED(type->GetGUID(MF_MT_SUBTYPE, &type_guid)) ||
- !FormatFromGuid(type_guid, &capability->color) ||
- !GetFrameSize(type, &capability->width, &capability->height) ||
+ !GetFrameSize(type, &capability->supported_format.frame_size) ||
!GetFrameRate(type,
&capability->frame_rate_numerator,
- &capability->frame_rate_denominator)) {
+ &capability->frame_rate_denominator) ||
+ !FormatFromGuid(type_guid, &capability->supported_format.pixel_format)) {
return false;
}
// Keep the integer version of the frame_rate for (potential) returns.
- capability->frame_rate =
+ capability->supported_format.frame_rate =
capability->frame_rate_numerator / capability->frame_rate_denominator;
- capability->expected_capture_delay = 0; // Currently not used.
- capability->interlaced = false; // Currently not used.
-
return true;
}
@@ -210,7 +206,7 @@ class MFReaderCallback
DWORD stream_flags, LONGLONG time_stamp, IMFSample* sample) {
base::Time stamp(base::Time::Now());
if (!sample) {
- observer_->OnIncomingCapturedFrame(NULL, 0, stamp, 0, false, false);
+ observer_->OnIncomingCapturedFrame(NULL, 0, stamp, 0);
return S_OK;
}
@@ -224,8 +220,7 @@ class MFReaderCallback
DWORD length = 0, max_length = 0;
BYTE* data = NULL;
buffer->Lock(&data, &max_length, &length);
- observer_->OnIncomingCapturedFrame(data, length, stamp,
- 0, false, false);
+ observer_->OnIncomingCapturedFrame(data, length, stamp, 0);
buffer->Unlock();
}
}
@@ -312,7 +307,7 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
}
VideoCaptureDeviceMFWin::VideoCaptureDeviceMFWin(const Name& device_name)
- : name_(device_name), observer_(NULL), capture_(0) {
+ : name_(device_name), capture_(0) {
DetachFromThread();
}
@@ -339,19 +334,14 @@ bool VideoCaptureDeviceMFWin::Init() {
reader_.Receive()));
}
-void VideoCaptureDeviceMFWin::Allocate(
- const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) {
+void VideoCaptureDeviceMFWin::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
DCHECK(CalledOnValidThread());
base::AutoLock lock(lock_);
- if (observer_) {
- DCHECK_EQ(observer, observer_);
- return;
- }
-
- observer_ = observer;
+ client_ = client.Pass();
DCHECK_EQ(capture_, false);
CapabilityList capabilities;
@@ -361,14 +351,11 @@ void VideoCaptureDeviceMFWin::Allocate(
return;
}
- const VideoCaptureCapabilityWin& found_capability =
- capabilities.GetBestMatchedCapability(capture_format.width,
- capture_format.height,
- capture_format.frame_rate);
- DLOG(INFO) << "Chosen capture format= (" << found_capability.width << "x"
- << found_capability.height << ")@("
- << found_capability.frame_rate_numerator << "/"
- << found_capability.frame_rate_denominator << ")fps";
+ VideoCaptureCapabilityWin found_capability =
+ capabilities.GetBestMatchedFormat(
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate);
ScopedComPtr<IMFMediaType> type;
if (FAILED(hr = reader_->GetNativeMediaType(
@@ -380,25 +367,16 @@ void VideoCaptureDeviceMFWin::Allocate(
return;
}
- observer_->OnFrameInfo(found_capability);
-}
-
-void VideoCaptureDeviceMFWin::Start() {
- DCHECK(CalledOnValidThread());
-
- base::AutoLock lock(lock_);
- if (!capture_) {
- capture_ = true;
- HRESULT hr;
- if (FAILED(hr = reader_->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0,
- NULL, NULL, NULL, NULL))) {
- OnError(hr);
- capture_ = false;
- }
+ if (FAILED(hr = reader_->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0,
+ NULL, NULL, NULL, NULL))) {
+ OnError(hr);
+ return;
}
+ capture_format_ = found_capability.supported_format;
+ capture_ = true;
}
-void VideoCaptureDeviceMFWin::Stop() {
+void VideoCaptureDeviceMFWin::StopAndDeAllocate() {
DCHECK(CalledOnValidThread());
base::WaitableEvent flushed(false, false);
const int kFlushTimeOutInMs = 1000;
@@ -412,9 +390,9 @@ void VideoCaptureDeviceMFWin::Stop() {
wait = SUCCEEDED(hr);
if (!wait) {
callback_->SetSignalOnFlush(NULL);
- OnError(hr);
}
}
+ client_.reset();
}
// If the device has been unplugged, the Flush() won't trigger the event
@@ -426,31 +404,18 @@ void VideoCaptureDeviceMFWin::Stop() {
flushed.TimedWait(base::TimeDelta::FromMilliseconds(kFlushTimeOutInMs));
}
-void VideoCaptureDeviceMFWin::DeAllocate() {
- DCHECK(CalledOnValidThread());
-
- Stop();
-
- base::AutoLock lock(lock_);
- observer_ = NULL;
-}
-
-const VideoCaptureDevice::Name& VideoCaptureDeviceMFWin::device_name() {
- DCHECK(CalledOnValidThread());
- return name_;
-}
-
void VideoCaptureDeviceMFWin::OnIncomingCapturedFrame(
const uint8* data,
int length,
const base::Time& time_stamp,
- int rotation,
- bool flip_vert,
- bool flip_horiz) {
+ int rotation) {
base::AutoLock lock(lock_);
- if (data && observer_)
- observer_->OnIncomingCapturedFrame(data, length, time_stamp,
- rotation, flip_vert, flip_horiz);
+ if (data && client_.get())
+ client_->OnIncomingCapturedFrame(data,
+ length,
+ time_stamp,
+ rotation,
+ capture_format_);
if (capture_) {
HRESULT hr = reader_->ReadSample(MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0,
@@ -468,8 +433,8 @@ void VideoCaptureDeviceMFWin::OnIncomingCapturedFrame(
void VideoCaptureDeviceMFWin::OnError(HRESULT hr) {
DLOG(ERROR) << "VideoCaptureDeviceMFWin: " << std::hex << hr;
- if (observer_)
- observer_->OnError();
+ if (client_.get())
+ client_->OnError();
}
} // namespace media
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.h b/chromium/media/video/capture/win/video_capture_device_mf_win.h
index 2daa03535dd..8f7fc75cf45 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_mf_win.h
@@ -28,7 +28,7 @@ class MFReaderCallback;
class MEDIA_EXPORT VideoCaptureDeviceMFWin
: public base::NonThreadSafe,
- public VideoCaptureDevice1 {
+ public VideoCaptureDevice {
public:
explicit VideoCaptureDeviceMFWin(const Name& device_name);
virtual ~VideoCaptureDeviceMFWin();
@@ -38,12 +38,10 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
bool Init();
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client)
+ OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
// Returns true iff the current platform supports the Media Foundation API
// and that the DLLs are available. On Vista this API is an optional download
@@ -59,9 +57,7 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
const uint8* data,
int length,
const base::Time& time_stamp,
- int rotation,
- bool flip_vert,
- bool flip_horiz);
+ int rotation);
private:
void OnError(HRESULT hr);
@@ -71,8 +67,9 @@ class MEDIA_EXPORT VideoCaptureDeviceMFWin
scoped_refptr<MFReaderCallback> callback_;
base::Lock lock_; // Used to guard the below variables.
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
base::win::ScopedComPtr<IMFSourceReader> reader_;
+ VideoCaptureFormat capture_format_;
bool capture_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceMFWin);
diff --git a/chromium/media/video/capture/win/video_capture_device_win.cc b/chromium/media/video/capture/win/video_capture_device_win.cc
index 307ab2967bf..00056a70168 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.cc
+++ b/chromium/media/video/capture/win/video_capture_device_win.cc
@@ -11,10 +11,12 @@
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
#include "base/win/metro.h"
+#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_variant.h"
#include "media/base/media_switches.h"
#include "media/video/capture/win/video_capture_device_mf_win.h"
+using base::win::ScopedCoMem;
using base::win::ScopedComPtr;
using base::win::ScopedVariant;
@@ -163,6 +165,12 @@ void VideoCaptureDevice::GetDeviceNames(Names* device_names) {
}
// static
+void VideoCaptureDevice::GetDeviceSupportedFormats(const Name& device,
+ VideoCaptureFormats* formats) {
+ NOTIMPLEMENTED();
+}
+
+// static
VideoCaptureDevice* VideoCaptureDevice::Create(const Name& device_name) {
VideoCaptureDevice* ret = NULL;
if (device_name.capture_api_type() == Name::MEDIA_FOUNDATION) {
@@ -257,8 +265,7 @@ void VideoCaptureDeviceWin::GetDeviceNames(Names* device_names) {
VideoCaptureDeviceWin::VideoCaptureDeviceWin(const Name& device_name)
: device_name_(device_name),
- state_(kIdle),
- observer_(NULL) {
+ state_(kIdle) {
DetachFromThread();
}
@@ -333,26 +340,27 @@ bool VideoCaptureDeviceWin::Init() {
return CreateCapabilityMap();
}
-void VideoCaptureDeviceWin::Allocate(
- const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) {
+void VideoCaptureDeviceWin::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
DCHECK(CalledOnValidThread());
if (state_ != kIdle)
return;
- observer_ = observer;
+ client_ = client.Pass();
// Get the camera capability that best match the requested resolution.
const VideoCaptureCapabilityWin& found_capability =
- capabilities_.GetBestMatchedCapability(capture_format.width,
- capture_format.height,
- capture_format.frame_rate);
- VideoCaptureCapability capability = found_capability;
+ capabilities_.GetBestMatchedFormat(
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate);
+ VideoCaptureFormat format = found_capability.supported_format;
// Reduce the frame rate if the requested frame rate is lower
// than the capability.
- if (capability.frame_rate > capture_format.frame_rate)
- capability.frame_rate = capture_format.frame_rate;
+ if (format.frame_rate > params.requested_format.frame_rate)
+ format.frame_rate = params.requested_format.frame_rate;
AM_MEDIA_TYPE* pmt = NULL;
VIDEO_STREAM_CONFIG_CAPS caps;
@@ -370,20 +378,19 @@ void VideoCaptureDeviceWin::Allocate(
if (SUCCEEDED(hr)) {
if (pmt->formattype == FORMAT_VideoInfo) {
VIDEOINFOHEADER* h = reinterpret_cast<VIDEOINFOHEADER*>(pmt->pbFormat);
- if (capability.frame_rate > 0)
- h->AvgTimePerFrame = kSecondsToReferenceTime / capability.frame_rate;
+ if (format.frame_rate > 0)
+ h->AvgTimePerFrame = kSecondsToReferenceTime / format.frame_rate;
}
- // Set the sink filter to request this capability.
- sink_filter_->SetRequestedMediaCapability(capability);
- // Order the capture device to use this capability.
+ // Set the sink filter to request this format.
+ sink_filter_->SetRequestedMediaFormat(format);
+ // Order the capture device to use this format.
hr = stream_config->SetFormat(pmt);
}
if (FAILED(hr))
SetErrorState("Failed to set capture device output format");
- if (capability.color == PIXEL_FORMAT_MJPEG &&
- !mjpg_filter_.get()) {
+ if (format.pixel_format == PIXEL_FORMAT_MJPEG && !mjpg_filter_.get()) {
// Create MJPG filter if we need it.
hr = mjpg_filter_.CreateInstance(CLSID_MjpegDec, NULL, CLSCTX_INPROC);
@@ -401,8 +408,7 @@ void VideoCaptureDeviceWin::Allocate(
}
}
- if (capability.color == PIXEL_FORMAT_MJPEG &&
- mjpg_filter_.get()) {
+ if (format.pixel_format == PIXEL_FORMAT_MJPEG && mjpg_filter_.get()) {
// Connect the camera to the MJPEG decoder.
hr = graph_builder_->ConnectDirect(output_capture_pin_, input_mjpg_pin_,
NULL);
@@ -426,21 +432,12 @@ void VideoCaptureDeviceWin::Allocate(
return;
}
- // Get the capability back from the sink filter after the filter have been
+ // Get the format back from the sink filter after the filter have been
// connected.
- const VideoCaptureCapability& used_capability
- = sink_filter_->ResultingCapability();
- observer_->OnFrameInfo(used_capability);
+ capture_format_ = sink_filter_->ResultingFormat();
- state_ = kAllocated;
-}
-
-void VideoCaptureDeviceWin::Start() {
- DCHECK(CalledOnValidThread());
- if (state_ != kAllocated)
- return;
-
- HRESULT hr = media_control_->Run();
+ // Start capturing.
+ hr = media_control_->Run();
if (FAILED(hr)) {
SetErrorState("Failed to start the Capture device.");
return;
@@ -449,7 +446,7 @@ void VideoCaptureDeviceWin::Start() {
state_ = kCapturing;
}
-void VideoCaptureDeviceWin::Stop() {
+void VideoCaptureDeviceWin::StopAndDeAllocate() {
DCHECK(CalledOnValidThread());
if (state_ != kCapturing)
return;
@@ -460,15 +457,6 @@ void VideoCaptureDeviceWin::Stop() {
return;
}
- state_ = kAllocated;
-}
-
-void VideoCaptureDeviceWin::DeAllocate() {
- DCHECK(CalledOnValidThread());
- if (state_ == kIdle)
- return;
-
- HRESULT hr = media_control_->Stop();
graph_builder_->Disconnect(output_capture_pin_);
graph_builder_->Disconnect(input_sink_pin_);
@@ -482,20 +470,15 @@ void VideoCaptureDeviceWin::DeAllocate() {
SetErrorState("Failed to Stop the Capture device");
return;
}
-
+ client_.reset();
state_ = kIdle;
}
-const VideoCaptureDevice::Name& VideoCaptureDeviceWin::device_name() {
- DCHECK(CalledOnValidThread());
- return device_name_;
-}
-
// Implements SinkFilterObserver::SinkFilterObserver.
void VideoCaptureDeviceWin::FrameReceived(const uint8* buffer,
int length) {
- observer_->OnIncomingCapturedFrame(buffer, length, base::Time::Now(),
- 0, false, false);
+ client_->OnIncomingCapturedFrame(
+ buffer, length, base::Time::Now(), 0, capture_format_);
}
bool VideoCaptureDeviceWin::CreateCapabilityMap() {
@@ -526,7 +509,9 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
for (int i = 0; i < count; ++i) {
hr = stream_config->GetStreamCaps(i, &media_type,
reinterpret_cast<BYTE*>(&caps));
- if (FAILED(hr)) {
+ // GetStreamCaps() may return S_FALSE, so don't use FAILED() or SUCCEED()
+ // macros here since they'll trigger incorrectly.
+ if (hr != S_OK) {
DVLOG(2) << "Failed to GetStreamCaps";
return false;
}
@@ -534,21 +519,19 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
if (media_type->majortype == MEDIATYPE_Video &&
media_type->formattype == FORMAT_VideoInfo) {
VideoCaptureCapabilityWin capability(i);
- REFERENCE_TIME time_per_frame = 0;
-
VIDEOINFOHEADER* h =
reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
- capability.width = h->bmiHeader.biWidth;
- capability.height = h->bmiHeader.biHeight;
- time_per_frame = h->AvgTimePerFrame;
+ capability.supported_format.frame_size.SetSize(h->bmiHeader.biWidth,
+ h->bmiHeader.biHeight);
- // Try to get the max frame rate from IAMVideoControl.
+ // Try to get a better |time_per_frame| from IAMVideoControl. If not, use
+ // the value from VIDEOINFOHEADER.
+ REFERENCE_TIME time_per_frame = h->AvgTimePerFrame;
if (video_control) {
- LONGLONG* max_fps_ptr;
- LONG list_size;
- SIZE size;
- size.cx = capability.width;
- size.cy = capability.height;
+ ScopedCoMem<LONGLONG> max_fps;
+ LONG list_size = 0;
+ SIZE size = {capability.supported_format.frame_size.width(),
+ capability.supported_format.frame_size.height()};
// GetFrameRateList doesn't return max frame rate always
// eg: Logitech Notebook. This may be due to a bug in that API
@@ -556,45 +539,42 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
// a util method written. Can't assume the first value will return
// the max fps.
hr = video_control->GetFrameRateList(output_capture_pin_, i, size,
- &list_size, &max_fps_ptr);
-
- if (SUCCEEDED(hr) && list_size > 0) {
- int min_time = *std::min_element(max_fps_ptr,
- max_fps_ptr + list_size);
- capability.frame_rate = (min_time > 0) ?
- kSecondsToReferenceTime / min_time : 0;
- } else {
- // Get frame rate from VIDEOINFOHEADER.
- capability.frame_rate = (time_per_frame > 0) ?
- static_cast<int>(kSecondsToReferenceTime / time_per_frame) : 0;
+ &list_size, &max_fps);
+ // Sometimes |list_size| will be > 0, but max_fps will be NULL. Some
+ // drivers may return an HRESULT of S_FALSE which SUCCEEDED() translates
+ // into success, so explicitly check S_OK. See http://crbug.com/306237.
+ if (hr == S_OK && list_size > 0 && max_fps) {
+ time_per_frame = *std::min_element(max_fps.get(),
+ max_fps.get() + list_size);
}
- } else {
- // Get frame rate from VIDEOINFOHEADER since IAMVideoControl is
- // not supported.
- capability.frame_rate = (time_per_frame > 0) ?
- static_cast<int>(kSecondsToReferenceTime / time_per_frame) : 0;
}
+
+ capability.supported_format.frame_rate =
+ (time_per_frame > 0)
+ ? static_cast<int>(kSecondsToReferenceTime / time_per_frame)
+ : 0;
+
// DirectShow works at the moment only on integer frame_rate but the
// best capability matching class works on rational frame rates.
- capability.frame_rate_numerator = capability.frame_rate;
+ capability.frame_rate_numerator = capability.supported_format.frame_rate;
capability.frame_rate_denominator = 1;
// We can't switch MEDIATYPE :~(.
if (media_type->subtype == kMediaSubTypeI420) {
- capability.color = PIXEL_FORMAT_I420;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_I420;
} else if (media_type->subtype == MEDIASUBTYPE_IYUV) {
// This is identical to PIXEL_FORMAT_I420.
- capability.color = PIXEL_FORMAT_I420;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_I420;
} else if (media_type->subtype == MEDIASUBTYPE_RGB24) {
- capability.color = PIXEL_FORMAT_RGB24;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_RGB24;
} else if (media_type->subtype == MEDIASUBTYPE_YUY2) {
- capability.color = PIXEL_FORMAT_YUY2;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_YUY2;
} else if (media_type->subtype == MEDIASUBTYPE_MJPG) {
- capability.color = PIXEL_FORMAT_MJPEG;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_MJPEG;
} else if (media_type->subtype == MEDIASUBTYPE_UYVY) {
- capability.color = PIXEL_FORMAT_UYVY;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_UYVY;
} else if (media_type->subtype == MEDIASUBTYPE_ARGB32) {
- capability.color = PIXEL_FORMAT_ARGB;
+ capability.supported_format.pixel_format = PIXEL_FORMAT_ARGB;
} else {
WCHAR guid_str[128];
StringFromGUID2(media_type->subtype, guid_str, arraysize(guid_str));
@@ -614,6 +594,6 @@ void VideoCaptureDeviceWin::SetErrorState(const char* reason) {
DCHECK(CalledOnValidThread());
DVLOG(1) << reason;
state_ = kError;
- observer_->OnError();
+ client_->OnError();
}
} // namespace media
diff --git a/chromium/media/video/capture/win/video_capture_device_win.h b/chromium/media/video/capture/win/video_capture_device_win.h
index 4c83d6b3062..164c01c9e26 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.h
+++ b/chromium/media/video/capture/win/video_capture_device_win.h
@@ -30,7 +30,7 @@ namespace media {
// All the methods in the class can only be run on a COM initialized thread.
class VideoCaptureDeviceWin
: public base::NonThreadSafe,
- public VideoCaptureDevice1,
+ public VideoCaptureDevice,
public SinkFilterObserver {
public:
explicit VideoCaptureDeviceWin(const Name& device_name);
@@ -40,19 +40,16 @@ class VideoCaptureDeviceWin
bool Init();
// VideoCaptureDevice implementation.
- virtual void Allocate(const VideoCaptureCapability& capture_format,
- VideoCaptureDevice::EventHandler* observer) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void DeAllocate() OVERRIDE;
- virtual const Name& device_name() OVERRIDE;
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client)
+ OVERRIDE;
+ virtual void StopAndDeAllocate() OVERRIDE;
static void GetDeviceNames(Names* device_names);
private:
enum InternalState {
kIdle, // The device driver is opened but camera is not in use.
- kAllocated, // The camera has been allocated and can be started.
kCapturing, // Video is being captured.
kError // Error accessing HW functions.
// User needs to recover by destroying the object.
@@ -66,7 +63,7 @@ class VideoCaptureDeviceWin
Name device_name_;
InternalState state_;
- VideoCaptureDevice::EventHandler* observer_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
base::win::ScopedComPtr<IBaseFilter> capture_filter_;
base::win::ScopedComPtr<IGraphBuilder> graph_builder_;
@@ -82,6 +79,7 @@ class VideoCaptureDeviceWin
// Map of all capabilities this device support.
CapabilityList capabilities_;
+ VideoCaptureFormat capture_format_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceWin);
};
diff --git a/chromium/media/video/video_decode_accelerator.h b/chromium/media/video/video_decode_accelerator.h
index 1aa0954ba96..5212db2c488 100644
--- a/chromium/media/video/video_decode_accelerator.h
+++ b/chromium/media/video/video_decode_accelerator.h
@@ -90,9 +90,9 @@ class MEDIA_EXPORT VideoDecodeAccelerator
// Returns true when command successfully accepted. Otherwise false.
virtual bool Initialize(VideoCodecProfile profile) = 0;
- // Decodes given bitstream buffer. Once decoder is done with processing
- // |bitstream_buffer| it will call NotifyEndOfBitstreamBuffer() with the
- // bitstream buffer id.
+ // Decodes given bitstream buffer that contains at most one frame. Once
+ // decoder is done with processing |bitstream_buffer| it will call
+ // NotifyEndOfBitstreamBuffer() with the bitstream buffer id.
// Parameters:
// |bitstream_buffer| is the input bitstream that is sent for decoding.
virtual void Decode(const BitstreamBuffer& bitstream_buffer) = 0;