author    Sebastian Dröge <sebastian@centricular.com>  2019-05-04 00:15:32 +0300
committer Sebastian Dröge <slomo@coaxion.net>          2019-05-03 21:20:49 +0000
commit    6354769d8995baaf8447c3fe4341fca1355705f6 (patch)
tree      37b0be299d138cb70832f307a30c59b473bf497a /sys/decklink
parent    ba21c2106b59560f784b4f1f33518c7e2cf3bab7 (diff)
download  gstreamer-plugins-bad-6354769d8995baaf8447c3fe4341fca1355705f6.tar.gz
decklink: Add support for parsing/outputting AFD/Bar
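With afd-bar-line set, decklinkvideosink serializes any GstVideoAFDMeta/GstVideoBarMeta
found on incoming buffers (zeroed data otherwise) into a VANC packet on that line; with
output-afd-bar=true, decklinkvideosrc extracts such packets and attaches the corresponding
metas to outgoing buffers.

For example (illustrative pipelines only; just the afd-bar-line and output-afd-bar
properties are added by this patch, everything else is placeholder):

    gst-launch-1.0 videotestsrc ! decklinkvideosink mode=1080p25 afd-bar-line=11 cc-line=12

    gst-launch-1.0 decklinkvideosrc mode=1080i50 output-afd-bar=true output-cc=true ! \
        videoconvert ! autovideosink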
Diffstat (limited to 'sys/decklink')
-rw-r--r--  sys/decklink/gstdecklinkvideosink.cpp  438
-rw-r--r--  sys/decklink/gstdecklinkvideosink.h      3
-rw-r--r--  sys/decklink/gstdecklinkvideosrc.cpp   290
-rw-r--r--  sys/decklink/gstdecklinkvideosrc.h       6
4 files changed, 543 insertions, 194 deletions
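
A rough sketch of how an upstream application could attach the metas that the new
write_vbi() path serializes (helper name and values are illustrative; the API calls are
the ones used in the patch):

    #include <gst/video/video.h>

    /* Illustrative: tag a buffer with AFD and bar data for field 0 so that
     * decklinkvideosink (with afd-bar-line != 0) inserts it into VANC. */
    static void
    attach_afd_bar_example (GstBuffer * buffer)
    {
      /* AFD code 8 (full frame), signalled per SMPTE ST 2016-1; cast as done in
       * the patch rather than relying on a specific GstVideoAFDValue enum name */
      gst_buffer_add_video_afd_meta (buffer, 0,
          GST_VIDEO_AFD_SPEC_SMPTE_ST2016_1, (GstVideoAFDValue) 8);
      /* Letterboxed: end line of the top bar and start line of the bottom bar */
      gst_buffer_add_video_bar_meta (buffer, 0, TRUE, 42, 1038);
    }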
diff --git a/sys/decklink/gstdecklinkvideosink.cpp b/sys/decklink/gstdecklinkvideosink.cpp
index 7aea3f7a2..8cea56798 100644
--- a/sys/decklink/gstdecklinkvideosink.cpp
+++ b/sys/decklink/gstdecklinkvideosink.cpp
@@ -244,6 +244,7 @@ enum
PROP_KEYER_LEVEL,
PROP_HW_SERIAL_NUMBER,
PROP_CC_LINE,
+ PROP_AFD_BAR_LINE,
};
static void gst_decklink_video_sink_set_property (GObject * object,
@@ -388,6 +389,13 @@ gst_decklink_video_sink_class_init (GstDecklinkVideoSinkClass * klass)
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
G_PARAM_CONSTRUCT)));
+ g_object_class_install_property (gobject_class, PROP_AFD_BAR_LINE,
+ g_param_spec_int ("afd-bar-line", "AFD/Bar Line",
+ "Line number to use for inserting AFD/Bar data (0 = disabled)", 0,
+ 10000, 0,
+ (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS |
+ G_PARAM_CONSTRUCT)));
+
templ_caps = gst_decklink_mode_get_template_caps (FALSE);
templ_caps = gst_caps_make_writable (templ_caps);
/* For output we support any framerate and only really care about timestamps */
@@ -415,6 +423,7 @@ gst_decklink_video_sink_init (GstDecklinkVideoSink * self)
/* VITC is legacy, we should expect RP188 in modern use cases */
self->timecode_format = bmdTimecodeRP188Any;
self->caption_line = 0;
+ self->afd_bar_line = 0;
gst_base_sink_set_max_lateness (GST_BASE_SINK_CAST (self), 20 * GST_MSECOND);
gst_base_sink_set_qos_enabled (GST_BASE_SINK_CAST (self), TRUE);
@@ -469,6 +478,9 @@ gst_decklink_video_sink_set_property (GObject * object, guint property_id,
case PROP_CC_LINE:
self->caption_line = g_value_get_int (value);
break;
+ case PROP_AFD_BAR_LINE:
+ self->afd_bar_line = g_value_get_int (value);
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
break;
@@ -515,6 +527,9 @@ gst_decklink_video_sink_get_property (GObject * object, guint property_id,
case PROP_CC_LINE:
g_value_set_int (value, self->caption_line);
break;
+ case PROP_AFD_BAR_LINE:
+ g_value_set_int (value, self->afd_bar_line);
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
break;
@@ -613,13 +628,15 @@ gst_decklink_video_sink_set_caps (GstBaseSink * bsink, GstCaps * caps)
* Note that this flag will have no effect in practice if the video stream
* does not contain timecode metadata.
*/
- if ((gint64) self->timecode_format == (gint64) GST_DECKLINK_TIMECODE_FORMAT_VITC ||
- (gint64) self->timecode_format == (gint64) GST_DECKLINK_TIMECODE_FORMAT_VITCFIELD2)
+ if ((gint64) self->timecode_format ==
+ (gint64) GST_DECKLINK_TIMECODE_FORMAT_VITC
+ || (gint64) self->timecode_format ==
+ (gint64) GST_DECKLINK_TIMECODE_FORMAT_VITCFIELD2)
flags = bmdVideoOutputVITC;
else
flags = bmdVideoOutputRP188;
- if (self->caption_line > 0)
+ if (self->caption_line > 0 || self->afd_bar_line > 0)
flags = (BMDVideoOutputFlags) (flags | bmdVideoOutputVANC);
ret = self->output->output->EnableVideoOutput (mode->mode, flags);
@@ -771,7 +788,7 @@ gst_decklink_video_sink_convert_to_internal_clock (GstDecklinkVideoSink * self,
}
if (external_base != GST_CLOCK_TIME_NONE &&
- internal_base != GST_CLOCK_TIME_NONE)
+ internal_base != GST_CLOCK_TIME_NONE)
*timestamp += internal_offset;
else
*timestamp = gst_clock_get_internal_time (self->output->clock);
@@ -883,6 +900,287 @@ convert_cea708_cc_data_cea708_cdp_internal (GstDecklinkVideoSink * self,
return len;
}
+static void
+write_vbi (GstDecklinkVideoSink * self, GstBuffer * buffer,
+ BMDPixelFormat format, IDeckLinkMutableVideoFrame * frame,
+ GstVideoTimeCodeMeta * tc_meta)
+{
+ IDeckLinkVideoFrameAncillary *vanc_frame = NULL;
+ gpointer iter = NULL;
+ GstVideoCaptionMeta *cc_meta;
+ guint8 *vancdata;
+ gboolean got_captions = FALSE;
+
+ if (self->caption_line == 0 && self->afd_bar_line == 0)
+ return;
+
+ if (self->vbiencoder == NULL) {
+ self->vbiencoder =
+ gst_video_vbi_encoder_new (self->info.finfo->format, self->info.width);
+ self->anc_vformat = self->info.finfo->format;
+ }
+
+ /* Put any closed captions into the configured line */
+ while ((cc_meta =
+ (GstVideoCaptionMeta *) gst_buffer_iterate_meta_filtered (buffer,
+ &iter, GST_VIDEO_CAPTION_META_API_TYPE))) {
+ switch (cc_meta->caption_type) {
+ case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:{
+ guint8 data[138];
+ guint i, n;
+
+ n = cc_meta->size / 2;
+ if (cc_meta->size > 46) {
+ GST_WARNING_OBJECT (self, "Too big raw CEA608 buffer");
+ break;
+ }
+
+ /* This is the offset from line 9 for 525-line fields and from line
+ * 5 for 625-line fields.
+ *
+ * The highest bit is set for field 1 but not for field 0, but we
+ * have no way of knowing the field here
+ */
+ for (i = 0; i < n; i++) {
+ data[3 * i] = 0x80 | (self->info.height ==
+ 525 ? self->caption_line - 9 : self->caption_line - 5);
+ data[3 * i + 1] = cc_meta->data[2 * i];
+ data[3 * i + 2] = cc_meta->data[2 * i + 1];
+ }
+
+ if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
+ FALSE,
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 >> 8,
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 & 0xff, data, 3))
+ GST_WARNING_OBJECT (self, "Couldn't add meta to ancillary data");
+
+ got_captions = TRUE;
+
+ break;
+ }
+ case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:{
+ if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
+ FALSE,
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 >> 8,
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 & 0xff, cc_meta->data,
+ cc_meta->size))
+ GST_WARNING_OBJECT (self, "Couldn't add meta to ancillary data");
+
+ got_captions = TRUE;
+
+ break;
+ }
+ case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:{
+ guint8 data[256];
+ guint n;
+
+ n = cc_meta->size / 3;
+ if (cc_meta->size > 46) {
+ GST_WARNING_OBJECT (self, "Too big raw CEA708 buffer");
+ break;
+ }
+
+ n = convert_cea708_cc_data_cea708_cdp_internal (self, cc_meta->data,
+ cc_meta->size, data, sizeof (data), tc_meta);
+ if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder, FALSE,
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 >> 8,
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 & 0xff, data, n))
+ GST_WARNING_OBJECT (self, "Couldn't add meta to ancillary data");
+
+ got_captions = TRUE;
+
+ break;
+ }
+ case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:{
+ if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
+ FALSE,
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 >> 8,
+ GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 & 0xff, cc_meta->data,
+ cc_meta->size))
+ GST_WARNING_OBJECT (self, "Couldn't add meta to ancillary data");
+
+ got_captions = TRUE;
+
+ break;
+ }
+ default:{
+ GST_FIXME_OBJECT (self, "Caption type %d not supported",
+ cc_meta->caption_type);
+ break;
+ }
+ }
+ }
+
+ if ((got_captions || self->afd_bar_line != 0)
+ && self->output->output->CreateAncillaryData (format,
+ &vanc_frame) == S_OK) {
+ GstVideoAFDMeta *afd_meta = NULL, *afd_meta2 = NULL;
+ GstVideoBarMeta *bar_meta = NULL, *bar_meta2 = NULL;
+ GstMeta *meta;
+ gpointer meta_iter;
+ guint8 afd_bar_data[8] = { 0, };
+ guint8 afd_bar_data2[8] = { 0, };
+ guint8 afd = 0;
+ gboolean is_letterbox = 0;
+ guint16 bar1 = 0, bar2 = 0;
+ guint i;
+
+ // Get any reasonable AFD/Bar metas for both fields
+ meta_iter = NULL;
+ while ((meta =
+ gst_buffer_iterate_meta_filtered (buffer, &meta_iter,
+ GST_VIDEO_AFD_META_API_TYPE))) {
+ GstVideoAFDMeta *tmp_meta = (GstVideoAFDMeta *) meta;
+
+ if (tmp_meta->field == 0 || !afd_meta || (afd_meta && afd_meta->field != 0
+ && tmp_meta->field == 0))
+ afd_meta = tmp_meta;
+ if (tmp_meta->field == 1 || !afd_meta2 || (afd_meta2
+ && afd_meta->field != 1 && tmp_meta->field == 1))
+ afd_meta2 = tmp_meta;
+ }
+
+ meta_iter = NULL;
+ while ((meta =
+ gst_buffer_iterate_meta_filtered (buffer, &meta_iter,
+ GST_VIDEO_BAR_META_API_TYPE))) {
+ GstVideoBarMeta *tmp_meta = (GstVideoBarMeta *) meta;
+
+ if (tmp_meta->field == 0 || !bar_meta || (bar_meta && bar_meta->field != 0
+ && tmp_meta->field == 0))
+ bar_meta = tmp_meta;
+ if (tmp_meta->field == 1 || !bar_meta2 || (bar_meta2
+ && bar_meta->field != 1 && tmp_meta->field == 1))
+ bar_meta2 = tmp_meta;
+ }
+
+ for (i = 0; i < 2; i++) {
+ guint8 *afd_bar_data_ptr;
+
+ if (i == 0) {
+ afd_bar_data_ptr = afd_bar_data;
+ afd = afd_meta ? afd_meta->afd : 0;
+ is_letterbox = bar_meta ? bar_meta->is_letterbox : FALSE;
+ bar1 = bar_meta ? bar_meta->bar_data1 : 0;
+ bar2 = bar_meta ? bar_meta->bar_data2 : 0;
+ } else {
+ afd_bar_data_ptr = afd_bar_data2;
+ afd = afd_meta2 ? afd_meta2->afd : 0;
+ is_letterbox = bar_meta2 ? bar_meta2->is_letterbox : FALSE;
+ bar1 = bar_meta2 ? bar_meta2->bar_data1 : 0;
+ bar2 = bar_meta2 ? bar_meta2->bar_data2 : 0;
+ }
+
+ /* See SMPTE 2016-3 Section 4 */
+ /* AFD and AR */
+ if (self->mode < (gint) GST_DECKLINK_MODE_NTSC_WIDESCREEN) {
+ afd_bar_data_ptr[0] = (afd << 3) | 0x0;
+ } else {
+ afd_bar_data_ptr[0] = (afd << 3) | 0x4;
+ }
+
+ /* Bar flags */
+ afd_bar_data_ptr[3] = is_letterbox ? 0xc0 : 0x30;
+
+ /* Bar value 1 and 2 */
+ GST_WRITE_UINT16_BE (&afd_bar_data_ptr[4], bar1);
+ GST_WRITE_UINT16_BE (&afd_bar_data_ptr[6], bar2);
+ }
+
+ /* AFD on the same line as the captions */
+ if (self->caption_line == self->afd_bar_line) {
+ if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
+ FALSE, GST_VIDEO_ANCILLARY_DID16_S2016_3_AFD_BAR >> 8,
+ GST_VIDEO_ANCILLARY_DID16_S2016_3_AFD_BAR & 0xff, afd_bar_data,
+ sizeof (afd_bar_data)))
+ GST_WARNING_OBJECT (self,
+ "Couldn't add AFD/Bar data to ancillary data");
+ }
+
+ /* FIXME: Add captions to the correct field? Captions for the second
+ * field should probably be inserted into the second field */
+
+ if (got_captions || self->caption_line == self->afd_bar_line) {
+ if (vanc_frame->GetBufferForVerticalBlankingLine (self->caption_line,
+ (void **) &vancdata) == S_OK) {
+ gst_video_vbi_encoder_write_line (self->vbiencoder, vancdata);
+ } else {
+ GST_WARNING_OBJECT (self,
+ "Failed to get buffer for line %d ancillary data",
+ self->caption_line);
+ }
+ }
+
+ /* AFD on a different line than the captions */
+ if (self->afd_bar_line != 0 && self->caption_line != self->afd_bar_line) {
+ if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
+ FALSE, GST_VIDEO_ANCILLARY_DID16_S2016_3_AFD_BAR >> 8,
+ GST_VIDEO_ANCILLARY_DID16_S2016_3_AFD_BAR & 0xff, afd_bar_data,
+ sizeof (afd_bar_data)))
+ GST_WARNING_OBJECT (self,
+ "Couldn't add AFD/Bar data to ancillary data");
+
+ if (vanc_frame->GetBufferForVerticalBlankingLine (self->afd_bar_line,
+ (void **) &vancdata) == S_OK) {
+ gst_video_vbi_encoder_write_line (self->vbiencoder, vancdata);
+ } else {
+ GST_WARNING_OBJECT (self,
+ "Failed to get buffer for line %d ancillary data",
+ self->afd_bar_line);
+ }
+ }
+
+ /* For interlaced video we need to also add AFD to the second field */
+ if (GST_VIDEO_INFO_IS_INTERLACED (&self->info) && self->afd_bar_line != 0) {
+ guint field2_offset;
+
+ /* The VANC lines for the second field are at an offset, depending on
+ * the format in use.
+ */
+ switch (self->info.height) {
+ case 486:
+ /* NTSC: 525 / 2 + 1 */
+ field2_offset = 263;
+ break;
+ case 576:
+ /* PAL: 625 / 2 + 1 */
+ field2_offset = 313;
+ break;
+ case 1080:
+ /* 1080i: 1125 / 2 + 1 */
+ field2_offset = 563;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
+ FALSE, GST_VIDEO_ANCILLARY_DID16_S2016_3_AFD_BAR >> 8,
+ GST_VIDEO_ANCILLARY_DID16_S2016_3_AFD_BAR & 0xff, afd_bar_data2,
+ sizeof (afd_bar_data)))
+ GST_WARNING_OBJECT (self,
+ "Couldn't add AFD/Bar data to ancillary data");
+
+ if (vanc_frame->GetBufferForVerticalBlankingLine (self->afd_bar_line +
+ field2_offset, (void **) &vancdata) == S_OK) {
+ gst_video_vbi_encoder_write_line (self->vbiencoder, vancdata);
+ } else {
+ GST_WARNING_OBJECT (self,
+ "Failed to get buffer for line %d ancillary data",
+ self->afd_bar_line);
+ }
+ }
+
+ if (frame->SetAncillaryData (vanc_frame) != S_OK) {
+ GST_WARNING_OBJECT (self, "Failed to set ancillary data");
+ }
+
+ vanc_frame->Release ();
+ } else if (got_captions || self->afd_bar_line != 0) {
+ GST_WARNING_OBJECT (self, "Failed to allocate ancillary data frame");
+ }
+}
+
static GstFlowReturn
gst_decklink_video_sink_prepare (GstBaseSink * bsink, GstBuffer * buffer)
{
@@ -1004,131 +1302,7 @@ gst_decklink_video_sink_prepare (GstBaseSink * bsink, GstBuffer * buffer)
g_free (tc_str);
}
- if (self->caption_line != 0) {
- IDeckLinkVideoFrameAncillary *vanc_frame = NULL;
- gpointer iter = NULL;
- GstVideoCaptionMeta *cc_meta;
- guint8 *vancdata;
- gboolean got_captions = FALSE;
-
- /* Put any closed captions into the configured line */
- while ((cc_meta =
- (GstVideoCaptionMeta *) gst_buffer_iterate_meta_filtered (buffer,
- &iter, GST_VIDEO_CAPTION_META_API_TYPE))) {
- if (self->vbiencoder == NULL) {
- self->vbiencoder =
- gst_video_vbi_encoder_new (self->info.finfo->format,
- self->info.width);
- self->anc_vformat = self->info.finfo->format;
- }
-
- switch (cc_meta->caption_type) {
- case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:{
- guint8 data[138];
- guint i, n;
-
- n = cc_meta->size / 2;
- if (cc_meta->size > 46) {
- GST_WARNING_OBJECT (self, "Too big raw CEA608 buffer");
- break;
- }
-
- /* This is the offset from line 9 for 525-line fields and from line
- * 5 for 625-line fields.
- *
- * The highest bit is set for field 1 but not for field 0, but we
- * have no way of knowning the field here
- */
- for (i = 0; i < n; i++) {
- data[3 * i] = 0x80 | (self->info.height ==
- 525 ? self->caption_line - 9 : self->caption_line - 5);
- data[3 * i + 1] = cc_meta->data[2 * i];
- data[3 * i + 2] = cc_meta->data[2 * i + 1];
- }
-
- if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
- FALSE,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 >> 8,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 & 0xff, data, 3))
- GST_WARNING_OBJECT (self, "Couldn't add meta to ancillary data");
-
- got_captions = TRUE;
-
- break;
- }
- case GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A:{
- if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
- FALSE,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 >> 8,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_608 & 0xff, cc_meta->data,
- cc_meta->size))
- GST_WARNING_OBJECT (self, "Couldn't add meta to ancillary data");
-
- got_captions = TRUE;
-
- break;
- }
- case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:{
- guint8 data[256];
- guint n;
-
- n = cc_meta->size / 3;
- if (cc_meta->size > 46) {
- GST_WARNING_OBJECT (self, "Too big raw CEA708 buffer");
- break;
- }
-
- n = convert_cea708_cc_data_cea708_cdp_internal (self, cc_meta->data,
- cc_meta->size, data, sizeof (data), tc_meta);
- if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder, FALSE,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 >> 8,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 & 0xff, data, n))
- GST_WARNING_OBJECT (self, "Couldn't add meta to ancillary data");
-
- got_captions = TRUE;
-
- break;
- }
- case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:{
- if (!gst_video_vbi_encoder_add_ancillary (self->vbiencoder,
- FALSE,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 >> 8,
- GST_VIDEO_ANCILLARY_DID16_S334_EIA_708 & 0xff, cc_meta->data,
- cc_meta->size))
- GST_WARNING_OBJECT (self, "Couldn't add meta to ancillary data");
-
- got_captions = TRUE;
-
- break;
- }
- default:{
- GST_FIXME_OBJECT (self, "Caption type %d not supported",
- cc_meta->caption_type);
- break;
- }
- }
- }
-
- if (got_captions
- && self->output->output->CreateAncillaryData (format,
- &vanc_frame) == S_OK) {
- if (vanc_frame->GetBufferForVerticalBlankingLine (self->caption_line,
- (void **) &vancdata) == S_OK) {
- gst_video_vbi_encoder_write_line (self->vbiencoder, vancdata);
- if (frame->SetAncillaryData (vanc_frame) != S_OK) {
- GST_WARNING_OBJECT (self, "Failed to set ancillary data");
- }
- } else {
- GST_WARNING_OBJECT (self,
- "Failed to get buffer for line %d ancillary data",
- self->caption_line);
- }
- vanc_frame->Release ();
- } else if (got_captions) {
- GST_WARNING_OBJECT (self, "Failed to allocate ancillary data frame");
- }
-
- }
+ write_vbi (self, buffer, format, frame, tc_meta);
gst_decklink_video_sink_convert_to_internal_clock (self, &running_time,
&running_time_duration);
@@ -1419,9 +1593,11 @@ gst_decklink_video_sink_change_state (GstElement * element,
}
GST_OBJECT_LOCK (self);
- if (self->external_base_time == GST_CLOCK_TIME_NONE || self->internal_base_time == GST_CLOCK_TIME_NONE) {
+ if (self->external_base_time == GST_CLOCK_TIME_NONE
+ || self->internal_base_time == GST_CLOCK_TIME_NONE) {
self->external_base_time = gst_clock_get_internal_time (clock);
- self->internal_base_time = gst_clock_get_internal_time (self->output->clock);
+ self->internal_base_time =
+ gst_clock_get_internal_time (self->output->clock);
self->internal_time_offset = self->internal_base_time;
}
diff --git a/sys/decklink/gstdecklinkvideosink.h b/sys/decklink/gstdecklinkvideosink.h
index 07d3e9cff..02b321309 100644
--- a/sys/decklink/gstdecklinkvideosink.h
+++ b/sys/decklink/gstdecklinkvideosink.h
@@ -69,8 +69,11 @@ struct _GstDecklinkVideoSink
GstVideoVBIEncoder *vbiencoder;
GstVideoFormat anc_vformat;
+
gint caption_line;
guint16 cdp_hdr_sequence_cntr;
+
+ gint afd_bar_line;
};
struct _GstDecklinkVideoSinkClass
diff --git a/sys/decklink/gstdecklinkvideosrc.cpp b/sys/decklink/gstdecklinkvideosrc.cpp
index 98940fe31..3894da499 100644
--- a/sys/decklink/gstdecklinkvideosrc.cpp
+++ b/sys/decklink/gstdecklinkvideosrc.cpp
@@ -148,6 +148,7 @@ GST_DEBUG_CATEGORY_STATIC (gst_decklink_video_src_debug);
#define DEFAULT_SKIP_FIRST_TIME (0)
#define DEFAULT_DROP_NO_SIGNAL_FRAMES (FALSE)
#define DEFAULT_OUTPUT_CC (FALSE)
+#define DEFAULT_OUTPUT_AFD_BAR (FALSE)
#ifndef ABSDIFF
#define ABSDIFF(x, y) ( (x) > (y) ? ((x) - (y)) : ((y) - (x)) )
@@ -168,7 +169,8 @@ enum
PROP_DROP_NO_SIGNAL_FRAMES,
PROP_SIGNAL,
PROP_HW_SERIAL_NUMBER,
- PROP_OUTPUT_CC
+ PROP_OUTPUT_CC,
+ PROP_OUTPUT_AFD_BAR,
};
typedef struct
@@ -356,6 +358,12 @@ gst_decklink_video_src_class_init (GstDecklinkVideoSrcClass * klass)
DEFAULT_OUTPUT_CC,
(GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+ g_object_class_install_property (gobject_class, PROP_OUTPUT_AFD_BAR,
+ g_param_spec_boolean ("output-afd-bar", "Output AFD/Bar data",
+ "Extract and output AFD/Bar as GstMeta (if present)",
+ DEFAULT_OUTPUT_AFD_BAR,
+ (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+
templ_caps = gst_decklink_mode_get_template_caps (TRUE);
gst_element_class_add_pad_template (element_class,
gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, templ_caps));
@@ -386,6 +394,8 @@ gst_decklink_video_src_init (GstDecklinkVideoSrc * self)
self->output_stream_time = DEFAULT_OUTPUT_STREAM_TIME;
self->skip_first_time = DEFAULT_SKIP_FIRST_TIME;
self->drop_no_signal_frames = DEFAULT_DROP_NO_SIGNAL_FRAMES;
+ self->output_cc = DEFAULT_OUTPUT_CC;
+ self->output_afd_bar = DEFAULT_OUTPUT_AFD_BAR;
self->window_size = 64;
self->times = g_new (GstClockTime, 4 * self->window_size);
@@ -470,6 +480,9 @@ gst_decklink_video_src_set_property (GObject * object, guint property_id,
case PROP_OUTPUT_CC:
self->output_cc = g_value_get_boolean (value);
break;
+ case PROP_OUTPUT_AFD_BAR:
+ self->output_afd_bar = g_value_get_boolean (value);
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
break;
@@ -527,6 +540,9 @@ gst_decklink_video_src_get_property (GObject * object, guint property_id,
case PROP_OUTPUT_CC:
g_value_set_boolean (value, self->output_cc);
break;
+ case PROP_OUTPUT_AFD_BAR:
+ g_value_set_boolean (value, self->output_afd_bar);
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
break;
@@ -929,14 +945,118 @@ gst_decklink_video_src_got_frame (GstElement * element,
}
static void
-extract_cc_from_vbi (GstDecklinkVideoSrc * self, GstBuffer ** buffer,
+extract_vbi_line (GstDecklinkVideoSrc * self, GstBuffer ** buffer,
+ IDeckLinkVideoFrameAncillary * vanc_frame, guint field2_offset, guint line,
+ gboolean * found_cc_out, gboolean * found_afd_bar_out)
+{
+ GstVideoAncillary gstanc;
+ const guint8 *vancdata;
+ gboolean found_cc = FALSE, found_afd_bar = FALSE;
+
+ if (vanc_frame->GetBufferForVerticalBlankingLine (field2_offset + line,
+ (void **) &vancdata) != S_OK)
+ return;
+
+ GST_DEBUG_OBJECT (self, "Checking for VBI data on field line %u (field %u)",
+ field2_offset + line, field2_offset ? 2 : 1);
+ gst_video_vbi_parser_add_line (self->vbiparser, vancdata);
+
+ /* Check if CC or AFD/Bar is on this line if we didn't find any on a
+ * previous line. Remember the line where we found them */
+
+ while (gst_video_vbi_parser_get_ancillary (self->vbiparser,
+ &gstanc) == GST_VIDEO_VBI_PARSER_RESULT_OK) {
+ g_print ("found at line %d\n", field2_offset + line);
+ gst_util_dump_mem (vancdata, 16);
+ switch (GST_VIDEO_ANCILLARY_DID16 (&gstanc)) {
+ case GST_VIDEO_ANCILLARY_DID16_S334_EIA_708:
+ if (*found_cc_out || !self->output_cc)
+ continue;
+
+ GST_DEBUG_OBJECT (self,
+ "Adding CEA-708 CDP meta to buffer for line %u",
+ field2_offset + line);
+ GST_MEMDUMP_OBJECT (self, "CDP", gstanc.data, gstanc.data_count);
+ gst_buffer_add_video_caption_meta (*buffer,
+ GST_VIDEO_CAPTION_TYPE_CEA708_CDP, gstanc.data, gstanc.data_count);
+
+ found_cc = TRUE;
+ if (field2_offset)
+ self->last_cc_vbi_line_field2 = line;
+ else
+ self->last_cc_vbi_line = line;
+ break;
+ case GST_VIDEO_ANCILLARY_DID16_S334_EIA_608:
+ if (*found_cc_out || !self->output_cc)
+ continue;
+
+ GST_DEBUG_OBJECT (self,
+ "Adding CEA-608 meta to buffer for line %u", field2_offset + line);
+ GST_MEMDUMP_OBJECT (self, "CEA608", gstanc.data, gstanc.data_count);
+ gst_buffer_add_video_caption_meta (*buffer,
+ GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A, gstanc.data,
+ gstanc.data_count);
+
+ found_cc = TRUE;
+ if (field2_offset)
+ self->last_cc_vbi_line_field2 = line;
+ else
+ self->last_cc_vbi_line = line;
+ break;
+ case GST_VIDEO_ANCILLARY_DID16_S2016_3_AFD_BAR:{
+ GstVideoAFDValue afd;
+ gboolean is_letterbox;
+ guint16 bar1, bar2;
+
+ if (*found_afd_bar_out || !self->output_afd_bar)
+ continue;
+
+ GST_DEBUG_OBJECT (self,
+ "Adding AFD/Bar meta to buffer for line %u", field2_offset + line);
+ GST_MEMDUMP_OBJECT (self, "AFD/Bar", gstanc.data, gstanc.data_count);
+
+ if (gstanc.data_count < 16) {
+ GST_WARNING_OBJECT (self, "AFD/Bar data too small");
+ continue;
+ }
+
+ afd = (GstVideoAFDValue) ((gstanc.data[0] >> 3) & 0xf);
+ is_letterbox = ((gstanc.data[3] >> 4) & 0x3) == 0;
+ bar1 = GST_READ_UINT16_BE (&gstanc.data[4]);
+ bar2 = GST_READ_UINT16_BE (&gstanc.data[6]);
+
+ gst_buffer_add_video_afd_meta (*buffer, field2_offset ? 1 : 0,
+ GST_VIDEO_AFD_SPEC_SMPTE_ST2016_1, afd);
+ gst_buffer_add_video_bar_meta (*buffer, field2_offset ? 1 : 0,
+ is_letterbox, bar1, bar2);
+
+ found_afd_bar = TRUE;
+ if (field2_offset)
+ self->last_afd_bar_vbi_line_field2 = line;
+ else
+ self->last_afd_bar_vbi_line = line;
+ break;
+ }
+ default:
+ /* otherwise continue looking */
+ continue;
+ }
+ }
+
+ if (found_cc)
+ *found_cc_out = TRUE;
+ if (found_afd_bar)
+ *found_afd_bar_out = TRUE;
+}
+
+static void
+extract_vbi (GstDecklinkVideoSrc * self, GstBuffer ** buffer,
VideoFrame * vf, const GstDecklinkMode * mode)
{
IDeckLinkVideoFrameAncillary *vanc_frame = NULL;
- gint fi;
- guint8 *vancdata;
+ gint line;
GstVideoFormat videoformat;
- gboolean found = FALSE;
+ gboolean found_cc = FALSE, found_afd_bar = FALSE;
if (vf->frame->GetAncillaryData (&vanc_frame) != S_OK)
return;
@@ -955,66 +1075,107 @@ extract_cc_from_vbi (GstDecklinkVideoSrc * self, GstBuffer ** buffer,
self->vbiparser = NULL;
}
+ if (self->vbiparser == NULL) {
+ self->vbiparser = gst_video_vbi_parser_new (videoformat, mode->width);
+ self->anc_vformat = videoformat;
+ }
+
GST_DEBUG_OBJECT (self, "Checking for ancillary data in VBI");
- fi = self->last_cc_vbi_line;
- if (fi == -1)
- fi = 1;
-
- while (fi < 22 && !found) {
- if (vanc_frame->GetBufferForVerticalBlankingLine (fi,
- (void **) &vancdata) == S_OK) {
- GstVideoAncillary gstanc;
- if (self->vbiparser == NULL) {
- self->vbiparser = gst_video_vbi_parser_new (videoformat, mode->width);
- self->anc_vformat = videoformat;
- }
- GST_DEBUG_OBJECT (self, "Might have data on line %d", fi);
- gst_video_vbi_parser_add_line (self->vbiparser, vancdata);
-
- while (gst_video_vbi_parser_get_ancillary (self->vbiparser,
- &gstanc) == GST_VIDEO_VBI_PARSER_RESULT_OK) {
- switch (GST_VIDEO_ANCILLARY_DID16 (&gstanc)) {
- case GST_VIDEO_ANCILLARY_DID16_S334_EIA_708:
- GST_DEBUG_OBJECT (self,
- "Adding CEA-708 CDP meta to buffer for line %d", fi);
- GST_MEMDUMP_OBJECT (self, "CDP", gstanc.data, gstanc.data_count);
- gst_buffer_add_video_caption_meta (*buffer,
- GST_VIDEO_CAPTION_TYPE_CEA708_CDP, gstanc.data,
- gstanc.data_count);
-
- break;
- case GST_VIDEO_ANCILLARY_DID16_S334_EIA_608:
- GST_DEBUG_OBJECT (self,
- "Adding CEA-608 meta to buffer for line %d", fi);
- GST_MEMDUMP_OBJECT (self, "CEA608", gstanc.data, gstanc.data_count);
- gst_buffer_add_video_caption_meta (*buffer,
- GST_VIDEO_CAPTION_TYPE_CEA608_S334_1A, gstanc.data,
- gstanc.data_count);
- break;
- default:
- /* otherwise continue looking */
- continue;
- }
+ /* First check last known lines, if any */
+ if (self->last_cc_vbi_line > 0) {
+ extract_vbi_line (self, buffer, vanc_frame, 0, self->last_cc_vbi_line,
+ &found_cc, &found_afd_bar);
+ }
+ if (self->last_afd_bar_vbi_line > 0
+ && self->last_cc_vbi_line != self->last_afd_bar_vbi_line) {
+ extract_vbi_line (self, buffer, vanc_frame, 0, self->last_afd_bar_vbi_line,
+ &found_cc, &found_afd_bar);
+ }
- found = TRUE;
- self->last_cc_vbi_line = fi;
+ if (!found_cc)
+ self->last_cc_vbi_line = -1;
+ if (!found_afd_bar)
+ self->last_afd_bar_vbi_line = -1;
+
+ if ((self->output_cc && !found_cc) || (self->output_afd_bar
+ && !found_afd_bar)) {
+ /* Otherwise loop through the first 21 lines and hope to find the data */
+ /* FIXME: For the different formats the number of lines that can contain
+ * VANC are different */
+ for (line = 1; line < 22; line++) {
+ extract_vbi_line (self, buffer, vanc_frame, 0, line, &found_cc,
+ &found_afd_bar);
+
+ /* If we found everything we wanted to extract, stop here */
+ if ((!self->output_cc || found_cc) &&
+ (!self->output_afd_bar || found_afd_bar))
break;
- }
}
+ }
- /* If we didn't find it at the previous line, start again searching from
- * line 1 onwards */
- if (!found && (gint) self->last_cc_vbi_line != -1) {
- self->last_cc_vbi_line = -1;
- fi = 1;
- } else {
- fi++;
+ /* Do the same for field 2 in case of interlaced content */
+ if (GST_VIDEO_INFO_IS_INTERLACED (&self->info)) {
+ gboolean found_cc_field2 = FALSE, found_afd_bar_field2 = FALSE;
+ guint field2_offset = 0;
+
+ /* The VANC lines for the second field are at an offset, depending on
+ * the format in use
+ */
+ switch (self->info.height) {
+ case 486:
+ /* NTSC: 525 / 2 + 1 */
+ field2_offset = 263;
+ break;
+ case 576:
+ /* PAL: 625 / 2 + 1 */
+ field2_offset = 313;
+ break;
+ case 1080:
+ /* 1080i: 1125 / 2 + 1 */
+ field2_offset = 563;
+ break;
+ default:
+ g_assert_not_reached ();
}
- }
- if (!found)
- self->last_cc_vbi_line = -1;
+ /* First try the same lines as for field 1 if we don't know yet */
+ if (self->last_cc_vbi_line_field2 <= 0)
+ self->last_cc_vbi_line_field2 = self->last_cc_vbi_line;
+ if (self->last_afd_bar_vbi_line_field2 <= 0)
+ self->last_afd_bar_vbi_line_field2 = self->last_afd_bar_vbi_line;
+
+ if (self->last_cc_vbi_line_field2 > 0) {
+ extract_vbi_line (self, buffer, vanc_frame, field2_offset,
+ self->last_cc_vbi_line_field2, &found_cc_field2,
+ &found_afd_bar_field2);
+ }
+ if (self->last_afd_bar_vbi_line_field2 > 0
+ && self->last_cc_vbi_line_field2 !=
+ self->last_afd_bar_vbi_line_field2) {
+ extract_vbi_line (self, buffer, vanc_frame, field2_offset,
+ self->last_afd_bar_vbi_line_field2, &found_cc_field2,
+ &found_afd_bar_field2);
+ }
+
+ if (!found_cc_field2)
+ self->last_cc_vbi_line_field2 = -1;
+ if (!found_afd_bar_field2)
+ self->last_afd_bar_vbi_line_field2 = -1;
+
+ if (((self->output_cc && !found_cc_field2) || (self->output_afd_bar
+ && !found_afd_bar_field2))) {
+ for (line = 1; line < 22; line++) {
+ extract_vbi_line (self, buffer, vanc_frame, field2_offset, line,
+ &found_cc_field2, &found_afd_bar_field2);
+
+ /* If we found everything we wanted to extract, stop here */
+ if ((!self->output_cc || found_cc_field2) &&
+ (!self->output_afd_bar || found_afd_bar_field2))
+ break;
+ }
+ }
+ }
vanc_frame->Release ();
}
@@ -1096,20 +1257,25 @@ gst_decklink_video_src_create (GstPushSrc * bsrc, GstBuffer ** buffer)
running_time = gst_segment_to_running_time (&GST_BASE_SRC (self)->segment,
GST_FORMAT_TIME, f.timestamp);
- msg = gst_message_new_qos (GST_OBJECT (self), TRUE, running_time, f.stream_timestamp,
- f.timestamp, f.duration);
+ msg =
+ gst_message_new_qos (GST_OBJECT (self), TRUE, running_time,
+ f.stream_timestamp, f.timestamp, f.duration);
gst_message_set_qos_stats (msg, GST_FORMAT_TIME, self->processed,
self->dropped);
gst_element_post_message (GST_ELEMENT (self), msg);
}
if (self->first_stream_time == GST_CLOCK_TIME_NONE)
self->first_stream_time = f.stream_timestamp;
- self->processed = f.stream_timestamp - self->dropped - self->first_stream_time;
+ self->processed =
+ f.stream_timestamp - self->dropped - self->first_stream_time;
self->expected_stream_time = f.stream_timestamp + f.stream_duration;
g_mutex_unlock (&self->lock);
if (caps_changed) {
self->last_cc_vbi_line = -1;
+ self->last_afd_bar_vbi_line = -1;
+ self->last_cc_vbi_line_field2 = -1;
+ self->last_afd_bar_vbi_line_field2 = -1;
caps = gst_decklink_mode_get_caps (f.mode, f.format, TRUE);
gst_video_info_from_caps (&self->info, caps);
gst_base_src_set_caps (GST_BASE_SRC_CAST (bsrc), caps);
@@ -1158,8 +1324,8 @@ gst_decklink_video_src_create (GstPushSrc * bsrc, GstBuffer ** buffer)
// If we have a format that supports VANC and we are asked to extract CC,
// then do it here.
- if (self->output_cc && !self->no_signal)
- extract_cc_from_vbi (self, buffer, vf, mode);
+ if ((self->output_cc || self->output_afd_bar) && !self->no_signal)
+ extract_vbi (self, buffer, vf, mode);
if (f.no_signal)
GST_BUFFER_FLAG_SET (*buffer, GST_BUFFER_FLAG_GAP);
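
On the capture side, with output-afd-bar=true the extracted AFD/Bar packets end up as
GstVideoAFDMeta / GstVideoBarMeta on the outgoing buffers. A minimal consumer sketch
(probe/appsink wiring omitted; only API already used elsewhere in this patch):

    static void
    inspect_afd_bar (GstBuffer * buffer)
    {
      gpointer iter = NULL;
      GstMeta *meta;

      while ((meta = gst_buffer_iterate_meta_filtered (buffer, &iter,
                  GST_VIDEO_AFD_META_API_TYPE))) {
        GstVideoAFDMeta *afd = (GstVideoAFDMeta *) meta;
        g_print ("field %d: AFD code %d\n", afd->field, afd->afd);
      }

      iter = NULL;
      while ((meta = gst_buffer_iterate_meta_filtered (buffer, &iter,
                  GST_VIDEO_BAR_META_API_TYPE))) {
        GstVideoBarMeta *bar = (GstVideoBarMeta *) meta;
        g_print ("field %d: %s, bar1=%u, bar2=%u\n", bar->field,
            bar->is_letterbox ? "letterbox" : "pillarbox",
            bar->bar_data1, bar->bar_data2);
      }
    }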
diff --git a/sys/decklink/gstdecklinkvideosrc.h b/sys/decklink/gstdecklinkvideosrc.h
index 1ac960230..eb3b54803 100644
--- a/sys/decklink/gstdecklinkvideosrc.h
+++ b/sys/decklink/gstdecklinkvideosrc.h
@@ -99,7 +99,11 @@ struct _GstDecklinkVideoSrc
GstVideoVBIParser *vbiparser;
GstVideoFormat anc_vformat;
gboolean output_cc;
- guint last_cc_vbi_line;
+ gint last_cc_vbi_line;
+ gint last_cc_vbi_line_field2;
+ gboolean output_afd_bar;
+ gint last_afd_bar_vbi_line;
+ gint last_afd_bar_vbi_line_field2;
};
struct _GstDecklinkVideoSrcClass