diff options
author | He Junyan <junyan.he@intel.com> | 2021-01-19 15:36:29 +0800 |
---|---|---|
committer | He Junyan <junyan.he@intel.com> | 2021-02-23 13:53:43 +0800 |
commit | e15be4cf0fd686ad1bdcb31421c2c88a1703aeac (patch) | |
tree | 7fc8de8d3726f5ddaec63e10067de77b1a1a1d7f /sys | |
parent | a119a940e4127bb764008b5ec5e8d7eaa9e50b5a (diff) | |
download | gstreamer-plugins-bad-e15be4cf0fd686ad1bdcb31421c2c88a1703aeac.tar.gz |
VA: Add the vaav1dec element as the av1 decoder.
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1636>
Diffstat (limited to 'sys')
-rw-r--r-- | sys/va/gstvaav1dec.c | 1008 | ||||
-rw-r--r-- | sys/va/gstvaav1dec.h | 33 | ||||
-rw-r--r-- | sys/va/gstvabasedec.h | 3 | ||||
-rw-r--r-- | sys/va/gstvaprofile.c | 4 | ||||
-rw-r--r-- | sys/va/meson.build | 6 | ||||
-rw-r--r-- | sys/va/plugin.c | 10 |
6 files changed, 1062 insertions, 2 deletions
diff --git a/sys/va/gstvaav1dec.c b/sys/va/gstvaav1dec.c new file mode 100644 index 000000000..d0a1e0d17 --- /dev/null +++ b/sys/va/gstvaav1dec.c @@ -0,0 +1,1008 @@ +/* GStreamer + * Copyright (C) 2020 Intel Corporation + * Author: He Junyan <junyan.he@intel.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the0 + * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, + * Boston, MA 02110-1301, USA. + */ + +/** + * SECTION:element-vaav1dec + * @title: vaav1dec + * @short_description: A VA-API based AV1 video decoder + * + * vaav1dec decodes AV1 bitstreams to VA surfaces using the + * installed and chosen [VA-API](https://01.org/linuxmedia/vaapi) + * driver. + * + * The decoding surfaces can be mapped onto main memory as video + * frames. + * + * ## Example launch line + * ``` + * gst-launch-1.0 filesrc location=sample.av1 ! ivfparse ! av1parse ! vaav1dec ! 
autovideosink + * ``` + * + * Since: 1.20 + * + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <gst/codecs/gstav1decoder.h> +#include "gstvaav1dec.h" +#include "gstvabasedec.h" +#include "gstvaallocator.h" + +GST_DEBUG_CATEGORY_STATIC (gst_va_av1dec_debug); +#ifndef GST_DISABLE_GST_DEBUG +#define GST_CAT_DEFAULT gst_va_av1dec_debug +#else +#define GST_CAT_DEFAULT NULL +#endif + +#define GST_VA_AV1_DEC(obj) ((GstVaAV1Dec *) obj) +#define GST_VA_AV1_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaAV1DecClass)) +#define GST_VA_AV1_DEC_CLASS(klass) ((GstVaAV1DecClass *) klass) + +typedef struct _GstVaAV1Dec GstVaAV1Dec; +typedef struct _GstVaAV1DecClass GstVaAV1DecClass; + +struct _GstVaAV1DecClass +{ + GstVaBaseDecClass parent_class; +}; + +struct _GstVaAV1Dec +{ + GstVaBaseDec parent; + + GstFlowReturn last_ret; + + GstAV1SequenceHeaderOBU seq; + gint max_width; + gint max_height; + + gboolean need_negotiation; +}; + +#define parent_class gst_va_base_dec_parent_class +extern gpointer gst_va_base_dec_parent_class; + +/* *INDENT-OFF* */ +static const gchar *src_caps_str = GST_VIDEO_CAPS_MAKE_WITH_FEATURES ("memory:VAMemory", + "{ NV12, P010_10LE }") " ;" GST_VIDEO_CAPS_MAKE ("{ NV12, P010_10LE }"); +/* *INDENT-ON* */ + +static const gchar *sink_caps_str = "video/x-av1"; + +static gboolean +gst_va_av1_dec_negotiate (GstVideoDecoder * decoder) +{ + GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder); + GstVaBaseDec *base = GST_VA_BASE_DEC (decoder); + GstAV1Decoder *av1dec = GST_AV1_DECODER (decoder); + GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN; + GstCapsFeatures *capsfeatures = NULL; + + /* Ignore downstream renegotiation request. 
*/ + if (!self->need_negotiation) + return TRUE; + + self->need_negotiation = FALSE; + + /* Do not re-create the context if only the frame size changes */ + if (gst_va_decoder_format_changed (base->decoder, base->profile, + base->rt_format, self->max_width, self->max_height)) { + if (gst_va_decoder_is_open (base->decoder) + && !gst_va_decoder_close (base->decoder)) + return FALSE; + + if (!gst_va_decoder_open (base->decoder, base->profile, base->rt_format)) + return FALSE; + + if (!gst_va_decoder_set_format (base->decoder, self->max_width, + self->max_height, NULL)) + return FALSE; + } + + if (base->output_state) + gst_video_codec_state_unref (base->output_state); + + gst_va_base_dec_get_preferred_format_and_caps_features (base, &format, + &capsfeatures); + + base->output_state = gst_video_decoder_set_output_state (decoder, format, + base->width, base->height, av1dec->input_state); + + base->output_state->caps = gst_video_info_to_caps (&base->output_state->info); + if (capsfeatures) + gst_caps_set_features_simple (base->output_state->caps, capsfeatures); + + GST_INFO_OBJECT (self, "Negotiated caps %" GST_PTR_FORMAT, + base->output_state->caps); + + return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder); +} + +static GstCaps * +_complete_sink_caps (GstCaps * sinkcaps) +{ + GstCaps *caps = gst_caps_copy (sinkcaps); + GValue val = G_VALUE_INIT; + + g_value_init (&val, G_TYPE_STRING); + g_value_set_string (&val, "frame"); + gst_caps_set_value (caps, "alignment", &val); + g_value_unset (&val); + + return caps; +} + +static VAProfile +_get_profile (GstVaAV1Dec * self, const GstAV1SequenceHeaderOBU * seq_hdr) +{ + GstVaBaseDec *base = GST_VA_BASE_DEC (self); + VAProfile profile = VAProfileNone; + + switch (seq_hdr->seq_profile) { + case GST_AV1_PROFILE_0: + profile = VAProfileAV1Profile0; + break; + case GST_AV1_PROFILE_1: + profile = VAProfileAV1Profile1; + break; + default: + GST_ERROR_OBJECT (self, "Unsupported av1 profile value %d", + 
seq_hdr->seq_profile); + return VAProfileNone; + } + + if (!gst_va_decoder_has_profile (base->decoder, profile)) { + GST_ERROR_OBJECT (self, "Profile %s is not supported by HW", + gst_va_profile_name (profile)); + return VAProfileNone; + } + + return profile; +} + +static guint +_get_rtformat (GstVaAV1Dec * self, VAProfile profile, + const GstAV1SequenceHeaderOBU * seq_header) +{ + /* 6.4.1: + seq_profile Bit depth Monochrome support Chroma subsampling + 0 8 or 10 Yes YUV 4:2:0 + 1 8 or 10 No YUV 4:4:4 + 2 8 or 10 Yes YUV 4:2:2 + 2 12 Yes YUV 4:2:0,YUV 4:2:2,YUV 4:4:4 + */ + + /* TODO: consider Monochrome case. Just return 4:2:0 for Monochrome now. */ + switch (profile) { + case VAProfileAV1Profile0: + if (seq_header->bit_depth == 8) { + return VA_RT_FORMAT_YUV420; + } else if (seq_header->bit_depth == 10) { + return VA_RT_FORMAT_YUV420_10; + } + break; + case VAProfileAV1Profile1: + if (seq_header->bit_depth == 8) { + return VA_RT_FORMAT_YUV444; + } else if (seq_header->bit_depth == 10) { + return VA_RT_FORMAT_YUV444_10; + } + break; + default: + break; + } + + GST_ERROR_OBJECT (self, "Fail to find rtformat for profile:%s, bit_depth:%d", + gst_va_profile_name (profile), seq_header->bit_depth); + return 0; +} + +static GstCaps * +gst_va_av1_dec_getcaps (GstVideoDecoder * decoder, GstCaps * filter) +{ + GstCaps *sinkcaps, *caps = NULL, *tmp; + GstVaBaseDec *base = GST_VA_BASE_DEC (decoder); + + if (base->decoder) + caps = gst_va_decoder_get_sinkpad_caps (base->decoder); + + if (caps) { + sinkcaps = _complete_sink_caps (caps); + gst_caps_unref (caps); + if (filter) { + tmp = gst_caps_intersect_full (filter, sinkcaps, + GST_CAPS_INTERSECT_FIRST); + gst_caps_unref (sinkcaps); + caps = tmp; + } else { + caps = sinkcaps; + } + GST_LOG_OBJECT (base, "Returning caps %" GST_PTR_FORMAT, caps); + } else if (!caps) { + caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter); + } + + return caps; +} + +static gboolean +gst_va_av1_dec_new_sequence (GstAV1Decoder * 
decoder, + const GstAV1SequenceHeaderOBU * seq_hdr) +{ + GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder); + GstVaBaseDec *base = GST_VA_BASE_DEC (decoder); + VAProfile profile; + guint rt_format; + + GST_LOG_OBJECT (self, "new sequence"); + + profile = _get_profile (self, seq_hdr); + if (profile == VAProfileNone) + return FALSE; + + rt_format = _get_rtformat (self, profile, seq_hdr); + if (!rt_format) + return FALSE; + + self->seq = *seq_hdr; + + if (gst_va_decoder_format_changed (base->decoder, profile, + rt_format, seq_hdr->max_frame_width_minus_1 + 1, + seq_hdr->max_frame_height_minus_1 + 1)) { + base->profile = profile; + base->rt_format = rt_format; + self->max_width = seq_hdr->max_frame_width_minus_1 + 1; + self->max_height = seq_hdr->max_frame_height_minus_1 + 1; + self->need_negotiation = TRUE; + + base->min_buffers = 7 + 4; /* dpb size + scratch surfaces */ + + /* May be changed by frame header */ + base->width = self->max_width; + base->height = self->max_height; + base->need_valign = FALSE; + } + + return TRUE; +} + +static gboolean +gst_va_av1_dec_new_picture (GstAV1Decoder * decoder, + GstVideoCodecFrame * frame, GstAV1Picture * picture) +{ + GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder); + GstVaBaseDec *base = GST_VA_BASE_DEC (decoder); + GstVaDecodePicture *pic; + GstVideoDecoder *vdec = GST_VIDEO_DECODER (decoder); + GstAV1FrameHeaderOBU *frame_hdr = &picture->frame_hdr; + + if (frame_hdr->upscaled_width != base->width + || frame_hdr->frame_height != base->height) { + base->width = frame_hdr->upscaled_width; + base->height = frame_hdr->frame_height; + + if (base->width < self->max_width || base->height < self->max_height) { + base->need_valign = TRUE; + /* *INDENT-OFF* */ + base->valign = (GstVideoAlignment){ + .padding_bottom = self->max_height - base->height, + .padding_left = self->max_width - base->width, + }; + /* *INDENT-ON* */ + } + + self->need_negotiation = TRUE; + } + + if (self->need_negotiation) { + if (!gst_video_decoder_negotiate 
(GST_VIDEO_DECODER (self))) { + GST_ERROR_OBJECT (self, "Failed to negotiate with downstream"); + return FALSE; + } + } + + self->last_ret = gst_video_decoder_allocate_output_frame (vdec, frame); + if (self->last_ret != GST_FLOW_OK) { + GST_WARNING_OBJECT (self, + "Failed to allocated output buffer, return %s", + gst_flow_get_name (self->last_ret)); + return FALSE; + } + + if (picture->apply_grain) { + if (!gst_va_buffer_create_aux_surface (frame->output_buffer)) { + GST_WARNING_OBJECT (self, + "Failed to allocated aux surface for buffer %p", + frame->output_buffer); + return FALSE; + } + } + + pic = gst_va_decode_picture_new (base->decoder, frame->output_buffer); + + gst_av1_picture_set_user_data (picture, pic, + (GDestroyNotify) gst_va_decode_picture_free); + + if (picture->apply_grain) { + GST_LOG_OBJECT (self, "New va decode picture %p - %#x(aux: %#x)", pic, + gst_va_decode_picture_get_surface (pic), + gst_va_decode_picture_get_aux_surface (pic)); + } else { + GST_LOG_OBJECT (self, "New va decode picture %p - %#x", pic, + gst_va_decode_picture_get_surface (pic)); + } + + return TRUE; +} + +static GstAV1Picture * +gst_va_av1_dec_duplicate_picture (GstAV1Decoder * decoder, + GstAV1Picture * picture) +{ + GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder); + GstVaBaseDec *base = GST_VA_BASE_DEC (decoder); + GstVaDecodePicture *pic; + GstVaDecodePicture *new_pic; + GstAV1Picture *new_picture; + + pic = gst_av1_picture_get_user_data (picture); + if (!pic) { + GST_ERROR_OBJECT (self, "Parent picture does not have a va picture"); + return NULL; + } + + new_picture = gst_av1_picture_new (); + g_assert (pic->gstbuffer); + new_pic = gst_va_decode_picture_new (base->decoder, pic->gstbuffer); + + GST_LOG_OBJECT (self, "Duplicate output with buffer %" GST_PTR_FORMAT + " (surface %#x)", pic, gst_va_decode_picture_get_surface (pic)); + + gst_av1_picture_set_user_data (new_picture, new_pic, + (GDestroyNotify) gst_va_decode_picture_free); + + return new_picture; +} + +static void 
+_setup_segment_info (VADecPictureParameterBufferAV1 * pic_param, + GstAV1FrameHeaderOBU * frame_header) +{ + guint i, j; + uint8_t feature_mask; + + for (i = 0; i < GST_AV1_MAX_SEGMENTS; i++) + for (j = 0; j < GST_AV1_SEG_LVL_MAX; j++) + pic_param->seg_info.feature_data[i][j] = + frame_header->segmentation_params.feature_data[i][j]; + + for (i = 0; i < GST_AV1_MAX_SEGMENTS; i++) { + feature_mask = 0; + for (j = 0; j < GST_AV1_SEG_LVL_MAX; j++) { + if (frame_header->segmentation_params.feature_enabled[i][j]) + feature_mask |= 1 << j; + } + pic_param->seg_info.feature_mask[i] = feature_mask; + } +} + +static void +_setup_film_grain_info (VADecPictureParameterBufferAV1 * pic_param, + GstAV1FrameHeaderOBU * frame_header) +{ + guint i; + + if (!frame_header->film_grain_params.apply_grain) + return; + + pic_param->film_grain_info.num_y_points = + frame_header->film_grain_params.num_y_points; + for (i = 0; i < frame_header->film_grain_params.num_y_points; i++) { + pic_param->film_grain_info.point_y_value[i] = + frame_header->film_grain_params.point_y_value[i]; + pic_param->film_grain_info.point_y_scaling[i] = + frame_header->film_grain_params.point_y_scaling[i]; + } + + pic_param->film_grain_info.num_cb_points = + frame_header->film_grain_params.num_cb_points; + for (i = 0; i < frame_header->film_grain_params.num_cb_points; i++) { + pic_param->film_grain_info.point_cb_value[i] = + frame_header->film_grain_params.point_cb_value[i]; + pic_param->film_grain_info.point_cb_scaling[i] = + frame_header->film_grain_params.point_cb_scaling[i]; + } + + pic_param->film_grain_info.num_cr_points = + frame_header->film_grain_params.num_cr_points; + for (i = 0; i < frame_header->film_grain_params.num_cr_points; i++) { + pic_param->film_grain_info.point_cr_value[i] = + frame_header->film_grain_params.point_cr_value[i]; + pic_param->film_grain_info.point_cr_scaling[i] = + frame_header->film_grain_params.point_cr_scaling[i]; + } + + + if (pic_param->film_grain_info.num_y_points) { + for 
(i = 0; i < 24; i++) { + pic_param->film_grain_info.ar_coeffs_y[i] = + frame_header->film_grain_params.ar_coeffs_y_plus_128[i] - 128; + } + } + if (frame_header->film_grain_params.chroma_scaling_from_luma + || pic_param->film_grain_info.num_cb_points) { + for (i = 0; i < GST_AV1_MAX_NUM_POS_LUMA; i++) { + pic_param->film_grain_info.ar_coeffs_cb[i] = + frame_header->film_grain_params.ar_coeffs_cb_plus_128[i] - 128; + } + } + if (frame_header->film_grain_params.chroma_scaling_from_luma + || pic_param->film_grain_info.num_cr_points) { + for (i = 0; i < GST_AV1_MAX_NUM_POS_LUMA; i++) { + pic_param->film_grain_info.ar_coeffs_cr[i] = + frame_header->film_grain_params.ar_coeffs_cr_plus_128[i] - 128; + } + } +} + +static void +_setup_loop_filter_info (VADecPictureParameterBufferAV1 * pic_param, + GstAV1FrameHeaderOBU * frame_header) +{ + guint i; + + pic_param->filter_level[0] = + frame_header->loop_filter_params.loop_filter_level[0]; + pic_param->filter_level[1] = + frame_header->loop_filter_params.loop_filter_level[1]; + pic_param->filter_level_u = + frame_header->loop_filter_params.loop_filter_level[2]; + pic_param->filter_level_v = + frame_header->loop_filter_params.loop_filter_level[3]; + + for (i = 0; i < GST_AV1_TOTAL_REFS_PER_FRAME; i++) + pic_param->ref_deltas[i] = + frame_header->loop_filter_params.loop_filter_ref_deltas[i]; + for (i = 0; i < 2; i++) + pic_param->mode_deltas[i] = + frame_header->loop_filter_params.loop_filter_mode_deltas[i]; +} + +static void +_setup_quantization_info (VADecPictureParameterBufferAV1 * pic_param, + GstAV1FrameHeaderOBU * frame_header) +{ + pic_param->qmatrix_fields.bits.using_qmatrix = + frame_header->quantization_params.using_qmatrix; + if (frame_header->quantization_params.using_qmatrix) { + pic_param->qmatrix_fields.bits.qm_y = + frame_header->quantization_params.qm_y; + pic_param->qmatrix_fields.bits.qm_u = + frame_header->quantization_params.qm_u; + pic_param->qmatrix_fields.bits.qm_v = + 
frame_header->quantization_params.qm_v; + } else { + pic_param->qmatrix_fields.bits.qm_y = 0; + pic_param->qmatrix_fields.bits.qm_u = 0; + pic_param->qmatrix_fields.bits.qm_v = 0; + } +} + +static void +_setup_cdef_info (VADecPictureParameterBufferAV1 * pic_param, + GstAV1FrameHeaderOBU * frame_header, guint8 num_planes) +{ + guint8 sec_strength; + guint i; + + pic_param->cdef_damping_minus_3 = frame_header->cdef_params.cdef_damping - 3; + pic_param->cdef_bits = frame_header->cdef_params.cdef_bits; + for (i = 0; i < GST_AV1_CDEF_MAX; i++) { + sec_strength = frame_header->cdef_params.cdef_y_sec_strength[i]; + g_assert (sec_strength <= 4); + /* may need to minus 1 in order to merge with primary value. */ + if (sec_strength == 4) + sec_strength--; + + pic_param->cdef_y_strengths[i] = + ((frame_header->cdef_params.cdef_y_pri_strength[i] & 0xf) << 2) | + (sec_strength & 0x03); + } + if (num_planes > 1) { + for (i = 0; i < GST_AV1_CDEF_MAX; i++) { + sec_strength = frame_header->cdef_params.cdef_uv_sec_strength[i]; + g_assert (sec_strength <= 4); + /* may need to minus 1 in order to merge with primary value. 
*/ + if (sec_strength == 4) + sec_strength--; + + pic_param->cdef_uv_strengths[i] = + ((frame_header->cdef_params.cdef_uv_pri_strength[i] & 0xf) << 2) | + (sec_strength & 0x03); + } + } else { + for (i = 0; i < GST_AV1_CDEF_MAX; i++) { + pic_param->cdef_uv_strengths[i] = 0; + } + } +} + +static void +_setup_global_motion_info (VADecPictureParameterBufferAV1 * pic_param, + GstAV1FrameHeaderOBU * frame_header) +{ + guint i, j; + + for (i = 0; i < 7; i++) { + pic_param->wm[i].wmtype = + frame_header->global_motion_params.gm_type[GST_AV1_REF_LAST_FRAME + i]; + + for (j = 0; j < 6; j++) + pic_param->wm[i].wmmat[j] = + frame_header->global_motion_params.gm_params + [GST_AV1_REF_LAST_FRAME + i][j]; + + pic_param->wm[i].wmmat[6] = 0; + pic_param->wm[i].wmmat[7] = 0; + + pic_param->wm[i].invalid = + frame_header->global_motion_params.invalid[GST_AV1_REF_LAST_FRAME + i]; + } +} + +static gboolean +gst_va_av1_dec_start_picture (GstAV1Decoder * decoder, GstAV1Picture * picture, + GstAV1Dpb * dpb) +{ + GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder); + GstVaBaseDec *base = GST_VA_BASE_DEC (decoder); + GstAV1FrameHeaderOBU *frame_header = &picture->frame_hdr; + GstAV1SequenceHeaderOBU *seq_header = &self->seq; + VADecPictureParameterBufferAV1 pic_param = { }; + GstVaDecodePicture *va_pic; + guint i; + + va_pic = gst_av1_picture_get_user_data (picture); + g_assert (va_pic); + + /* *INDENT-OFF* */ + pic_param = (VADecPictureParameterBufferAV1){ + .profile = seq_header->seq_profile, + .order_hint_bits_minus_1 = seq_header->order_hint_bits_minus_1, + .matrix_coefficients = seq_header->color_config.matrix_coefficients, + .seq_info_fields.fields = { + .still_picture = seq_header->still_picture, + .use_128x128_superblock = seq_header->use_128x128_superblock, + .enable_filter_intra = seq_header->enable_filter_intra, + .enable_intra_edge_filter = seq_header->enable_intra_edge_filter, + .enable_interintra_compound = seq_header->enable_interintra_compound, + .enable_masked_compound = 
seq_header->enable_masked_compound, + .enable_dual_filter = seq_header->enable_dual_filter, + .enable_order_hint = seq_header->enable_order_hint, + .enable_jnt_comp = seq_header->enable_jnt_comp, + .enable_cdef = seq_header->enable_cdef, + .mono_chrome = seq_header->color_config.mono_chrome, + .color_range = seq_header->color_config.color_range, + .subsampling_x = seq_header->color_config.subsampling_x, + .subsampling_y = seq_header->color_config.subsampling_y, + .chroma_sample_position = 0, + .film_grain_params_present = seq_header->film_grain_params_present, + }, + .anchor_frames_num = 0, + .anchor_frames_list = NULL, + .frame_width_minus1 = frame_header->upscaled_width - 1, + .frame_height_minus1 = frame_header->frame_height - 1, + .output_frame_width_in_tiles_minus_1 = 0, + .output_frame_height_in_tiles_minus_1 = 0, + .order_hint = frame_header->order_hint, + /* Segmentation */ + .seg_info.segment_info_fields.bits = { + .enabled = frame_header->segmentation_params.segmentation_enabled, + .update_map = frame_header->segmentation_params.segmentation_update_map, + .temporal_update = + frame_header->segmentation_params.segmentation_temporal_update, + .update_data = + frame_header->segmentation_params.segmentation_update_data, + }, + /* FilmGrain */ + .film_grain_info = { + .film_grain_info_fields.bits = { + .apply_grain = frame_header->film_grain_params.apply_grain, + .chroma_scaling_from_luma = + frame_header->film_grain_params.chroma_scaling_from_luma, + .grain_scaling_minus_8 = + frame_header->film_grain_params.grain_scaling_minus_8, + .ar_coeff_lag = frame_header->film_grain_params.ar_coeff_lag, + .ar_coeff_shift_minus_6 = + frame_header->film_grain_params.ar_coeff_shift_minus_6, + .grain_scale_shift = frame_header->film_grain_params.grain_scale_shift, + .overlap_flag = frame_header->film_grain_params.overlap_flag, + .clip_to_restricted_range = + frame_header->film_grain_params.clip_to_restricted_range, + }, + .grain_seed = 
frame_header->film_grain_params.grain_seed, + .cb_mult = frame_header->film_grain_params.cb_mult, + .cb_luma_mult = frame_header->film_grain_params.cb_luma_mult, + .cb_offset = frame_header->film_grain_params.cb_offset, + .cr_mult = frame_header->film_grain_params.cr_mult, + .cr_luma_mult = frame_header->film_grain_params.cr_luma_mult, + .cr_offset = frame_header->film_grain_params.cr_offset, + }, + .tile_cols = frame_header->tile_info.tile_cols, + .tile_rows = frame_header->tile_info.tile_rows, + .context_update_tile_id = frame_header->tile_info.context_update_tile_id, + .pic_info_fields.bits = { + .frame_type = frame_header->frame_type, + .show_frame = frame_header->show_frame, + .showable_frame = frame_header->showable_frame, + .error_resilient_mode = frame_header->error_resilient_mode, + .disable_cdf_update = frame_header->disable_cdf_update, + .allow_screen_content_tools = frame_header->allow_screen_content_tools, + .force_integer_mv = frame_header->force_integer_mv, + .allow_intrabc = frame_header->allow_intrabc, + .use_superres = frame_header->use_superres, + .allow_high_precision_mv = frame_header->allow_high_precision_mv, + .is_motion_mode_switchable = frame_header->is_motion_mode_switchable, + .use_ref_frame_mvs = frame_header->use_ref_frame_mvs, + .disable_frame_end_update_cdf = + frame_header->disable_frame_end_update_cdf, + .uniform_tile_spacing_flag = + frame_header->tile_info.uniform_tile_spacing_flag, + .allow_warped_motion = frame_header->allow_warped_motion, + }, + .superres_scale_denominator = frame_header->superres_denom, + .interp_filter = frame_header->interpolation_filter, + /* loop filter */ + .loop_filter_info_fields.bits = { + .sharpness_level = + frame_header->loop_filter_params.loop_filter_sharpness, + .mode_ref_delta_enabled = + frame_header->loop_filter_params.loop_filter_delta_enabled, + .mode_ref_delta_update = + frame_header->loop_filter_params.loop_filter_delta_update, + }, + .mode_control_fields.bits = { + .delta_lf_present_flag = 
+ frame_header->loop_filter_params.delta_lf_present, + .log2_delta_lf_res = frame_header->loop_filter_params.delta_lf_res, + .delta_lf_multi = frame_header->loop_filter_params.delta_lf_multi, + }, + /* quantization */ + .base_qindex = frame_header->quantization_params.base_q_idx, + .y_dc_delta_q = frame_header->quantization_params.delta_q_y_dc, + .u_dc_delta_q = frame_header->quantization_params.delta_q_u_dc, + .u_ac_delta_q = frame_header->quantization_params.delta_q_u_ac, + .v_dc_delta_q = frame_header->quantization_params.delta_q_v_dc, + .v_ac_delta_q = frame_header->quantization_params.delta_q_v_ac, + .mode_control_fields.bits = { + .delta_q_present_flag = + frame_header->quantization_params.delta_q_present, + .log2_delta_q_res = frame_header->quantization_params.delta_q_res, + .tx_mode = frame_header->tx_mode, + .reference_select = frame_header->reference_select, + .reduced_tx_set_used = frame_header->reduced_tx_set, + .skip_mode_present = frame_header->skip_mode_present, + }, + /* loop restoration */ + .loop_restoration_fields.bits = { + .yframe_restoration_type = + frame_header->loop_restoration_params.frame_restoration_type[0], + .cbframe_restoration_type = + frame_header->loop_restoration_params.frame_restoration_type[1], + .crframe_restoration_type = + frame_header->loop_restoration_params.frame_restoration_type[2], + .lr_unit_shift = frame_header->loop_restoration_params.lr_unit_shift, + .lr_uv_shift = frame_header->loop_restoration_params.lr_uv_shift, + }, + }; + /* *INDENT-ON* */ + + if (seq_header->bit_depth == 8) { + pic_param.bit_depth_idx = 0; + } else if (seq_header->bit_depth == 10) { + pic_param.bit_depth_idx = 1; + } else if (seq_header->bit_depth == 12) { + pic_param.bit_depth_idx = 2; + } else { + g_assert (0); + } + + if (frame_header->film_grain_params.apply_grain) { + pic_param.current_frame = gst_va_decode_picture_get_aux_surface (va_pic); + pic_param.current_display_picture = + gst_va_decode_picture_get_surface (va_pic); + } else { + 
pic_param.current_frame = gst_va_decode_picture_get_surface (va_pic); + pic_param.current_display_picture = VA_INVALID_SURFACE; + } + + for (i = 0; i < GST_AV1_NUM_REF_FRAMES; i++) { + if (dpb->pic_list[i]) { + if (dpb->pic_list[i]->apply_grain) { + pic_param.ref_frame_map[i] = gst_va_decode_picture_get_aux_surface + (gst_av1_picture_get_user_data (dpb->pic_list[i])); + } else { + pic_param.ref_frame_map[i] = gst_va_decode_picture_get_surface + (gst_av1_picture_get_user_data (dpb->pic_list[i])); + } + } else { + pic_param.ref_frame_map[i] = VA_INVALID_SURFACE; + } + } + for (i = 0; i < GST_AV1_REFS_PER_FRAME; i++) { + pic_param.ref_frame_idx[i] = frame_header->ref_frame_idx[i]; + } + pic_param.primary_ref_frame = frame_header->primary_ref_frame; + + _setup_segment_info (&pic_param, frame_header); + _setup_film_grain_info (&pic_param, frame_header); + + for (i = 0; i < 63; i++) { + pic_param.width_in_sbs_minus_1[i] = + frame_header->tile_info.width_in_sbs_minus_1[i]; + pic_param.height_in_sbs_minus_1[i] = + frame_header->tile_info.height_in_sbs_minus_1[i]; + } + + _setup_loop_filter_info (&pic_param, frame_header); + _setup_quantization_info (&pic_param, frame_header); + _setup_cdef_info (&pic_param, frame_header, seq_header->num_planes); + _setup_global_motion_info (&pic_param, frame_header); + + if (!gst_va_decoder_add_param_buffer (base->decoder, va_pic, + VAPictureParameterBufferType, &pic_param, sizeof (pic_param))) + return FALSE; + + return TRUE; +} + +static gboolean +gst_va_av1_dec_decode_tile (GstAV1Decoder * decoder, GstAV1Picture * picture, + GstAV1Tile * tile) +{ + GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder); + GstVaBaseDec *base = GST_VA_BASE_DEC (decoder); + GstAV1TileGroupOBU *tile_group = &tile->tile_group; + GstVaDecodePicture *va_pic; + guint i; + VASliceParameterBufferAV1 slice_param[GST_AV1_MAX_TILE_COUNT]; + + GST_TRACE_OBJECT (self, "-"); + + for (i = 0; i < tile_group->tg_end - tile_group->tg_start + 1; i++) { + slice_param[i] = 
(VASliceParameterBufferAV1) { + }; + slice_param[i].slice_data_size = + tile_group->entry[tile_group->tg_start + i].tile_size; + slice_param[i].slice_data_offset = + tile_group->entry[tile_group->tg_start + i].tile_offset; + slice_param[i].tile_row = + tile_group->entry[tile_group->tg_start + i].tile_row; + slice_param[i].tile_column = + tile_group->entry[tile_group->tg_start + i].tile_col; + slice_param[i].slice_data_flag = 0; + } + + va_pic = gst_av1_picture_get_user_data (picture); + return gst_va_decoder_add_slice_buffer_with_n_params (base->decoder, va_pic, + slice_param, sizeof (VASliceParameterBufferAV1), i, tile->obu.data, + tile->obu.obu_size); +} + +static gboolean +gst_va_av1_dec_end_picture (GstAV1Decoder * decoder, GstAV1Picture * picture) +{ + GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder); + GstVaBaseDec *base = GST_VA_BASE_DEC (decoder); + GstVaDecodePicture *va_pic; + + GST_LOG_OBJECT (self, "end picture %p, (system_frame_number %d)", + picture, picture->system_frame_number); + + va_pic = gst_av1_picture_get_user_data (picture); + + return gst_va_decoder_decode_with_aux_surface (base->decoder, va_pic, + picture->apply_grain); +} + +static GstFlowReturn +gst_va_av1_dec_output_picture (GstAV1Decoder * decoder, + GstVideoCodecFrame * frame, GstAV1Picture * picture) +{ + GstVaAV1Dec *self = GST_VA_AV1_DEC (decoder); + GstVaBaseDec *base = GST_VA_BASE_DEC (decoder); + + g_assert (picture->frame_hdr.show_frame || + picture->frame_hdr.show_existing_frame); + + GST_LOG_OBJECT (self, + "Outputting picture %p (system_frame_number %d)", + picture, picture->system_frame_number); + + if (self->last_ret != GST_FLOW_OK) { + gst_av1_picture_unref (picture); + gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame); + return self->last_ret; + } + + if (picture->frame_hdr.show_existing_frame) { + GstVaDecodePicture *pic; + + g_assert (!frame->output_buffer); + pic = gst_av1_picture_get_user_data (picture); + frame->output_buffer = gst_buffer_ref 
(pic->gstbuffer); + } + + if (base->copy_frames) + gst_va_base_dec_copy_output_buffer (base, frame); + + gst_av1_picture_unref (picture); + + return gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame); +} + +static void +gst_va_av1_dec_init (GTypeInstance * instance, gpointer g_class) +{ + gst_va_base_dec_init (GST_VA_BASE_DEC (instance), GST_CAT_DEFAULT); +} + +static void +gst_va_av1_dec_dispose (GObject * object) +{ + gst_va_base_dec_close (GST_VIDEO_DECODER (object)); + G_OBJECT_CLASS (parent_class)->dispose (object); +} + +static void +gst_va_av1_dec_class_init (gpointer g_class, gpointer class_data) +{ + GstCaps *src_doc_caps, *sink_doc_caps; + GObjectClass *gobject_class = G_OBJECT_CLASS (g_class); + GstElementClass *element_class = GST_ELEMENT_CLASS (g_class); + GstAV1DecoderClass *av1decoder_class = GST_AV1_DECODER_CLASS (g_class); + GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (g_class); + struct CData *cdata = class_data; + gchar *long_name; + + if (cdata->description) { + long_name = g_strdup_printf ("VA-API AV1 Decoder in %s", + cdata->description); + } else { + long_name = g_strdup ("VA-API AV1 Decoder"); + } + + gst_element_class_set_metadata (element_class, long_name, + "Codec/Decoder/Video/Hardware", + "VA-API based AV1 video decoder", "He Junyan <junyan.he@intel.com>"); + + sink_doc_caps = gst_caps_from_string (sink_caps_str); + src_doc_caps = gst_caps_from_string (src_caps_str); + + gst_va_base_dec_class_init (GST_VA_BASE_DEC_CLASS (g_class), AV1, + cdata->render_device_path, cdata->sink_caps, cdata->src_caps, + src_doc_caps, sink_doc_caps); + + gobject_class->dispose = gst_va_av1_dec_dispose; + + decoder_class->getcaps = GST_DEBUG_FUNCPTR (gst_va_av1_dec_getcaps); + decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_va_av1_dec_negotiate); + + av1decoder_class->new_sequence = + GST_DEBUG_FUNCPTR (gst_va_av1_dec_new_sequence); + av1decoder_class->new_picture = + GST_DEBUG_FUNCPTR (gst_va_av1_dec_new_picture); + 
av1decoder_class->duplicate_picture = + GST_DEBUG_FUNCPTR (gst_va_av1_dec_duplicate_picture); + av1decoder_class->start_picture = + GST_DEBUG_FUNCPTR (gst_va_av1_dec_start_picture); + av1decoder_class->decode_tile = + GST_DEBUG_FUNCPTR (gst_va_av1_dec_decode_tile); + av1decoder_class->end_picture = + GST_DEBUG_FUNCPTR (gst_va_av1_dec_end_picture); + av1decoder_class->output_picture = + GST_DEBUG_FUNCPTR (gst_va_av1_dec_output_picture); + + g_free (long_name); + g_free (cdata->description); + g_free (cdata->render_device_path); + gst_caps_unref (cdata->src_caps); + gst_caps_unref (cdata->sink_caps); + g_free (cdata); +} + +static gpointer +_register_debug_category (gpointer data) +{ + GST_DEBUG_CATEGORY_INIT (gst_va_av1dec_debug, "vaav1dec", 0, + "VA AV1 decoder"); + + return NULL; +} + +gboolean +gst_va_av1_dec_register (GstPlugin * plugin, GstVaDevice * device, + GstCaps * sink_caps, GstCaps * src_caps, guint rank) +{ + static GOnce debug_once = G_ONCE_INIT; + GType type; + GTypeInfo type_info = { + .class_size = sizeof (GstVaAV1DecClass), + .class_init = gst_va_av1_dec_class_init, + .instance_size = sizeof (GstVaAV1Dec), + .instance_init = gst_va_av1_dec_init, + }; + struct CData *cdata; + gboolean ret; + gchar *type_name, *feature_name; + + g_return_val_if_fail (GST_IS_PLUGIN (plugin), FALSE); + g_return_val_if_fail (GST_IS_VA_DEVICE (device), FALSE); + g_return_val_if_fail (GST_IS_CAPS (sink_caps), FALSE); + g_return_val_if_fail (GST_IS_CAPS (src_caps), FALSE); + + cdata = g_new (struct CData, 1); + cdata->description = NULL; + cdata->render_device_path = g_strdup (device->render_device_path); + cdata->sink_caps = _complete_sink_caps (sink_caps); + cdata->src_caps = gst_caps_ref (src_caps); + + /* class data will be leaked if the element never gets instantiated */ + GST_MINI_OBJECT_FLAG_SET (cdata->sink_caps, + GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED); + GST_MINI_OBJECT_FLAG_SET (src_caps, GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED); + + type_info.class_data = cdata; + + 
type_name = g_strdup ("GstVaAV1Dec"); + feature_name = g_strdup ("vaav1dec"); + + /* The first decoder to be registered should use a constant name, + * like vaav1dec, for any additional decoders, we create unique + * names, by inserting the render device name. */ + if (g_type_from_name (type_name)) { + gchar *basename = g_path_get_basename (device->render_device_path); + g_free (type_name); + g_free (feature_name); + type_name = g_strdup_printf ("GstVa%sAV1Dec", basename); + feature_name = g_strdup_printf ("va%sav1dec", basename); + cdata->description = basename; + + /* lower rank for non-first device */ + if (rank > 0) + rank--; + } + + g_once (&debug_once, _register_debug_category, NULL); + + type = g_type_register_static (GST_TYPE_AV1_DECODER, + type_name, &type_info, 0); + + ret = gst_element_register (plugin, feature_name, rank, type); + + g_free (type_name); + g_free (feature_name); + + return ret; +} diff --git a/sys/va/gstvaav1dec.h b/sys/va/gstvaav1dec.h new file mode 100644 index 000000000..c73a809e8 --- /dev/null +++ b/sys/va/gstvaav1dec.h @@ -0,0 +1,33 @@ +/* GStreamer + * Copyright (C) 2020 Intel Corporation + * Author: He Junyan <junyan.he@intel.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ */ + +#pragma once + +#include "gstvadevice.h" + +G_BEGIN_DECLS + +gboolean gst_va_av1_dec_register (GstPlugin * plugin, + GstVaDevice * device, + GstCaps * sink_caps, + GstCaps * src_caps, + guint rank); + +G_END_DECLS diff --git a/sys/va/gstvabasedec.h b/sys/va/gstvabasedec.h index 513bfa479..e3d034baa 100644 --- a/sys/va/gstvabasedec.h +++ b/sys/va/gstvabasedec.h @@ -26,6 +26,7 @@ #include <gst/codecs/gstmpeg2decoder.h> #include <gst/codecs/gstvp8decoder.h> #include <gst/codecs/gstvp9decoder.h> +#include <gst/codecs/gstav1decoder.h> #include "gstvadevice.h" #include "gstvadecoder.h" @@ -50,6 +51,7 @@ struct _GstVaBaseDec GstMpeg2Decoder mpeg2; GstVp8Decoder vp8; GstVp9Decoder vp9; + GstAV1Decoder av1; } parent; GstDebugCategory *debug_category; @@ -83,6 +85,7 @@ struct _GstVaBaseDecClass GstMpeg2DecoderClass mpeg2; GstVp8DecoderClass vp8; GstVp9DecoderClass vp9; + GstAV1DecoderClass av1; } parent_class; GstVaCodecs codec; diff --git a/sys/va/gstvaprofile.c b/sys/va/gstvaprofile.c index a3f3049d2..bbe40dbfd 100644 --- a/sys/va/gstvaprofile.c +++ b/sys/va/gstvaprofile.c @@ -87,8 +87,8 @@ static const struct ProfileMap P (HEVC, SccMain444, "video/x-h265", "profile = (string) screen-extended-main-444"), #if VA_CHECK_VERSION(1,7,0) - P (AV1, Profile0, "video/x-av1", NULL), - P (AV1, Profile1, "video/x-av1", NULL), + P (AV1, Profile0, "video/x-av1", "profile = (string) 0"), + P (AV1, Profile1, "video/x-av1", "profile = (string) 1"), #endif #if VA_CHECK_VERSION(1, 8, 0) P (HEVC, SccMain444_10, "video/x-h265", diff --git a/sys/va/meson.build b/sys/va/meson.build index 375ffb949..803b7e2df 100644 --- a/sys/va/meson.build +++ b/sys/va/meson.build @@ -35,6 +35,12 @@ libgudev_dep = dependency('gudev-1.0', required: va_option) libdrm_dep = dependency('libdrm', required: false, fallback: ['libdrm', 'ext_libdrm']) +libva_av1_req = ['>= 1.8'] +libva_av1_dep = dependency('libva', version: libva_av1_req, required: va_option) +if libva_av1_dep.found() + va_sources += 
'gstvaav1dec.c' +endif + have_va = libva_dep.found() and libva_drm_dep.found() if not (have_va and libgudev_dep.found()) if va_option.enabled() diff --git a/sys/va/plugin.c b/sys/va/plugin.c index 9baf4775d..d8f5cbf3d 100644 --- a/sys/va/plugin.c +++ b/sys/va/plugin.c @@ -35,6 +35,7 @@ #include "gstvaprofile.h" #include "gstvavp8dec.h" #include "gstvavp9dec.h" +#include "gstvaav1dec.h" #include "gstvavpp.h" #define GST_CAT_DEFAULT gstva_debug @@ -128,6 +129,15 @@ plugin_register_decoders (GstPlugin * plugin, GstVaDevice * device, device->render_device_path); } break; +#if VA_CHECK_VERSION(1, 8, 0) + case AV1: + if (!gst_va_av1_dec_register (plugin, device, sinkcaps, srccaps, + GST_RANK_NONE)) { + GST_WARNING ("Failed to register AV1 decoder: %s", + device->render_device_path); + } + break; +#endif default: GST_DEBUG ("No decoder implementation for %" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (codec)); |