path: root/libavfilter/vf_sr.c
author     Guo, Yejun <yejun.guo@intel.com>    2020-08-28 12:51:44 +0800
committer  Guo, Yejun <yejun.guo@intel.com>    2020-09-21 21:26:56 +0800
commit     2003e32f62d94ba75b59d70632c9f2862b383591 (patch)
tree       55ec60788bc740eb45dbafd613bd8cf50a10417a /libavfilter/vf_sr.c
parent     6918e240d706f7390272976d8b8d502afe426a18 (diff)
download   ffmpeg-2003e32f62d94ba75b59d70632c9f2862b383591.tar.gz
dnn: change dnn interface to replace DNNData* with AVFrame*
Currently, every filter needs to provide code to transfer data from AVFrame* to the model input (DNNData*), and from the model output (DNNData*) back to AVFrame*. Such transfers can instead be implemented within the DNN module, so each filter can focus on its own business logic. The DNN module also exports the function pointers pre_proc and post_proc in struct DNNModel, in case a filter has special logic for transferring data between AVFrame* and DNNData*; the default implementation within the DNN module is used if the filter does not set pre_proc/post_proc.
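As a rough illustration of the reworked interface, the sketch below shows the shape struct DNNModel might take after this change. The member names set_input, execute_model, pre_proc and post_proc come from this patch, but the exact prototypes are assumptions inferred from the calls visible in the diff below; treat libavfilter/dnn_interface.h as the authoritative definition.

    /* Assumed shape of the reworked interface, inferred from this patch;
     * the real definitions live in libavfilter/dnn_interface.h. */
    typedef struct DNNModel {
        void *model;   /* backend-specific model handle */
        /* sets the model input directly from an AVFrame; the AVFrame* ->
         * DNNData* transfer now happens inside the DNN module */
        DNNReturnType (*set_input)(void *model, AVFrame *frame, const char *input_name);
        /* optional hooks for filters with special transfer logic; when left
         * NULL, the DNN module's default pre/post processing is used */
        int (*pre_proc)(AVFrame *frame_in, DNNData *model_input, AVFilterContext *filter_ctx);
        int (*post_proc)(DNNData *model_output, AVFrame *frame_out, AVFilterContext *filter_ctx);
    } DNNModel;

    typedef struct DNNModule {
        /* executes the model and writes the result into out_frame, applying
         * post_proc (or the default implementation) on the way out */
        DNNReturnType (*execute_model)(const DNNModel *model, const char **output_names,
                                       uint32_t nb_output, AVFrame *out_frame);
        /* ... load/free members omitted ... */
    } DNNModule;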
Diffstat (limited to 'libavfilter/vf_sr.c')
-rw-r--r--  libavfilter/vf_sr.c  166
1 file changed, 63 insertions, 103 deletions
diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index 445777f0c6..2eda8c3219 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -41,11 +41,10 @@ typedef struct SRContext {
DNNBackendType backend_type;
DNNModule *dnn_module;
DNNModel *model;
- DNNData input;
- DNNData output;
int scale_factor;
- struct SwsContext *sws_contexts[3];
- int sws_slice_h, sws_input_linesize, sws_output_linesize;
+ struct SwsContext *sws_uv_scale;
+ int sws_uv_height;
+ struct SwsContext *sws_pre_scale;
} SRContext;
#define OFFSET(x) offsetof(SRContext, x)
@@ -87,11 +86,6 @@ static av_cold int init(AVFilterContext *context)
return AVERROR(EIO);
}
- sr_context->input.dt = DNN_FLOAT;
- sr_context->sws_contexts[0] = NULL;
- sr_context->sws_contexts[1] = NULL;
- sr_context->sws_contexts[2] = NULL;
-
return 0;
}
@@ -111,95 +105,63 @@ static int query_formats(AVFilterContext *context)
return ff_set_common_formats(context, formats_list);
}
-static int config_props(AVFilterLink *inlink)
+static int config_output(AVFilterLink *outlink)
{
- AVFilterContext *context = inlink->dst;
- SRContext *sr_context = context->priv;
- AVFilterLink *outlink = context->outputs[0];
+ AVFilterContext *context = outlink->src;
+ SRContext *ctx = context->priv;
DNNReturnType result;
- int sws_src_h, sws_src_w, sws_dst_h, sws_dst_w;
+ AVFilterLink *inlink = context->inputs[0];
+ AVFrame *out = NULL;
const char *model_output_name = "y";
- sr_context->input.width = inlink->w * sr_context->scale_factor;
- sr_context->input.height = inlink->h * sr_context->scale_factor;
- sr_context->input.channels = 1;
-
- result = (sr_context->model->set_input)(sr_context->model->model, &sr_context->input, "x");
- if (result != DNN_SUCCESS){
- av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
+ AVFrame *fake_in = ff_get_video_buffer(inlink, inlink->w, inlink->h);
+ result = (ctx->model->set_input)(ctx->model->model, fake_in, "x");
+ if (result != DNN_SUCCESS) {
+ av_log(context, AV_LOG_ERROR, "could not set input for the model\n");
return AVERROR(EIO);
}
- result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output, &model_output_name, 1);
+ // do a try run in case the dnn model resizes the frame
+ out = ff_get_video_buffer(inlink, inlink->w, inlink->h);
+ result = (ctx->dnn_module->execute_model)(ctx->model, (const char **)&model_output_name, 1, out);
if (result != DNN_SUCCESS){
av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n");
return AVERROR(EIO);
}
- if (sr_context->input.height != sr_context->output.height || sr_context->input.width != sr_context->output.width){
- sr_context->input.width = inlink->w;
- sr_context->input.height = inlink->h;
- result = (sr_context->model->set_input)(sr_context->model->model, &sr_context->input, "x");
- if (result != DNN_SUCCESS){
- av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
- return AVERROR(EIO);
- }
- result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output, &model_output_name, 1);
- if (result != DNN_SUCCESS){
- av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n");
- return AVERROR(EIO);
- }
- sr_context->scale_factor = 0;
- }
- outlink->h = sr_context->output.height;
- outlink->w = sr_context->output.width;
- sr_context->sws_contexts[1] = sws_getContext(sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAY8,
- sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAYF32,
- 0, NULL, NULL, NULL);
- sr_context->sws_input_linesize = sr_context->input.width << 2;
- sr_context->sws_contexts[2] = sws_getContext(sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAYF32,
- sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAY8,
- 0, NULL, NULL, NULL);
- sr_context->sws_output_linesize = sr_context->output.width << 2;
- if (!sr_context->sws_contexts[1] || !sr_context->sws_contexts[2]){
- av_log(context, AV_LOG_ERROR, "could not create SwsContext for conversions\n");
- return AVERROR(ENOMEM);
- }
- if (sr_context->scale_factor){
- sr_context->sws_contexts[0] = sws_getContext(inlink->w, inlink->h, inlink->format,
- outlink->w, outlink->h, outlink->format,
- SWS_BICUBIC, NULL, NULL, NULL);
- if (!sr_context->sws_contexts[0]){
- av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
- return AVERROR(ENOMEM);
- }
- sr_context->sws_slice_h = inlink->h;
- } else {
+ if (fake_in->width != out->width || fake_in->height != out->height) {
+ //espcn
+ outlink->w = out->width;
+ outlink->h = out->height;
if (inlink->format != AV_PIX_FMT_GRAY8){
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
- sws_src_h = AV_CEIL_RSHIFT(sr_context->input.height, desc->log2_chroma_h);
- sws_src_w = AV_CEIL_RSHIFT(sr_context->input.width, desc->log2_chroma_w);
- sws_dst_h = AV_CEIL_RSHIFT(sr_context->output.height, desc->log2_chroma_h);
- sws_dst_w = AV_CEIL_RSHIFT(sr_context->output.width, desc->log2_chroma_w);
-
- sr_context->sws_contexts[0] = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
- sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
- SWS_BICUBIC, NULL, NULL, NULL);
- if (!sr_context->sws_contexts[0]){
- av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
- return AVERROR(ENOMEM);
- }
- sr_context->sws_slice_h = sws_src_h;
+ int sws_src_h = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
+ int sws_src_w = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
+ int sws_dst_h = AV_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h);
+ int sws_dst_w = AV_CEIL_RSHIFT(outlink->w, desc->log2_chroma_w);
+ ctx->sws_uv_scale = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
+ sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
+ SWS_BICUBIC, NULL, NULL, NULL);
+ ctx->sws_uv_height = sws_src_h;
}
+ } else {
+ //srcnn
+ outlink->w = out->width * ctx->scale_factor;
+ outlink->h = out->height * ctx->scale_factor;
+ ctx->sws_pre_scale = sws_getContext(inlink->w, inlink->h, inlink->format,
+ outlink->w, outlink->h, outlink->format,
+ SWS_BICUBIC, NULL, NULL, NULL);
}
+ av_frame_free(&fake_in);
+ av_frame_free(&out);
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *context = inlink->dst;
- SRContext *sr_context = context->priv;
+ SRContext *ctx = context->priv;
AVFilterLink *outlink = context->outputs[0];
AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
DNNReturnType dnn_result;
@@ -211,45 +173,44 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
- out->height = sr_context->output.height;
- out->width = sr_context->output.width;
- if (sr_context->scale_factor){
- sws_scale(sr_context->sws_contexts[0], (const uint8_t **)in->data, in->linesize,
- 0, sr_context->sws_slice_h, out->data, out->linesize);
- sws_scale(sr_context->sws_contexts[1], (const uint8_t **)out->data, out->linesize,
- 0, out->height, (uint8_t * const*)(&sr_context->input.data),
- (const int [4]){sr_context->sws_input_linesize, 0, 0, 0});
+ if (ctx->sws_pre_scale) {
+ sws_scale(ctx->sws_pre_scale,
+ (const uint8_t **)in->data, in->linesize, 0, in->height,
+ out->data, out->linesize);
+ dnn_result = (ctx->model->set_input)(ctx->model->model, out, "x");
} else {
- if (sr_context->sws_contexts[0]){
- sws_scale(sr_context->sws_contexts[0], (const uint8_t **)(in->data + 1), in->linesize + 1,
- 0, sr_context->sws_slice_h, out->data + 1, out->linesize + 1);
- sws_scale(sr_context->sws_contexts[0], (const uint8_t **)(in->data + 2), in->linesize + 2,
- 0, sr_context->sws_slice_h, out->data + 2, out->linesize + 2);
- }
+ dnn_result = (ctx->model->set_input)(ctx->model->model, in, "x");
+ }
- sws_scale(sr_context->sws_contexts[1], (const uint8_t **)in->data, in->linesize,
- 0, in->height, (uint8_t * const*)(&sr_context->input.data),
- (const int [4]){sr_context->sws_input_linesize, 0, 0, 0});
+ if (dnn_result != DNN_SUCCESS) {
+ av_frame_free(&in);
+ av_frame_free(&out);
+ av_log(context, AV_LOG_ERROR, "could not set input for the model\n");
+ return AVERROR(EIO);
}
- av_frame_free(&in);
- dnn_result = (sr_context->dnn_module->execute_model)(sr_context->model, &sr_context->output, &model_output_name, 1);
+ dnn_result = (ctx->dnn_module->execute_model)(ctx->model, (const char **)&model_output_name, 1, out);
if (dnn_result != DNN_SUCCESS){
- av_log(context, AV_LOG_ERROR, "failed to execute loaded model\n");
+ av_log(ctx, AV_LOG_ERROR, "failed to execute loaded model\n");
+ av_frame_free(&in);
+ av_frame_free(&out);
return AVERROR(EIO);
}
- sws_scale(sr_context->sws_contexts[2], (const uint8_t *[4]){(const uint8_t *)sr_context->output.data, 0, 0, 0},
- (const int[4]){sr_context->sws_output_linesize, 0, 0, 0},
- 0, out->height, (uint8_t * const*)out->data, out->linesize);
+ if (ctx->sws_uv_scale) {
+ sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
+ 0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
+ sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 2), in->linesize + 2,
+ 0, ctx->sws_uv_height, out->data + 2, out->linesize + 2);
+ }
+ av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
static av_cold void uninit(AVFilterContext *context)
{
- int i;
SRContext *sr_context = context->priv;
if (sr_context->dnn_module){
@@ -257,16 +218,14 @@ static av_cold void uninit(AVFilterContext *context)
av_freep(&sr_context->dnn_module);
}
- for (i = 0; i < 3; ++i){
- sws_freeContext(sr_context->sws_contexts[i]);
- }
+ sws_freeContext(sr_context->sws_uv_scale);
+ sws_freeContext(sr_context->sws_pre_scale);
}
static const AVFilterPad sr_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
- .config_props = config_props,
.filter_frame = filter_frame,
},
{ NULL }
@@ -275,6 +234,7 @@ static const AVFilterPad sr_inputs[] = {
static const AVFilterPad sr_outputs[] = {
{
.name = "default",
+ .config_props = config_output,
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
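For reference, the per-frame flow of the filter after this change reduces to the condensed sketch below. It is distilled directly from the new filter_frame() in the diff above, with error handling elided; it is not a drop-in replacement for that code.

    /* Condensed sketch of the new filter_frame() flow (error handling
     * elided; see the diff above for the complete version). */
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    av_frame_copy_props(out, in);

    if (ctx->sws_pre_scale) {
        /* srcnn: the model keeps the frame size, so upscale all planes
         * with bicubic first and feed the scaled frame to the model */
        sws_scale(ctx->sws_pre_scale, (const uint8_t **)in->data, in->linesize,
                  0, in->height, out->data, out->linesize);
        (ctx->model->set_input)(ctx->model->model, out, "x");
    } else {
        /* espcn: the model itself produces the larger frame */
        (ctx->model->set_input)(ctx->model->model, in, "x");
    }

    /* the DNN module fills the output AVFrame directly; the filter no
     * longer touches DNNData* at all */
    (ctx->dnn_module->execute_model)(ctx->model,
                                     (const char **)&model_output_name, 1, out);

    if (ctx->sws_uv_scale) {
        /* espcn handles only the Y plane; scale chroma with swscale */
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
                  0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);
        sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 2), in->linesize + 2,
                  0, ctx->sws_uv_height, out->data + 2, out->linesize + 2);
    }
    av_frame_free(&in);
    return ff_filter_frame(outlink, out);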