author     Ting Fu <ting.fu@intel.com>          2021-03-24 15:39:27 +0800
committer  Guo, Yejun <yejun.guo@intel.com>     2021-03-25 13:10:32 +0800
commit     b08b5f07b6323cfd2dae1a945a8802e07ae24e6c (patch)
tree       587f395f18f27431fd8d45736a66227d4be9665a /libavfilter
parent     3499ec84cced7e6d2898ed29a94e6e191af9ca07 (diff)
download   ffmpeg-b08b5f07b6323cfd2dae1a945a8802e07ae24e6c.tar.gz
lavfi/dnn_backend_tensorflow.c: fix mem leak in load_native_model
Signed-off-by: Ting Fu <ting.fu@intel.com>
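
The patch converts add_conv_layer() from per-site error returns to a single err: cleanup label: both tensor pointers start at NULL, every failed status check jumps to the label, and the label frees whatever was actually allocated before logging once and returning DNN_ERROR. This relies on the tensor deleter tolerating a NULL argument (recent TensorFlow C API builds return early from TF_DeleteTensor on NULL; worth verifying against the TF version in use). A minimal compilable sketch of the same shape, using hypothetical stand-in names and plain malloc/free rather than the FFmpeg/TensorFlow calls:

    #include <stdlib.h>

    /* Stand-in consumer: takes ownership of both buffers on success.
     * Hypothetical; in the real code the graph builder consumes the tensors. */
    static int graph_take(float *kernel, float *biases)
    {
        free(kernel);   /* this demo has no graph, so release them here */
        free(biases);
        return 0;
    }

    /* The shape of the fix: one error label, pointers initialised to NULL,
     * unconditional frees at the label (free(NULL) is a defined no-op). */
    static int add_layer(size_t ksize, size_t bsize)
    {
        float *kernel = NULL, *biases = NULL;

        kernel = malloc(ksize * sizeof(*kernel));
        if (!kernel)
            goto err;

        biases = malloc(bsize * sizeof(*biases));
        if (!biases)
            goto err;               /* kernel is released at err:, not leaked */

        if (graph_take(kernel, biases) < 0)
            goto err;

        return 0;                   /* ownership handed off; nothing to free */

    err:
        free(kernel);               /* safe even if never allocated */
        free(biases);
        return -1;
    }

    int main(void)
    {
        return add_layer(9, 3) == 0 ? 0 : 1;
    }

As in the patched function, the success path returns before the label, and every failure funnels through one log call and one error return instead of seven near-identical blocks.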
Diffstat (limited to 'libavfilter')
-rw-r--r--  libavfilter/dnn/dnn_backend_tf.c  55
1 file changed, 31 insertions(+), 24 deletions(-)
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index e016571304..c18cb4063f 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -330,7 +330,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_OperationDescription *op_desc;
     TF_Output input;
     int64_t strides[] = {1, 1, 1, 1};
-    TF_Tensor *tensor;
+    TF_Tensor *kernel_tensor = NULL, *biases_tensor = NULL;
     int64_t dims[4];
     int dims_len;
     char name_buffer[NAME_BUFFER_SIZE];
@@ -347,17 +347,15 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     dims[2] = params->kernel_size;
     dims[3] = params->input_num;
     dims_len = 4;
-    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, size * sizeof(float));
-    memcpy(TF_TensorData(tensor), params->kernel, size * sizeof(float));
-    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
+    kernel_tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, size * sizeof(float));
+    memcpy(TF_TensorData(kernel_tensor), params->kernel, size * sizeof(float));
+    TF_SetAttrTensor(op_desc, "value", kernel_tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
-        av_log(ctx, AV_LOG_ERROR, "Failed to set value for kernel of conv layer %d\n", layer);
-        return DNN_ERROR;
+        goto err;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
-        av_log(ctx, AV_LOG_ERROR, "Failed to add kernel to conv layer %d\n", layer);
-        return DNN_ERROR;
+        goto err;
     }
 
     snprintf(name_buffer, NAME_BUFFER_SIZE, "transpose%d", layer);
@@ -370,8 +368,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "Tperm", TF_INT32);
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
-        av_log(ctx, AV_LOG_ERROR, "Failed to add transpose to conv layer %d\n", layer);
-        return DNN_ERROR;
+        goto err;
     }
 
     snprintf(name_buffer, NAME_BUFFER_SIZE, "conv2d%d", layer);
@@ -385,8 +382,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrString(op_desc, "padding", "VALID", 5);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
-        av_log(ctx, AV_LOG_ERROR, "Failed to add conv2d to conv layer %d\n", layer);
-        return DNN_ERROR;
+        goto err;
     }
 
     snprintf(name_buffer, NAME_BUFFER_SIZE, "conv_biases%d", layer);
@@ -394,17 +390,15 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
     dims[0] = params->output_num;
     dims_len = 1;
-    tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, params->output_num * sizeof(float));
-    memcpy(TF_TensorData(tensor), params->biases, params->output_num * sizeof(float));
-    TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
+    biases_tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, params->output_num * sizeof(float));
+    memcpy(TF_TensorData(biases_tensor), params->biases, params->output_num * sizeof(float));
+    TF_SetAttrTensor(op_desc, "value", biases_tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
-        av_log(ctx, AV_LOG_ERROR, "Failed to set value for conv_biases of conv layer %d\n", layer);
-        return DNN_ERROR;
+        goto err;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
-        av_log(ctx, AV_LOG_ERROR, "Failed to add conv_biases to conv layer %d\n", layer);
-        return DNN_ERROR;
+        goto err;
     }
 
     snprintf(name_buffer, NAME_BUFFER_SIZE, "bias_add%d", layer);
@@ -416,8 +410,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
-        av_log(ctx, AV_LOG_ERROR, "Failed to add bias_add to conv layer %d\n", layer);
-        return DNN_ERROR;
+        goto err;
     }
 
     snprintf(name_buffer, NAME_BUFFER_SIZE, "activation%d", layer);
@@ -440,11 +433,15 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
-        av_log(ctx, AV_LOG_ERROR, "Failed to add activation function to conv layer %d\n", layer);
-        return DNN_ERROR;
+        goto err;
     }
 
     return DNN_SUCCESS;
+err:
+    TF_DeleteTensor(kernel_tensor);
+    TF_DeleteTensor(biases_tensor);
+    av_log(ctx, AV_LOG_ERROR, "Failed to add conv layer %d\n", layer);
+    return DNN_ERROR;
 }
 
 static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
@@ -499,11 +496,13 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     pads[7] = params->paddings[3][1];
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to set value for pad of layer %d\n", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add pad to layer %d\n", layer);
         return DNN_ERROR;
     }
@@ -519,6 +518,7 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     TF_SetAttrString(op_desc, "mode", "SYMMETRIC", 9);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add mirror_pad to layer %d\n", layer);
         return DNN_ERROR;
     }
@@ -546,11 +546,13 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     *y = params->val.y;
     TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to set value for maximum/y of layer %d", layer);
         return DNN_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add maximum/y to layer %d\n", layer);
         return DNN_ERROR;
     }
@@ -565,6 +567,7 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     TF_SetAttrType(op_desc, "T", TF_FLOAT);
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
+        TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add maximum to layer %d\n", layer);
         return DNN_ERROR;
     }
@@ -579,7 +582,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     TF_OperationDescription *op_desc;
     TF_Operation *op;
     TF_Operation *transpose_op;
-    TF_Tensor *tensor;
+    TF_Tensor *tensor = NULL;
     TF_Output input;
     int32_t *transpose_perm;
     int64_t transpose_perm_shape[] = {4};
@@ -600,6 +603,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
 
 #define CLEANUP_ON_ERROR(tf_model) \
 { \
+    TF_DeleteTensor(tensor); \
     TF_DeleteGraph(tf_model->graph); \
     TF_DeleteStatus(tf_model->status); \
     av_log(ctx, AV_LOG_ERROR, "Failed to set value or add operator to layer\n"); \
@@ -627,6 +631,9 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
         CLEANUP_ON_ERROR(tf_model);
     }
     transpose_op = TF_FinishOperation(op_desc, tf_model->status);
+    if (TF_GetCode(tf_model->status) != TF_OK){
+        CLEANUP_ON_ERROR(tf_model);
+    }
 
     for (layer = 0; layer < native_model->layers_num; ++layer){
         switch (native_model->layers[layer].type){
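
In the smaller helpers (add_pad_layer(), add_maximum_layer()) only one tensor is live at a time, so the patch frees it inline at each failure check instead of introducing a label. load_native_model() already funnels every failure through the CLEANUP_ON_ERROR macro, so a single TF_DeleteTensor() added inside the macro covers all call sites at once; initialising tensor to NULL keeps the early expansions, which run before the tensor is allocated, from deleting a garbage pointer. The last hunk also adds a previously missing status check after TF_FinishOperation() for the transpose op, so that failure now takes the same cleanup path. A compilable sketch of the macro variant, again with hypothetical stand-in names and free() in place of the TF_Delete* calls (free(NULL) is guaranteed safe by the C standard; whether TF_DeleteTensor accepts NULL should be checked against the TensorFlow version in use):

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch of the macro-based cleanup in load_native_model(): every
     * failure site expands to the same cleanup-and-return block, so one
     * added release inside the macro covers all of them at once.  The
     * macro refers to `tensor` and `graph` from the enclosing scope. */
    #define CLEANUP_ON_ERROR(msg)                                            \
        do {                                                                 \
            free(tensor);   /* tensor starts at NULL, so always safe */      \
            free(graph);                                                     \
            fprintf(stderr, "%s\n", (msg));                                  \
            return -1;                                                       \
        } while (0)

    static int load_model(void)
    {
        char *graph  = NULL;
        char *tensor = NULL;   /* mirrors the patch's TF_Tensor *tensor = NULL; */

        graph = malloc(128);
        if (!graph)
            CLEANUP_ON_ERROR("graph alloc failed");   /* tensor still NULL here */

        tensor = malloc(64);
        if (!tensor)
            CLEANUP_ON_ERROR("tensor alloc failed");

        /* ... every subsequent fallible build step expands the same cleanup ... */

        free(tensor);   /* demo only: release what the sketch allocated */
        free(graph);
        return 0;
    }

    int main(void)
    {
        return load_model() ? 1 : 0;
    }

The do { ... } while (0) wrapper is the conventional way to make a multi-statement macro behave like one statement; the macro in the diff uses plain braces, which works here because every use site follows it with a semicolon inside an if block.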