-rw-r--r-- | libavcodec/snow.c    | 68
-rw-r--r-- | libavcodec/snow.h    | 14
-rw-r--r-- | libavcodec/snowdec.c | 14
-rw-r--r-- | libavcodec/snowenc.c | 77
4 files changed, 87 insertions, 86 deletions
diff --git a/libavcodec/snow.c b/libavcodec/snow.c
index c4df4b68ea..1c27ae2cf9 100644
--- a/libavcodec/snow.c
+++ b/libavcodec/snow.c
@@ -337,7 +337,7 @@ void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, int stride,
             }
         }
     }else{
-        uint8_t *src= s->last_picture[block->ref].data[plane_index];
+        uint8_t *src= s->last_picture[block->ref]->data[plane_index];
         const int scale= plane_index ? (2*s->mv_scale)>>s->chroma_h_shift : 2*s->mv_scale;
         int mx= block->mx*scale;
         int my= block->my*scale;
@@ -461,11 +461,11 @@ av_cold int ff_snow_common_init(AVCodecContext *avctx){
     for(i=0; i<MAX_REF_FRAMES; i++) {
         for(j=0; j<MAX_REF_FRAMES; j++)
             ff_scale_mv_ref[i][j] = 256*(i+1)/(j+1);
-        avcodec_get_frame_defaults(&s->last_picture[i]);
+        s->last_picture[i] = av_frame_alloc();
     }
 
-    avcodec_get_frame_defaults(&s->mconly_picture);
-    avcodec_get_frame_defaults(&s->current_picture);
+    s->mconly_picture = av_frame_alloc();
+    s->current_picture = av_frame_alloc();
 
     return 0;
 fail:
@@ -478,15 +478,15 @@ int ff_snow_common_init_after_header(AVCodecContext *avctx) {
     int ret, emu_buf_size;
 
     if(!s->scratchbuf) {
-        if ((ret = ff_get_buffer(s->avctx, &s->mconly_picture,
+        if ((ret = ff_get_buffer(s->avctx, s->mconly_picture,
                                  AV_GET_BUFFER_FLAG_REF)) < 0)
             return ret;
-        FF_ALLOCZ_OR_GOTO(avctx, s->scratchbuf, FFMAX(s->mconly_picture.linesize[0], 2*avctx->width+256)*7*MB_SIZE, fail);
-        emu_buf_size = FFMAX(s->mconly_picture.linesize[0], 2*avctx->width+256) * (2 * MB_SIZE + HTAPS_MAX - 1);
+        FF_ALLOCZ_OR_GOTO(avctx, s->scratchbuf, FFMAX(s->mconly_picture->linesize[0], 2*avctx->width+256)*7*MB_SIZE, fail);
+        emu_buf_size = FFMAX(s->mconly_picture->linesize[0], 2*avctx->width+256) * (2 * MB_SIZE + HTAPS_MAX - 1);
         FF_ALLOC_OR_GOTO(avctx, s->emu_edge_buffer, emu_buf_size, fail);
     }
 
-    if(s->mconly_picture.format != avctx->pix_fmt) {
+    if(s->mconly_picture->format != avctx->pix_fmt) {
         av_log(avctx, AV_LOG_ERROR, "pixel format changed\n");
         return AVERROR_INVALIDDATA;
     }
@@ -596,51 +596,51 @@ void ff_snow_release_buffer(AVCodecContext *avctx)
     SnowContext *s = avctx->priv_data;
     int i;
 
-    if(s->last_picture[s->max_ref_frames-1].data[0]){
-        av_frame_unref(&s->last_picture[s->max_ref_frames-1]);
+    if(s->last_picture[s->max_ref_frames-1]->data[0]){
+        av_frame_unref(s->last_picture[s->max_ref_frames-1]);
         for(i=0; i<9; i++)
             if(s->halfpel_plane[s->max_ref_frames-1][1+i/3][i%3])
-                av_free(s->halfpel_plane[s->max_ref_frames-1][1+i/3][i%3] - EDGE_WIDTH*(1+s->current_picture.linesize[i%3]));
+                av_free(s->halfpel_plane[s->max_ref_frames-1][1+i/3][i%3] - EDGE_WIDTH*(1+s->current_picture->linesize[i%3]));
     }
 }
 
 int ff_snow_frame_start(SnowContext *s){
-    AVFrame tmp;
+    AVFrame *tmp;
     int i, ret;
     int w= s->avctx->width; //FIXME round up to x16 ?
     int h= s->avctx->height;
 
-    if (s->current_picture.data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)) {
-        s->dsp.draw_edges(s->current_picture.data[0],
-                          s->current_picture.linesize[0], w   , h   ,
+    if (s->current_picture->data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)) {
+        s->dsp.draw_edges(s->current_picture->data[0],
+                          s->current_picture->linesize[0], w   , h   ,
                           EDGE_WIDTH  , EDGE_WIDTH  , EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.data[1],
-                          s->current_picture.linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
+        s->dsp.draw_edges(s->current_picture->data[1],
+                          s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
                           EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.data[2],
-                          s->current_picture.linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
+        s->dsp.draw_edges(s->current_picture->data[2],
+                          s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
                           EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
     }
 
     ff_snow_release_buffer(s->avctx);
 
-    av_frame_move_ref(&tmp, &s->last_picture[s->max_ref_frames-1]);
+    tmp= s->last_picture[s->max_ref_frames-1];
     for(i=s->max_ref_frames-1; i>0; i--)
-        av_frame_move_ref(&s->last_picture[i], &s->last_picture[i-1]);
+        s->last_picture[i] = s->last_picture[i-1];
     memmove(s->halfpel_plane+1, s->halfpel_plane, (s->max_ref_frames-1)*sizeof(void*)*4*4);
-    if(USE_HALFPEL_PLANE && s->current_picture.data[0]) {
-        if((ret = halfpel_interpol(s, s->halfpel_plane[0], &s->current_picture)) < 0)
+    if(USE_HALFPEL_PLANE && s->current_picture->data[0]) {
+        if((ret = halfpel_interpol(s, s->halfpel_plane[0], s->current_picture)) < 0)
             return ret;
     }
-    av_frame_move_ref(&s->last_picture[0], &s->current_picture);
-    av_frame_move_ref(&s->current_picture, &tmp);
+    s->last_picture[0] = s->current_picture;
+    s->current_picture = tmp;
 
     if(s->keyframe){
         s->ref_frames= 0;
     }else{
         int i;
-        for(i=0; i<s->max_ref_frames && s->last_picture[i].data[0]; i++)
-            if(i && s->last_picture[i-1].key_frame)
+        for(i=0; i<s->max_ref_frames && s->last_picture[i]->data[0]; i++)
+            if(i && s->last_picture[i-1]->key_frame)
                 break;
         s->ref_frames= i;
         if(s->ref_frames==0){
@@ -649,10 +649,10 @@ int ff_snow_frame_start(SnowContext *s){
         }
     }
 
-    if ((ret = ff_get_buffer(s->avctx, &s->current_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
+    if ((ret = ff_get_buffer(s->avctx, s->current_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
         return ret;
 
-    s->current_picture.key_frame= s->keyframe;
+    s->current_picture->key_frame= s->keyframe;
 
     return 0;
 }
@@ -680,10 +680,10 @@ av_cold void ff_snow_common_end(SnowContext *s)
     for(i=0; i<MAX_REF_FRAMES; i++){
         av_freep(&s->ref_mvs[i]);
         av_freep(&s->ref_scores[i]);
-        if(s->last_picture[i].data[0]) {
-            av_assert0(s->last_picture[i].data[0] != s->current_picture.data[0]);
-            av_frame_unref(&s->last_picture[i]);
+        if(s->last_picture[i]->data[0]) {
+            av_assert0(s->last_picture[i]->data[0] != s->current_picture->data[0]);
         }
+        av_frame_free(&s->last_picture[i]);
     }
 
     for(plane_index=0; plane_index<3; plane_index++){
@@ -695,6 +695,6 @@ av_cold void ff_snow_common_end(SnowContext *s)
             }
         }
     }
-    av_frame_unref(&s->mconly_picture);
-    av_frame_unref(&s->current_picture);
+    av_frame_free(&s->mconly_picture);
+    av_frame_free(&s->current_picture);
 }
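
Reviewer note (not part of the patch): the snow.c hunks above all follow one pattern. The context-owned AVFrames become heap allocations created once in ff_snow_common_init(), the reference rotation in ff_snow_frame_start() becomes a plain pointer rotation instead of av_frame_move_ref() on embedded structs, and teardown switches to av_frame_free(). A minimal, self-contained sketch of that lifecycle; Ctx and N_REFS are illustrative stand-ins, not FFmpeg identifiers:

/* Standalone sketch of the lifecycle adopted above, assuming libavutil. */
#include <libavutil/error.h>
#include <libavutil/frame.h>

#define N_REFS 4                         /* plays the role of MAX_REF_FRAMES */

typedef struct Ctx {
    AVFrame *cur;
    AVFrame *last[N_REFS];
} Ctx;

static int ctx_init(Ctx *c)              /* cf. ff_snow_common_init() */
{
    if (!(c->cur = av_frame_alloc()))
        return AVERROR(ENOMEM);
    for (int i = 0; i < N_REFS; i++)
        if (!(c->last[i] = av_frame_alloc()))
            return AVERROR(ENOMEM);      /* caller is expected to run ctx_close() */
    return 0;
}

static void ctx_rotate(Ctx *c)           /* cf. ff_snow_frame_start() */
{
    AVFrame *oldest = c->last[N_REFS - 1];
    av_frame_unref(oldest);              /* drop the oldest reference's data */
    for (int i = N_REFS - 1; i > 0; i--)
        c->last[i] = c->last[i - 1];     /* pointer rotation, no av_frame_move_ref() */
    c->last[0] = c->cur;
    c->cur     = oldest;                 /* reused as the new current frame */
}

static void ctx_close(Ctx *c)            /* cf. ff_snow_common_end() */
{
    av_frame_free(&c->cur);              /* frees the struct and NULLs the pointer */
    for (int i = 0; i < N_REFS; i++)
        av_frame_free(&c->last[i]);
}
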
diff --git a/libavcodec/snow.h b/libavcodec/snow.h
index 922a48e54d..06f3731f9e 100644
--- a/libavcodec/snow.h
+++ b/libavcodec/snow.h
@@ -114,12 +114,12 @@ typedef struct SnowContext{
     VideoDSPContext vdsp;
     H264QpelContext h264qpel;
     SnowDWTContext dwt;
-    AVFrame new_picture;
-    AVFrame input_picture;              ///< new_picture with the internal linesizes
-    AVFrame current_picture;
-    AVFrame last_picture[MAX_REF_FRAMES];
+    AVFrame *new_picture;
+    AVFrame *input_picture;              ///< new_picture with the internal linesizes
+    AVFrame *current_picture;
+    AVFrame *last_picture[MAX_REF_FRAMES];
     uint8_t *halfpel_plane[MAX_REF_FRAMES][4][4];
-    AVFrame mconly_picture;
+    AVFrame *mconly_picture;
 //    uint8_t q_context[16];
     uint8_t header_state[32];
     uint8_t block_state[128 + 32*128];
@@ -414,8 +414,8 @@ static av_always_inline void predict_slice(SnowContext *s, IDWTELEM *buf, int pl
     int block_h          = plane_index ? block_size>>s->chroma_v_shift : block_size;
     const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
     const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
-    int ref_stride= s->current_picture.linesize[plane_index];
-    uint8_t *dst8= s->current_picture.data[plane_index];
+    int ref_stride= s->current_picture->linesize[plane_index];
+    uint8_t *dst8= s->current_picture->data[plane_index];
     int w= p->width;
     int h= p->height;
     av_assert2(s->chroma_h_shift == s->chroma_v_shift); // obmc params assume squares
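
Side note on the struct change above (not from the patch): since the frames are now allocated on the heap, av_frame_alloc() can fail, and the patched ff_snow_common_init() assigns the results without checking them. A hypothetical defensive variant, written as if it lived in snow.c, would look roughly like this:

/* Hypothetical helper, not in the patch: allocate the pointer members
 * declared above and report failure instead of continuing with NULL. */
#include "snow.h"                         /* SnowContext, MAX_REF_FRAMES */
#include "libavutil/error.h"
#include "libavutil/frame.h"

static int alloc_snow_frames(SnowContext *s)
{
    for (int i = 0; i < MAX_REF_FRAMES; i++)
        if (!(s->last_picture[i] = av_frame_alloc()))
            return AVERROR(ENOMEM);
    s->mconly_picture  = av_frame_alloc();
    s->current_picture = av_frame_alloc();
    if (!s->mconly_picture || !s->current_picture)
        return AVERROR(ENOMEM);           /* frames already allocated are released in ff_snow_common_end() */
    return 0;
}
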
diff --git a/libavcodec/snowdec.c b/libavcodec/snowdec.c
index b222c22269..8da2f17ffb 100644
--- a/libavcodec/snowdec.c
+++ b/libavcodec/snowdec.c
@@ -43,8 +43,8 @@ static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer
     int block_h          = plane_index ? block_size>>s->chroma_v_shift : block_size;
     const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
     int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
-    int ref_stride= s->current_picture.linesize[plane_index];
-    uint8_t *dst8= s->current_picture.data[plane_index];
+    int ref_stride= s->current_picture->linesize[plane_index];
+    uint8_t *dst8= s->current_picture->data[plane_index];
     int w= p->width;
     int h= p->height;
 
@@ -403,7 +403,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     ff_init_range_decoder(c, buf, buf_size);
     ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
 
-    s->current_picture.pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
+    s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
     if(decode_header(s)<0)
         return -1;
     if ((res=ff_snow_common_init_after_header(avctx)) < 0)
@@ -449,8 +449,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
         for(y=0; y<h; y++){
             for(x=0; x<w; x++){
-                int v= s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x];
-                s->mconly_picture.data[plane_index][y*s->mconly_picture.linesize[plane_index] + x]= v;
+                int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
+                s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
             }
         }
     }
@@ -548,9 +548,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
     ff_snow_release_buffer(avctx);
 
     if(!(s->avctx->debug&2048))
-        av_frame_ref(picture, &s->current_picture);
+        av_frame_ref(picture, s->current_picture);
     else
-        av_frame_ref(picture, &s->mconly_picture);
+        av_frame_ref(picture, s->mconly_picture);
 
     *got_frame = 1;
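
Reviewer note on the decoder hunks above (not part of the patch): with the frames now reference counted, the decoder hands its output to the caller through av_frame_ref() on the context-owned frame instead of copying an embedded struct, so no pixel data is duplicated. A minimal sketch of that output step, with a made-up function name:

/* Sketch only: the decoder keeps ownership of its internal frame and
 * gives the caller an independent reference to the same buffers. */
#include <libavutil/frame.h>

static int output_decoded_frame(AVFrame *dst, AVFrame *internal)
{
    /* dst must be freshly allocated or already unreferenced */
    int ret = av_frame_ref(dst, internal);
    if (ret < 0)
        return ret;
    /* the caller may now keep dst even after the decoder reuses internal */
    return 0;
}
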
diff --git a/libavcodec/snowenc.c b/libavcodec/snowenc.c
index de3a8ba55c..10864e7c96 100644
--- a/libavcodec/snowenc.c
+++ b/libavcodec/snowenc.c
@@ -103,7 +103,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
     }
 
     s->pass1_rc= !(avctx->flags & (CODEC_FLAG_QSCALE|CODEC_FLAG_PASS2));
-    avctx->coded_frame= &s->current_picture;
     switch(avctx->pix_fmt){
     case AV_PIX_FMT_YUV444P:
 //    case AV_PIX_FMT_YUV422P:
@@ -125,7 +124,8 @@ static av_cold int encode_init(AVCodecContext *avctx)
     ff_set_cmp(&s->dsp, s->dsp.me_cmp, s->avctx->me_cmp);
     ff_set_cmp(&s->dsp, s->dsp.me_sub_cmp, s->avctx->me_sub_cmp);
 
-    if ((ret = ff_get_buffer(s->avctx, &s->input_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
+    s->input_picture = av_frame_alloc();
+    if ((ret = ff_get_buffer(s->avctx, s->input_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
         return ret;
 
     if(s->avctx->me_method == ME_ITER){
@@ -235,11 +235,11 @@ static int encode_q_branch(SnowContext *s, int level, int x, int y){
     int pmx, pmy;
     int mx=0, my=0;
     int l,cr,cb;
-    const int stride= s->current_picture.linesize[0];
-    const int uvstride= s->current_picture.linesize[1];
-    uint8_t *current_data[3]= { s->input_picture.data[0] + (x + y*  stride)*block_w,
-                                s->input_picture.data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
-                                s->input_picture.data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
+    const int stride= s->current_picture->linesize[0];
+    const int uvstride= s->current_picture->linesize[1];
+    uint8_t *current_data[3]= { s->input_picture->data[0] + (x + y*  stride)*block_w,
+                                s->input_picture->data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
+                                s->input_picture->data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
     int P[10][2];
     int16_t last_mv[3][2];
     int qpel= !!(s->avctx->flags & CODEC_FLAG_QPEL); //unused
@@ -313,7 +313,7 @@ static int encode_q_branch(SnowContext *s, int level, int x, int y){
     score= INT_MAX;
     best_ref= 0;
     for(ref=0; ref<s->ref_frames; ref++){
-        init_ref(c, current_data, s->last_picture[ref].data, NULL, block_w*x, block_w*y, 0);
+        init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);
 
         ref_score= ff_epzs_motion_search(&s->m, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
                                          (1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);
@@ -493,8 +493,8 @@ static int get_dc(SnowContext *s, int mb_x, int mb_y, int plane_index){
     const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
     const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
     const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
-    const int ref_stride= s->current_picture.linesize[plane_index];
-    uint8_t *src= s-> input_picture.data[plane_index];
+    const int ref_stride= s->current_picture->linesize[plane_index];
+    uint8_t *src= s-> input_picture->data[plane_index];
     IDWTELEM *dst= (IDWTELEM*)s->m.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
     const int b_stride = s->b_width << s->block_max_depth;
     const int w= p->width;
@@ -587,9 +587,9 @@ static int get_block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index, uin
     const int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
     const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
     const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
-    const int ref_stride= s->current_picture.linesize[plane_index];
-    uint8_t *dst= s->current_picture.data[plane_index];
-    uint8_t *src= s-> input_picture.data[plane_index];
+    const int ref_stride= s->current_picture->linesize[plane_index];
+    uint8_t *dst= s->current_picture->data[plane_index];
+    uint8_t *src= s-> input_picture->data[plane_index];
     IDWTELEM *pred= (IDWTELEM*)s->m.obmc_scratchpad + plane_index*block_size*block_size*4;
     uint8_t *cur = s->scratchbuf;
     uint8_t *tmp = s->emu_edge_buffer;
@@ -690,9 +690,9 @@ static int get_4block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index){
     const int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
     const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
     const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
-    const int ref_stride= s->current_picture.linesize[plane_index];
-    uint8_t *dst= s->current_picture.data[plane_index];
-    uint8_t *src= s-> input_picture.data[plane_index];
+    const int ref_stride= s->current_picture->linesize[plane_index];
+    uint8_t *dst= s->current_picture->data[plane_index];
+    uint8_t *src= s-> input_picture->data[plane_index];
     //FIXME zero_dst is const but add_yblock changes dst if add is 0 (this is never the case for dst=zero_dst
     // const has only been removed from zero_dst to suppress a warning
     static IDWTELEM zero_dst[4096]; //FIXME
@@ -1046,9 +1046,9 @@ static void iterative_me(SnowContext *s){
 
                 //skip stuff outside the picture
                 if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
-                    uint8_t *src= s-> input_picture.data[0];
-                    uint8_t *dst= s->current_picture.data[0];
-                    const int stride= s->current_picture.linesize[0];
+                    uint8_t *src= s-> input_picture->data[0];
+                    uint8_t *dst= s->current_picture->data[0];
+                    const int stride= s->current_picture->linesize[0];
                     const int block_w= MB_SIZE >> s->block_max_depth;
                     const int block_h= MB_SIZE >> s->block_max_depth;
                     const int sx= block_w*mb_x - block_w/2;
@@ -1539,7 +1539,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 {
     SnowContext *s = avctx->priv_data;
     RangeCoder * const c= &s->c;
-    AVFrame *pic = &s->new_picture;
+    AVFrame *pic = pict;
     const int width= s->avctx->width;
     const int height= s->avctx->height;
     int level, orientation, plane_index, i, y, ret;
@@ -1556,17 +1556,17 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         int hshift= i ? s->chroma_h_shift : 0;
         int vshift= i ? s->chroma_v_shift : 0;
         for(y=0; y<(height>>vshift); y++)
-            memcpy(&s->input_picture.data[i][y * s->input_picture.linesize[i]],
+            memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
                    &pict->data[i][y * pict->linesize[i]],
                    width>>hshift);
-        s->dsp.draw_edges(s->input_picture.data[i], s->input_picture.linesize[i],
+        s->dsp.draw_edges(s->input_picture->data[i], s->input_picture->linesize[i],
                           width >> hshift, height >> vshift,
                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                           EDGE_TOP | EDGE_BOTTOM);
     }
     emms_c();
-    s->new_picture = *pict;
+    s->new_picture = pict;
 
     s->m.picture_number= avctx->frame_number;
     if(avctx->flags&CODEC_FLAG_PASS2){
@@ -1594,6 +1594,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     }//else keep previous frame's qlog until after motion estimation
 
     ff_snow_frame_start(s);
+    avctx->coded_frame= s->current_picture;
 
     s->m.current_picture_ptr= &s->m.current_picture;
     s->m.last_picture.f.pts = s->m.current_picture.f.pts;
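
Why the avctx->coded_frame assignment moved here (reviewer note, not from the commit message): with an embedded AVFrame, &s->current_picture was a stable address, so encode_init() could set coded_frame once; now that ff_snow_frame_start() rotates heap pointers, s->current_picture names a different AVFrame each frame and a pointer cached at init time would go stale. A small hypothetical demo of that pointer-identity problem:

/* Hypothetical demo, not FFmpeg code: a pointer cached before a swap no
 * longer names the current frame afterwards (error checks omitted). */
#include <libavutil/frame.h>
#include <stdio.h>

int main(void)
{
    AVFrame *cur    = av_frame_alloc();
    AVFrame *prev   = av_frame_alloc();
    AVFrame *cached = cur;               /* like coded_frame set once at init */

    AVFrame *tmp = prev;                 /* one "frame start": rotate pointers */
    prev = cur;
    cur  = tmp;

    printf("cached == cur? %s\n", cached == cur ? "yes" : "no");  /* prints "no" */

    av_frame_free(&cur);
    av_frame_free(&prev);
    return 0;
}
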
@@ -1601,21 +1602,21 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     if(pic->pict_type == AV_PICTURE_TYPE_P){
         int block_width = (width +15)>>4;
         int block_height= (height+15)>>4;
-        int stride= s->current_picture.linesize[0];
+        int stride= s->current_picture->linesize[0];
 
-        av_assert0(s->current_picture.data[0]);
-        av_assert0(s->last_picture[0].data[0]);
+        av_assert0(s->current_picture->data[0]);
+        av_assert0(s->last_picture[0]->data[0]);
 
         s->m.avctx= s->avctx;
-        s->m.current_picture.f.data[0] = s->current_picture.data[0];
-        s->m.   last_picture.f.data[0] = s->last_picture[0].data[0];
-        s->m.    new_picture.f.data[0] = s-> input_picture.data[0];
+        s->m.current_picture.f.data[0] = s->current_picture->data[0];
+        s->m.   last_picture.f.data[0] = s->last_picture[0]->data[0];
+        s->m.    new_picture.f.data[0] = s-> input_picture->data[0];
         s->m.   last_picture_ptr= &s->m.   last_picture;
         s->m.linesize= s->m.   last_picture.f.linesize[0] =
                        s->m.    new_picture.f.linesize[0] =
                        s->m.current_picture.f.linesize[0] = stride;
-        s->m.uvlinesize= s->current_picture.linesize[1];
+        s->m.uvlinesize= s->current_picture->linesize[1];
         s->m.width = width;
         s->m.height= height;
         s->m.mb_width = block_width;
@@ -1704,7 +1705,7 @@ redo_frame:
             ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
             pic->pict_type= AV_PICTURE_TYPE_I;
             s->keyframe=1;
-            s->current_picture.key_frame=1;
+            s->current_picture->key_frame=1;
             goto redo_frame;
         }
@@ -1775,7 +1776,7 @@ redo_frame:
         if(pic->pict_type == AV_PICTURE_TYPE_I){
             for(y=0; y<h; y++){
                 for(x=0; x<w; x++){
-                    s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x]=
+                    s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
                         pict->data[plane_index][y*pict->linesize[plane_index] + x];
                 }
             }
@@ -1790,12 +1791,12 @@ redo_frame:
                 if(pict->data[plane_index]) //FIXME gray hack
                     for(y=0; y<h; y++){
                         for(x=0; x<w; x++){
-                            int d= s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
+                            int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
                             error += d*d;
                         }
                     }
                 s->avctx->error[plane_index] += error;
-                s->current_picture.error[plane_index] = error;
+                s->current_picture->error[plane_index] = error;
             }
         }
@@ -1804,9 +1805,9 @@ redo_frame:
 
     ff_snow_release_buffer(avctx);
 
-    s->current_picture.coded_picture_number = avctx->frame_number;
-    s->current_picture.pict_type = pict->pict_type;
-    s->current_picture.quality = pict->quality;
+    s->current_picture->coded_picture_number = avctx->frame_number;
+    s->current_picture->pict_type = pict->pict_type;
+    s->current_picture->quality = pict->quality;
     s->m.frame_bits = 8*(s->c.bytestream - s->c.bytestream_start);
     s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits;
     s->m.current_picture.f.display_picture_number =
@@ -1840,7 +1841,7 @@ static av_cold int encode_end(AVCodecContext *avctx)
 
     ff_snow_common_end(s);
     ff_rate_control_uninit(&s->m);
-    av_frame_unref(&s->input_picture);
+    av_frame_free(&s->input_picture);
     av_free(avctx->stats_out);
 
     return 0;
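
Final reviewer note (not part of the patch): the teardown hunks rely on the difference between av_frame_unref() and av_frame_free(). Unref only drops the frame's buffers and metadata and leaves the AVFrame reusable, which was the right call for embedded structs; free additionally destroys the structure itself (unreferencing it first) and resets the pointer to NULL, which is what the heap-allocated frames in ff_snow_common_end() and encode_end() now need. A two-line sketch of the distinction:

/* Sketch only; av_frame_free() alone would be enough since it unrefs
 * internally, the explicit unref is shown just to contrast the two calls. */
#include <libavutil/frame.h>

static void drop_then_destroy(AVFrame **frame)
{
    av_frame_unref(*frame);   /* data gone, *frame still a valid, empty AVFrame */
    av_frame_free(frame);     /* AVFrame itself freed, *frame set to NULL       */
}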