Diffstat (limited to 'libavcodec/vc1dec.c')
-rw-r--r-- | libavcodec/vc1dec.c | 286 |
1 file changed, 196 insertions, 90 deletions
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index db4b87b48a..aea2bbb897 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -4,20 +4,20 @@
  * Copyright (c) 2006-2007 Konstantin Shishkov
  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
  *
- * This file is part of Libav.
+ * This file is part of FFmpeg.
  *
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
@@ -39,6 +39,7 @@
 #include "unary.h"
 #include "mathops.h"
 #include "vdpau_internal.h"
+#include "libavutil/avassert.h"
 
 #undef NDEBUG
 #include <assert.h>
@@ -73,6 +74,17 @@ enum Imode {
 };
 /** @} */ //imode defines
 
+static void init_block_index(VC1Context *v)
+{
+    MpegEncContext *s = &v->s;
+    ff_init_block_index(s);
+    if (v->field_mode && v->second_field) {
+        s->dest[0] += s->current_picture_ptr->f.linesize[0];
+        s->dest[1] += s->current_picture_ptr->f.linesize[1];
+        s->dest[2] += s->current_picture_ptr->f.linesize[2];
+    }
+}
+
 /** @} */ //Bitplane group
 
@@ -80,7 +92,7 @@ static void vc1_put_signed_blocks_clamped(VC1Context *v)
 {
     MpegEncContext *s = &v->s;
     int topleft_mb_pos, top_mb_pos;
-    int stride_y, fieldtx;
+    int stride_y, fieldtx = 0;
     int v_dist;
 
     /* The put pixels loop is always one MB row behind the decoding loop,
@@ -93,7 +105,8 @@ static void vc1_put_signed_blocks_clamped(VC1Context *v)
     if (!s->first_slice_line) {
         if (s->mb_x) {
             topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
-            fieldtx = v->fieldtx_plane[topleft_mb_pos];
+            if (v->fcm == ILACE_FRAME)
+                fieldtx = v->fieldtx_plane[topleft_mb_pos];
             stride_y = s->linesize << fieldtx;
             v_dist = (16 - fieldtx) >> (fieldtx == 0);
             s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
@@ -117,7 +130,8 @@ static void vc1_put_signed_blocks_clamped(VC1Context *v)
         }
         if (s->mb_x == s->mb_width - 1) {
             top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
-            fieldtx = v->fieldtx_plane[top_mb_pos];
+            if (v->fcm == ILACE_FRAME)
+                fieldtx = v->fieldtx_plane[top_mb_pos];
             stride_y = s->linesize << fieldtx;
             v_dist = fieldtx ? 15 : 8;
             s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
@@ -331,7 +345,6 @@ static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
 static void vc1_mc_1mv(VC1Context *v, int dir)
 {
     MpegEncContext *s = &v->s;
-    DSPContext *dsp = &v->s.dsp;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcY, *srcU, *srcV;
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
@@ -370,7 +383,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     }
     if (v->field_mode) { // interlaced field picture
         if (!dir) {
-            if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) {
+            if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
                 srcY = s->current_picture.f.data[0];
                 srcU = s->current_picture.f.data[1];
                 srcV = s->current_picture.f.data[2];
@@ -396,6 +409,9 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
         }
     }
 
+    if(!srcY)
+        return;
+
     src_x = s->mb_x * 16 + (mx >> 2);
     src_y = s->mb_y * 16 + (my >> 2);
     uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
@@ -494,13 +510,8 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
         srcY += s->mspel * (1 + s->linesize);
     }
 
-    if (v->field_mode && v->cur_field_type) {
-        off = s->current_picture_ptr->f.linesize[0];
-        off_uv = s->current_picture_ptr->f.linesize[1];
-    } else {
         off = 0;
         off_uv = 0;
-    }
     if (s->mspel) {
         dxy = ((my & 3) << 2) | (mx & 3);
         v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
@@ -511,9 +522,9 @@
     } else { // hpel mc - always used for luma
         dxy = (my & 2) | ((mx & 2) >> 1);
         if (!v->rnd)
-            dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
         else
-            dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
     }
 
     if (s->flags & CODEC_FLAG_GRAY) return;
@@ -545,7 +556,6 @@ static inline int median4(int a, int b, int c, int d)
 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
 {
     MpegEncContext *s = &v->s;
-    DSPContext *dsp = &v->s.dsp;
     uint8_t *srcY;
     int dxy, mx, my, src_x, src_y;
     int off;
@@ -562,7 +572,7 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
 
     if (!dir) {
         if (v->field_mode) {
-            if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type)
+            if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field)
                 srcY = s->current_picture.f.data[0];
             else
                 srcY = s->last_picture.f.data[0];
@@ -571,6 +581,9 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
     } else
         srcY = s->next_picture.f.data[0];
 
+    if(!srcY)
+        return;
+
     if (v->field_mode) {
         if (v->cur_field_type != v->ref_field_type[dir])
             my = my - 2 + 4 * v->cur_field_type;
@@ -603,6 +616,8 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
             tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
             ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
             break;
+        default:
+            av_assert2(0);
         }
         s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
         s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
@@ -631,8 +646,6 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
         off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
     else
         off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
-    if (v->field_mode && v->cur_field_type)
-        off += s->current_picture_ptr->f.linesize[0];
 
     src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
     if (!fieldmv)
@@ -707,9 +720,9 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
     } else { // hpel mc - always used for luma
         dxy = (my & 2) | ((mx & 2) >> 1);
         if (!v->rnd)
-            dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
+            s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        else
-            dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
+            s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
     }
 }
 
@@ -843,27 +856,33 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
     if (!dir) {
         if (v->field_mode) {
             if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
-                srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-                srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+                srcU = s->current_picture.f.data[1];
+                srcV = s->current_picture.f.data[2];
             } else {
-                srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-                srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+                srcU = s->last_picture.f.data[1];
+                srcV = s->last_picture.f.data[2];
             }
         } else {
-            srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-            srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcU = s->last_picture.f.data[1];
+            srcV = s->last_picture.f.data[2];
         }
     } else {
-        srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-        srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+        srcU = s->next_picture.f.data[1];
+        srcV = s->next_picture.f.data[2];
     }
 
+    if(!srcU)
+        return;
+
+    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
+    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
+
     if (v->field_mode) {
         if (chroma_ref_type) {
            srcU += s->current_picture_ptr->f.linesize[1];
            srcV += s->current_picture_ptr->f.linesize[2];
         }
-        off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
+        off = 0;
     }
 
     if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
@@ -968,7 +987,8 @@ static void vc1_mc_4mv_chroma4(VC1Context *v)
         uvmy_field[i] = (uvmy_field[i] & 3) << 1;
 
         if (fieldmv && !(uvsrc_y & 1))
-            v_edge_pos--;
+            v_edge_pos = (s->v_edge_pos >> 1) - 1;
+
         if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
             uvsrc_y--;
         if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
@@ -1144,6 +1164,7 @@ static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
             }
         } else {
+            av_assert0(index < esc);
             if (extend_x)
                 offs_tab = offset_table2;
             else
@@ -1740,9 +1761,10 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
             px = mid_pred(A[0], B[0], C[0]);
             py = mid_pred(A[1], B[1], C[1]);
         } else if (total_valid) {
-            if (a_valid) { px = A[0]; py = A[1]; }
-            if (b_valid) { px = B[0]; py = B[1]; }
-            if (c_valid) { px = C[0]; py = C[1]; }
+            if (a_valid) { px = A[0]; py = A[1]; }
+            else if (b_valid) { px = B[0]; py = B[1]; }
+            else if (c_valid) { px = C[0]; py = C[1]; }
+            else av_assert2(0);
         } else
             px = py = 0;
     }
@@ -1797,7 +1819,7 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                 } else if (c_valid) {
                     px = C[0];
                     py = C[1];
-                }
+                } else px = py = 0;
             }
         } else if (total_valid == 1) {
             px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
@@ -1829,7 +1851,6 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
 static void vc1_interp_mc(VC1Context *v)
 {
     MpegEncContext *s = &v->s;
-    DSPContext *dsp = &v->s.dsp;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcY, *srcU, *srcV;
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
@@ -1931,13 +1952,8 @@ static void vc1_interp_mc(VC1Context *v)
         srcY += s->mspel * (1 + s->linesize);
     }
 
-    if (v->field_mode && v->cur_field_type) {
-        off = s->current_picture_ptr->f.linesize[0];
-        off_uv = s->current_picture_ptr->f.linesize[1];
-    } else {
         off = 0;
         off_uv = 0;
-    }
 
     if (s->mspel) {
         dxy = ((my & 3) << 2) | (mx & 3);
@@ -1950,9 +1966,9 @@
 
         dxy = (my & 2) | ((mx & 2) >> 1);
         if (!v->rnd)
-            dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
         else
-            dsp->avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
     }
 
     if (s->flags & CODEC_FLAG_GRAY) return;
@@ -2711,7 +2727,7 @@ static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
     MpegEncContext *s = &v->s;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int i;
-    int16_t *dc_val;
+    int16_t *dc_val = NULL;
     int16_t *ac_val, *ac_val2;
     int dcdiff;
     int a_avail = v->a_avail, c_avail = v->c_avail;
@@ -2923,7 +2939,7 @@ static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
     MpegEncContext *s = &v->s;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int i;
-    int16_t *dc_val;
+    int16_t *dc_val = NULL;
     int16_t *ac_val, *ac_val2;
     int dcdiff;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -3698,7 +3714,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
     int idx_mbmode = 0, mvbp;
     int stride_y, fieldtx;
 
-    mquant = v->pq; /* Loosy initialization */
+    mquant = v->pq; /* Lossy initialization */
 
     if (v->skip_is_raw)
         skipped = get_bits1(gb);
@@ -3902,11 +3918,11 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
     int val; /* temp values */
     int first_block = 1;
     int dst_idx, off;
-    int pred_flag;
+    int pred_flag = 0;
     int block_cbp = 0, pat, block_tt = 0;
     int idx_mbmode = 0;
 
-    mquant = v->pq; /* Loosy initialization */
+    mquant = v->pq; /* Lossy initialization */
 
     idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
     if (idx_mbmode <= 1) { // intra MB
@@ -3941,7 +3957,6 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
                 continue;
             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
             off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
-            off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
             // TODO: loop filter
         }
@@ -3988,8 +4003,6 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
         dst_idx += i >> 2;
         val = ((cbp >> (5 - i)) & 1);
         off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
-        if (v->cur_field_type)
-            off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
         if (val) {
             pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                      first_block, s->dest[dst_idx] + off,
@@ -4179,7 +4192,7 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
     int bmvtype = BMV_TYPE_BACKWARD;
     int idx_mbmode, interpmvp;
 
-    mquant = v->pq; /* Loosy initialization */
+    mquant = v->pq; /* Lossy initialization */
     s->mb_intra = 0;
 
     idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
@@ -4218,7 +4231,6 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
                 for (j = 0; j < 64; j++)
                     s->block[i][j] <<= 1;
             off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
-            off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
             // TODO: yet to perform loop filter
         }
@@ -4300,8 +4312,6 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
             dst_idx += i >> 2;
             val = ((cbp >> (5 - i)) & 1);
             off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
-            if (v->cur_field_type)
-                off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
             if (val) {
                 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                    first_block, s->dest[dst_idx] + off,
@@ -4360,7 +4370,7 @@ static void vc1_decode_i_blocks(VC1Context *v)
     s->first_slice_line = 1;
     for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (; s->mb_x < v->end_mb_x; s->mb_x++) {
             uint8_t *dst[6];
             ff_update_block_index(s);
@@ -4500,13 +4510,13 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
     s->mb_y = s->start_mb_y;
     if (s->start_mb_y) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
                (1 + s->b8_stride) * sizeof(*s->coded_block));
     }
     for (; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (;s->mb_x < s->mb_width; s->mb_x++) {
             int16_t (*block)[64] = v->block[v->cur_blk_idx];
             ff_update_block_index(s);
@@ -4577,7 +4587,8 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
 
     /* raw bottom MB row */
     s->mb_x = 0;
-    ff_init_block_index(s);
+    init_block_index(v);
+
     for (;s->mb_x < s->mb_width; s->mb_x++) {
         ff_update_block_index(s);
         vc1_put_signed_blocks_clamped(v);
@@ -4625,7 +4636,7 @@ static void vc1_decode_p_blocks(VC1Context *v)
     memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
 
     for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (; s->mb_x < s->mb_width; s->mb_x++) {
             ff_update_block_index(s);
@@ -4651,9 +4662,9 @@
         if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
         s->first_slice_line = 0;
     }
-    if (apply_loop_filter) {
+    if (apply_loop_filter && v->fcm == PROGRESSIVE) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (; s->mb_x < s->mb_width; s->mb_x++) {
             ff_update_block_index(s);
             vc1_apply_p_loop_filter(v);
@@ -4697,7 +4708,7 @@ static void vc1_decode_b_blocks(VC1Context *v)
     s->first_slice_line = 1;
     for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (; s->mb_x < s->mb_width; s->mb_x++) {
             ff_update_block_index(s);
 
@@ -4734,11 +4745,13 @@ static void vc1_decode_skip_blocks(VC1Context *v)
     s->first_slice_line = 1;
     for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         ff_update_block_index(s);
-        memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
-        memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
-        memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+        if (s->last_picture.f.data[0]) {
+            memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
+            memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+            memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+        }
         ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         s->first_slice_line = 0;
     }
@@ -5008,6 +5021,7 @@ static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
 
 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
 {
+    int ret;
     MpegEncContext *s = &v->s;
     AVCodecContext *avctx = s->avctx;
     SpriteData sd;
@@ -5025,10 +5039,8 @@ static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
     }
 
     av_frame_unref(&v->sprite_output_frame);
-    if (ff_get_buffer(avctx, &v->sprite_output_frame, 0) < 0) {
-        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
-    }
+    if ((ret = ff_get_buffer(avctx, &v->sprite_output_frame, 0)) < 0)
+        return ret;
 
     vc1_draw_sprites(v, &sd);
 
@@ -5163,6 +5175,16 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
 
     if (ff_vc1_init_common(v) < 0)
         return -1;
+    // ensure static VLC tables are initialized
+    if (ff_msmpeg4_decode_init(avctx) < 0)
+        return -1;
+    if (ff_vc1_decode_init_alloc_tables(v) < 0)
+        return -1;
+    // Hack to ensure the above functions will be called
+    // again once we know all necessary settings.
+    // That this is necessary might indicate a bug.
+    ff_vc1_decode_end(avctx);
+
     ff_h264chroma_init(&v->h264chroma, 8);
     ff_vc1dsp_init(&v->vc1dsp);
@@ -5263,6 +5285,11 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
             v->sprite_height > 1 << 14 || v->output_width > 1 << 14 ||
             v->output_height > 1 << 14)
             return -1;
+
+        if ((v->sprite_width&1) || (v->sprite_height&1)) {
+            avpriv_request_sample(avctx, "odd sprites support");
+            return AVERROR_PATCHWELCOME;
+        }
     }
     return 0;
 }
@@ -5315,14 +5342,19 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     MpegEncContext *s = &v->s;
     AVFrame *pict = data;
     uint8_t *buf2 = NULL;
-    const uint8_t *buf_start = buf;
-    int mb_height, n_slices1;
+    const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
+    int mb_height, n_slices1=-1;
     struct {
         uint8_t *buf;
         GetBitContext gb;
         int mby_start;
     } *slices = NULL, *tmp;
 
+    v->second_field = 0;
+
+    if(s->flags & CODEC_FLAG_LOW_DELAY)
+        s->low_delay = 1;
+
     /* no supplementary picture */
     if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
         /* special case for last picture */
@@ -5334,7 +5366,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
             *got_frame = 1;
         }
 
-        return 0;
+        return buf_size;
     }
 
     if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
@@ -5367,6 +5399,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 break;
             case VC1_CODE_FIELD: {
                 int buf_size3;
+                if (avctx->hwaccel ||
+                    s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+                    buf_start_second_field = start;
                 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
                 if (!tmp)
                     goto err;
@@ -5418,6 +5453,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
                 goto err;
             } else { // found field marker, unescape second field
+                if (avctx->hwaccel ||
+                    s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+                    buf_start_second_field = divider;
                 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
                 if (!tmp)
                     goto err;
@@ -5471,6 +5509,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     s->low_delay = !avctx->has_b_frames || v->res_sprite;
 
     if (v->profile == PROFILE_ADVANCED) {
+        if(avctx->coded_width<=1 || avctx->coded_height<=1)
+            goto err;
         s->h_edge_pos = avctx->coded_width;
         s->v_edge_pos = avctx->coded_height;
     }
@@ -5487,15 +5527,20 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
 
     // do parse frame header
     v->pic_header_flag = 0;
+    v->first_pic_header_flag = 1;
     if (v->profile < PROFILE_ADVANCED) {
-        if (ff_vc1_parse_frame_header(v, &s->gb) == -1) {
+        if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
            goto err;
        }
    } else {
-        if (ff_vc1_parse_frame_header_adv(v, &s->gb) == -1) {
+        if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
            goto err;
        }
    }
+    v->first_pic_header_flag = 0;
+
+    if (avctx->debug & FF_DEBUG_PICT_INFO)
+        av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
 
     if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
         && s->pict_type != AV_PICTURE_TYPE_I) {
@@ -5503,6 +5548,11 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         goto err;
     }
 
+    if ((s->mb_height >> v->field_mode) == 0) {
+        av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
+        goto err;
+    }
+
     // process pulldown flags
     s->current_picture_ptr->f.repeat_pict = 0;
     // Pulldown flags are only valid when 'broadcast' has been set.
@@ -5540,6 +5590,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         goto err;
     }
 
+    v->s.current_picture_ptr->f.interlaced_frame = (v->fcm != PROGRESSIVE);
+    v->s.current_picture_ptr->f.top_field_first = v->tff;
+
     s->me.qpel_put = s->dsp.put_qpel_pixels_tab;
     s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab;
 
@@ -5547,13 +5600,48 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
         ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
     else if (avctx->hwaccel) {
-        if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
-            goto err;
-        if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
-            goto err;
-        if (avctx->hwaccel->end_frame(avctx) < 0)
-            goto err;
+        if (v->field_mode && buf_start_second_field) {
+            // decode first field
+            s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
+            if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
+                goto err;
+            if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
+                goto err;
+            if (avctx->hwaccel->end_frame(avctx) < 0)
+                goto err;
+
+            // decode second field
+            s->gb = slices[n_slices1 + 1].gb;
+            s->picture_structure = PICT_TOP_FIELD + v->tff;
+            v->second_field = 1;
+            v->pic_header_flag = 0;
+            if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
+                av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
+                goto err;
+            }
+            v->s.current_picture_ptr->f.pict_type = v->s.pict_type;
+
+            if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
+                goto err;
+            if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
+                goto err;
+            if (avctx->hwaccel->end_frame(avctx) < 0)
+                goto err;
+        } else {
+            s->picture_structure = PICT_FRAME;
+            if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
+                goto err;
+            if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
+                goto err;
+            if (avctx->hwaccel->end_frame(avctx) < 0)
+                goto err;
+        }
     } else {
+        int header_ret = 0;
+
+        if (v->fcm == ILACE_FRAME && s->pict_type == AV_PICTURE_TYPE_B)
+            goto err; // This codepath is still incomplete thus it is disabled
+
         ff_mpeg_er_frame_start(s);
 
         v->bits = buf_size * 8;
@@ -5584,7 +5672,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                     continue;
                 }
                 v->second_field = 1;
-                v->blocks_off = s->mb_width * s->mb_height << 1;
+                v->blocks_off = s->b8_stride * (s->mb_height&~1);
                 v->mb_off = s->mb_stride * s->mb_height >> 1;
             } else {
                 v->second_field = 0;
@@ -5594,23 +5682,38 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
            if (i) {
                v->pic_header_flag = 0;
                if (v->field_mode && i == n_slices1 + 2) {
-                    if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
+                    if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
                        av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
                        continue;
                    }
                } else if (get_bits1(&s->gb)) {
                    v->pic_header_flag = 1;
-                    if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
+                    if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
                        av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
                        continue;
                    }
                }
            }
+            if (header_ret < 0)
+                continue;
            s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
            if (!v->field_mode || v->second_field)
                s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
-            else
+            else {
+                if (i >= n_slices) {
+                    av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
+                    continue;
+                }
                s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
+            }
+            if (s->end_mb_y <= s->start_mb_y) {
+                av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
+                continue;
+            }
+            if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
+                av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
+                continue;
+            }
            ff_vc1_decode_blocks(v);
            if (i != n_slices)
                s->gb = slices[i].gb;
@@ -5631,7 +5734,10 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                    get_bits_count(&s->gb), s->gb.size_in_bits);
 //  if (get_bits_count(&s->gb) > buf_size * 8)
 //      return -1;
-        ff_er_frame_end(&s->er);
+        if(s->er.error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
+            goto err;
+        if(!v->field_mode)
+            ff_er_frame_end(&s->er);
     }
 
     ff_MPV_frame_end(s);
@@ -5653,11 +5759,11 @@ image:
     if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
         if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
             goto err;
-        ff_print_debug_info(s, s->current_picture_ptr);
+        ff_print_debug_info(s, s->current_picture_ptr, pict);
     } else if (s->last_picture_ptr != NULL) {
        if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
            goto err;
-        ff_print_debug_info(s, s->last_picture_ptr);
+        ff_print_debug_info(s, s->last_picture_ptr, pict);
    }
    if (s->last_picture_ptr || s->low_delay) {
        *got_frame = 1;
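
The recurring change in the motion-compensation and block-decode hunks above is that per-call destination offsets of the form "v->cur_field_type ? linesize : 0" are dropped in favour of the new init_block_index() helper, which biases s->dest[] once per macroblock row when the second field of an interlaced field picture is decoded. As an illustration only (a toy model in plain C, not the FFmpeg API; every name below is made up), this is the buffer layout that makes the one-time bias sufficient: the two fields share one frame buffer with interleaved lines, so the bottom field starts one frame line lower and its lines are two frame strides apart.

    #include <stdio.h>
    #include <string.h>

    /* Toy model: one frame buffer holding two interleaved fields.  Instead of
     * adding "second_field ? linesize : 0" at every individual write, the
     * destination pointer is biased once and the field is then walked with
     * twice the frame stride. */

    enum { WIDTH = 8, LINES = 4 };

    /* mark every line of one field with a value */
    static void fill_field(unsigned char *dest, int field_stride,
                           int field_lines, unsigned char val)
    {
        for (int y = 0; y < field_lines; y++)
            memset(dest + y * field_stride, val, WIDTH);
    }

    int main(void)
    {
        unsigned char frame[WIDTH * LINES];
        int linesize     = WIDTH;        /* frame stride                  */
        int field_stride = 2 * linesize; /* field lines are 2 lines apart */

        memset(frame, 0, sizeof(frame));
        fill_field(frame, field_stride, LINES / 2, 1);            /* top field    */
        fill_field(frame + linesize, field_stride, LINES / 2, 2); /* bottom field:
                                                                     bias applied once */

        for (int y = 0; y < LINES; y++)
            printf("frame line %d belongs to field %d\n", y, frame[y * linesize]);
        return 0;
    }

Compiling and running this prints alternating field numbers 1, 2, 1, 2 for the four frame lines, which is the interleaving the patch relies on when it adds current_picture_ptr->f.linesize[] to s->dest[] for the second field.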