author    | Michael Niedermayer <michaelni@gmx.at> | 2012-03-17 23:16:05 +0100
committer | Michael Niedermayer <michaelni@gmx.at> | 2012-03-17 23:16:05 +0100
commit    | 967bdb8572f27b9891fe7bd9b8489146a906633d (patch)
tree      | 61b486be897a4503f71e2ecc7734f16930dc1a94 /libavcodec
parent    | 219a6fb61cb9e6f534f20dc3313442f89b0ffd42 (diff)
parent    | 6ca3b248e21fc48418c4221fc92c2eecaf07cb9b (diff)
download  | ffmpeg-967bdb8572f27b9891fe7bd9b8489146a906633d.tar.gz
Merge remote-tracking branch 'qatar/master'
* qatar/master:
resample: allocate a large enough output buffer
fate: fix enc_dec_pcm tests with remote target
wmaenc: remove bit-exact hack
FATE: remove WMA acodec tests
FATE: add WMAv1 and WMAv2 encode/decode tests with fuzzy comparison
FATE: add AC-3 and E-AC-3 encode/decode tests with fuzzy comparison
qtrle: Use bytestream2 functions to prevent buffer overreads.
vqavideo: check malloc return values
x11grab: fix a memory leak exposed by valgrind
threads: fix old frames returned after avcodec_flush_buffers()
MPV: always mark dummy frames as reference
h264: fix deadlocks on incomplete reference frame decoding.
mpeg4: report frame decoding completion at ff_MPV_frame_end().
mimic: don't use self as reference, and report completion at end of decode().
Conflicts:
libavcodec/h264.c
libavcodec/qtrle.c
libavcodec/resample.c
libavcodec/vqavideo.c
libavdevice/x11grab.c
tests/ref/seek/wmav1_asf
tests/ref/seek/wmav2_asf
Merged-by: Michael Niedermayer <michaelni@gmx.at>
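
The qtrle entry above ("Use bytestream2 functions to prevent buffer overreads") replaces manual `stream_ptr` indexing into `s->buf` with libavcodec's bounds-checked byte reader, which is why the old `CHECK_STREAM_PTR()` macro disappears from the diff below. A minimal sketch of that reading pattern, assuming an FFmpeg source tree; the chunk layout parsed here is made up for illustration, only the `bytestream2_*` calls mirror the API used in the diff:

```c
#include "libavcodec/bytestream.h"

/* Hypothetical chunk parser illustrating the bytestream2 pattern: the reader
 * is initialised once with the packet bounds, and every get/skip call is
 * clamped to those bounds, so a truncated packet cannot cause reads past the
 * end of the buffer. */
static int demo_parse_chunk(const uint8_t *buf, int buf_size)
{
    GetByteContext gb;
    bytestream2_init(&gb, buf, buf_size);

    bytestream2_skip(&gb, 4);                  /* skip a 32-bit chunk size  */
    int header = bytestream2_get_be16(&gb);    /* 16-bit big-endian header  */

    if (header & 0x0008) {                     /* optional extra parameters */
        if (bytestream2_get_bytes_left(&gb) < 8)
            return AVERROR_INVALIDDATA;        /* truncated packet          */
        int start_line = bytestream2_get_be16(&gb);
        bytestream2_skip(&gb, 2);
        int height     = bytestream2_get_be16(&gb);
        bytestream2_skip(&gb, 2);
        return start_line + height;            /* dummy result              */
    }
    return header;
}
```

Reads that do run off the end of the packet return zero-filled values instead of touching memory past the buffer, which is the overread protection the commit message refers to.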
Diffstat (limited to 'libavcodec')
-rw-r--r-- | libavcodec/h264.c      | 123
-rw-r--r-- | libavcodec/mimic.c     |  13
-rw-r--r-- | libavcodec/mpegvideo.c |   5
-rw-r--r-- | libavcodec/pthread.c   |   3
-rw-r--r-- | libavcodec/qtrle.c     | 187
-rw-r--r-- | libavcodec/resample.c  |   6
-rw-r--r-- | libavcodec/vqavideo.c  |  27
-rw-r--r-- | libavcodec/wmaenc.c    |   4
8 files changed, 219 insertions, 149 deletions
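
Several of the h264.c, mimic.c, and mpegvideo.c hunks below revolve around libavcodec's frame-threading progress protocol: the thread that owns a picture publishes how far it has decoded via ff_thread_report_progress() (INT_MAX meaning "completely finished, even on an error path"), and any thread that wants to read from that picture first blocks in ff_thread_await_progress(). The sketch below only illustrates the shape of that contract using the 2012-era AVFrame-based signatures visible in the diff; the DemoContext structure and helper names are hypothetical:

```c
#include <limits.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/thread.h"

/* Hypothetical frame-threaded decoder fragment. */
typedef struct DemoContext {
    AVFrame cur;   /* picture this thread is producing          */
    AVFrame ref;   /* reference picture owned by another thread */
} DemoContext;

static void demo_decode_row(DemoContext *s, int row)
{
    /* Consumer side: wait until the owner has decoded at least `row`
     * rows of the reference before using it for prediction. */
    ff_thread_await_progress(&s->ref, row, 0);

    /* ... predict from s->ref, reconstruct row `row` of s->cur ... */

    /* Producer side: tell waiting threads this row of our frame is done. */
    ff_thread_report_progress(&s->cur, row, 0);
}

static void demo_finish_frame(DemoContext *s)
{
    /* Reporting INT_MAX marks the picture fully decoded. Doing this on
     * every exit path (including errors) is what keeps other threads from
     * deadlocking in ff_thread_await_progress() -- the point of several
     * hunks in this merge. */
    ff_thread_report_progress(&s->cur, INT_MAX, 0);
}
```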
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index cfb6aa5345..5785278ede 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -2733,8 +2733,7 @@ static int field_end(H264Context *h, int in_setup)
     s->mb_y = 0;
 
     if (!in_setup && !s->dropable)
-        ff_thread_report_progress(&s->current_picture_ptr->f,
-                                  (16 * s->mb_height >> FIELD_PICTURE) - 1,
+        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
                                   s->picture_structure == PICT_BOTTOM_FIELD);
 
     if (CONFIG_H264_VDPAU_DECODER &&
@@ -2857,9 +2856,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
     int num_ref_idx_active_override_flag;
     unsigned int slice_type, tmp, i, j;
     int default_ref_list_done = 0;
-    int last_pic_structure;
-
-    s->dropable = h->nal_ref_idc == 0;
+    int last_pic_structure, last_pic_dropable;
 
     /* FIXME: 2tap qpel isn't implemented for high bit depth. */
     if ((s->avctx->flags2 & CODEC_FLAG2_FAST) &&
@@ -2879,8 +2876,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
         }
         h0->current_slice = 0;
 
-        if (!s0->first_field)
+        if (!s0->first_field) {
+            if (s->current_picture_ptr && !s->dropable &&
+                s->current_picture_ptr->owner2 == s) {
+                ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
+                                          s->picture_structure == PICT_BOTTOM_FIELD);
+            }
             s->current_picture_ptr = NULL;
+        }
     }
 
     slice_type = get_ue_golomb_31(&s->gb);
@@ -3114,6 +3117,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
     h->mb_mbaff = 0;
     h->mb_aff_frame = 0;
     last_pic_structure = s0->picture_structure;
+    last_pic_dropable = s->dropable;
+    s->dropable = h->nal_ref_idc == 0;
     if (h->sps.frame_mbs_only_flag) {
         s->picture_structure = PICT_FRAME;
     } else {
@@ -3130,7 +3135,17 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
     }
     h->mb_field_decoding_flag = s->picture_structure != PICT_FRAME;
 
-    if (h0->current_slice == 0) {
+    if (h0->current_slice != 0) {
+        if (last_pic_structure != s->picture_structure ||
+            last_pic_dropable != s->dropable) {
+            av_log(h->s.avctx, AV_LOG_ERROR,
+                   "Changing field mode (%d -> %d) between slices is not allowed\n",
+                   last_pic_structure, s->picture_structure);
+            s->picture_structure = last_pic_structure;
+            s->dropable = last_pic_dropable;
+            return AVERROR_INVALIDDATA;
+        }
+    } else {
         /* Shorten frame num gaps so we don't have to allocate reference
          * frames just to throw them away */
         if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
@@ -3149,6 +3164,72 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             }
         }
 
+        /* See if we have a decoded first field looking for a pair...
+         * Here, we're using that to see if we should mark previously
+         * decode frames as "finished".
+         * We have to do that before the "dummy" in-between frame allocation,
+         * since that can modify s->current_picture_ptr. */
+        if (s0->first_field) {
+            assert(s0->current_picture_ptr);
+            assert(s0->current_picture_ptr->f.data[0]);
+            assert(s0->current_picture_ptr->f.reference != DELAYED_PIC_REF);
+
+            /* Mark old field/frame as completed */
+            if (!last_pic_dropable && s0->current_picture_ptr->owner2 == s0) {
+                ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
+                                          last_pic_structure == PICT_BOTTOM_FIELD);
+            }
+
+            /* figure out if we have a complementary field pair */
+            if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
+                /* Previous field is unmatched. Don't display it, but let it
+                 * remain for reference if marked as such. */
+                if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
+                    ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
+                                              last_pic_structure == PICT_TOP_FIELD);
+                }
+            } else {
+                if (s0->current_picture_ptr->frame_num != h->frame_num) {
+                    /* This and previous field were reference, but had
+                     * different frame_nums. Consider this field first in
+                     * pair. Throw away previous field except for reference
+                     * purposes. */
+                    if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
+                        ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
+                                                  last_pic_structure == PICT_TOP_FIELD);
+                    }
+                } else {
+                    /* Second field in complementary pair */
+                    if (!((last_pic_structure == PICT_TOP_FIELD &&
+                           s->picture_structure == PICT_BOTTOM_FIELD) ||
+                          (last_pic_structure == PICT_BOTTOM_FIELD &&
+                           s->picture_structure == PICT_TOP_FIELD))) {
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Invalid field mode combination %d/%d\n",
+                               last_pic_structure, s->picture_structure);
+                        s->picture_structure = last_pic_structure;
+                        s->dropable = last_pic_dropable;
+                        return AVERROR_INVALIDDATA;
+                    } else if (last_pic_dropable != s->dropable) {
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Cannot combine reference and non-reference fields in the same frame\n");
+                        av_log_ask_for_sample(s->avctx, NULL);
+                        s->picture_structure = last_pic_structure;
+                        s->dropable = last_pic_dropable;
+                        return AVERROR_INVALIDDATA;
+                    }
+
+                    /* Take ownership of this buffer. Note that if another thread owned
+                     * the first field of this buffer, we're not operating on that pointer,
+                     * so the original thread is still responsible for reporting progress
+                     * on that first field (or if that was us, we just did that above).
+                     * By taking ownership, we assign responsibility to ourselves to
+                     * report progress on the second field. */
+                    s0->current_picture_ptr->owner2 = s0;
+                }
+            }
+        }
+
         while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 &&
                h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
             Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
@@ -3182,7 +3263,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             }
         }
 
-        /* See if we have a decoded first field looking for a pair... */
+        /* See if we have a decoded first field looking for a pair...
+         * We're using that to see whether to continue decoding in that
+         * frame, or to allocate a new one. */
         if (s0->first_field) {
             assert(s0->current_picture_ptr);
             assert(s0->current_picture_ptr->f.data[0]);
@@ -3198,9 +3281,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             if (s0->current_picture_ptr->frame_num != h->frame_num) {
                 ff_thread_report_progress((AVFrame*)s0->current_picture_ptr, INT_MAX,
                                           s0->picture_structure==PICT_BOTTOM_FIELD);
-                /* This and the previous field had
-                 * different frame_nums. Consider this field first in pair.
-                 * Throw away previous one except for reference purposes. */
+                /* This and the previous field had different frame_nums.
+                 * Consider this field first in pair. Throw away previous
+                 * one except for reference purposes. */
                 s0->first_field = 1;
                 s0->current_picture_ptr = NULL;
             } else {
@@ -4155,8 +4238,10 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
         ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
                                  &consumed, next_avc - buf_index);
-        if (ptr == NULL || dst_length < 0)
-            return -1;
+        if (ptr == NULL || dst_length < 0) {
+            buf_index = -1;
+            goto end;
+        }
         i = buf_index + consumed;
         if ((s->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
             buf[i] == 0x00 && buf[i + 1] == 0x00 &&
@@ -4211,7 +4296,8 @@ again:
             if (h->nal_unit_type != NAL_IDR_SLICE) {
                 av_log(h->s.avctx, AV_LOG_ERROR,
                        "Invalid mix of idr and non-idr slices\n");
-                return -1;
+                buf_index = -1;
+                goto end;
             }
             idr(h); // FIXME ensure we don't lose some frames if there is reordering
         case NAL_SLICE:
@@ -4369,6 +4455,15 @@ again:
     }
     if (context_count)
         execute_decode_slices(h, context_count);
+
+end:
+    /* clean up */
+    if (s->current_picture_ptr && s->current_picture_ptr->owner2 == s &&
+        !s->dropable) {
+        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
+                                  s->picture_structure == PICT_BOTTOM_FIELD);
+    }
+
     return buf_index;
 }
diff --git a/libavcodec/mimic.c b/libavcodec/mimic.c
index 4f085b4e5d..05c4b769d8 100644
--- a/libavcodec/mimic.c
+++ b/libavcodec/mimic.c
@@ -259,8 +259,8 @@ static int decode(MimicContext *ctx, int quality, int num_coeffs,
                     int index = (ctx->cur_index+backref)&15;
                     uint8_t *p = ctx->flipped_ptrs[index].data[0];
 
-                    ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0);
-                    if(p) {
+                    if (index != ctx->cur_index && p) {
+                        ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0);
                         p += src -
                              ctx->flipped_ptrs[ctx->prev_index].data[plane];
                         ctx->dsp.put_pixels_tab[1][0](dst, p, stride, 8);
@@ -311,6 +311,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
     int width, height;
     int quality, num_coeffs;
     int swap_buf_size = buf_size - MIMIC_HEADER_SIZE;
+    int res;
 
     if (buf_size <= MIMIC_HEADER_SIZE) {
         av_log(avctx, AV_LOG_ERROR, "insufficient data\n");
@@ -378,10 +379,10 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
                        swap_buf_size>>2);
     init_get_bits(&ctx->gb, ctx->swap_buf, swap_buf_size << 3);
 
-    if(!decode(ctx, quality, num_coeffs, !is_pframe)) {
-        if (avctx->active_thread_type&FF_THREAD_FRAME)
-            ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0);
-        else {
+    res = decode(ctx, quality, num_coeffs, !is_pframe);
+    ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0);
+    if (!res) {
+        if (!(avctx->active_thread_type & FF_THREAD_FRAME)) {
             ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]);
             return -1;
         }
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 9577c9d38f..54a419ffd6 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -1249,6 +1249,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 
         ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
         ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
+        s->last_picture_ptr->f.reference = 3;
     }
     if ((s->next_picture_ptr == NULL ||
          s->next_picture_ptr->f.data[0] == NULL) &&
@@ -1263,6 +1264,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
                 return -1;
             ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
             ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
+            s->next_picture_ptr->f.reference = 3;
         }
     }
 
@@ -1390,8 +1392,7 @@ void ff_MPV_frame_end(MpegEncContext *s)
     s->avctx->coded_frame = &s->current_picture_ptr->f;
 
     if (s->codec_id != CODEC_ID_H264 &&
         s->current_picture.f.reference) {
-        ff_thread_report_progress(&s->current_picture_ptr->f,
-                                  s->mb_height - 1, 0);
+        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
     }
 }
diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c
index c4e8aab952..d02f6844b4 100644
--- a/libavcodec/pthread.c
+++ b/libavcodec/pthread.c
@@ -909,6 +909,9 @@ void ff_thread_flush(AVCodecContext *avctx)
     fctx->next_decoding = fctx->next_finished = 0;
     fctx->delaying = 1;
     fctx->prev_thread = NULL;
+    // Make sure decode flush calls with size=0 won't return old frames
+    for (int i = 0; i < avctx->thread_count; i++)
+        fctx->threads[i].got_frame = 0;
 }
 
 static int *allocate_progress(PerThreadContext *p)
diff --git a/libavcodec/qtrle.c b/libavcodec/qtrle.c
index e3a595c5ac..f9abf782fa 100644
--- a/libavcodec/qtrle.c
+++ b/libavcodec/qtrle.c
@@ -35,27 +35,17 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include "libavutil/intreadwrite.h"
 #include "avcodec.h"
+#include "bytestream.h"
 
 typedef struct QtrleContext {
-
     AVCodecContext *avctx;
     AVFrame frame;
 
-    const unsigned char *buf;
-    int size;
-
+    GetByteContext g;
     uint32_t pal[256];
 } QtrleContext;
 
-#define CHECK_STREAM_PTR(n) \
-  if ((stream_ptr + n) > s->size) { \
-    av_log (s->avctx, AV_LOG_INFO, "Problem: stream_ptr out of bounds (%d >= %d)\n", \
-      stream_ptr + n, s->size); \
-    return; \
-  }
-
 #define CHECK_PIXEL_PTR(n) \
   if ((pixel_ptr + n > pixel_limit) || (pixel_ptr + n < 0)) { \
     av_log (s->avctx, AV_LOG_INFO, "Problem: pixel_ptr = %d, pixel_limit = %d\n", \
@@ -63,7 +53,7 @@ typedef struct QtrleContext {
     return; \
   } \
 
-static void qtrle_decode_1bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change)
+static void qtrle_decode_1bpp(QtrleContext *s, int row_ptr, int lines_to_change)
 {
     int rle_code;
     int pixel_ptr = 0;
@@ -83,9 +73,8 @@ static void qtrle_decode_1bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     lines_to_change++;
     while (lines_to_change) {
-        CHECK_STREAM_PTR(2);
-        skip = s->buf[stream_ptr++];
-        rle_code = (signed char)s->buf[stream_ptr++];
+        skip = bytestream2_get_byte(&s->g);
+        rle_code = (signed char)bytestream2_get_byte(&s->g);
         if (rle_code == 0)
             break;
         if(skip & 0x80) {
@@ -104,9 +93,9 @@ static void qtrle_decode_1bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
             rle_code = -rle_code;
             /* get the next 2 bytes from the stream, treat them as groups
              * of 8 pixels, and output them rle_code times */
-            CHECK_STREAM_PTR(2);
-            pi0 = s->buf[stream_ptr++];
-            pi1 = s->buf[stream_ptr++];
+
+            pi0 = bytestream2_get_byte(&s->g);
+            pi1 = bytestream2_get_byte(&s->g);
             CHECK_PIXEL_PTR(rle_code * 2);
 
             while (rle_code--) {
@@ -116,17 +105,16 @@ static void qtrle_decode_1bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
         } else {
             /* copy the same pixel directly to output 2 times */
             rle_code *= 2;
-            CHECK_STREAM_PTR(rle_code);
             CHECK_PIXEL_PTR(rle_code);
 
             while (rle_code--)
-                rgb[pixel_ptr++] = s->buf[stream_ptr++];
+                rgb[pixel_ptr++] = bytestream2_get_byte(&s->g);
         }
     }
 }
 
-static inline void qtrle_decode_2n4bpp(QtrleContext *s, int stream_ptr,
-                                       int row_ptr, int lines_to_change, int bpp)
+static inline void qtrle_decode_2n4bpp(QtrleContext *s, int row_ptr,
+                                       int lines_to_change, int bpp)
 {
     int rle_code, i;
     int pixel_ptr;
@@ -137,25 +125,22 @@ static inline void qtrle_decode_2n4bpp(QtrleContext *s, int stream_ptr,
     int num_pixels = (bpp == 4) ? 8 : 16;
 
     while (lines_to_change--) {
-        CHECK_STREAM_PTR(2);
-        pixel_ptr = row_ptr + (num_pixels * (s->buf[stream_ptr++] - 1));
+        pixel_ptr = row_ptr + (num_pixels * (bytestream2_get_byte(&s->g) - 1));
         CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
 
-        while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
+        while ((rle_code = (signed char)bytestream2_get_byte(&s->g)) != -1) {
             if (rle_code == 0) {
                 /* there's another skip code in the stream */
-                CHECK_STREAM_PTR(1);
-                pixel_ptr += (num_pixels * (s->buf[stream_ptr++] - 1));
+                pixel_ptr += (num_pixels * (bytestream2_get_byte(&s->g) - 1));
                 CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
             } else if (rle_code < 0) {
                 /* decode the run length code */
                 rle_code = -rle_code;
                 /* get the next 4 bytes from the stream, treat them as palette
                  * indexes, and output them rle_code times */
-                CHECK_STREAM_PTR(4);
                 for (i = num_pixels-1; i >= 0; i--) {
-                    pi[num_pixels-1-i] = (s->buf[stream_ptr] >> ((i*bpp) & 0x07)) & ((1<<bpp)-1);
-                    stream_ptr+= ((i & ((num_pixels>>2)-1)) == 0);
+                    pi[num_pixels-1-i] = (bytestream2_peek_byte(&s->g) >> ((i*bpp) & 0x07)) & ((1<<bpp)-1);
+                    bytestream2_skip(&s->g, ((i & ((num_pixels>>2)-1)) == 0));
                 }
                 CHECK_PIXEL_PTR(rle_code * num_pixels);
 
                 while (rle_code--) {
@@ -165,17 +150,18 @@ static inline void qtrle_decode_2n4bpp(QtrleContext *s, int stream_ptr,
             } else {
                 /* copy the same pixel directly to output 4 times */
                 rle_code *= 4;
-                CHECK_STREAM_PTR(rle_code);
                 CHECK_PIXEL_PTR(rle_code*(num_pixels>>2));
 
                 while (rle_code--) {
                     if(bpp == 4) {
-                        rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 4) & 0x0f;
-                        rgb[pixel_ptr++] = (s->buf[stream_ptr++]) & 0x0f;
+                        int x = bytestream2_get_byte(&s->g);
+                        rgb[pixel_ptr++] = (x >> 4) & 0x0f;
+                        rgb[pixel_ptr++] = x & 0x0f;
                     } else {
-                        rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 6) & 0x03;
-                        rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 4) & 0x03;
-                        rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 2) & 0x03;
-                        rgb[pixel_ptr++] = (s->buf[stream_ptr++]) & 0x03;
+                        int x = bytestream2_get_byte(&s->g);
+                        rgb[pixel_ptr++] = (x >> 6) & 0x03;
+                        rgb[pixel_ptr++] = (x >> 4) & 0x03;
+                        rgb[pixel_ptr++] = (x >> 2) & 0x03;
+                        rgb[pixel_ptr++] = x & 0x03;
                     }
                 }
             }
@@ -184,7 +170,7 @@ static inline void qtrle_decode_2n4bpp(QtrleContext *s, int stream_ptr,
     }
 }
 
-static void qtrle_decode_8bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change)
+static void qtrle_decode_8bpp(QtrleContext *s, int row_ptr, int lines_to_change)
 {
     int rle_code;
     int pixel_ptr;
@@ -194,26 +180,23 @@ static void qtrle_decode_8bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     int pixel_limit = s->frame.linesize[0] * s->avctx->height;
 
     while (lines_to_change--) {
-        CHECK_STREAM_PTR(2);
-        pixel_ptr = row_ptr + (4 * (s->buf[stream_ptr++] - 1));
+        pixel_ptr = row_ptr + (4 * (bytestream2_get_byte(&s->g) - 1));
         CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
 
-        while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
+        while ((rle_code = (signed char)bytestream2_get_byte(&s->g)) != -1) {
             if (rle_code == 0) {
                 /* there's another skip code in the stream */
-                CHECK_STREAM_PTR(1);
-                pixel_ptr += (4 * (s->buf[stream_ptr++] - 1));
+                pixel_ptr += (4 * (bytestream2_get_byte(&s->g) - 1));
                 CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
             } else if (rle_code < 0) {
                 /* decode the run length code */
                 rle_code = -rle_code;
                 /* get the next 4 bytes from the stream, treat them as palette
                  * indexes, and output them rle_code times */
-                CHECK_STREAM_PTR(4);
-                pi1 = s->buf[stream_ptr++];
-                pi2 = s->buf[stream_ptr++];
-                pi3 = s->buf[stream_ptr++];
-                pi4 = s->buf[stream_ptr++];
+                pi1 = bytestream2_get_byte(&s->g);
+                pi2 = bytestream2_get_byte(&s->g);
+                pi3 = bytestream2_get_byte(&s->g);
+                pi4 = bytestream2_get_byte(&s->g);
 
                 CHECK_PIXEL_PTR(rle_code * 4);
 
@@ -226,11 +209,10 @@ static void qtrle_decode_8bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
             } else {
                 /* copy the same pixel directly to output 4 times */
                 rle_code *= 4;
-                CHECK_STREAM_PTR(rle_code);
                 CHECK_PIXEL_PTR(rle_code);
 
                 while (rle_code--) {
-                    rgb[pixel_ptr++] = s->buf[stream_ptr++];
+                    rgb[pixel_ptr++] = bytestream2_get_byte(&s->g);
                 }
             }
         }
@@ -238,7 +220,7 @@ static void qtrle_decode_8bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     }
 }
 
-static void qtrle_decode_16bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change)
+static void qtrle_decode_16bpp(QtrleContext *s, int row_ptr, int lines_to_change)
 {
     int rle_code;
     int pixel_ptr;
@@ -248,22 +230,18 @@ static void qtrle_decode_16bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     int pixel_limit = s->frame.linesize[0] * s->avctx->height;
 
     while (lines_to_change--) {
-        CHECK_STREAM_PTR(2);
-        pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 2;
+        pixel_ptr = row_ptr + (bytestream2_get_byte(&s->g) - 1) * 2;
         CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
 
-        while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
+        while ((rle_code = (signed char)bytestream2_get_byte(&s->g)) != -1) {
             if (rle_code == 0) {
                 /* there's another skip code in the stream */
-                CHECK_STREAM_PTR(1);
-                pixel_ptr += (s->buf[stream_ptr++] - 1) * 2;
+                pixel_ptr += (bytestream2_get_byte(&s->g) - 1) * 2;
                 CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
             } else if (rle_code < 0) {
                 /* decode the run length code */
                 rle_code = -rle_code;
-                CHECK_STREAM_PTR(2);
-                rgb16 = AV_RB16(&s->buf[stream_ptr]);
-                stream_ptr += 2;
+                rgb16 = bytestream2_get_be16(&s->g);
 
                 CHECK_PIXEL_PTR(rle_code * 2);
 
@@ -272,13 +250,11 @@ static void qtrle_decode_16bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
                     pixel_ptr += 2;
                 }
             } else {
-                CHECK_STREAM_PTR(rle_code * 2);
                 CHECK_PIXEL_PTR(rle_code * 2);
 
                 /* copy pixels directly to output */
                 while (rle_code--) {
-                    rgb16 = AV_RB16(&s->buf[stream_ptr]);
-                    stream_ptr += 2;
+                    rgb16 = bytestream2_get_be16(&s->g);
                     *(unsigned short *)(&rgb[pixel_ptr]) = rgb16;
                     pixel_ptr += 2;
                 }
@@ -288,7 +264,7 @@ static void qtrle_decode_16bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     }
 }
 
-static void qtrle_decode_24bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change)
+static void qtrle_decode_24bpp(QtrleContext *s, int row_ptr, int lines_to_change)
 {
     int rle_code;
     int pixel_ptr;
@@ -298,23 +274,20 @@ static void qtrle_decode_24bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     int pixel_limit = s->frame.linesize[0] * s->avctx->height;
 
     while (lines_to_change--) {
-        CHECK_STREAM_PTR(2);
-        pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 3;
+        pixel_ptr = row_ptr + (bytestream2_get_byte(&s->g) - 1) * 3;
         CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
 
-        while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
+        while ((rle_code = (signed char)bytestream2_get_byte(&s->g)) != -1) {
             if (rle_code == 0) {
                 /* there's another skip code in the stream */
-                CHECK_STREAM_PTR(1);
-                pixel_ptr += (s->buf[stream_ptr++] - 1) * 3;
+                pixel_ptr += (bytestream2_get_byte(&s->g) - 1) * 3;
                 CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
             } else if (rle_code < 0) {
                 /* decode the run length code */
                 rle_code = -rle_code;
-                CHECK_STREAM_PTR(3);
-                r = s->buf[stream_ptr++];
-                g = s->buf[stream_ptr++];
-                b = s->buf[stream_ptr++];
+                r = bytestream2_get_byte(&s->g);
+                g = bytestream2_get_byte(&s->g);
+                b = bytestream2_get_byte(&s->g);
 
                 CHECK_PIXEL_PTR(rle_code * 3);
 
@@ -324,14 +297,13 @@ static void qtrle_decode_24bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
                     rgb[pixel_ptr++] = b;
                 }
             } else {
-                CHECK_STREAM_PTR(rle_code * 3);
                 CHECK_PIXEL_PTR(rle_code * 3);
 
                 /* copy pixels directly to output */
                 while (rle_code--) {
-                    rgb[pixel_ptr++] = s->buf[stream_ptr++];
-                    rgb[pixel_ptr++] = s->buf[stream_ptr++];
-                    rgb[pixel_ptr++] = s->buf[stream_ptr++];
+                    rgb[pixel_ptr++] = bytestream2_get_byte(&s->g);
+                    rgb[pixel_ptr++] = bytestream2_get_byte(&s->g);
+                    rgb[pixel_ptr++] = bytestream2_get_byte(&s->g);
                 }
             }
         }
@@ -339,7 +311,7 @@ static void qtrle_decode_24bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     }
 }
 
-static void qtrle_decode_32bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change)
+static void qtrle_decode_32bpp(QtrleContext *s, int row_ptr, int lines_to_change)
 {
     int rle_code;
     int pixel_ptr;
@@ -349,22 +321,18 @@ static void qtrle_decode_32bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
     int pixel_limit = s->frame.linesize[0] * s->avctx->height;
 
     while (lines_to_change--) {
-        CHECK_STREAM_PTR(2);
-        pixel_ptr = row_ptr + (s->buf[stream_ptr++] - 1) * 4;
+        pixel_ptr = row_ptr + (bytestream2_get_byte(&s->g) - 1) * 4;
         CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
 
-        while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) {
+        while ((rle_code = (signed char)bytestream2_get_byte(&s->g)) != -1) {
             if (rle_code == 0) {
                 /* there's another skip code in the stream */
-                CHECK_STREAM_PTR(1);
-                pixel_ptr += (s->buf[stream_ptr++] - 1) * 4;
+                pixel_ptr += (bytestream2_get_byte(&s->g) - 1) * 4;
                 CHECK_PIXEL_PTR(0);  /* make sure pixel_ptr is positive */
             } else if (rle_code < 0) {
                 /* decode the run length code */
                 rle_code = -rle_code;
-                CHECK_STREAM_PTR(4);
-                argb = AV_RB32(s->buf + stream_ptr);
-                stream_ptr += 4;
+                argb = bytestream2_get_be32(&s->g);
 
                 CHECK_PIXEL_PTR(rle_code * 4);
 
@@ -373,14 +341,12 @@ static void qtrle_decode_32bpp(QtrleContext *s, int stream_ptr, int row_ptr, int
                     pixel_ptr += 4;
                 }
             } else {
-                CHECK_STREAM_PTR(rle_code * 4);
                 CHECK_PIXEL_PTR(rle_code * 4);
 
                 /* copy pixels directly to output */
                 while (rle_code--) {
-                    argb = AV_RB32(s->buf + stream_ptr);
+                    argb = bytestream2_get_be32(&s->g);
                     AV_WN32A(rgb + pixel_ptr, argb);
-                    stream_ptr += 4;
                     pixel_ptr += 4;
                 }
             }
@@ -437,16 +403,12 @@ static int qtrle_decode_frame(AVCodecContext *avctx,
                               void *data, int *data_size,
                               AVPacket *avpkt)
 {
-    const uint8_t *buf = avpkt->data;
-    int buf_size = avpkt->size;
     QtrleContext *s = avctx->priv_data;
     int header, start_line;
-    int stream_ptr, height, row_ptr;
+    int height, row_ptr;
     int has_palette = 0;
 
-    s->buf = buf;
-    s->size = buf_size;
-
+    bytestream2_init(&s->g, avpkt->data, avpkt->size);
     s->frame.reference = 3;
     s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                             FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE;
@@ -456,66 +418,65 @@ static int qtrle_decode_frame(AVCodecContext *avctx,
     }
 
     /* check if this frame is even supposed to change */
-    if (s->size < 8)
+    if (avpkt->size < 8)
         goto done;
 
     /* start after the chunk size */
-    stream_ptr = 4;
+    bytestream2_seek(&s->g, 4, SEEK_SET);
 
     /* fetch the header */
-    header = AV_RB16(&s->buf[stream_ptr]);
-    stream_ptr += 2;
+    header = bytestream2_get_be16(&s->g);
 
     /* if a header is present, fetch additional decoding parameters */
     if (header & 0x0008) {
-        if(s->size < 14)
+        if (avpkt->size < 14)
             goto done;
-        start_line = AV_RB16(&s->buf[stream_ptr]);
-        stream_ptr += 4;
-        height = AV_RB16(&s->buf[stream_ptr]);
-        stream_ptr += 4;
+        start_line = bytestream2_get_be16(&s->g);
+        bytestream2_skip(&s->g, 2);
+        height = bytestream2_get_be16(&s->g);
+        bytestream2_skip(&s->g, 2);
         if (height > s->avctx->height - start_line)
             goto done;
     } else {
         start_line = 0;
-        height = s->avctx->height;
+        height = s->avctx->height;
     }
 
     row_ptr = s->frame.linesize[0] * start_line;
 
     switch (avctx->bits_per_coded_sample) {
     case 1:
     case 33:
-        qtrle_decode_1bpp(s, stream_ptr, row_ptr, height);
+        qtrle_decode_1bpp(s, row_ptr, height);
         break;
 
     case 2:
     case 34:
-        qtrle_decode_2n4bpp(s, stream_ptr, row_ptr, height, 2);
+        qtrle_decode_2n4bpp(s, row_ptr, height, 2);
         has_palette = 1;
         break;
 
     case 4:
     case 36:
-        qtrle_decode_2n4bpp(s, stream_ptr, row_ptr, height, 4);
+        qtrle_decode_2n4bpp(s, row_ptr, height, 4);
         has_palette = 1;
         break;
 
     case 8:
     case 40:
-        qtrle_decode_8bpp(s, stream_ptr, row_ptr, height);
+        qtrle_decode_8bpp(s, row_ptr, height);
         has_palette = 1;
         break;
 
     case 16:
-        qtrle_decode_16bpp(s, stream_ptr, row_ptr, height);
+        qtrle_decode_16bpp(s, row_ptr, height);
         break;
 
     case 24:
-        qtrle_decode_24bpp(s, stream_ptr, row_ptr, height);
+        qtrle_decode_24bpp(s, row_ptr, height);
         break;
 
     case 32:
-        qtrle_decode_32bpp(s, stream_ptr, row_ptr, height);
+        qtrle_decode_32bpp(s, row_ptr, height);
         break;
 
     default:
@@ -541,7 +502,7 @@ done:
     *(AVFrame*)data = s->frame;
 
     /* always report that the buffer was completely consumed */
-    return buf_size;
+    return avpkt->size;
 }
 
 static av_cold int qtrle_decode_end(AVCodecContext *avctx)
diff --git a/libavcodec/resample.c b/libavcodec/resample.c
index f1a2dbf6dc..a601e7aac7 100644
--- a/libavcodec/resample.c
+++ b/libavcodec/resample.c
@@ -323,11 +323,13 @@ int audio_resample(ReSampleContext *s, short *output, short *input, int nb_sampl
     lenout= 2*s->output_channels*nb_samples * s->ratio + 16;
 
     if (s->sample_fmt[1] != AV_SAMPLE_FMT_S16) {
+        int out_size = lenout * av_get_bytes_per_sample(s->sample_fmt[1]) *
+                       s->output_channels;
         output_bak = output;
 
-        if (!s->buffer_size[1] || s->buffer_size[1] < 2*lenout) {
+        if (!s->buffer_size[1] || s->buffer_size[1] < out_size) {
             av_free(s->buffer[1]);
-            s->buffer_size[1] = 2*lenout;
+            s->buffer_size[1] = out_size;
             s->buffer[1] = av_malloc(s->buffer_size[1]);
             if (!s->buffer[1]) {
                 av_log(s->resample_context, AV_LOG_ERROR, "Could not allocate buffer\n");
diff --git a/libavcodec/vqavideo.c b/libavcodec/vqavideo.c
index ae99c6d9c1..ba70a11c59 100644
--- a/libavcodec/vqavideo.c
+++ b/libavcodec/vqavideo.c
@@ -162,7 +162,18 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
     /* allocate codebooks */
     s->codebook_size = MAX_CODEBOOK_SIZE;
     s->codebook = av_malloc(s->codebook_size);
+    if (!s->codebook)
+        goto fail;
     s->next_codebook_buffer = av_malloc(s->codebook_size);
+    if (!s->next_codebook_buffer)
+        goto fail;
+
+    /* allocate decode buffer */
+    s->decode_buffer_size = (s->width / s->vector_width) *
+                            (s->height / s->vector_height) * 2;
+    s->decode_buffer = av_malloc(s->decode_buffer_size);
+    if (!s->decode_buffer)
+        goto fail;
 
     /* initialize the solid-color vectors */
     if (s->vector_height == 4) {
@@ -178,15 +189,15 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx)
     }
     s->next_codebook_buffer_index = 0;
 
-    /* allocate decode buffer */
-    s->decode_buffer_size = (s->width / s->vector_width) *
-                            (s->height / s->vector_height) * 2;
-    s->decode_buffer = av_malloc(s->decode_buffer_size);
-
     avcodec_get_frame_defaults(&s->frame);
     s->frame.data[0] = NULL;
 
     return 0;
+fail:
+    av_freep(&s->codebook);
+    av_freep(&s->next_codebook_buffer);
+    av_freep(&s->decode_buffer);
+    return AVERROR(ENOMEM);
 }
 
 #define CHECK_COUNT() \
@@ -606,9 +617,9 @@ static av_cold int vqa_decode_end(AVCodecContext *avctx)
 {
     VqaContext *s = avctx->priv_data;
 
-    av_free(s->codebook);
-    av_free(s->next_codebook_buffer);
-    av_free(s->decode_buffer);
+    av_freep(&s->codebook);
+    av_freep(&s->next_codebook_buffer);
+    av_freep(&s->decode_buffer);
 
     if (s->frame.data[0])
         avctx->release_buffer(avctx, &s->frame);
diff --git a/libavcodec/wmaenc.c b/libavcodec/wmaenc.c
index 0674ca0cc9..019176a6d4 100644
--- a/libavcodec/wmaenc.c
+++ b/libavcodec/wmaenc.c
@@ -307,10 +307,6 @@ static int encode_block(WMACodecContext *s, float (*src_coefs)[BLOCK_MAX_SIZE],
 
                     if(1<<coef_nb_bits <= abs_level)
                         return -1;
-
-                    //Workaround minor rounding differences for the regression tests, FIXME we should find and replace the problematic float by fixpoint for reg tests
-                    if(abs_level == 0x71B && (s->avctx->flags & CODEC_FLAG_BITEXACT)) abs_level=0x71A;
-
                     put_bits(&s->pb, coef_nb_bits, abs_level);
                     put_bits(&s->pb, s->frame_len_bits, run);
                 }
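
The vqavideo.c hunk above converts the unchecked av_malloc() calls in vqa_decode_init() into the usual allocate-then-`goto fail` pattern and switches the cleanup paths to av_freep(), so a partially initialised context can be torn down without leaks or double frees. A self-contained sketch of that pattern (plain C, hypothetical field names, not the decoder's actual layout):

```c
#include <stdlib.h>

struct demo_ctx {
    unsigned char *codebook;
    unsigned char *scratch;
};

/* Allocate all buffers up front; on any failure free what was already
 * allocated and NULL the pointers (the av_freep() idea) so that a later
 * cleanup call cannot double-free. */
static int demo_init(struct demo_ctx *c, size_t codebook_size, size_t scratch_size)
{
    c->codebook = NULL;
    c->scratch  = NULL;

    c->codebook = malloc(codebook_size);
    if (!c->codebook)
        goto fail;
    c->scratch = malloc(scratch_size);
    if (!c->scratch)
        goto fail;
    return 0;

fail:
    free(c->codebook); c->codebook = NULL;
    free(c->scratch);  c->scratch  = NULL;
    return -1;
}
```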