author     Mans Rullgard <mans@mansr.com>  2012-04-12 13:55:49 +0100
committer  Mans Rullgard <mans@mansr.com>  2012-04-21 18:56:19 +0100
commit     2bcbd98459915baefc15043d02f4a942ebcd33da (patch)
tree       8dc1411f9bab944622c785efbd9c5dc4959b00ee
parent     95510be8c35753da8f48062b28b65e7acdab965f (diff)
download   ffmpeg-2bcbd98459915baefc15043d02f4a942ebcd33da.tar.gz
Remove lowres video decoding
This feature is complex, of questionable utility, and slows down
normal decoding.
Signed-off-by: Mans Rullgard <mans@mansr.com>
34 files changed, 56 insertions, 755 deletions
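For context, the option being removed let callers decode at 1/2, 1/4 or 1/8 resolution (lowres = 1, 2, 3, per the option help text deleted from options_table.h) by right-shifting the coded dimensions and substituting smaller IDCTs. The following is a minimal illustrative sketch, not part of the commit: it reuses the ceiling-division expression this patch deletes from avcodec_set_dimensions(), and the helper name lowres_dim() is made up for illustration (it assumes the arithmetic right shift of negative values that the original expression relies on).

```c
/* Illustrative sketch only -- not part of this commit.  lowres_dim() is a
 * made-up helper showing how the removed lowres option scaled the decoded
 * frame size: the same -((-x) >> lowres) ceiling division by 2^lowres that
 * this commit deletes from avcodec_set_dimensions(). */
#include <stdio.h>

static int lowres_dim(int dim, int lowres)
{
    /* Assumes arithmetic right shift of negatives, as the original
     * FFmpeg expression does. */
    return -((-dim) >> lowres);
}

int main(void)
{
    const int w = 1920, h = 1080;
    for (int lowres = 0; lowres <= 3; lowres++)
        printf("lowres=%d: %dx%d -> %dx%d\n", lowres, w, h,
               lowres_dim(w, lowres), lowres_dim(h, lowres));
    return 0;
}
```

With the feature gone, width/height always equal the coded dimensions, which is why the ffmpeg.c and ffplay.c hunks below drop the corresponding `>>= dec->lowres` adjustments and the `-lowres` option.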
@@ -3527,12 +3527,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic) switch (dec->codec_type) { case AVMEDIA_TYPE_VIDEO: - if (dec->lowres) { - dec->flags |= CODEC_FLAG_EMU_EDGE; - dec->height >>= dec->lowres; - dec->width >>= dec->lowres; - } - ist->resample_height = dec->height; ist->resample_width = dec->width; ist->resample_pix_fmt = dec->pix_fmt; @@ -244,7 +244,6 @@ static int step = 0; static int workaround_bugs = 1; static int fast = 0; static int genpts = 0; -static int lowres = 0; static int idct = FF_IDCT_AUTO; static enum AVDiscard skip_frame = AVDISCARD_DEFAULT; static enum AVDiscard skip_idct = AVDISCARD_DEFAULT; @@ -1298,7 +1297,7 @@ static void alloc_picture(void *opaque) /* SDL allocates a buffer smaller than requested if the video * overlay hardware is unable to support the requested size. */ fprintf(stderr, "Error: the video system does not support an image\n" - "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n" + "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n" "to reduce the image size.\n", vp->width, vp->height ); do_exit(); } @@ -2185,14 +2184,12 @@ static int stream_component_open(VideoState *is, int stream_index) avctx->debug_mv = debug_mv; avctx->debug = debug; avctx->workaround_bugs = workaround_bugs; - avctx->lowres = lowres; avctx->idct_algo = idct; avctx->skip_frame = skip_frame; avctx->skip_idct = skip_idct; avctx->skip_loop_filter = skip_loop_filter; avctx->error_concealment = error_concealment; - if (lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE; if (fast) avctx->flags2 |= CODEC_FLAG2_FAST; if (!av_dict_get(opts, "threads", NULL, 0)) @@ -2979,7 +2976,6 @@ static const OptionDef options[] = { { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" }, { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" }, { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""}, - { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "", "" }, { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" }, { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" }, { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" }, diff --git a/libavcodec/alpha/dsputil_alpha.c b/libavcodec/alpha/dsputil_alpha.c index dc7cef9356..ce7cecb741 100644 --- a/libavcodec/alpha/dsputil_alpha.c +++ b/libavcodec/alpha/dsputil_alpha.c @@ -336,7 +336,7 @@ void ff_dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx) put_pixels_clamped_axp_p = c->put_pixels_clamped; add_pixels_clamped_axp_p = c->add_pixels_clamped; - if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 && + if (avctx->bits_per_raw_sample <= 8 && (avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_SIMPLEALPHA)) { c->idct_put = ff_simple_idct_put_axp; diff --git a/libavcodec/arm/dsputil_init_arm.c b/libavcodec/arm/dsputil_init_arm.c index 21e1351e16..bc94b08d2a 100644 --- a/libavcodec/arm/dsputil_init_arm.c +++ b/libavcodec/arm/dsputil_init_arm.c @@ -80,7 +80,7 @@ void ff_dsputil_init_arm(DSPContext* c, AVCodecContext *avctx) ff_put_pixels_clamped = c->put_pixels_clamped; ff_add_pixels_clamped = c->add_pixels_clamped; - if (!avctx->lowres && avctx->bits_per_raw_sample <= 8) { + if (avctx->bits_per_raw_sample <= 8) { if(avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_ARM){ c->idct_put = j_rev_dct_arm_put; diff --git a/libavcodec/arm/dsputil_init_armv5te.c 
b/libavcodec/arm/dsputil_init_armv5te.c index d74ca424b9..f37ffc3e9c 100644 --- a/libavcodec/arm/dsputil_init_armv5te.c +++ b/libavcodec/arm/dsputil_init_armv5te.c @@ -29,7 +29,7 @@ void ff_prefetch_arm(void *mem, int stride, int h); av_cold void ff_dsputil_init_armv5te(DSPContext *c, AVCodecContext *avctx) { - if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 && + if (avctx->bits_per_raw_sample <= 8 && (avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_SIMPLEARMV5TE)) { c->idct_put = ff_simple_idct_put_armv5te; diff --git a/libavcodec/arm/dsputil_init_armv6.c b/libavcodec/arm/dsputil_init_armv6.c index aa6de3f7a7..fbe601408c 100644 --- a/libavcodec/arm/dsputil_init_armv6.c +++ b/libavcodec/arm/dsputil_init_armv6.c @@ -74,7 +74,7 @@ av_cold void ff_dsputil_init_armv6(DSPContext *c, AVCodecContext *avctx) { const int high_bit_depth = avctx->bits_per_raw_sample > 8; - if (!avctx->lowres && avctx->bits_per_raw_sample <= 8 && + if (avctx->bits_per_raw_sample <= 8 && (avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_SIMPLEARMV6)) { c->idct_put = ff_simple_idct_put_armv6; diff --git a/libavcodec/arm/dsputil_init_neon.c b/libavcodec/arm/dsputil_init_neon.c index b2931fe525..d3ef85048f 100644 --- a/libavcodec/arm/dsputil_init_neon.c +++ b/libavcodec/arm/dsputil_init_neon.c @@ -182,7 +182,7 @@ void ff_dsputil_init_neon(DSPContext *c, AVCodecContext *avctx) { const int high_bit_depth = avctx->bits_per_raw_sample > 8; - if (!avctx->lowres && avctx->bits_per_raw_sample <= 8) { + if (avctx->bits_per_raw_sample <= 8) { if (avctx->idct_algo == FF_IDCT_AUTO || avctx->idct_algo == FF_IDCT_SIMPLENEON) { c->idct_put = ff_simple_idct_put_neon; diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 9e937d31b1..0fda1cb26d 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -1394,7 +1394,7 @@ typedef struct AVCodecContext { int width, height; /** - * Bitstream width / height, may be different from width/height if lowres enabled. + * Bitstream width / height, may be different from width/height. * - encoding: unused * - decoding: Set by user before init if known. Codec should override / dynamically change if needed. */ @@ -2586,7 +2586,7 @@ typedef struct AVCodecContext { * - encoding: unused * - decoding: Set by user. */ - int lowres; + attribute_deprecated int lowres; /** * the picture in the bitstream @@ -2847,7 +2847,7 @@ typedef struct AVCodec { const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. 
array is terminated by 0 - uint8_t max_lowres; ///< maximum value for lowres supported by the decoder + attribute_deprecated uint8_t max_lowres; ///< maximum value for lowres supported by the decoder const AVClass *priv_class; ///< AVClass for the private context const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} diff --git a/libavcodec/dsputil.c b/libavcodec/dsputil.c index bcd6c24843..62485b4f75 100644 --- a/libavcodec/dsputil.c +++ b/libavcodec/dsputil.c @@ -2700,37 +2700,6 @@ static void ff_jref_idct_add(uint8_t *dest, int line_size, DCTELEM *block) ff_add_pixels_clamped_c(block, dest, line_size); } -static void ff_jref_idct4_put(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_j_rev_dct4 (block); - put_pixels_clamped4_c(block, dest, line_size); -} -static void ff_jref_idct4_add(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_j_rev_dct4 (block); - add_pixels_clamped4_c(block, dest, line_size); -} - -static void ff_jref_idct2_put(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_j_rev_dct2 (block); - put_pixels_clamped2_c(block, dest, line_size); -} -static void ff_jref_idct2_add(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_j_rev_dct2 (block); - add_pixels_clamped2_c(block, dest, line_size); -} - -static void ff_jref_idct1_put(uint8_t *dest, int line_size, DCTELEM *block) -{ - dest[0] = av_clip_uint8((block[0] + 4)>>3); -} -static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block) -{ - dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4)>>3)); -} - static void just_return(void *mem av_unused, int stride av_unused, int h av_unused) { return; } /* init static data */ @@ -2797,28 +2766,12 @@ av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx) } #endif //CONFIG_ENCODERS - if(avctx->lowres==1){ - c->idct_put= ff_jref_idct4_put; - c->idct_add= ff_jref_idct4_add; - c->idct = ff_j_rev_dct4; - c->idct_permutation_type= FF_NO_IDCT_PERM; - }else if(avctx->lowres==2){ - c->idct_put= ff_jref_idct2_put; - c->idct_add= ff_jref_idct2_add; - c->idct = ff_j_rev_dct2; - c->idct_permutation_type= FF_NO_IDCT_PERM; - }else if(avctx->lowres==3){ - c->idct_put= ff_jref_idct1_put; - c->idct_add= ff_jref_idct1_add; - c->idct = ff_j_rev_dct1; - c->idct_permutation_type= FF_NO_IDCT_PERM; - }else{ - if (avctx->bits_per_raw_sample == 10) { - c->idct_put = ff_simple_idct_put_10; - c->idct_add = ff_simple_idct_add_10; - c->idct = ff_simple_idct_10; - c->idct_permutation_type = FF_NO_IDCT_PERM; - } else { + if (avctx->bits_per_raw_sample == 10) { + c->idct_put = ff_simple_idct_put_10; + c->idct_add = ff_simple_idct_add_10; + c->idct = ff_simple_idct_10; + c->idct_permutation_type = FF_NO_IDCT_PERM; + } else { if(avctx->idct_algo==FF_IDCT_INT){ c->idct_put= ff_jref_idct_put; c->idct_add= ff_jref_idct_add; @@ -2849,7 +2802,6 @@ av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx) c->idct = ff_simple_idct_8; c->idct_permutation_type= FF_NO_IDCT_PERM; } - } } c->diff_pixels = diff_pixels_c; diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h index b7c0bed9ac..3906119cfd 100644 --- a/libavcodec/dsputil.h +++ b/libavcodec/dsputil.h @@ -46,9 +46,6 @@ void ff_fdct248_islow_8(DCTELEM *data); void ff_fdct248_islow_10(DCTELEM *data); void ff_j_rev_dct (DCTELEM *data); -void ff_j_rev_dct4 (DCTELEM *data); -void ff_j_rev_dct2 (DCTELEM *data); -void ff_j_rev_dct1 (DCTELEM *data); void ff_wmv2_idct_c(DCTELEM *data); void ff_fdct_mmx(DCTELEM *block); diff --git a/libavcodec/dv.c 
b/libavcodec/dv.c index 9c0893a47a..79e73dd125 100644 --- a/libavcodec/dv.c +++ b/libavcodec/dv.c @@ -311,13 +311,7 @@ av_cold int ff_dvvideo_init(AVCodecContext *avctx) /* 248DCT setup */ s->fdct[1] = dsp.fdct248; s->idct_put[1] = ff_simple_idct248_put; // FIXME: need to add it to DSP - if (avctx->lowres){ - for (i = 0; i < 64; i++){ - int j = ff_zigzag248_direct[i]; - s->dv_zigzag[1][i] = dsp.idct_permutation[(j & 7) + (j & 8) * 4 + (j & 48) / 2]; - } - }else - memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64); + memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64); avctx->coded_frame = &s->picture; s->avctx = avctx; diff --git a/libavcodec/dvdec.c b/libavcodec/dvdec.c index 6b77a40568..ffa9c6d818 100644 --- a/libavcodec/dvdec.c +++ b/libavcodec/dvdec.c @@ -144,7 +144,7 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) LOCAL_ALIGNED_16(DCTELEM, sblock, [5*DV_MAX_BPM], [64]); LOCAL_ALIGNED_16(uint8_t, mb_bit_buffer, [ 80 + FF_INPUT_BUFFER_PADDING_SIZE]); /* allow some slack */ LOCAL_ALIGNED_16(uint8_t, vs_bit_buffer, [5*80 + FF_INPUT_BUFFER_PADDING_SIZE]); /* allow some slack */ - const int log2_blocksize = 3-s->avctx->lowres; + const int log2_blocksize = 3; int is_field_mode[5]; assert((((int)mb_bit_buffer) & 7) == 0); @@ -381,6 +381,5 @@ AVCodec ff_dvvideo_decoder = { .close = dvvideo_close, .decode = dvvideo_decode_frame, .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), }; diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c index d14adb4437..fa1e008584 100644 --- a/libavcodec/error_resilience.c +++ b/libavcodec/error_resilience.c @@ -891,7 +891,7 @@ void ff_er_frame_end(MpegEncContext *s) /* We do not support ER of field pictures yet, * though it should not crash if enabled. */ - if (!s->err_recognition || s->error_count == 0 || s->avctx->lowres || + if (!s->err_recognition || s->error_count == 0 || s->avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU || s->picture_structure != PICT_FRAME || diff --git a/libavcodec/flvdec.c b/libavcodec/flvdec.c index b4b171f500..fa074c4083 100644 --- a/libavcodec/flvdec.c +++ b/libavcodec/flvdec.c @@ -127,7 +127,6 @@ AVCodec ff_flv_decoder = { .close = ff_h263_decode_end, .decode = ff_h263_decode_frame, .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"), .pix_fmts = ff_pixfmt_list_420, }; diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c index 078682dc5f..23dafc6a6e 100644 --- a/libavcodec/h261dec.c +++ b/libavcodec/h261dec.c @@ -651,6 +651,5 @@ AVCodec ff_h261_decoder = { .close = h261_decode_end, .decode = h261_decode_frame, .capabilities = CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("H.261"), }; diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c index be4083f64f..cfd9a77edb 100644 --- a/libavcodec/h263dec.c +++ b/libavcodec/h263dec.c @@ -149,7 +149,7 @@ static int get_consumed_bytes(MpegEncContext *s, int buf_size){ static int decode_slice(MpegEncContext *s){ const int part_mask= s->partitioned_frame ? 
(ER_AC_END|ER_AC_ERROR) : 0x7F; - const int mb_size= 16>>s->avctx->lowres; + const int mb_size = 16; s->last_resync_gb= s->gb; s->first_slice_line= 1; @@ -745,7 +745,6 @@ AVCodec ff_h263_decoder = { .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, .flush = ff_mpeg_flush, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"), .pix_fmts = ff_hwaccel_pixfmt_list_420, }; diff --git a/libavcodec/intrax8.c b/libavcodec/intrax8.c index 3ec6e69435..17426c510e 100644 --- a/libavcodec/intrax8.c +++ b/libavcodec/intrax8.c @@ -716,7 +716,6 @@ av_cold void ff_intrax8_common_end(IntraX8Context * w) * The parent codec must call MPV_frame_start(), ff_er_frame_start() before calling this function. * The parent codec must call ff_er_frame_end(), MPV_frame_end() after calling this function. * This function does not use MPV_decode_mb(). - * lowres decoding is theoretically impossible. * @param w pointer to IntraX8Context * @param dquant doubled quantizer, it would be odd in case of VC-1 halfpq==1. * @param quant_offset offset away from zero diff --git a/libavcodec/jrevdct.c b/libavcodec/jrevdct.c index 395eb8c638..e33558f825 100644 --- a/libavcodec/jrevdct.c +++ b/libavcodec/jrevdct.c @@ -940,216 +940,3 @@ void ff_j_rev_dct(DCTBLOCK data) dataptr++; /* advance pointer to next column */ } } - -#undef DCTSIZE -#define DCTSIZE 4 -#define DCTSTRIDE 8 - -void ff_j_rev_dct4(DCTBLOCK data) -{ - int32_t tmp0, tmp1, tmp2, tmp3; - int32_t tmp10, tmp11, tmp12, tmp13; - int32_t z1; - int32_t d0, d2, d4, d6; - register DCTELEM *dataptr; - int rowctr; - - /* Pass 1: process rows. */ - /* Note results are scaled up by sqrt(8) compared to a true IDCT; */ - /* furthermore, we scale the results by 2**PASS1_BITS. */ - - data[0] += 4; - - dataptr = data; - - for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) { - /* Due to quantization, we will usually find that many of the input - * coefficients are zero, especially the AC terms. We can exploit this - * by short-circuiting the IDCT calculation for any row in which all - * the AC terms are zero. In that case each output is equal to the - * DC coefficient (with scale factor as needed). - * With typical images and quantization tables, half or more of the - * row DCT calculations can be simplified this way. - */ - - register int *idataptr = (int*)dataptr; - - d0 = dataptr[0]; - d2 = dataptr[1]; - d4 = dataptr[2]; - d6 = dataptr[3]; - - if ((d2 | d4 | d6) == 0) { - /* AC terms all zero */ - if (d0) { - /* Compute a 32 bit value to assign. */ - DCTELEM dcval = (DCTELEM) (d0 << PASS1_BITS); - register int v = (dcval & 0xffff) | ((dcval << 16) & 0xffff0000); - - idataptr[0] = v; - idataptr[1] = v; - } - - dataptr += DCTSTRIDE; /* advance pointer to next row */ - continue; - } - - /* Even part: reverse the even part of the forward DCT. */ - /* The rotator is sqrt(2)*c(-6). 
*/ - if (d6) { - if (d2) { - /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */ - z1 = MULTIPLY(d2 + d6, FIX_0_541196100); - tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065); - tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865); - - tmp0 = (d0 + d4) << CONST_BITS; - tmp1 = (d0 - d4) << CONST_BITS; - - tmp10 = tmp0 + tmp3; - tmp13 = tmp0 - tmp3; - tmp11 = tmp1 + tmp2; - tmp12 = tmp1 - tmp2; - } else { - /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */ - tmp2 = MULTIPLY(-d6, FIX_1_306562965); - tmp3 = MULTIPLY(d6, FIX_0_541196100); - - tmp0 = (d0 + d4) << CONST_BITS; - tmp1 = (d0 - d4) << CONST_BITS; - - tmp10 = tmp0 + tmp3; - tmp13 = tmp0 - tmp3; - tmp11 = tmp1 + tmp2; - tmp12 = tmp1 - tmp2; - } - } else { - if (d2) { - /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */ - tmp2 = MULTIPLY(d2, FIX_0_541196100); - tmp3 = MULTIPLY(d2, FIX_1_306562965); - - tmp0 = (d0 + d4) << CONST_BITS; - tmp1 = (d0 - d4) << CONST_BITS; - - tmp10 = tmp0 + tmp3; - tmp13 = tmp0 - tmp3; - tmp11 = tmp1 + tmp2; - tmp12 = tmp1 - tmp2; - } else { - /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */ - tmp10 = tmp13 = (d0 + d4) << CONST_BITS; - tmp11 = tmp12 = (d0 - d4) << CONST_BITS; - } - } - - /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */ - - dataptr[0] = (DCTELEM) DESCALE(tmp10, CONST_BITS-PASS1_BITS); - dataptr[1] = (DCTELEM) DESCALE(tmp11, CONST_BITS-PASS1_BITS); - dataptr[2] = (DCTELEM) DESCALE(tmp12, CONST_BITS-PASS1_BITS); - dataptr[3] = (DCTELEM) DESCALE(tmp13, CONST_BITS-PASS1_BITS); - - dataptr += DCTSTRIDE; /* advance pointer to next row */ - } - - /* Pass 2: process columns. */ - /* Note that we must descale the results by a factor of 8 == 2**3, */ - /* and also undo the PASS1_BITS scaling. */ - - dataptr = data; - for (rowctr = DCTSIZE-1; rowctr >= 0; rowctr--) { - /* Columns of zeroes can be exploited in the same way as we did with rows. - * However, the row calculation has created many nonzero AC terms, so the - * simplification applies less often (typically 5% to 10% of the time). - * On machines with very fast multiplication, it's possible that the - * test takes more time than it's worth. In that case this section - * may be commented out. - */ - - d0 = dataptr[DCTSTRIDE*0]; - d2 = dataptr[DCTSTRIDE*1]; - d4 = dataptr[DCTSTRIDE*2]; - d6 = dataptr[DCTSTRIDE*3]; - - /* Even part: reverse the even part of the forward DCT. */ - /* The rotator is sqrt(2)*c(-6). 
*/ - if (d6) { - if (d2) { - /* d0 != 0, d2 != 0, d4 != 0, d6 != 0 */ - z1 = MULTIPLY(d2 + d6, FIX_0_541196100); - tmp2 = z1 + MULTIPLY(-d6, FIX_1_847759065); - tmp3 = z1 + MULTIPLY(d2, FIX_0_765366865); - - tmp0 = (d0 + d4) << CONST_BITS; - tmp1 = (d0 - d4) << CONST_BITS; - - tmp10 = tmp0 + tmp3; - tmp13 = tmp0 - tmp3; - tmp11 = tmp1 + tmp2; - tmp12 = tmp1 - tmp2; - } else { - /* d0 != 0, d2 == 0, d4 != 0, d6 != 0 */ - tmp2 = MULTIPLY(-d6, FIX_1_306562965); - tmp3 = MULTIPLY(d6, FIX_0_541196100); - - tmp0 = (d0 + d4) << CONST_BITS; - tmp1 = (d0 - d4) << CONST_BITS; - - tmp10 = tmp0 + tmp3; - tmp13 = tmp0 - tmp3; - tmp11 = tmp1 + tmp2; - tmp12 = tmp1 - tmp2; - } - } else { - if (d2) { - /* d0 != 0, d2 != 0, d4 != 0, d6 == 0 */ - tmp2 = MULTIPLY(d2, FIX_0_541196100); - tmp3 = MULTIPLY(d2, FIX_1_306562965); - - tmp0 = (d0 + d4) << CONST_BITS; - tmp1 = (d0 - d4) << CONST_BITS; - - tmp10 = tmp0 + tmp3; - tmp13 = tmp0 - tmp3; - tmp11 = tmp1 + tmp2; - tmp12 = tmp1 - tmp2; - } else { - /* d0 != 0, d2 == 0, d4 != 0, d6 == 0 */ - tmp10 = tmp13 = (d0 + d4) << CONST_BITS; - tmp11 = tmp12 = (d0 - d4) << CONST_BITS; - } - } - - /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */ - - dataptr[DCTSTRIDE*0] = tmp10 >> (CONST_BITS+PASS1_BITS+3); - dataptr[DCTSTRIDE*1] = tmp11 >> (CONST_BITS+PASS1_BITS+3); - dataptr[DCTSTRIDE*2] = tmp12 >> (CONST_BITS+PASS1_BITS+3); - dataptr[DCTSTRIDE*3] = tmp13 >> (CONST_BITS+PASS1_BITS+3); - - dataptr++; /* advance pointer to next column */ - } -} - -void ff_j_rev_dct2(DCTBLOCK data){ - int d00, d01, d10, d11; - - data[0] += 4; - d00 = data[0+0*DCTSTRIDE] + data[1+0*DCTSTRIDE]; - d01 = data[0+0*DCTSTRIDE] - data[1+0*DCTSTRIDE]; - d10 = data[0+1*DCTSTRIDE] + data[1+1*DCTSTRIDE]; - d11 = data[0+1*DCTSTRIDE] - data[1+1*DCTSTRIDE]; - - data[0+0*DCTSTRIDE]= (d00 + d10)>>3; - data[1+0*DCTSTRIDE]= (d01 + d11)>>3; - data[0+1*DCTSTRIDE]= (d00 - d10)>>3; - data[1+1*DCTSTRIDE]= (d01 - d11)>>3; -} - -void ff_j_rev_dct1(DCTBLOCK data){ - data[0] = (data[0] + 4)>>3; -} - -#undef FIX -#undef CONST_BITS diff --git a/libavcodec/libopenjpeg.c b/libavcodec/libopenjpeg.c index 3ea6203ad2..799ccd7745 100644 --- a/libavcodec/libopenjpeg.c +++ b/libavcodec/libopenjpeg.c @@ -158,7 +158,6 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx, ff_thread_finish_setup(avctx); ctx->dec_params.cp_limit_decoding = NO_LIMITATION; - ctx->dec_params.cp_reduce = avctx->lowres; // Tie decoder with decoding parameters opj_setup_decoder(dec, &ctx->dec_params); stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size); @@ -219,7 +218,6 @@ AVCodec ff_libopenjpeg_decoder = { .close = libopenjpeg_decode_close, .decode = libopenjpeg_decode_frame, .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS, - .max_lowres = 5, .long_name = NULL_IF_CONFIG_SMALL("OpenJPEG based JPEG 2000 decoder"), .init_thread_copy = ONLY_IF_THREADS_ENABLED(libopenjpeg_decode_init_thread_copy), }; diff --git a/libavcodec/mjpegbdec.c b/libavcodec/mjpegbdec.c index 3a2ae13b20..7ba5abf9bd 100644 --- a/libavcodec/mjpegbdec.c +++ b/libavcodec/mjpegbdec.c @@ -161,6 +161,5 @@ AVCodec ff_mjpegb_decoder = { .close = ff_mjpeg_decode_end, .decode = mjpegb_decode_frame, .capabilities = CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("Apple MJPEG-B"), }; diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c index 5eab559f84..8552ec1a6f 100644 --- a/libavcodec/mjpegdec.c +++ b/libavcodec/mjpegdec.c @@ -796,21 +796,6 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, return 0; } -static 
av_always_inline void mjpeg_copy_block(uint8_t *dst, const uint8_t *src, - int linesize, int lowres) -{ - switch (lowres) { - case 0: copy_block8(dst, src, linesize, linesize, 8); - break; - case 1: copy_block4(dst, src, linesize, linesize, 4); - break; - case 2: copy_block2(dst, src, linesize, linesize, 2); - break; - case 3: *dst = *src; - break; - } -} - static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, const AVFrame *reference) @@ -869,16 +854,16 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, x = 0; y = 0; for (j = 0; j < n; j++) { - block_offset = (((linesize[c] * (v * mb_y + y) * 8) + - (h * mb_x + x) * 8) >> s->avctx->lowres); + block_offset = ((linesize[c] * (v * mb_y + y) * 8) + + (h * mb_x + x) * 8); if (s->interlaced && s->bottom_field) block_offset += linesize[c] >> 1; ptr = data[c] + block_offset; if (!s->progressive) { if (copy_mb) - mjpeg_copy_block(ptr, reference_data[c] + block_offset, - linesize[c], s->avctx->lowres); + copy_block8(ptr, reference_data[c] + block_offset, + linesize[c], linesize[c], 8); else { s->dsp.clear_block(s->block); if (decode_block(s, s->block, i, @@ -968,7 +953,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, } for (mb_y = 0; mb_y < s->mb_height; mb_y++) { - int block_offset = (mb_y * linesize * 8 >> s->avctx->lowres); + int block_offset = mb_y * linesize * 8; uint8_t *ptr = data + block_offset; int block_idx = mb_y * s->block_stride[c]; DCTELEM (*block)[64] = &s->blocks[c][block_idx]; @@ -993,11 +978,11 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, if (last_scan) { if (copy_mb) { - mjpeg_copy_block(ptr, reference_data + block_offset, - linesize, s->avctx->lowres); + copy_block8(ptr, reference_data + block_offset, + linesize, linesize, 8); } else { s->dsp.idct_put(ptr, linesize, *block); - ptr += 8 >> s->avctx->lowres; + ptr += 8; } } } @@ -1667,7 +1652,6 @@ AVCodec ff_mjpeg_decoder = { .close = ff_mjpeg_decode_end, .decode = ff_mjpeg_decode_frame, .capabilities = CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"), .priv_class = &mjpegdec_class, }; @@ -1681,6 +1665,5 @@ AVCodec ff_thp_decoder = { .close = ff_mjpeg_decode_end, .decode = ff_mjpeg_decode_frame, .capabilities = CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"), }; diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c index 8cdc704c73..40ba97b0d0 100644 --- a/libavcodec/mpeg12.c +++ b/libavcodec/mpeg12.c @@ -1631,7 +1631,6 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size) { AVCodecContext *avctx = s->avctx; - const int lowres = s->avctx->lowres; const int field_pic = s->picture_structure != PICT_FRAME; s->resync_mb_x = @@ -1752,14 +1751,14 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y, } } - s->dest[0] += 16 >> lowres; - s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift; - s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift; + s->dest[0] += 16; + s->dest[1] += 16 >> s->chroma_x_shift; + s->dest[2] += 16 >> s->chroma_x_shift; ff_MPV_decode_mb(s, s->block); if (++s->mb_x >= s->mb_width) { - const int mb_size = 16 >> s->avctx->lowres; + const int mb_size = 16; ff_draw_horiz_band(s, mb_size*(s->mb_y >> field_pic), mb_size); ff_MPV_report_decode_progress(s); @@ -2508,7 +2507,6 @@ AVCodec ff_mpeg1video_decoder = { CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, .flush = 
flush, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"), .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context) }; @@ -2525,7 +2523,6 @@ AVCodec ff_mpeg2video_decoder = { CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS, .flush = flush, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"), .profiles = NULL_IF_CONFIG_SMALL(mpeg2_video_profiles), }; diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c index f45d86fd35..3fcc6d0489 100644 --- a/libavcodec/mpeg4videodec.c +++ b/libavcodec/mpeg4videodec.c @@ -2266,7 +2266,6 @@ AVCodec ff_mpeg4_decoder = { CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_FRAME_THREADS, .flush = ff_mpeg_flush, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), .pix_fmts = ff_hwaccel_pixfmt_list_420, .profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles), diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index 280207530b..d1c42331c2 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -1815,364 +1815,6 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict) } } -static inline int hpel_motion_lowres(MpegEncContext *s, - uint8_t *dest, uint8_t *src, - int field_based, int field_select, - int src_x, int src_y, - int width, int height, int stride, - int h_edge_pos, int v_edge_pos, - int w, int h, h264_chroma_mc_func *pix_op, - int motion_x, int motion_y) -{ - const int lowres = s->avctx->lowres; - const int op_index = FFMIN(lowres, 2); - const int s_mask = (2 << lowres) - 1; - int emu = 0; - int sx, sy; - - if (s->quarter_sample) { - motion_x /= 2; - motion_y /= 2; - } - - sx = motion_x & s_mask; - sy = motion_y & s_mask; - src_x += motion_x >> lowres + 1; - src_y += motion_y >> lowres + 1; - - src += src_y * stride + src_x; - - if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) || - (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) { - s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1, - (h + 1) << field_based, src_x, - src_y << field_based, - h_edge_pos, - v_edge_pos); - src = s->edge_emu_buffer; - emu = 1; - } - - sx = (sx << 2) >> lowres; - sy = (sy << 2) >> lowres; - if (field_select) - src += s->linesize; - pix_op[op_index](dest, src, stride, h, sx, sy); - return emu; -} - -/* apply one mpeg motion vector to the three components */ -static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, - uint8_t *dest_y, - uint8_t *dest_cb, - uint8_t *dest_cr, - int field_based, - int bottom_field, - int field_select, - uint8_t **ref_picture, - h264_chroma_mc_func *pix_op, - int motion_x, int motion_y, - int h, int mb_y) -{ - uint8_t *ptr_y, *ptr_cb, *ptr_cr; - int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, - uvsx, uvsy; - const int lowres = s->avctx->lowres; - const int op_index = FFMIN(lowres, 2); - const int block_s = 8>>lowres; - const int s_mask = (2 << lowres) - 1; - const int h_edge_pos = s->h_edge_pos >> lowres; - const int v_edge_pos = s->v_edge_pos >> lowres; - linesize = s->current_picture.f.linesize[0] << field_based; - uvlinesize = s->current_picture.f.linesize[1] << field_based; - - // FIXME obviously not perfect but qpel will not work in lowres anyway - if (s->quarter_sample) { - motion_x /= 2; - motion_y /= 2; - } - - if (field_based) { - motion_y += (bottom_field - field_select) * (1 << lowres - 1); - } - - sx = motion_x & s_mask; - sy = motion_y & s_mask; - src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1); - src_y = (mb_y * 2 
* block_s >> field_based) + (motion_y >> lowres + 1); - - if (s->out_format == FMT_H263) { - uvsx = ((motion_x >> 1) & s_mask) | (sx & 1); - uvsy = ((motion_y >> 1) & s_mask) | (sy & 1); - uvsrc_x = src_x >> 1; - uvsrc_y = src_y >> 1; - } else if (s->out_format == FMT_H261) { - // even chroma mv's are full pel in H261 - mx = motion_x / 4; - my = motion_y / 4; - uvsx = (2 * mx) & s_mask; - uvsy = (2 * my) & s_mask; - uvsrc_x = s->mb_x * block_s + (mx >> lowres); - uvsrc_y = mb_y * block_s + (my >> lowres); - } else { - mx = motion_x / 2; - my = motion_y / 2; - uvsx = mx & s_mask; - uvsy = my & s_mask; - uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1); - uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1); - } - - ptr_y = ref_picture[0] + src_y * linesize + src_x; - ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x; - ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x; - - if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || - (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) { - s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, - s->linesize, 17, 17 + field_based, - src_x, src_y << field_based, h_edge_pos, - v_edge_pos); - ptr_y = s->edge_emu_buffer; - if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { - uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize; - s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, - 9 + field_based, - uvsrc_x, uvsrc_y << field_based, - h_edge_pos >> 1, v_edge_pos >> 1); - s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9, - 9 + field_based, - uvsrc_x, uvsrc_y << field_based, - h_edge_pos >> 1, v_edge_pos >> 1); - ptr_cb = uvbuf; - ptr_cr = uvbuf + 16; - } - } - - // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data - if (bottom_field) { - dest_y += s->linesize; - dest_cb += s->uvlinesize; - dest_cr += s->uvlinesize; - } - - if (field_select) { - ptr_y += s->linesize; - ptr_cb += s->uvlinesize; - ptr_cr += s->uvlinesize; - } - - sx = (sx << 2) >> lowres; - sy = (sy << 2) >> lowres; - pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy); - - if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { - uvsx = (uvsx << 2) >> lowres; - uvsy = (uvsy << 2) >> lowres; - pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, - uvsx, uvsy); - pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, - uvsx, uvsy); - } - // FIXME h261 lowres loop filter -} - -static inline void chroma_4mv_motion_lowres(MpegEncContext *s, - uint8_t *dest_cb, uint8_t *dest_cr, - uint8_t **ref_picture, - h264_chroma_mc_func * pix_op, - int mx, int my) -{ - const int lowres = s->avctx->lowres; - const int op_index = FFMIN(lowres, 2); - const int block_s = 8 >> lowres; - const int s_mask = (2 << lowres) - 1; - const int h_edge_pos = s->h_edge_pos >> lowres + 1; - const int v_edge_pos = s->v_edge_pos >> lowres + 1; - int emu = 0, src_x, src_y, offset, sx, sy; - uint8_t *ptr; - - if (s->quarter_sample) { - mx /= 2; - my /= 2; - } - - /* In case of 8X8, we construct a single chroma motion vector - with a special rounding */ - mx = ff_h263_round_chroma(mx); - my = ff_h263_round_chroma(my); - - sx = mx & s_mask; - sy = my & s_mask; - src_x = s->mb_x * block_s + (mx >> lowres + 1); - src_y = s->mb_y * block_s + (my >> lowres + 1); - - offset = src_y * s->uvlinesize + src_x; - ptr = ref_picture[1] + offset; - if (s->flags & CODEC_FLAG_EMU_EDGE) { - if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) || - (unsigned) src_y > FFMAX(v_edge_pos - 
(!!sy) - block_s, 0)) { - s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, - 9, 9, src_x, src_y, h_edge_pos, v_edge_pos); - ptr = s->edge_emu_buffer; - emu = 1; - } - } - sx = (sx << 2) >> lowres; - sy = (sy << 2) >> lowres; - pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy); - - ptr = ref_picture[2] + offset; - if (emu) { - s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, - src_x, src_y, h_edge_pos, v_edge_pos); - ptr = s->edge_emu_buffer; - } - pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy); -} - -/** - * motion compensation of a single macroblock - * @param s context - * @param dest_y luma destination pointer - * @param dest_cb chroma cb/u destination pointer - * @param dest_cr chroma cr/v destination pointer - * @param dir direction (0->forward, 1->backward) - * @param ref_picture array[3] of pointers to the 3 planes of the reference picture - * @param pix_op halfpel motion compensation function (average or put normally) - * the motion vectors are taken from s->mv and the MV type from s->mv_type - */ -static inline void MPV_motion_lowres(MpegEncContext *s, - uint8_t *dest_y, uint8_t *dest_cb, - uint8_t *dest_cr, - int dir, uint8_t **ref_picture, - h264_chroma_mc_func *pix_op) -{ - int mx, my; - int mb_x, mb_y, i; - const int lowres = s->avctx->lowres; - const int block_s = 8 >>lowres; - - mb_x = s->mb_x; - mb_y = s->mb_y; - - switch (s->mv_type) { - case MV_TYPE_16X16: - mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, - 0, 0, 0, - ref_picture, pix_op, - s->mv[dir][0][0], s->mv[dir][0][1], - 2 * block_s, mb_y); - break; - case MV_TYPE_8X8: - mx = 0; - my = 0; - for (i = 0; i < 4; i++) { - hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * - s->linesize) * block_s, - ref_picture[0], 0, 0, - (2 * mb_x + (i & 1)) * block_s, - (2 * mb_y + (i >> 1)) * block_s, - s->width, s->height, s->linesize, - s->h_edge_pos >> lowres, s->v_edge_pos >> lowres, - block_s, block_s, pix_op, - s->mv[dir][i][0], s->mv[dir][i][1]); - - mx += s->mv[dir][i][0]; - my += s->mv[dir][i][1]; - } - - if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) - chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, - pix_op, mx, my); - break; - case MV_TYPE_FIELD: - if (s->picture_structure == PICT_FRAME) { - /* top field */ - mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, - 1, 0, s->field_select[dir][0], - ref_picture, pix_op, - s->mv[dir][0][0], s->mv[dir][0][1], - block_s, mb_y); - /* bottom field */ - mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, - 1, 1, s->field_select[dir][1], - ref_picture, pix_op, - s->mv[dir][1][0], s->mv[dir][1][1], - block_s, mb_y); - } else { - if (s->picture_structure != s->field_select[dir][0] + 1 && - s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) { - ref_picture = s->current_picture_ptr->f.data; - - } - mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, - 0, 0, s->field_select[dir][0], - ref_picture, pix_op, - s->mv[dir][0][0], - s->mv[dir][0][1], 2 * block_s, mb_y >> 1); - } - break; - case MV_TYPE_16X8: - for (i = 0; i < 2; i++) { - uint8_t **ref2picture; - - if (s->picture_structure == s->field_select[dir][i] + 1 || - s->pict_type == AV_PICTURE_TYPE_B || s->first_field) { - ref2picture = ref_picture; - } else { - ref2picture = s->current_picture_ptr->f.data; - } - - mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, - 0, 0, s->field_select[dir][i], - ref2picture, pix_op, - s->mv[dir][i][0], s->mv[dir][i][1] + - 2 * block_s * i, block_s, mb_y >> 1); - - dest_y += 2 * block_s * s->linesize; - dest_cb += (2 * block_s >> 
s->chroma_y_shift) * s->uvlinesize; - dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize; - } - break; - case MV_TYPE_DMV: - if (s->picture_structure == PICT_FRAME) { - for (i = 0; i < 2; i++) { - int j; - for (j = 0; j < 2; j++) { - mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, - 1, j, j ^ i, - ref_picture, pix_op, - s->mv[dir][2 * i + j][0], - s->mv[dir][2 * i + j][1], - block_s, mb_y); - } - pix_op = s->dsp.avg_h264_chroma_pixels_tab; - } - } else { - for (i = 0; i < 2; i++) { - mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr, - 0, 0, s->picture_structure != i + 1, - ref_picture, pix_op, - s->mv[dir][2 * i][0],s->mv[dir][2 * i][1], - 2 * block_s, mb_y >> 1); - - // after put we make avg of the same block - pix_op = s->dsp.avg_h264_chroma_pixels_tab; - - // opposite parity is always in the same - // frame if this is second field - if (!s->first_field) { - ref_picture = s->current_picture_ptr->f.data; - } - } - } - break; - default: - assert(0); - } -} - /** * find the lowest MB row referenced in the MVs */ @@ -2282,7 +1924,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s) */ static av_always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], - int lowres_flag, int is_mpeg12) + int is_mpeg12) { const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){ @@ -2327,8 +1969,8 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], qpel_mc_func (*op_qpix)[16]; const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics const int uvlinesize = s->current_picture.f.linesize[1]; - const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag; - const int block_size= lowres_flag ? 
8>>s->avctx->lowres : 8; + const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band; + const int block_size = 8; /* avoid copy if macroblock skipped in last frame too */ /* skip only during decoding as we might trash the buffers during encoding a bit */ @@ -2377,31 +2019,19 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], } } - if(lowres_flag){ - h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab; - - if (s->mv_dir & MV_DIR_FORWARD) { - MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix); - op_pix = s->dsp.avg_h264_chroma_pixels_tab; - } - if (s->mv_dir & MV_DIR_BACKWARD) { - MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix); - } + op_qpix= s->me.qpel_put; + if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){ + op_pix = s->dsp.put_pixels_tab; }else{ - op_qpix= s->me.qpel_put; - if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){ - op_pix = s->dsp.put_pixels_tab; - }else{ - op_pix = s->dsp.put_no_rnd_pixels_tab; - } - if (s->mv_dir & MV_DIR_FORWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix); - op_pix = s->dsp.avg_pixels_tab; - op_qpix= s->me.qpel_avg; - } - if (s->mv_dir & MV_DIR_BACKWARD) { - MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix); - } + op_pix = s->dsp.put_no_rnd_pixels_tab; + } + if (s->mv_dir & MV_DIR_FORWARD) { + MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix); + op_pix = s->dsp.avg_pixels_tab; + op_qpix= s->me.qpel_avg; + } + if (s->mv_dir & MV_DIR_BACKWARD) { + MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix); } } @@ -2527,12 +2157,10 @@ skip_idct: void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){ #if !CONFIG_SMALL if(s->out_format == FMT_MPEG1) { - if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1); - else MPV_decode_mb_internal(s, block, 0, 1); + MPV_decode_mb_internal(s, block, 1); } else #endif - if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0); - else MPV_decode_mb_internal(s, block, 0, 0); + MPV_decode_mb_internal(s, block, 0); } /** @@ -2607,7 +2235,7 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics const int uvlinesize = s->current_picture.f.linesize[1]; - const int mb_size= 4 - s->avctx->lowres; + const int mb_size= 4; s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2; s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2; diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h index 8f788a7c06..b73da416ba 100644 --- a/libavcodec/mpegvideo.h +++ b/libavcodec/mpegvideo.h @@ -785,7 +785,7 @@ extern const enum PixelFormat ff_pixfmt_list_420[]; extern const enum PixelFormat ff_hwaccel_pixfmt_list_420[]; static inline void ff_update_block_index(MpegEncContext *s){ - const int block_size= 8>>s->avctx->lowres; + const int block_size = 8; s->block_index[0]+=2; s->block_index[1]+=2; diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c index a91eea52a5..eed5067690 100644 --- a/libavcodec/msmpeg4.c +++ b/libavcodec/msmpeg4.c @@ -1216,7 +1216,6 @@ AVCodec ff_msmpeg4v1_decoder = { .close = ff_h263_decode_end, .decode = ff_h263_decode_frame, .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, - .max_lowres = 3, .long_name = 
NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"), .pix_fmts = ff_pixfmt_list_420, }; @@ -1230,7 +1229,6 @@ AVCodec ff_msmpeg4v2_decoder = { .close = ff_h263_decode_end, .decode = ff_h263_decode_frame, .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"), .pix_fmts = ff_pixfmt_list_420, }; @@ -1244,7 +1242,6 @@ AVCodec ff_msmpeg4v3_decoder = { .close = ff_h263_decode_end, .decode = ff_h263_decode_frame, .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"), .pix_fmts = ff_pixfmt_list_420, }; @@ -1258,7 +1255,6 @@ AVCodec ff_wmv1_decoder = { .close = ff_h263_decode_end, .decode = ff_h263_decode_frame, .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"), .pix_fmts = ff_pixfmt_list_420, }; diff --git a/libavcodec/mxpegdec.c b/libavcodec/mxpegdec.c index c00997007a..aeb474f287 100644 --- a/libavcodec/mxpegdec.c +++ b/libavcodec/mxpegdec.c @@ -338,5 +338,4 @@ AVCodec ff_mxpeg_decoder = { .close = mxpeg_decode_end, .decode = mxpeg_decode_frame, .capabilities = CODEC_CAP_DR1, - .max_lowres = 3, }; diff --git a/libavcodec/options_table.h b/libavcodec/options_table.h index 6615ade1a6..1e2560c941 100644 --- a/libavcodec/options_table.h +++ b/libavcodec/options_table.h @@ -318,7 +318,6 @@ static const AVOption options[]={ {"dts_hd_ma", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_PROFILE_DTS_HD_MA }, INT_MIN, INT_MAX, A|E, "profile"}, {"level", NULL, OFFSET(level), AV_OPT_TYPE_INT, {.dbl = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "level"}, {"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.dbl = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "level"}, -{"lowres", "decode at 1= 1/2, 2=1/4, 3=1/8 resolutions", OFFSET(lowres), AV_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, V|A|D}, {"skip_threshold", "frame skip threshold", OFFSET(frame_skip_threshold), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E}, {"skip_factor", "frame skip factor", OFFSET(frame_skip_factor), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E}, {"skip_exp", "frame skip exponent", OFFSET(frame_skip_exp), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E}, diff --git a/libavcodec/ppc/dsputil_ppc.c b/libavcodec/ppc/dsputil_ppc.c index ab8f7aef48..de347835e6 100644 --- a/libavcodec/ppc/dsputil_ppc.c +++ b/libavcodec/ppc/dsputil_ppc.c @@ -179,7 +179,7 @@ void ff_dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) } #endif //CONFIG_ENCODERS - if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) { + if (avctx->bits_per_raw_sample <= 8) { if ((avctx->idct_algo == FF_IDCT_AUTO) || (avctx->idct_algo == FF_IDCT_ALTIVEC)) { c->idct_put = ff_idct_put_altivec; diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c index c5047adb8e..bc81f08b9c 100644 --- a/libavcodec/rv10.c +++ b/libavcodec/rv10.c @@ -409,8 +409,7 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/ // s->obmc=1; // s->umvplus=1; s->modified_quant=1; - if(!s->avctx->lowres) - s->loop_filter=1; + s->loop_filter=1; if(s->avctx->debug & FF_DEBUG_PICT_INFO){ av_log(s->avctx, AV_LOG_INFO, "num:%5d x:%2d y:%2d type:%d qscale:%2d rnd:%d\n", @@ -727,7 +726,6 @@ AVCodec ff_rv10_decoder = { .close = rv10_decode_end, .decode = rv10_decode_frame, .capabilities = CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("RealVideo 1.0"), .pix_fmts = ff_pixfmt_list_420, }; @@ 
-742,7 +740,6 @@ AVCodec ff_rv20_decoder = { .decode = rv10_decode_frame, .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY, .flush = ff_mpeg_flush, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("RealVideo 2.0"), .pix_fmts = ff_pixfmt_list_420, }; diff --git a/libavcodec/sp5xdec.c b/libavcodec/sp5xdec.c index 256c2756ef..253c211709 100644 --- a/libavcodec/sp5xdec.c +++ b/libavcodec/sp5xdec.c @@ -102,7 +102,6 @@ AVCodec ff_sp5x_decoder = { .close = ff_mjpeg_decode_end, .decode = sp5x_decode_frame, .capabilities = CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("Sunplus JPEG (SP5X)"), }; diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 7caf93f65d..4492486771 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -143,8 +143,8 @@ unsigned avcodec_get_edge_width(void) void avcodec_set_dimensions(AVCodecContext *s, int width, int height){ s->coded_width = width; s->coded_height= height; - s->width = -((-width )>>s->lowres); - s->height= -((-height)>>s->lowres); + s->width = width; + s->height = height; } #define INTERNAL_BUFFER_SIZE (32+1) @@ -228,9 +228,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, *width = FFALIGN(*width , w_align); *height= FFALIGN(*height, h_align); - if(s->codec_id == CODEC_ID_H264 || s->lowres) + if (s->codec_id == CODEC_ID_H264) *height+=2; // some of the optimized chroma MC reads one line too much - // which is also done in mpeg decoders with lowres > 0 for (i = 0; i < 4; i++) linesize_align[i] = STRIDE_ALIGN; @@ -741,12 +740,6 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD if (!HAVE_THREADS && !(codec->capabilities & CODEC_CAP_AUTO_THREADS)) avctx->thread_count = 1; - if (avctx->codec->max_lowres < avctx->lowres) { - av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n", - avctx->codec->max_lowres); - ret = AVERROR(EINVAL); - goto free_and_end; - } if (av_codec_is_encoder(avctx->codec)) { int i; if (avctx->codec->sample_fmts) { diff --git a/libavcodec/wmv2dec.c b/libavcodec/wmv2dec.c index 4847efac31..2e4bbaac50 100644 --- a/libavcodec/wmv2dec.c +++ b/libavcodec/wmv2dec.c @@ -479,7 +479,6 @@ AVCodec ff_wmv2_decoder = { .close = wmv2_decode_end, .decode = ff_h263_decode_frame, .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1, - .max_lowres = 3, .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 8"), .pix_fmts = ff_pixfmt_list_420, }; diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c index 6695cafb65..89be32840d 100644 --- a/libavcodec/x86/dsputil_mmx.c +++ b/libavcodec/x86/dsputil_mmx.c @@ -3121,7 +3121,7 @@ void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx) if (mm_flags & AV_CPU_FLAG_MMX) { const int idct_algo = avctx->idct_algo; - if (avctx->lowres == 0 && avctx->bits_per_raw_sample <= 8) { + if (avctx->bits_per_raw_sample <= 8) { if (idct_algo == FF_IDCT_AUTO || idct_algo == FF_IDCT_SIMPLEMMX) { c->idct_put = ff_simple_idct_put_mmx; c->idct_add = ff_simple_idct_add_mmx; |