Diffstat (limited to 'libavcodec/mpegvideo.c')
-rw-r--r--  libavcodec/mpegvideo.c  294
1 file changed, 155 insertions, 139 deletions
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index b8ac2ceb3d..041c0c04c3 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -5,20 +5,20 @@
*
* 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -616,85 +616,85 @@ av_cold int MPV_common_init(MpegEncContext *s)
s->flags= s->avctx->flags;
s->flags2= s->avctx->flags2;
- if (s->width && s->height) {
- s->mb_width = (s->width + 15) / 16;
- s->mb_stride = s->mb_width + 1;
- s->b8_stride = s->mb_width*2 + 1;
- s->b4_stride = s->mb_width*4 + 1;
- mb_array_size= s->mb_height * s->mb_stride;
- mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
+ s->mb_width = (s->width + 15) / 16;
+ s->mb_stride = s->mb_width + 1;
+ s->b8_stride = s->mb_width*2 + 1;
+ s->b4_stride = s->mb_width*4 + 1;
+ mb_array_size= s->mb_height * s->mb_stride;
+ mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
- /* set chroma shifts */
- avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
- &(s->chroma_y_shift) );
+ /* set chroma shifts */
+ avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
+ &(s->chroma_y_shift) );
- /* set default edge pos, will be overriden in decode_header if needed */
- s->h_edge_pos= s->mb_width*16;
- s->v_edge_pos= s->mb_height*16;
+ /* set default edge pos, will be overriden in decode_header if needed */
+ s->h_edge_pos= s->mb_width*16;
+ s->v_edge_pos= s->mb_height*16;
- s->mb_num = s->mb_width * s->mb_height;
+ s->mb_num = s->mb_width * s->mb_height;
- s->block_wrap[0]=
- s->block_wrap[1]=
- s->block_wrap[2]=
- s->block_wrap[3]= s->b8_stride;
- s->block_wrap[4]=
- s->block_wrap[5]= s->mb_stride;
+ s->block_wrap[0]=
+ s->block_wrap[1]=
+ s->block_wrap[2]=
+ s->block_wrap[3]= s->b8_stride;
+ s->block_wrap[4]=
+ s->block_wrap[5]= s->mb_stride;
- y_size = s->b8_stride * (2 * s->mb_height + 1);
- c_size = s->mb_stride * (s->mb_height + 1);
- yc_size = y_size + 2 * c_size;
+ y_size = s->b8_stride * (2 * s->mb_height + 1);
+ c_size = s->mb_stride * (s->mb_height + 1);
+ yc_size = y_size + 2 * c_size;
- /* convert fourcc to upper case */
- s->codec_tag = ff_toupper4(s->avctx->codec_tag);
+ /* convert fourcc to upper case */
+ s->codec_tag = ff_toupper4(s->avctx->codec_tag);
- s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
+ s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
- s->avctx->coded_frame= (AVFrame*)&s->current_picture;
+ s->avctx->coded_frame= (AVFrame*)&s->current_picture;
- FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
- for(y=0; y<s->mb_height; y++){
- for(x=0; x<s->mb_width; x++){
- s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
- }
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
+ for(y=0; y<s->mb_height; y++){
+ for(x=0; x<s->mb_width; x++){
+ s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
}
- s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
-
- if (s->encoding) {
- /* Allocate MV tables */
- FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
- s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
- s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
- s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
- s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
- s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
- s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
-
- if(s->msmpeg4_version){
- FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
- }
- FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
-
- /* Allocate MB type table */
- FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
-
- FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
+ }
+ s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
- FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
+ if (s->encoding) {
+ /* Allocate MV tables */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
+ s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
+ s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
+ s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
+ s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
+ s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
+ s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
+
+ if(s->msmpeg4_version){
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
+ }
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
+
+ /* Allocate MB type table */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
+
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
+
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix , 64*32 * sizeof(int), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
- if(s->avctx->noise_reduction){
- FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
- }
+ if(s->avctx->noise_reduction){
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
}
}
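
Note on the hunk above: besides dropping the "if (s->width && s->height)" guard (which accounts for most of the re-indentation), it adds separate chroma intra quantisation matrices (q_chroma_intra_matrix, q_chroma_intra_matrix16) next to the existing luma ones. As a minimal sketch, assuming mb_height is derived the same way as mb_width elsewhere in the file, the macroblock-geometry arithmetic kept by the hunk works out as follows:

    /* Minimal sketch (not FFmpeg API) of the size arithmetic retained above:
     * dimensions are rounded up to whole 16x16 macroblocks and every stride
     * carries one extra guard column so edge positions have a neighbour. */
    #include <stdio.h>

    int main(void)
    {
        int width = 1920, height = 1080;            /* illustrative input size */
        int mb_width      = (width  + 15) / 16;     /* 120 macroblock columns  */
        int mb_height     = (height + 15) / 16;     /* 68 macroblock rows      */
        int mb_stride     = mb_width + 1;           /* +1 guard column         */
        int b8_stride     = mb_width * 2 + 1;       /* 8x8 block grid          */
        int mb_array_size = mb_height * mb_stride;
        int mv_table_size = (mb_height + 2) * mb_stride + 1;

        printf("mb %dx%d  stride %d  b8_stride %d  array %d  mv_table %d\n",
               mb_width, mb_height, mb_stride, b8_stride,
               mb_array_size, mv_table_size);
        return 0;
    }
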
@@ -704,11 +704,10 @@ av_cold int MPV_common_init(MpegEncContext *s)
avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
}
- if (s->width && s->height) {
- FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
- if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
- /* interlaced direct mode decoding tables */
+ if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
+ /* interlaced direct mode decoding tables */
for(i=0; i<2; i++){
int j, k;
for(j=0; j<2; j++){
@@ -722,49 +721,47 @@ av_cold int MPV_common_init(MpegEncContext *s)
}
FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
}
- }
- if (s->out_format == FMT_H263) {
- /* cbp values */
- FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
- s->coded_block= s->coded_block_base + s->b8_stride + 1;
-
- /* cbp, ac_pred, pred_dir */
- FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
- FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
- }
+ }
+ if (s->out_format == FMT_H263) {
+ /* cbp values */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
+ s->coded_block= s->coded_block_base + s->b8_stride + 1;
- if (s->h263_pred || s->h263_plus || !s->encoding) {
- /* dc values */
- //MN: we need these for error resilience of intra-frames
- FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
- s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
- s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
- s->dc_val[2] = s->dc_val[1] + c_size;
- for(i=0;i<yc_size;i++)
- s->dc_val_base[i] = 1024;
- }
+ /* cbp, ac_pred, pred_dir */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
+ }
- /* which mb is a intra block */
- FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
- memset(s->mbintra_table, 1, mb_array_size);
+ if (s->h263_pred || s->h263_plus || !s->encoding) {
+ /* dc values */
+ //MN: we need these for error resilience of intra-frames
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
+ s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
+ s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
+ s->dc_val[2] = s->dc_val[1] + c_size;
+ for(i=0;i<yc_size;i++)
+ s->dc_val_base[i] = 1024;
+ }
- /* init macroblock skip table */
- FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
- //Note the +1 is for a quicker mpeg4 slice_end detection
- FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
+ /* which mb is a intra block */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
+ memset(s->mbintra_table, 1, mb_array_size);
- s->parse_context.state= -1;
- if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
- s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
- s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
- s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
- }
+ /* init macroblock skip table */
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
+ //Note the +1 is for a quicker mpeg4 slice_end detection
+ FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
+
+ s->parse_context.state= -1;
+ if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
+ s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
+ s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
+ s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
}
s->context_initialized = 1;
s->thread_context[0]= s;
- if (s->width && s->height) {
if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
for(i=1; i<threads; i++){
s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
@@ -781,7 +778,7 @@ av_cold int MPV_common_init(MpegEncContext *s)
if(init_duplicate_context(s, s) < 0) goto fail;
s->start_mb_y = 0;
s->end_mb_y = s->mb_height;
- }
+
}
return 0;
@@ -849,6 +846,10 @@ void MPV_common_end(MpegEncContext *s)
av_freep(&s->error_status_table);
av_freep(&s->mb_index2xy);
av_freep(&s->lambda_table);
+ if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
+ if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
+ s->q_chroma_intra_matrix= NULL;
+ s->q_chroma_intra_matrix16= NULL;
av_freep(&s->q_intra_matrix);
av_freep(&s->q_inter_matrix);
av_freep(&s->q_intra_matrix16);
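
Note on the hunk above: q_chroma_intra_matrix may alias q_intra_matrix when a codec has no distinct chroma matrix, so the chroma pointer is freed only when it owns separate memory and is then cleared, keeping the unconditional av_freep() of the luma matrix from turning into a double free. A generic sketch of that pattern, with illustrative names rather than the FFmpeg API:

    /* Alias-aware free: `secondary` may point at the same buffer as `primary`. */
    #include <stdlib.h>

    static void free_possibly_aliased(void **secondary, void **primary)
    {
        if (*secondary != *primary)
            free(*secondary);   /* distinct allocation: release it        */
        *secondary = NULL;      /* aliased or freed: never touch it again */
        free(*primary);
        *primary = NULL;
    }
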
@@ -1128,6 +1129,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Allocate a dummy frame */
i= ff_find_unused_picture(s, 0);
s->last_picture_ptr= &s->picture[i];
+ s->last_picture_ptr->f.key_frame = 0;
if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
return -1;
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
@@ -1137,6 +1139,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Allocate a dummy frame */
i= ff_find_unused_picture(s, 0);
s->next_picture_ptr= &s->picture[i];
+ s->next_picture_ptr->f.key_frame = 0;
if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
return -1;
ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
@@ -1196,7 +1199,7 @@ void MPV_frame_end(MpegEncContext *s)
//just to make sure that all data is rendered.
if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
ff_xvmc_field_end(s);
- }else if((s->error_count || s->encoding)
+ }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
&& !s->avctx->hwaccel
&& !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
&& s->unrestricted_mv
@@ -1347,15 +1350,8 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
int x,y;
- av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
- switch (pict->pict_type) {
- case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
- case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
- case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
- case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
- case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
- case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
- }
+ av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
+ av_get_picture_type_char(pict->pict_type));
for(y=0; y<s->mb_height; y++){
for(x=0; x<s->mb_width; x++){
if(s->avctx->debug&FF_DEBUG_SKIP){
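
Note on the hunk above: the six-way switch is collapsed into a single av_log() call via av_get_picture_type_char() from libavutil, which maps an AVPictureType to a one-letter name. An illustrative stand-in for that mapping (the real helper also covers AV_PICTURE_TYPE_BI and is the one actually used here):

    static char picture_type_char(int pict_type)
    {
        switch (pict_type) {
        case 1:  return 'I';   /* AV_PICTURE_TYPE_I  */
        case 2:  return 'P';   /* AV_PICTURE_TYPE_P  */
        case 3:  return 'B';   /* AV_PICTURE_TYPE_B  */
        case 4:  return 'S';   /* AV_PICTURE_TYPE_S  */
        case 5:  return 'i';   /* AV_PICTURE_TYPE_SI */
        case 6:  return 'p';   /* AV_PICTURE_TYPE_SP */
        default: return '?';
        }
    }
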
@@ -1438,6 +1434,7 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
pict->data[i]= s->visualization_buffer[i];
}
pict->type= FF_BUFFER_TYPE_COPY;
+ pict->opaque= NULL;
ptr= pict->data[0];
block_height = 16>>v_chroma_shift;
@@ -1652,7 +1649,7 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
uint8_t *ptr_y, *ptr_cb, *ptr_cr;
int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
const int lowres= s->avctx->lowres;
- const int op_index= FFMIN(lowres, 2);
+ const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
const int block_s= 8>>lowres;
const int s_mask= (2<<lowres)-1;
const int h_edge_pos = s->h_edge_pos >> lowres;
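
Note on the hunk above: the chroma operator index now folds in the horizontal subsampling. Assuming the pix_op table is indexed by how much narrower than 8 pixels the block is (consistent with the luma path using lowres - 1), 4:2:0 and 4:2:2 keep the previous index while 4:4:4 selects the wider operator its unsubsampled chroma needs. A worked sketch of the formula:

    /* op_index = FFMIN(lowres - 1 + chroma_x_shift, 2)
     * lowres=1, 4:2:0/4:2:2 (chroma_x_shift=1): 1 -> same operator as before
     * lowres=1, 4:4:4       (chroma_x_shift=0): 0 -> full-width chroma operator */
    static int lowres_chroma_op_index(int lowres, int chroma_x_shift)
    {
        int idx = lowres - 1 + chroma_x_shift;
        return idx < 2 ? idx : 2;   /* equivalent to FFMIN(idx, 2) */
    }
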
@@ -1687,12 +1684,29 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
uvsrc_x = s->mb_x*block_s + (mx >> lowres);
uvsrc_y = mb_y*block_s + (my >> lowres);
} else {
- mx = motion_x / 2;
- my = motion_y / 2;
- uvsx = mx & s_mask;
- uvsy = my & s_mask;
- uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
- uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
+ if(s->chroma_y_shift){
+ mx = motion_x / 2;
+ my = motion_y / 2;
+ uvsx = mx & s_mask;
+ uvsy = my & s_mask;
+ uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
+ uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
+ } else {
+ if(s->chroma_x_shift){
+ //Chroma422
+ mx = motion_x / 2;
+ uvsx = mx & s_mask;
+ uvsy = motion_y & s_mask;
+ uvsrc_y = src_y;
+ uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
+ } else {
+ //Chroma444
+ uvsx = motion_x & s_mask;
+ uvsy = motion_y & s_mask;
+ uvsrc_x = src_x;
+ uvsrc_y = src_y;
+ }
+ }
}
ptr_y = ref_picture[0] + src_y * linesize + src_x;
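
Note on the hunk above: lowres chroma motion compensation is extended beyond 4:2:0. A luma motion vector component is halved only where chroma is actually subsampled, so 4:2:2 halves just the horizontal part and 4:4:4 reuses the luma vector and source position unchanged. A small sketch of that scaling rule (illustrative, not the decoder itself):

    /* chroma_x_shift/chroma_y_shift are 1 where chroma has half resolution:
     * 4:2:0 -> (1,1), 4:2:2 -> (1,0), 4:4:4 -> (0,0). */
    static void scale_mv_for_chroma(int mx, int my,
                                    int chroma_x_shift, int chroma_y_shift,
                                    int *cmx, int *cmy)
    {
        *cmx = chroma_x_shift ? mx / 2 : mx;
        *cmy = chroma_y_shift ? my / 2 : my;
    }
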
@@ -1734,8 +1748,10 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
uvsx= (uvsx << 2) >> lowres;
uvsy= (uvsy << 2) >> lowres;
- pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
- pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
+ if(h >> s->chroma_y_shift){
+ pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
+ pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
+ }
}
//FIXME h261 lowres loop filter
}
@@ -2198,17 +2214,17 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
}else{
//chroma422
dct_linesize = uvlinesize << s->interlaced_dct;
- dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
+ dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
add_dct(s, block[4], 4, dest_cb, dct_linesize);
add_dct(s, block[5], 5, dest_cr, dct_linesize);
add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
if(!s->chroma_x_shift){//Chroma444
- add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
- add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
- add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
- add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
+ add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
+ add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
+ add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
+ add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
}
}
}//fi gray
@@ -2250,17 +2266,17 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
}else{
dct_linesize = uvlinesize << s->interlaced_dct;
- dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
+ dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
if(!s->chroma_x_shift){//Chroma444
- s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
- s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
- s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
- s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
+ s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
+ s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
+ s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
+ s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
}
}
}//gray
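
Note on the last two hunks: both the inter add_dct path and the intra idct_put path replace the hard-coded 8 with block_size when stepping between chroma blocks, so the 4:2:2 second block row and the 4:4:4 second block column stay consistent with the actual block width (block_size is assumed here to be 8 at full resolution and 8 >> lowres under lowres decoding, as set up earlier in the function). A worked sketch of the offsets:

    #include <stdint.h>

    /* Address of the chroma block at (row, col) inside one macroblock,
     * non-interlaced case; illustrative only. */
    static uint8_t *chroma_block(uint8_t *dest, int uvlinesize,
                                 int block_size, int row, int col)
    {
        int dct_offset = uvlinesize * block_size;           /* one block row down  */
        return dest + row * dct_offset + col * block_size;  /* one block right     */
    }
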