summaryrefslogtreecommitdiff
path: root/libavcodec/mss12.c
diff options
context:
space:
mode:
Diffstat (limited to 'libavcodec/mss12.c')
-rw-r--r-- libavcodec/mss12.c 382
1 files changed, 252 insertions, 130 deletions
diff --git a/libavcodec/mss12.c b/libavcodec/mss12.c
index 38291d910c..9283470884 100644
--- a/libavcodec/mss12.c
+++ b/libavcodec/mss12.c
@@ -47,12 +47,8 @@ static int model_calc_threshold(Model *m)
{
int thr;
- if (m->thr_weight == -1) {
- thr = 2 * m->weights[m->num_syms] - 1;
- thr = ((thr >> 1) + 4 * m->cum_prob[0]) / thr;
- } else {
- thr = m->num_syms * m->thr_weight;
- }
+ thr = 2 * m->weights[m->num_syms] - 1;
+ thr = ((thr >> 1) + 4 * m->cum_prob[0]) / thr;
return FFMIN(thr, 0x3FFF);
}
@@ -78,7 +74,7 @@ static av_cold void model_init(Model *m, int num_syms, int thr_weight)
{
m->num_syms = num_syms;
m->thr_weight = thr_weight;
- m->threshold = model_calc_threshold(m);
+ m->threshold = num_syms * thr_weight;
model_reset(m);
}
@@ -87,7 +83,7 @@ static void model_rescale_weights(Model *m)
int i;
int cum_prob;
- if (m->thr_weight == -1)
+ if (m->thr_weight == THRESH_ADAPTIVE)
m->threshold = model_calc_threshold(m);
while (m->cum_prob[0] > m->threshold) {
cum_prob = 0;
@@ -129,8 +125,14 @@ static void pixctx_reset(PixContext *ctx)
{
int i, j, k;
- for (i = 0; i < ctx->cache_size; i++)
- ctx->cache[i] = i;
+ if (!ctx->special_initial_cache)
+ for (i = 0; i < ctx->cache_size; i++)
+ ctx->cache[i] = i;
+ else {
+ ctx->cache[0] = 1;
+ ctx->cache[1] = 2;
+ ctx->cache[2] = 4;
+ }
model_reset(&ctx->cache_model);
model_reset(&ctx->full_model);
@@ -141,27 +143,23 @@ static void pixctx_reset(PixContext *ctx)
model_reset(&ctx->sec_models[i][j][k]);
}
-static av_cold void pixctx_init(PixContext *ctx, int cache_size)
+static av_cold void pixctx_init(PixContext *ctx, int cache_size,
+ int full_model_syms, int special_initial_cache)
{
int i, j, k;
- ctx->cache_size = cache_size + 4;
- ctx->num_syms = cache_size;
-
- for (i = 0; i < ctx->cache_size; i++)
- ctx->cache[i] = i;
+ ctx->cache_size = cache_size + 4;
+ ctx->num_syms = cache_size;
+ ctx->special_initial_cache = special_initial_cache;
model_init(&ctx->cache_model, ctx->num_syms + 1, THRESH_LOW);
- model_init(&ctx->full_model, 256, THRESH_HIGH);
+ model_init(&ctx->full_model, full_model_syms, THRESH_HIGH);
- for (i = 0; i < 4; i++) {
- for (j = 0; j < sec_order_sizes[i]; j++) {
- for (k = 0; k < 4; k++) {
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < sec_order_sizes[i]; j++)
+ for (k = 0; k < 4; k++)
model_init(&ctx->sec_models[i][j][k], 2 + i,
i ? THRESH_LOW : THRESH_ADAPTIVE);
- }
- }
- }
}
static int decode_top_left_pixel(ArithCoder *acoder, PixContext *pctx)
@@ -196,7 +194,6 @@ static int decode_pixel(ArithCoder *acoder, PixContext *pctx,
if (val < pctx->num_syms) {
int idx, j;
-
idx = 0;
for (i = 0; i < pctx->cache_size; i++) {
for (j = 0; j < num_ngb; j++)
@@ -309,195 +306,288 @@ static int decode_pixel_in_context(ArithCoder *acoder, PixContext *pctx,
break;
}
- pix = acoder->get_model_sym(acoder, &pctx->sec_models[nlen - 1][layer][sub]);
+ pix = acoder->get_model_sym(acoder,
+ &pctx->sec_models[nlen - 1][layer][sub]);
if (pix < nlen)
return ref_pix[pix];
else
return decode_pixel(acoder, pctx, ref_pix, nlen);
}
-static int decode_region(MSS12Context *ctx, ArithCoder *acoder, uint8_t *dst,
+static int decode_region(ArithCoder *acoder, uint8_t *dst, uint8_t *rgb_pic,
int x, int y, int width, int height, int stride,
- PixContext *pctx)
+ int rgb_stride, PixContext *pctx, const uint32_t *pal)
{
- int i, j;
+ int i, j, p;
+ uint8_t *rgb_dst = rgb_pic + x * 3 + y * rgb_stride;
dst += x + y * stride;
- dst[0] = decode_top_left_pixel(acoder, pctx);
for (j = 0; j < height; j++) {
for (i = 0; i < width; i++) {
if (!i && !j)
- continue;
+ p = decode_top_left_pixel(acoder, pctx);
+ else
+ p = decode_pixel_in_context(acoder, pctx, dst + i, stride,
+ i, j, width - i - 1);
+ dst[i] = p;
- dst[i] = decode_pixel_in_context(acoder, pctx, dst + i, stride,
- i, j, width - i - 1);
+ if (rgb_pic)
+ AV_WB24(rgb_dst + i * 3, pal[p]);
}
- dst += stride;
+ dst += stride;
+ rgb_dst += rgb_stride;
}
return 0;
}
-static int decode_region_masked(MSS12Context *ctx, ArithCoder *acoder,
+static void copy_rectangles(MSS12Context const *c,
+ int x, int y, int width, int height)
+{
+ int j;
+
+ if (c->last_rgb_pic)
+ for (j = y; j < y + height; j++) {
+ memcpy(c->rgb_pic + j * c->rgb_stride + x * 3,
+ c->last_rgb_pic + j * c->rgb_stride + x * 3,
+ width * 3);
+ memcpy(c->pal_pic + j * c->pal_stride + x,
+ c->last_pal_pic + j * c->pal_stride + x,
+ width);
+ }
+}
+
+static int motion_compensation(MSS12Context const *c,
+ int x, int y, int width, int height)
+{
+ if (x + c->mvX < 0 || x + c->mvX + width > c->avctx->width ||
+ y + c->mvY < 0 || y + c->mvY + height > c->avctx->height ||
+ !c->rgb_pic)
+ return -1;
+ else {
+ uint8_t *dst = c->pal_pic + x + y * c->pal_stride;
+ uint8_t *rgb_dst = c->rgb_pic + x * 3 + y * c->rgb_stride;
+ uint8_t *src;
+ uint8_t *rgb_src;
+ int j;
+ x += c->mvX;
+ y += c->mvY;
+ if (c->last_rgb_pic) {
+ src = c->last_pal_pic + x + y * c->pal_stride;
+ rgb_src = c->last_rgb_pic + x * 3 + y * c->rgb_stride;
+ } else {
+ src = c->pal_pic + x + y * c->pal_stride;
+ rgb_src = c->rgb_pic + x * 3 + y * c->rgb_stride;
+ }
+ for (j = 0; j < height; j++) {
+ memmove(dst, src, width);
+ memmove(rgb_dst, rgb_src, width * 3);
+ dst += c->pal_stride;
+ src += c->pal_stride;
+ rgb_dst += c->rgb_stride;
+ rgb_src += c->rgb_stride;
+ }
+ }
+ return 0;
+}
+
+static int decode_region_masked(MSS12Context const *c, ArithCoder *acoder,
uint8_t *dst, int stride, uint8_t *mask,
int mask_stride, int x, int y,
int width, int height,
PixContext *pctx)
{
- int i, j;
+ int i, j, p;
+ uint8_t *rgb_dst = c->rgb_pic + x * 3 + y * c->rgb_stride;
dst += x + y * stride;
mask += x + y * mask_stride;
- if (mask[0] == 0xFF)
- dst[0] = decode_top_left_pixel(acoder, pctx);
for (j = 0; j < height; j++) {
for (i = 0; i < width; i++) {
- if (!i && !j || mask[i] != 0xFF)
- continue;
-
- dst[i] = decode_pixel_in_context(acoder, pctx, dst + i, stride,
- i, j, width - i - 1);
+ if (c->avctx->err_recognition & AV_EF_EXPLODE &&
+ ( c->rgb_pic && mask[i] != 0x01 && mask[i] != 0x02 && mask[i] != 0x04 ||
+ !c->rgb_pic && mask[i] != 0x80 && mask[i] != 0xFF))
+ return -1;
+
+ if (mask[i] == 0x02) {
+ copy_rectangles(c, x + i, y + j, 1, 1);
+ } else if (mask[i] == 0x04) {
+ if (motion_compensation(c, x + i, y + j, 1, 1))
+ return -1;
+ } else if (mask[i] != 0x80) {
+ if (!i && !j)
+ p = decode_top_left_pixel(acoder, pctx);
+ else
+ p = decode_pixel_in_context(acoder, pctx, dst + i, stride,
+ i, j, width - i - 1);
+ dst[i] = p;
+ if (c->rgb_pic)
+ AV_WB24(rgb_dst + i * 3, c->pal[p]);
+ }
}
- dst += stride;
- mask += mask_stride;
+ dst += stride;
+ mask += mask_stride;
+ rgb_dst += c->rgb_stride;
}
return 0;
}
-static av_cold void codec_init(MSS12Context *ctx)
+static av_cold void codec_init(MSS12Context *c, int version)
{
- model_init(&ctx->intra_region, 2, THRESH_ADAPTIVE);
- model_init(&ctx->inter_region, 2, THRESH_ADAPTIVE);
- model_init(&ctx->split_mode, 3, THRESH_HIGH);
- model_init(&ctx->edge_mode, 2, THRESH_HIGH);
- model_init(&ctx->pivot, 3, THRESH_LOW);
- pixctx_init(&ctx->intra_pix_ctx, 8);
- pixctx_init(&ctx->inter_pix_ctx, 2);
- ctx->corrupted = 1;
+ int i;
+ for (i = 0; i < (c->slice_split ? 2 : 1); i++) {
+ c->sc[i].c = c;
+ model_init(&c->sc[i].intra_region, 2, THRESH_ADAPTIVE);
+ model_init(&c->sc[i].inter_region, 2, THRESH_ADAPTIVE);
+ model_init(&c->sc[i].split_mode, 3, THRESH_HIGH);
+ model_init(&c->sc[i].edge_mode, 2, THRESH_HIGH);
+ model_init(&c->sc[i].pivot, 3, THRESH_LOW);
+
+ pixctx_init(&c->sc[i].intra_pix_ctx, 8, c->full_model_syms, 0);
+
+ pixctx_init(&c->sc[i].inter_pix_ctx, version ? 3 : 2,
+ c->full_model_syms, version ? 1 : 0);
+ }
+ c->corrupted = 1;
}
-void ff_mss12_codec_reset(MSS12Context *ctx)
+void ff_mss12_codec_reset(MSS12Context *c)
{
- model_reset(&ctx->intra_region);
- model_reset(&ctx->inter_region);
- model_reset(&ctx->split_mode);
- model_reset(&ctx->edge_mode);
- model_reset(&ctx->pivot);
- pixctx_reset(&ctx->intra_pix_ctx);
- pixctx_reset(&ctx->inter_pix_ctx);
-
- ctx->corrupted = 0;
+ int i;
+ for (i = 0; i < (c->slice_split ? 2 : 1); i++) {
+ model_reset(&c->sc[i].intra_region);
+ model_reset(&c->sc[i].inter_region);
+ model_reset(&c->sc[i].split_mode);
+ model_reset(&c->sc[i].edge_mode);
+ model_reset(&c->sc[i].pivot);
+ pixctx_reset(&c->sc[i].intra_pix_ctx);
+ pixctx_reset(&c->sc[i].inter_pix_ctx);
+ }
+
+ c->corrupted = 0;
}
-static int decode_pivot(MSS12Context *ctx, ArithCoder *acoder, int base)
+static int decode_pivot(SliceContext *sc, ArithCoder *acoder, int base)
{
int val, inv;
- inv = acoder->get_model_sym(acoder, &ctx->edge_mode);
- val = acoder->get_model_sym(acoder, &ctx->pivot) + 1;
+ inv = acoder->get_model_sym(acoder, &sc->edge_mode);
+ val = acoder->get_model_sym(acoder, &sc->pivot) + 1;
if (val > 2) {
- if ((base + 1) / 2 - 2 <= 0) {
- ctx->corrupted = 1;
- return 0;
- }
+ if ((base + 1) / 2 - 2 <= 0)
+ return -1;
+
val = acoder->get_number(acoder, (base + 1) / 2 - 2) + 3;
}
- if ((unsigned)val >= base) {
- ctx->corrupted = 1;
- return 0;
- }
+ if ((unsigned)val >= base)
+ return -1;
return inv ? base - val : val;
}
-static int decode_region_intra(MSS12Context *ctx, ArithCoder *acoder,
+static int decode_region_intra(SliceContext *sc, ArithCoder *acoder,
int x, int y, int width, int height)
{
+ MSS12Context const *c = sc->c;
int mode;
- mode = acoder->get_model_sym(acoder, &ctx->intra_region);
+ mode = acoder->get_model_sym(acoder, &sc->intra_region);
if (!mode) {
- int i, pix;
- int stride = ctx->pic_stride;
- uint8_t *dst = ctx->pic_start + x + y * stride;
-
- pix = decode_top_left_pixel(acoder, &ctx->intra_pix_ctx);
- for (i = 0; i < height; i++, dst += stride)
+ int i, j, pix, rgb_pix;
+ int stride = c->pal_stride;
+ int rgb_stride = c->rgb_stride;
+ uint8_t *dst = c->pal_pic + x + y * stride;
+ uint8_t *rgb_dst = c->rgb_pic + x * 3 + y * rgb_stride;
+
+ pix = decode_top_left_pixel(acoder, &sc->intra_pix_ctx);
+ rgb_pix = c->pal[pix];
+ for (i = 0; i < height; i++, dst += stride, rgb_dst += rgb_stride) {
memset(dst, pix, width);
+ if (c->rgb_pic)
+ for (j = 0; j < width * 3; j += 3)
+ AV_WB24(rgb_dst + j, rgb_pix);
+ }
} else {
- return decode_region(ctx, acoder, ctx->pic_start,
- x, y, width, height, ctx->pic_stride,
- &ctx->intra_pix_ctx);
+ return decode_region(acoder, c->pal_pic, c->rgb_pic,
+ x, y, width, height, c->pal_stride, c->rgb_stride,
+ &sc->intra_pix_ctx, &c->pal[0]);
}
return 0;
}
-static int decode_region_inter(MSS12Context *ctx, ArithCoder *acoder,
+static int decode_region_inter(SliceContext *sc, ArithCoder *acoder,
int x, int y, int width, int height)
{
+ MSS12Context const *c = sc->c;
int mode;
- mode = acoder->get_model_sym(acoder, &ctx->inter_region);
+ mode = acoder->get_model_sym(acoder, &sc->inter_region);
if (!mode) {
- mode = decode_top_left_pixel(acoder, &ctx->inter_pix_ctx);
- if (mode != 0xFF) {
- return 0;
- } else {
- return decode_region_intra(ctx, acoder, x, y, width, height);
- }
+ mode = decode_top_left_pixel(acoder, &sc->inter_pix_ctx);
+
+ if (c->avctx->err_recognition & AV_EF_EXPLODE &&
+ ( c->rgb_pic && mode != 0x01 && mode != 0x02 && mode != 0x04 ||
+ !c->rgb_pic && mode != 0x80 && mode != 0xFF))
+ return -1;
+
+ if (mode == 0x02)
+ copy_rectangles(c, x, y, width, height);
+ else if (mode == 0x04)
+ return motion_compensation(c, x, y, width, height);
+ else if (mode != 0x80)
+ return decode_region_intra(sc, acoder, x, y, width, height);
} else {
- if (decode_region(ctx, acoder, ctx->mask,
- x, y, width, height, ctx->mask_linesize,
- &ctx->inter_pix_ctx) < 0)
+ if (decode_region(acoder, c->mask, NULL,
+ x, y, width, height, c->mask_stride, 0,
+ &sc->inter_pix_ctx, &c->pal[0]) < 0)
return -1;
- return decode_region_masked(ctx, acoder, ctx->pic_start,
- ctx->pic_stride, ctx->mask,
- ctx->mask_linesize,
+ return decode_region_masked(c, acoder, c->pal_pic,
+ c->pal_stride, c->mask,
+ c->mask_stride,
x, y, width, height,
- &ctx->intra_pix_ctx);
+ &sc->intra_pix_ctx);
}
return 0;
}
-int ff_mss12_decode_rect(MSS12Context *ctx, ArithCoder *acoder,
+int ff_mss12_decode_rect(SliceContext *sc, ArithCoder *acoder,
int x, int y, int width, int height)
{
int mode, pivot;
- if (ctx->corrupted)
- return -1;
-
- mode = acoder->get_model_sym(acoder, &ctx->split_mode);
+ mode = acoder->get_model_sym(acoder, &sc->split_mode);
switch (mode) {
case SPLIT_VERT:
- pivot = decode_pivot(ctx, acoder, height);
- if (ff_mss12_decode_rect(ctx, acoder, x, y, width, pivot))
+ if ((pivot = decode_pivot(sc, acoder, height)) < 1)
+ return -1;
+ if (ff_mss12_decode_rect(sc, acoder, x, y, width, pivot))
return -1;
- if (ff_mss12_decode_rect(ctx, acoder, x, y + pivot, width, height - pivot))
+ if (ff_mss12_decode_rect(sc, acoder, x, y + pivot, width, height - pivot))
return -1;
break;
case SPLIT_HOR:
- pivot = decode_pivot(ctx, acoder, width);
- if (ff_mss12_decode_rect(ctx, acoder, x, y, pivot, height))
+ if ((pivot = decode_pivot(sc, acoder, width)) < 1)
return -1;
- if (ff_mss12_decode_rect(ctx, acoder, x + pivot, y, width - pivot, height))
+ if (ff_mss12_decode_rect(sc, acoder, x, y, pivot, height))
+ return -1;
+ if (ff_mss12_decode_rect(sc, acoder, x + pivot, y, width - pivot, height))
return -1;
break;
case SPLIT_NONE:
- if (ctx->keyframe)
- return decode_region_intra(ctx, acoder, x, y, width, height);
+ if (sc->c->keyframe)
+ return decode_region_intra(sc, acoder, x, y, width, height);
else
- return decode_region_inter(ctx, acoder, x, y, width, height);
+ return decode_region_inter(sc, acoder, x, y, width, height);
default:
return -1;
}
@@ -505,13 +595,11 @@ int ff_mss12_decode_rect(MSS12Context *ctx, ArithCoder *acoder,
return 0;
}
-av_cold int ff_mss12_decode_init(AVCodecContext *avctx, int version)
+av_cold int ff_mss12_decode_init(MSS12Context *c, int version)
{
- MSS12Context * const c = avctx->priv_data;
+ AVCodecContext *avctx = c->avctx;
int i;
- c->avctx = avctx;
-
if (avctx->extradata_size < 52 + 256 * 3) {
av_log(avctx, AV_LOG_ERROR, "Insufficient extradata size %d\n",
avctx->extradata_size);
@@ -526,9 +614,23 @@ av_cold int ff_mss12_decode_init(AVCodecContext *avctx, int version)
return AVERROR_INVALIDDATA;
}
+ avctx->coded_width = AV_RB32(avctx->extradata + 20);
+ avctx->coded_height = AV_RB32(avctx->extradata + 24);
+ if (avctx->coded_width > 4096 || avctx->coded_height > 4096) {
+ av_log(avctx, AV_LOG_ERROR, "Frame dimensions %dx%d too large",
+ avctx->coded_width, avctx->coded_height);
+ return AVERROR_INVALIDDATA;
+ }
+
av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d\n",
AV_RB32(avctx->extradata + 4), AV_RB32(avctx->extradata + 8));
- c->free_colours = AV_RB32(avctx->extradata + 48);
+ if (version != AV_RB32(avctx->extradata + 4) > 1) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Header version doesn't match codec tag\n");
+ return -1;
+ }
+
+ c->free_colours = AV_RB32(avctx->extradata + 48);
if ((unsigned)c->free_colours > 256) {
av_log(avctx, AV_LOG_ERROR,
"Incorrect number of changeable palette entries: %d\n",
@@ -536,8 +638,6 @@ av_cold int ff_mss12_decode_init(AVCodecContext *avctx, int version)
return AVERROR_INVALIDDATA;
}
av_log(avctx, AV_LOG_DEBUG, "%d free colour(s)\n", c->free_colours);
- avctx->coded_width = AV_RB32(avctx->extradata + 20);
- avctx->coded_height = AV_RB32(avctx->extradata + 24);
av_log(avctx, AV_LOG_DEBUG, "Display dimensions %dx%d\n",
AV_RB32(avctx->extradata + 12), AV_RB32(avctx->extradata + 16));
@@ -554,27 +654,49 @@ av_cold int ff_mss12_decode_init(AVCodecContext *avctx, int version)
av_log(avctx, AV_LOG_DEBUG, "Max. seek time %g ms\n",
av_int2float(AV_RB32(avctx->extradata + 44)));
- for (i = 0; i < 256; i++)
- c->pal[i] = 0xFF << 24 | AV_RB24(avctx->extradata + 52 + i * 3);
+ if (version) {
+ if (avctx->extradata_size < 60 + 256 * 3) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Insufficient extradata size %d for v2\n",
+ avctx->extradata_size);
+ return AVERROR_INVALIDDATA;
+ }
- avctx->pix_fmt = PIX_FMT_PAL8;
+ c->slice_split = AV_RB32(avctx->extradata + 52);
+ av_log(avctx, AV_LOG_DEBUG, "Slice split %d\n", c->slice_split);
- c->mask_linesize = FFALIGN(avctx->width, 16);
- c->mask = av_malloc(c->mask_linesize * avctx->height);
+ c->full_model_syms = AV_RB32(avctx->extradata + 56);
+ if (c->full_model_syms < 2 || c->full_model_syms > 256) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Incorrect number of used colours %d\n",
+ c->full_model_syms);
+ return AVERROR_INVALIDDATA;
+ }
+ av_log(avctx, AV_LOG_DEBUG, "Used colours %d\n",
+ c->full_model_syms);
+ } else {
+ c->slice_split = 0;
+ c->full_model_syms = 256;
+ }
+
+ for (i = 0; i < 256; i++)
+ c->pal[i] = 0xFF << 24 | AV_RB24(avctx->extradata + 52 +
+ (version ? 8 : 0) + i * 3);
+
+ c->mask_stride = FFALIGN(avctx->width, 16);
+ c->mask = av_malloc(c->mask_stride * avctx->height);
if (!c->mask) {
av_log(avctx, AV_LOG_ERROR, "Cannot allocate mask plane\n");
return AVERROR(ENOMEM);
}
- codec_init(c);
+ codec_init(c, version);
return 0;
}
-av_cold int ff_mss12_decode_end(AVCodecContext *avctx)
+av_cold int ff_mss12_decode_end(MSS12Context *c)
{
- MSS12Context * const c = avctx->priv_data;
-
av_freep(&c->mask);
return 0;