path: root/libavcodec/indeo3.c
author     Michael Niedermayer <michaelni@gmx.at>  2013-05-08 11:54:23 +0200
committer  Michael Niedermayer <michaelni@gmx.at>  2013-05-08 11:54:29 +0200
commit     e077ba801644c07027e7fee14018dc0f49d01a51 (patch)
tree       3771e7717452f3fce0b1f550a05714e64e3008af  /libavcodec/indeo3.c
parent     1bad40ef27237dcccf12a106ed870aa303ed4d44 (diff)
parent     a97d8cc16e0da30c9ffefa1ede2a0adf3db5f3f8 (diff)
download   ffmpeg-e077ba801644c07027e7fee14018dc0f49d01a51.tar.gz
Merge commit 'a97d8cc16e0da30c9ffefa1ede2a0adf3db5f3f8'
* commit 'a97d8cc16e0da30c9ffefa1ede2a0adf3db5f3f8':
  indeo3: use unaligned reads on reference blocks.

Merged-by: Michael Niedermayer <michaelni@gmx.at>
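The merged change swaps FFmpeg's aligned read macros (AV_RN16A/AV_RN32A/AV_RN64A, AV_COPY32/AV_COPY64) for their unaligned counterparts (AV_RN16/AV_RN32/AV_RN64, AV_COPY32U/AV_COPY64U) wherever the source is a reference block: reference pointers in Indeo3 motion compensation can land at any byte offset, so assuming natural alignment is unsafe on strict-alignment targets. The destination writes keep their aligned AV_WN*A variants, presumably because dst points into the decoder's own plane buffer, which is allocated aligned. A minimal sketch of the distinction in plain C, not using libavutil/intreadwrite.h (the helper names below are illustrative only):

    #include <stdint.h>
    #include <string.h>

    /* Aligned read: the cast is only valid when p is 4-byte aligned; on a
     * strict-alignment CPU a misaligned pointer can fault or read garbage. */
    static uint32_t read32_aligned(const void *p)
    {
        return *(const uint32_t *)p;
    }

    /* Unaligned read: memcpy makes the access legal for any byte address and
     * lets the compiler pick the cheapest safe load for the target. */
    static uint32_t read32_unaligned(const void *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));
        return v;
    }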
Diffstat (limited to 'libavcodec/indeo3.c')
-rw-r--r--  libavcodec/indeo3.c  40
1 file changed, 20 insertions, 20 deletions
diff --git a/libavcodec/indeo3.c b/libavcodec/indeo3.c
index 0261b52378..5ec4b9027d 100644
--- a/libavcodec/indeo3.c
+++ b/libavcodec/indeo3.c
@@ -283,10 +283,10 @@ static int copy_cell(Indeo3DecodeContext *ctx, Plane *plane, Cell *cell)
/* Average 4/8 pixels at once without rounding using SWAR */
#define AVG_32(dst, src, ref) \
- AV_WN32A(dst, ((AV_RN32A(src) + AV_RN32A(ref)) >> 1) & 0x7F7F7F7FUL)
+ AV_WN32A(dst, ((AV_RN32(src) + AV_RN32(ref)) >> 1) & 0x7F7F7F7FUL)
#define AVG_64(dst, src, ref) \
- AV_WN64A(dst, ((AV_RN64A(src) + AV_RN64A(ref)) >> 1) & 0x7F7F7F7F7F7F7F7FULL)
+ AV_WN64A(dst, ((AV_RN64(src) + AV_RN64(ref)) >> 1) & 0x7F7F7F7F7F7F7F7FULL)
/*
@@ -345,7 +345,7 @@ if (*data_ptr >= last_ptr) \
copy_block4(dst, ref, row_offset, row_offset, 4 << v_zoom)
#define RLE_BLOCK_COPY_8 \
- pix64 = AV_RN64A(ref);\
+ pix64 = AV_RN64(ref);\
if (is_first_row) {/* special prediction case: top line of a cell */\
pix64 = replicate64(pix64);\
fill_64(dst + row_offset, pix64, 7, row_offset);\
@@ -357,7 +357,7 @@ if (*data_ptr >= last_ptr) \
copy_block4(dst, ref, row_offset, row_offset, num_lines << v_zoom)
#define RLE_LINES_COPY_M10 \
- pix64 = AV_RN64A(ref);\
+ pix64 = AV_RN64(ref);\
if (is_top_of_cell) {\
pix64 = replicate64(pix64);\
fill_64(dst + row_offset, pix64, (num_lines << 1) - 1, row_offset);\
@@ -367,12 +367,12 @@ if (*data_ptr >= last_ptr) \
#define APPLY_DELTA_4 \
AV_WN16A(dst + line_offset ,\
- (AV_RN16A(ref ) + delta_tab->deltas[dyad1]) & 0x7F7F);\
+ (AV_RN16(ref ) + delta_tab->deltas[dyad1]) & 0x7F7F);\
AV_WN16A(dst + line_offset + 2,\
- (AV_RN16A(ref + 2) + delta_tab->deltas[dyad2]) & 0x7F7F);\
+ (AV_RN16(ref + 2) + delta_tab->deltas[dyad2]) & 0x7F7F);\
if (mode >= 3) {\
if (is_top_of_cell && !cell->ypos) {\
- AV_COPY32(dst, dst + row_offset);\
+ AV_COPY32U(dst, dst + row_offset);\
} else {\
AVG_32(dst, ref, dst + row_offset);\
}\
@@ -382,20 +382,20 @@ if (*data_ptr >= last_ptr) \
/* apply two 32-bit VQ deltas to next even line */\
if (is_top_of_cell) { \
AV_WN32A(dst + row_offset , \
- (replicate32(AV_RN32A(ref )) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
+ (replicate32(AV_RN32(ref )) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
AV_WN32A(dst + row_offset + 4, \
- (replicate32(AV_RN32A(ref + 4)) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
+ (replicate32(AV_RN32(ref + 4)) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
} else { \
AV_WN32A(dst + row_offset , \
- (AV_RN32A(ref ) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
+ (AV_RN32(ref ) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
AV_WN32A(dst + row_offset + 4, \
- (AV_RN32A(ref + 4) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
+ (AV_RN32(ref + 4) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
} \
/* odd lines are not coded but rather interpolated/replicated */\
/* first line of the cell on the top of image? - replicate */\
/* otherwise - interpolate */\
if (is_top_of_cell && !cell->ypos) {\
- AV_COPY64(dst, dst + row_offset);\
+ AV_COPY64U(dst, dst + row_offset);\
} else \
AVG_64(dst, ref, dst + row_offset);
@@ -403,22 +403,22 @@ if (*data_ptr >= last_ptr) \
#define APPLY_DELTA_1011_INTER \
if (mode == 10) { \
AV_WN32A(dst , \
- (AV_RN32A(dst ) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
+ (AV_RN32(dst ) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
AV_WN32A(dst + 4 , \
- (AV_RN32A(dst + 4 ) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
+ (AV_RN32(dst + 4 ) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
AV_WN32A(dst + row_offset , \
- (AV_RN32A(dst + row_offset ) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
+ (AV_RN32(dst + row_offset ) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
AV_WN32A(dst + row_offset + 4, \
- (AV_RN32A(dst + row_offset + 4) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
+ (AV_RN32(dst + row_offset + 4) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
} else { \
AV_WN16A(dst , \
- (AV_RN16A(dst ) + delta_tab->deltas[dyad1]) & 0x7F7F);\
+ (AV_RN16(dst ) + delta_tab->deltas[dyad1]) & 0x7F7F);\
AV_WN16A(dst + 2 , \
- (AV_RN16A(dst + 2 ) + delta_tab->deltas[dyad2]) & 0x7F7F);\
+ (AV_RN16(dst + 2 ) + delta_tab->deltas[dyad2]) & 0x7F7F);\
AV_WN16A(dst + row_offset , \
- (AV_RN16A(dst + row_offset ) + delta_tab->deltas[dyad1]) & 0x7F7F);\
+ (AV_RN16(dst + row_offset ) + delta_tab->deltas[dyad1]) & 0x7F7F);\
AV_WN16A(dst + row_offset + 2, \
- (AV_RN16A(dst + row_offset + 2) + delta_tab->deltas[dyad2]) & 0x7F7F);\
+ (AV_RN16(dst + row_offset + 2) + delta_tab->deltas[dyad2]) & 0x7F7F);\
}
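For reference, the AVG_32/AVG_64 macros at the top of this diff average packed pixels with a SWAR trick: Indeo3 pixel values stay in the 0..127 range (as the 0x7F masks suggest), so adding two packed bytes never carries into the neighbouring byte, and after the word-wide >> 1 the only cross-byte contamination is the neighbour's low bit landing in bit 7, which the per-byte 0x7F mask clears. A standalone sketch of the 32-bit case (plain C; the function and test values are illustrative, not from the decoder):

    #include <assert.h>
    #include <stdint.h>

    /* SWAR average of four 7-bit pixels packed into one uint32_t, mirroring
     * the arithmetic of AVG_32: bytes <= 0x7F cannot carry when added, and
     * the 0x7F7F7F7F mask drops the bit shifted in from the higher byte. */
    static uint32_t avg4_pixels(uint32_t a, uint32_t b)
    {
        return ((a + b) >> 1) & 0x7F7F7F7FU;
    }

    int main(void)
    {
        /* bytes (LSB first) 0x10,0x20,0x30,0x7F averaged with 0x11,0x21,0x31,0x00 */
        uint32_t r = avg4_pixels(0x7F302010U, 0x00312111U);
        assert(r == 0x3F302010U); /* per-byte truncated averages 0x10,0x20,0x30,0x3F */
        return 0;
    }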