path: root/src/lib/evas/common/evas_map_image_loop.c
blob: 7cbb494c126986b00761cf5ac4d6643900fb6277
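
/*
 * Inner scan-span loop for Evas map (textured mesh) rendering.
 * No function is defined here: the file is a pair of bare brace-delimited
 * loop bodies intended to be textually #include'd into an outer routine,
 * once per combination of the configuration macros:
 *
 *   SMOOTH           - bilinear (2x2-tap) sampling; otherwise nearest
 *   COLBLACK         - span is fully dark: write opaque black (0xff000000)
 *   COLMUL           - multiply texels by a color interpolated across the span
 *   COLSAME          - both span end colors are identical, so only c1 is used
 *   SCALE_USING_MMX  - MMX implementation
 *   SCALE_USING_NEON - ARM NEON implementation
 *
 * Variables such as d, s, sp, sw, swp, shp, u, v, ud, vd, ww, w, line,
 * c1, c2, cv, cd, cc, sa and anti_alias are declared by the includer.
 */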
#ifdef SMOOTH
{
# ifdef SCALE_USING_MMX
#  ifdef COLMUL
#   ifdef COLSAME
   MOV_P2R(c1, mm7, mm0); // col
#   endif //COLSAME
#  endif //COLMUL
# endif //SCALE_USING_MMX

# ifdef SCALE_USING_NEON
#  ifndef COLBLACK
   uint16x4_t temp_16x4;
   uint16x4_t rv_16x4;
   uint16x4_t val1_16x4;
   uint16x4_t val3_16x4;
   uint16x8_t ru_16x8;
   uint16x8_t val1_val3_16x8;
   uint16x8_t val2_val4_16x8;
   uint16x8_t x255_16x8;
   uint32x2_t res_32x2;
   uint32x2_t val1_val3_32x2;
   uint32x2_t val2_val4_32x2;
   uint8x8_t val1_val3_8x8;
   uint8x8_t val2_val4_8x8;
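   /* Note: temp_16x4 is deliberately left uninitialized; it only pads the
    * unused high half of vcombine_u16() results, and the final
    * vst1_lane_u32(..., 0) store keeps just the low 32 bits. The same holds
    * for any lane of the *_32x2 staging registers not explicitly set below. */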

   x255_16x8 = vdupq_n_u16(0xff);
#   ifdef COLMUL
   uint16x4_t x255_16x4;
   x255_16x4 = vget_low_u16(x255_16x8);
   uint16x4_t c1_16x4;
#    ifdef COLSAME
   uint16x4_t c1_val3_16x4;
   uint16x8_t c1_16x8;
   uint16x8_t c1_val3_16x8;
   uint32x2_t c1_32x2;
   uint8x8_t c1_8x8;
   uint8x8_t c1_val3_8x8;

   c1_32x2 = vset_lane_u32(c1, c1_32x2, 0);
   c1_8x8 = vreinterpret_u8_u32(c1_32x2);
   c1_16x8 = vmovl_u8(c1_8x8);
   c1_16x4 = vget_low_u16(c1_16x8);
#    else //COLSAME
   uint16x4_t c2_16x4;
   uint16x4_t c2_local_16x4;
   uint16x4_t cv_16x4;
   uint16x8_t c1_c2_16x8;
   uint16x8_t c1_val1_16x8;
   uint16x8_t c2_val3_16x8;
   uint16x8_t cv_rv_16x8;
   uint32x2_t c1_c2_32x2;
   uint8x8_t c1_c2_8x8;
   uint8x8_t val3_8x8;
   uint16x8_t val3_16x8;

   c1_c2_32x2 = vset_lane_u32(c1, c1_c2_32x2, 0);
   c1_c2_32x2 = vset_lane_u32(c2, c1_c2_32x2, 1);
   c1_c2_8x8 = vreinterpret_u8_u32(c1_c2_32x2);
   c1_c2_16x8 = vmovl_u8(c1_c2_8x8);
   c1_16x4 = vget_low_u16(c1_c2_16x8);
   c2_16x4 = vget_high_u16(c1_c2_16x8);
#    endif //COLSAME
#   else //COLMUL
   uint8x8_t val3_8x8;
   uint16x8_t val3_16x8;
#   endif //COLMUL
#  endif //COLBLACK
# endif //SCALE_USING_NEON

   while (ww > 0)
     {
# ifdef COLBLACK
        *d = 0xff000000; // col
# else  //COLBLACK
        FPc uu1, vv1, uu2, vv2;
        FPc rv, ru;
        DATA32 val1, val2, val3, val4;

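        /* u and v are fixed point with FP + FPI fractional bits; swp/shp
         * are the source width/height in the same scale. Clamp the current
         * and the next texel coordinate, fetch the 2x2 neighborhood, and
         * take the top 8 fraction bits as the 8-bit horizontal (ru) and
         * vertical (rv) blend weights. */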
        uu1 = u;
        if (uu1 < 0) uu1 = 0;
        else if (uu1 >= swp) uu1 = swp - 1;

        vv1 = v;
        if (vv1 < 0) vv1 = 0;
        else if (vv1 >= shp) vv1 = shp - 1;

        uu2 = uu1 + FPFPI1;      // next u point
        if (uu2 >= swp) uu2 = swp - 1;

        vv2 = vv1 + FPFPI1;      // next v point
        if (vv2 >= shp) vv2 = shp - 1;

        ru = (u >> (FP + FPI - 8)) & 0xff;
        rv = (v >> (FP + FPI - 8)) & 0xff;

        s = sp + ((vv1 >> (FP + FPI)) * sw) + (uu1 >> (FP + FPI));
        val1 = *s;             // current pixel

        s = sp + ((vv1 >> (FP + FPI)) * sw) + (uu2 >> (FP + FPI));
        val2 = *s;             // right pixel

        s = sp + ((vv2 >> (FP + FPI)) * sw) + (uu1 >> (FP + FPI));
        val3 = *s;             // bottom pixel

        s = sp + ((vv2 >> (FP + FPI)) * sw) + (uu2 >> (FP + FPI));
        val4 = *s;             // right bottom pixel

#  ifdef SCALE_USING_MMX
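        /* MMX path: mm1 holds the horizontally blended top row, mm2 the
         * bottom row. The (val1 | val2) / (val3 | val4) tests skip the
         * horizontal lerp when both taps are fully transparent zeros, in
         * which case the register already holds the correct result. */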
        MOV_A2R(rv, mm4);
        MOV_A2R(ru, mm6);
        MOV_P2R(val1, mm1, mm0);
        if (val1 | val2)
          {
             MOV_P2R(val2, mm2, mm0);
             INTERP_256_R2R(mm6, mm2, mm1, mm5);
          }
        MOV_P2R(val3, mm2, mm0);
        if (val3 | val4)
          {
             MOV_P2R(val4, mm3, mm0);
             INTERP_256_R2R(mm6, mm3, mm2, mm5);
          }
        INTERP_256_R2R(mm4, mm2, mm1, mm5);
#   ifdef COLMUL
#    ifdef COLSAME
//        MOV_P2R(c1, mm7, mm0); // col - done once before the loop
        MUL4_SYM_R2R(mm7, mm1, mm5); // col
#    else //COLSAME
        cc = cv >> 16; // col
        cv += cd; // col
        MOV_A2R(cc, mm2); // col
        MOV_P2R(c1, mm3, mm0); // col
        MOV_P2R(c2, mm4, mm0); // col
        INTERP_256_R2R(mm2, mm4, mm3, mm5); // col
        MUL4_SYM_R2R(mm3, mm1, mm5); // col
#    endif //COLSAME
#   endif //COLMUL
        MOV_R2P(mm1, *d, mm0);
#  elif defined SCALE_USING_NEON
        if (val1 | val2 | val3 | val4)
          {
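             /* Per 8-bit channel this block computes, vectorized, the same
              * bilinear blend as the C fallback below:
              *   top    = val1 + (((val2 - val1) * ru) >> 8)
              *   bottom = val3 + (((val4 - val3) * ru) >> 8)
              *   out    = top  + (((bottom - top) * rv) >> 8)
              * val1/val3 and val2/val4 are packed into single q registers
              * so one subtract/multiply/shift/add pass does both rows. */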
             rv_16x4 = vdup_n_u16(rv);
             ru_16x8 = vdupq_n_u16(ru);

             val1_val3_32x2 = vset_lane_u32(val1, val1_val3_32x2, 0);
             val1_val3_32x2 = vset_lane_u32(val3, val1_val3_32x2, 1);
             val2_val4_32x2 = vset_lane_u32(val2, val2_val4_32x2, 0);
             val2_val4_32x2 = vset_lane_u32(val4, val2_val4_32x2, 1);

             val1_val3_8x8 = vreinterpret_u8_u32(val1_val3_32x2);
             val2_val4_8x8 = vreinterpret_u8_u32(val2_val4_32x2);

             val2_val4_16x8 = vmovl_u8(val2_val4_8x8);
             val1_val3_16x8 = vmovl_u8(val1_val3_8x8);

             val2_val4_16x8 = vsubq_u16(val2_val4_16x8, val1_val3_16x8);
             val2_val4_16x8 = vmulq_u16(val2_val4_16x8, ru_16x8);
             val2_val4_16x8 = vshrq_n_u16(val2_val4_16x8, 8);
             val2_val4_16x8 = vaddq_u16(val2_val4_16x8, val1_val3_16x8);
             val2_val4_16x8 = vandq_u16(val2_val4_16x8, x255_16x8);

             val1_16x4 = vget_low_u16(val2_val4_16x8);
             val3_16x4 = vget_high_u16(val2_val4_16x8);
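             /* Low half now holds the blended top row, high half the
              * blended bottom row. */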
#   ifdef COLMUL
#    ifdef COLSAME

             val3_16x4 = vsub_u16(val3_16x4, val1_16x4);
             val3_16x4 = vmul_u16(val3_16x4, rv_16x4);
             val3_16x4 = vshr_n_u16(val3_16x4, 8);
             val3_16x4 = vadd_u16(val3_16x4, val1_16x4);
             val3_16x4 = vand_u16(val3_16x4, x255_16x4);

             c1_val3_16x4 = vmul_u16(c1_16x4, val3_16x4);
             c1_val3_16x4 = vadd_u16(c1_val3_16x4, x255_16x4);

             c1_val3_16x8 = vcombine_u16(c1_val3_16x4, temp_16x4);

             c1_val3_8x8 = vshrn_n_u16(c1_val3_16x8, 8);
             res_32x2 = vreinterpret_u32_u8(c1_val3_8x8);
#    else //COLSAME
             c1_val1_16x8 = vcombine_u16(c1_16x4, val1_16x4);
             c2_val3_16x8 = vcombine_u16(c2_16x4, val3_16x4);

             cv_16x4 = vdup_n_u16(cv>>16);
             cv += cd;
             cv_rv_16x8 = vcombine_u16(cv_16x4, rv_16x4);
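             /* Pack the color lerp (c1 -> c2 by cv) and the vertical pixel
              * lerp (val1 -> val3 by rv) side by side, so a single
              * subtract/multiply/shift/add pass computes both at once. */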

             c2_val3_16x8 = vsubq_u16(c2_val3_16x8, c1_val1_16x8);
             c2_val3_16x8 = vmulq_u16(c2_val3_16x8, cv_rv_16x8);
             c2_val3_16x8 = vshrq_n_u16(c2_val3_16x8, 8);
             c2_val3_16x8 = vaddq_u16(c2_val3_16x8, c1_val1_16x8);
             c2_val3_16x8 = vandq_u16(c2_val3_16x8, x255_16x8);

             c2_local_16x4 = vget_low_u16(c2_val3_16x8);
             val3_16x4 = vget_high_u16(c2_val3_16x8);

             val3_16x4 = vmul_u16(c2_local_16x4, val3_16x4);
             val3_16x4 = vadd_u16(val3_16x4, x255_16x4);

             val3_16x8 = vcombine_u16(val3_16x4, temp_16x4);

             val3_8x8 = vshrn_n_u16(val3_16x8, 8);
             res_32x2 = vreinterpret_u32_u8(val3_8x8);
#    endif //COLSAME
#   else //COLMUL
             val3_16x4 = vsub_u16(val3_16x4, val1_16x4);
             val3_16x4 = vmul_u16(val3_16x4, rv_16x4);
             val3_16x4 = vshr_n_u16(val3_16x4, 8);
             val3_16x4 = vadd_u16(val3_16x4, val1_16x4);

             val3_16x8 = vcombine_u16(val3_16x4, temp_16x4);

             val3_8x8 = vmovn_u16(val3_16x8);
             res_32x2 = vreinterpret_u32_u8(val3_8x8);
#   endif //COLMUL
             vst1_lane_u32(d, res_32x2, 0);
          }
        else
          *d = val1;
#  else // plain C (neither SCALE_USING_MMX nor SCALE_USING_NEON)
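        /* Portable C path: the same two horizontal lerps and one vertical
         * lerp via INTERP_256, with MUL4_SYM applying the optional span
         * color. */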
        val1 = INTERP_256(ru, val2, val1);
        val3 = INTERP_256(ru, val4, val3);
        val1 = INTERP_256(rv, val3, val1); // col
#   ifdef COLMUL
#    ifdef COLSAME
        *d = MUL4_SYM(c1, val1);
#    else //COLSAME
        val2 = INTERP_256((cv >> 16), c2, c1); // col
        *d  = MUL4_SYM(val2, val1); // col
        cv += cd; // col
#    endif //COLSAME
#   else //COLMUL
        *d = val1;
#   endif //COLMUL
#  endif //SCALE_USING_MMX
        u += ud;
        v += vd;
# endif //COLBLACK
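        /* Optionally apply the anti-alias coverage the caller computed for
         * this position in the span. */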
        if (anti_alias) *d = _aa_coverage_apply(line, ww, w, *d, sa);
        d++;
        ww--;
     }
}
#else //SMOOTH
{
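   /* Non-SMOOTH variant: nearest-neighbor sampling; each destination pixel
    * takes a single source texel, optionally multiplied by the interpolated
    * span color. */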
# ifdef SCALE_USING_NEON
#  ifndef COLBLACK
#   ifdef COLMUL
   uint16x4_t x255_16x4;
   uint16x4_t temp_16x4;
   uint16x8_t cval_16x8;
   uint32x2_t res_32x2;
   uint8x8_t cval_8x8;
   uint16x4_t c1_16x4;
   uint16x4_t cval_16x4;
   uint16x4_t val1_16x4;
   uint32x2_t val1_32x2;
   uint8x8_t val1_8x8;

   x255_16x4 = vdup_n_u16(0xff);
#    ifdef COLSAME
   uint16x8_t c1_16x8;
   uint16x8_t val1_16x8;
   uint32x2_t c1_32x2;
   uint8x8_t c1_8x8;

   c1_32x2 = vset_lane_u32(c1, c1_32x2, 0);

   c1_8x8 = vreinterpret_u8_u32(c1_32x2);
   c1_16x8 = vmovl_u8(c1_8x8);

   c1_16x4 = vget_low_u16(c1_16x8);
#    else //COLSAME
   uint16x4_t c2_16x4;
   uint16x4_t c2_c1_16x4;
   uint16x4_t c2_c1_local_16x4;
   uint16x4_t cv_16x4;
   uint16x8_t c1_c2_16x8;
   uint16x8_t val1_16x8;
   uint32x2_t c1_c2_32x2;
   uint8x8_t c1_c2_8x8;

   c1_c2_32x2 = vset_lane_u32(c1, c1_c2_32x2, 0);
   c1_c2_32x2 = vset_lane_u32(c2, c1_c2_32x2, 1);

   c1_c2_8x8 = vreinterpret_u8_u32(c1_c2_32x2);
   c1_c2_16x8 = vmovl_u8(c1_c2_8x8);

   c1_16x4 = vget_low_u16(c1_c2_16x8);
   c2_16x4 = vget_high_u16(c1_c2_16x8);

   c2_c1_16x4 = vsub_u16(c2_16x4, c1_16x4);
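   /* c1 and c2 are constant across the span, so their difference is hoisted
    * out of the loop; each iteration only scales it by the stepped cv weight
    * and adds c1 back. */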
#    endif //COLSAME
#   endif //COLMUL
#  endif //COLBLACK
# endif //SCALE_USING_NEON

   while (ww > 0)
     {
# ifndef SCALE_USING_NEON
#  ifdef COLMUL
#   ifndef COLBLACK
        DATA32 val1;
#    ifndef COLSAME
        DATA32 cval; // col
#    endif //COLSAME
#   endif //COLBLACK
#  endif //COLMUL
# endif //SCALE_USING_NEON

# ifdef COLBLACK
        *d = 0xff000000; // col
# else //COLBLACK
        s = sp + ((v >> (FP + FPI)) * sw) + (u >> (FP + FPI));
#  ifdef COLMUL
#   ifdef SCALE_USING_NEON
#    ifdef COLSAME
        val1_32x2 = vset_lane_u32(*s, val1_32x2, 0);
        val1_8x8 = vreinterpret_u8_u32(val1_32x2);
        val1_16x8 = vmovl_u8(val1_8x8);
        val1_16x4 = vget_low_u16(val1_16x8);
        cval_16x4 = c1_16x4;
#    else //COLSAME
        cv_16x4 = vdup_n_u16(cv>>16);
        cv += cd; // col

        c2_c1_local_16x4 = vmul_u16(c2_c1_16x4, cv_16x4);
        c2_c1_local_16x4 = vshr_n_u16(c2_c1_local_16x4, 8);
        c2_c1_local_16x4 = vadd_u16(c2_c1_local_16x4, c1_16x4);
        cval_16x4 = vand_u16(c2_c1_local_16x4, x255_16x4);
        val1_32x2 = vset_lane_u32(*s, val1_32x2, 0);
        val1_8x8 = vreinterpret_u8_u32(val1_32x2);
        val1_16x8 = vmovl_u8(val1_8x8);
        val1_16x4 = vget_low_u16(val1_16x8);
#    endif //COLSAME
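        /* Per channel, (c * v + 255) >> 8: the NEON equivalent of the
         * MUL4_SYM color multiply used by the C path. */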
        cval_16x4 = vmul_u16(cval_16x4, val1_16x4);
        cval_16x4 = vadd_u16(cval_16x4, x255_16x4);

        cval_16x8 = vcombine_u16(cval_16x4, temp_16x4);

        cval_8x8 = vshrn_n_u16(cval_16x8, 8);
        res_32x2 = vreinterpret_u32_u8(cval_8x8);

        vst1_lane_u32(d, res_32x2, 0);
#   else //SCALE_USING_NEON
        val1 = *s; // col
#    ifdef COLSAME
        *d = MUL4_SYM(c1, val1);
#    else //COLSAME
        cval = INTERP_256((cv >> 16), c2, c1); // col
        *d = MUL4_SYM(cval, val1);
        cv += cd; // col
#    endif //COLSAME
#   endif //SCALE_USING_NEON
#  else //COLMUL
        *d = *s;
#  endif //COLMUL
        u += ud;
        v += vd;
# endif //COLBLACK
        if (anti_alias) *d = _aa_coverage_apply(line, ww, w, *d, sa);
        d++;
        ww--;
     }
}
#endif //SMOOTH