#if defined(OJPH_ARCH_I386) \
  || defined(OJPH_ARCH_X86_64) \
  || defined(OJPH_ENABLE_WASM_SIMD)

// SSE4.1 intrinsics (_mm_min_epi32, _mm_max_epi32, _mm_shuffle_epi8, ...).
// The ojph types used below (ui8, ui16, ui32, si32, si64, line_buf) are
// assumed to come from the project's headers included earlier in this file;
// for WASM SIMD builds this header is assumed to be provided by Emscripten's
// SSE compatibility layer.
#include <smmintrin.h>
// Swap the two bytes of a 16-bit value (little-endian <-> big-endian).
static ui16 be2le(const ui16 v)
{
  return (ui16)((v << 8) | (v >> 8));
}
// Convert one line of 32-bit samples to unsigned 8-bit samples, single
// component; ln1 and ln2 are unused here.
void sse41_cvrt_32b1c_to_8ub1c(const line_buf *ln0, const line_buf *ln1,
  const line_buf *ln2, void *dp, ui32 bit_depth, ui32 count)
{
  __m128i max_val_vec = _mm_set1_epi32((1 << bit_depth) - 1);
  __m128i zero = _mm_setzero_si128();
  // pshufb control that restores sample order after four clamped vectors
  // have been packed into the byte lanes of one register
  __m128i mask = _mm_set_epi64x(0x0F0B07030E0A0602, 0x0D0905010C080400);
  const si32 *sp = ln0->i32;
  ui8 *p = (ui8 *)dp;

  // 16 samples per iteration: clamp each vector of four samples to
  // [0, 2^bit_depth - 1], move vector j into byte lane j of every dword,
  // then shuffle the bytes back into sample order and store 16 bytes
  for ( ; count >= 16; count -= 16, sp += 16, p += 16)
  {
    __m128i a, t;
    a = _mm_load_si128((__m128i*)sp);
    a = _mm_max_epi32(a, zero);
    t = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp + 1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 8);
    t = _mm_or_si128(t, a);

    a = _mm_load_si128((__m128i*)sp + 2);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    t = _mm_or_si128(t, a);

    a = _mm_load_si128((__m128i*)sp + 3);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 24);
    t = _mm_or_si128(t, a);

    t = _mm_shuffle_epi8(t, mask);
    _mm_storeu_si128((__m128i*)p, t);
  }

  // scalar tail for the remaining (< 16) samples
  int max_val = (1 << bit_depth) - 1;
  for ( ; count > 0; --count)
  {
    int val = *sp++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui8)val;
  }
}
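// Illustration only (not part of the library): the shuffle constant used in
// sse41_cvrt_32b1c_to_8ub1c above follows from the packed byte layout --
// byte 4*i + j of t holds sample 4*j + i, so output byte `out` must read
// source byte 4*(out % 4) + out / 4.  A hedged sketch that rebuilds the same
// control vector at run time; the helper name is made up for this example.
static inline __m128i build_1c_8bit_shuffle_mask()
{
  ui8 ctrl[16];
  for (int out = 0; out < 16; ++out)
    ctrl[out] = (ui8)(4 * (out % 4) + out / 4); // source byte of output `out`
  return _mm_loadu_si128((const __m128i *)ctrl); // equals the constant above
}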
// Convert three lines of 32-bit samples (one per component) to interleaved
// unsigned 8-bit output: c0 c1 c2, c0 c1 c2, ...
void sse41_cvrt_32b3c_to_8ub3c(const line_buf *ln0, const line_buf *ln1,
  const line_buf *ln2, void *dp, ui32 bit_depth, ui32 count)
{
  const si32 *sp0 = ln0->i32;
  const si32 *sp1 = ln1->i32;
  const si32 *sp2 = ln2->i32;
  ui8 *p = (ui8 *)dp;

  __m128i max_val_vec = _mm_set1_epi32((1 << bit_depth) - 1);
  __m128i zero = _mm_setzero_si128();
  // pshufb control that keeps the three useful bytes of each packed dword
  // (12 valid bytes) and zeroes the top four output lanes (0xFF selectors)
  __m128i m0 = _mm_set_epi64x((si64)0xFFFFFFFF0E0D0C0A,
                              (si64)0x0908060504020100);

  // 16 pixels (48 output bytes) per iteration: for each group of four
  // pixels, the clamped samples of the three components are packed into
  // byte lanes 0, 1 and 2 of one dword and shuffled into 12 contiguous
  // bytes; the four 12-byte pieces are then merged into three stores
  for ( ; count >= 16; count -= 16, sp0 += 16, sp1 += 16, sp2 += 16, p += 48)
  {
    __m128i a, t, u, v, w;
    a = _mm_load_si128((__m128i*)sp0);
    a = _mm_max_epi32(a, zero);
    t = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 8);
    t = _mm_or_si128(t, a);

    a = _mm_load_si128((__m128i*)sp2);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    t = _mm_or_si128(t, a);
    t = _mm_shuffle_epi8(t, m0);        // pixels 0..3 -> 12 bytes

    a = _mm_load_si128((__m128i*)sp0 + 1);
    a = _mm_max_epi32(a, zero);
    u = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp1 + 1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 8);
    u = _mm_or_si128(u, a);

    a = _mm_load_si128((__m128i*)sp2 + 1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    u = _mm_or_si128(u, a);
    u = _mm_shuffle_epi8(u, m0);        // pixels 4..7 -> 12 bytes

    a = _mm_load_si128((__m128i*)sp0 + 2);
    a = _mm_max_epi32(a, zero);
    v = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp1 + 2);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 8);
    v = _mm_or_si128(v, a);

    a = _mm_load_si128((__m128i*)sp2 + 2);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    v = _mm_or_si128(v, a);
    v = _mm_shuffle_epi8(v, m0);        // pixels 8..11 -> 12 bytes

    a = _mm_load_si128((__m128i*)sp0 + 3);
    a = _mm_max_epi32(a, zero);
    w = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp1 + 3);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 8);
    w = _mm_or_si128(w, a);

    a = _mm_load_si128((__m128i*)sp2 + 3);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    w = _mm_or_si128(w, a);
    w = _mm_shuffle_epi8(w, m0);        // pixels 12..15 -> 12 bytes

    // stitch the four 12-byte pieces into three full 16-byte vectors
    t = _mm_or_si128(t, _mm_bslli_si128(u, 12));
    u = _mm_or_si128(_mm_bsrli_si128(u, 4), _mm_bslli_si128(v, 8));
    v = _mm_or_si128(_mm_bsrli_si128(v, 8), _mm_bslli_si128(w, 4));

    _mm_storeu_si128((__m128i*)p + 0, t);
    _mm_storeu_si128((__m128i*)p + 1, u);
    _mm_storeu_si128((__m128i*)p + 2, v);
  }

  // scalar tail for the remaining (< 16) pixels
  int max_val = (1 << bit_depth) - 1;
  for ( ; count > 0; --count)
  {
    int val;
    val = *sp0++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui8)val;

    val = *sp1++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui8)val;

    val = *sp2++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui8)val;
  }
}
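// Illustration: each of t, u, v, w in the loop above carries 12 valid bytes
// (4 pixels x 3 components); the stitching step concatenates them so that
//   store 0 = t[0..11] followed by u[0..3]
//   store 1 = u[4..11] followed by v[0..7]
//   store 2 = v[8..11] followed by w[0..11]
// which yields the 48 interleaved bytes c0 c1 c2 of pixels 0..15 in order.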
// Convert one line of 32-bit samples to unsigned 16-bit little-endian
// samples, single component; ln1 and ln2 are unused here.
void sse41_cvrt_32b1c_to_16ub1c_le(const line_buf *ln0, const line_buf *ln1,
  const line_buf *ln2, void *dp, ui32 bit_depth, ui32 count)
{
  __m128i max_val_vec = _mm_set1_epi32((1 << bit_depth) - 1);
  __m128i zero = _mm_setzero_si128();
  // restores sample order after two clamped vectors have been packed into
  // the 16-bit halves of each dword
  __m128i mask = _mm_set_epi64x(0x0F0E0B0A07060302, 0x0D0C090805040100);
  const si32 *sp = ln0->i32;
  ui16 *p = (ui16 *)dp;
  // 8 samples per iteration
  for ( ; count >= 8; count -= 8, sp += 8, p += 8)
  {
    __m128i a, t;
    a = _mm_load_si128((__m128i*)sp);
    a = _mm_max_epi32(a, zero);
    t = _mm_min_epi32(a, max_val_vec);
    a = _mm_load_si128((__m128i*)sp + 1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    t = _mm_or_si128(t, a);
    t = _mm_shuffle_epi8(t, mask);
    _mm_storeu_si128((__m128i*)p, t);
  }

  // scalar tail for the remaining (< 8) samples
  int max_val = (1 << bit_depth) - 1;
  for ( ; count > 0; --count)
  {
    int val = *sp++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui16)val;
  }
}
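// Illustration: in the loop above, dword lane i of t ends up holding sample i
// in its low 16 bits and sample i + 4 in its high 16 bits; the shuffle then
// emits the four low halves first and the four high halves second, i.e.
// samples 0..7 in order as little-endian ui16 values.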
// Convert three lines of 32-bit samples (one per component) to interleaved
// unsigned 16-bit little-endian output: c0 c1 c2, c0 c1 c2, ...
void sse41_cvrt_32b3c_to_16ub3c_le(const line_buf *ln0, const line_buf *ln1,
  const line_buf *ln2, void *dp, ui32 bit_depth, ui32 count)
{
  const si32 *sp0 = ln0->i32;
  const si32 *sp1 = ln1->i32;
  const si32 *sp2 = ln2->i32;
  ui16 *p = (ui16 *)dp;

  __m128i max_val_vec = _mm_set1_epi32((1 << bit_depth) - 1);
  __m128i zero = _mm_setzero_si128();
  // pshufb controls m0..m6 scatter the 16-bit samples of t, u and v into
  // the three output vectors; 0xFF selectors produce zero bytes, so the
  // partial results can simply be ORed together
  __m128i m0 = _mm_set_epi64x((si64)0x0B0A0908FFFF0706,
                              (si64)0x0504FFFF03020100);
  __m128i m1 = _mm_set_epi64x((si64)0xFFFFFFFF0504FFFF,
                              (si64)0xFFFF0100FFFFFFFF);
  __m128i m2 = _mm_set_epi64x((si64)0xFFFFFFFFFFFFFFFF,
                              (si64)0xFFFF0F0E0D0CFFFF);
  __m128i m3 = _mm_set_epi64x((si64)0x0706FFFFFFFF0302,
                              (si64)0x0D0CFFFFFFFF0908);
  __m128i m4 = _mm_set_epi64x((si64)0xFFFF03020100FFFF,
                              (si64)0xFFFFFFFFFFFFFFFF);
  __m128i m5 = _mm_set_epi64x((si64)0xFFFFFFFF0F0EFFFF,
                              (si64)0xFFFF0B0AFFFFFFFF);
  __m128i m6 = _mm_set_epi64x((si64)0x0F0E0D0CFFFF0B0A,
                              (si64)0x0908FFFF07060504);

  // 8 pixels (24 output ui16 values) per iteration: t, u and v each pack
  // two clamped component vectors into the 16-bit halves of their dwords,
  // and the seven shuffles interleave them into three stores
  for ( ; count >= 8; count -= 8, sp0 += 8, sp1 += 8, sp2 += 8, p += 24)
  {
    __m128i a, b, t, u, v;
    a = _mm_load_si128((__m128i*)sp0);
    a = _mm_max_epi32(a, zero);
    t = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    t = _mm_or_si128(t, a);               // t: c0 | c1 of pixels 0..3

    a = _mm_load_si128((__m128i*)sp2);
    a = _mm_max_epi32(a, zero);
    u = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp0 + 1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    u = _mm_or_si128(u, a);               // u: c2 of 0..3 | c0 of 4..7

    a = _mm_load_si128((__m128i*)sp1 + 1);
    a = _mm_max_epi32(a, zero);
    v = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp2 + 1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    v = _mm_or_si128(v, a);               // v: c1 | c2 of pixels 4..7

    a = _mm_shuffle_epi8(t, m0);
    b = _mm_shuffle_epi8(u, m1);
    a = _mm_or_si128(a, b);
    _mm_storeu_si128((__m128i*)p, a);

    a = _mm_shuffle_epi8(t, m2);
    b = _mm_shuffle_epi8(u, m3);
    a = _mm_or_si128(a, b);
    b = _mm_shuffle_epi8(v, m4);
    a = _mm_or_si128(a, b);
    _mm_storeu_si128((__m128i*)p + 1, a);

    a = _mm_shuffle_epi8(u, m5);
    b = _mm_shuffle_epi8(v, m6);
    a = _mm_or_si128(a, b);
    _mm_storeu_si128((__m128i*)p + 2, a);
  }

  // scalar tail for the remaining (< 8) pixels
  int max_val = (1 << bit_depth) - 1;
  for ( ; count > 0; --count)
  {
    int val;
    val = *sp0++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui16)val;

    val = *sp1++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui16)val;

    val = *sp2++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui16)val;
  }
}
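// Illustration: with c0/c1/c2 denoting the three components, the three stores
// in the loop above emit the 24 ui16 values of 8 pixels fully interleaved:
//   store 0: c0_0 c1_0 c2_0 c0_1 c1_1 c2_1 c0_2 c1_2
//   store 1: c2_2 c0_3 c1_3 c2_3 c0_4 c1_4 c2_4 c0_5
//   store 2: c1_5 c2_5 c0_6 c1_6 c2_6 c0_7 c1_7 c2_7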
// Convert one line of 32-bit samples to unsigned 16-bit big-endian samples,
// single component; ln1 and ln2 are unused here.
void sse41_cvrt_32b1c_to_16ub1c_be(const line_buf *ln0, const line_buf *ln1,
  const line_buf *ln2, void *dp, ui32 bit_depth, ui32 count)
{
  __m128i max_val_vec = _mm_set1_epi32((1 << bit_depth) - 1);
  __m128i zero = _mm_setzero_si128();
  // like the little-endian control, but with the two bytes of every 16-bit
  // lane swapped so samples are stored big-endian
  __m128i mask = _mm_set_epi64x(0x0E0F0A0B06070203, 0x0C0D080904050001);
  const si32 *sp = ln0->i32;
  ui16 *p = (ui16 *)dp;
  // 8 samples per iteration
  for ( ; count >= 8; count -= 8, sp += 8, p += 8)
  {
    __m128i a, t;
    a = _mm_load_si128((__m128i*)sp);
    a = _mm_max_epi32(a, zero);
    t = _mm_min_epi32(a, max_val_vec);
    a = _mm_load_si128((__m128i*)sp + 1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    t = _mm_or_si128(t, a);
    t = _mm_shuffle_epi8(t, mask);
    _mm_storeu_si128((__m128i*)p, t);
  }

  // scalar tail: clamp, then store big-endian
  int max_val = (1 << bit_depth) - 1;
  for ( ; count > 0; --count)
  {
    int val = *sp++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = be2le((ui16)val);
  }
}
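// Illustration only (not part of the library): the big-endian path above
// differs from its little-endian counterpart only in the shuffle control,
// which additionally swaps the two bytes of every 16-bit lane -- the same
// effect as applying be2le() to each clamped sample.  A hedged scalar sketch
// of that relationship; the helper name is made up for this example.
static inline ui16 clamp_to_u16_be(si32 val, si32 max_val)
{
  val = val < 0 ? 0 : (val > max_val ? max_val : val);
  return be2le((ui16)val);    // store-ready big-endian sample
}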
// Convert three lines of 32-bit samples (one per component) to interleaved
// unsigned 16-bit big-endian output: c0 c1 c2, c0 c1 c2, ...
void sse41_cvrt_32b3c_to_16ub3c_be(const line_buf *ln0, const line_buf *ln1,
  const line_buf *ln2, void *dp, ui32 bit_depth, ui32 count)
{
  const si32 *sp0 = ln0->i32;
  const si32 *sp1 = ln1->i32;
  const si32 *sp2 = ln2->i32;
  ui16 *p = (ui16 *)dp;

  __m128i max_val_vec = _mm_set1_epi32((1 << bit_depth) - 1);
  __m128i zero = _mm_setzero_si128();
  // same interleaving controls as the little-endian version, but with the
  // two bytes of every 16-bit lane swapped; 0xFF selectors produce zero
  // bytes, so the partial results can simply be ORed together
  __m128i m0 = _mm_set_epi64x((si64)0x0A0B0809FFFF0607,
                              (si64)0x0405FFFF02030001);
  __m128i m1 = _mm_set_epi64x((si64)0xFFFFFFFF0405FFFF,
                              (si64)0xFFFF0001FFFFFFFF);
  __m128i m2 = _mm_set_epi64x((si64)0xFFFFFFFFFFFFFFFF,
                              (si64)0xFFFF0E0F0C0DFFFF);
  __m128i m3 = _mm_set_epi64x((si64)0x0607FFFFFFFF0203,
                              (si64)0x0C0DFFFFFFFF0809);
  __m128i m4 = _mm_set_epi64x((si64)0xFFFF02030001FFFF,
                              (si64)0xFFFFFFFFFFFFFFFF);
  __m128i m5 = _mm_set_epi64x((si64)0xFFFFFFFF0E0FFFFF,
                              (si64)0xFFFF0A0BFFFFFFFF);
  __m128i m6 = _mm_set_epi64x((si64)0x0E0F0C0DFFFF0A0B,
                              (si64)0x0809FFFF06070405);

  // 8 pixels (24 output ui16 values) per iteration
  for ( ; count >= 8; count -= 8, sp0 += 8, sp1 += 8, sp2 += 8, p += 24)
  {
    __m128i a, b, t, u, v;
    a = _mm_load_si128((__m128i*)sp0);
    a = _mm_max_epi32(a, zero);
    t = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    t = _mm_or_si128(t, a);               // t: c0 | c1 of pixels 0..3

    a = _mm_load_si128((__m128i*)sp2);
    a = _mm_max_epi32(a, zero);
    u = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp0 + 1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    u = _mm_or_si128(u, a);               // u: c2 of 0..3 | c0 of 4..7

    a = _mm_load_si128((__m128i*)sp1 + 1);
    a = _mm_max_epi32(a, zero);
    v = _mm_min_epi32(a, max_val_vec);

    a = _mm_load_si128((__m128i*)sp2 + 1);
    a = _mm_max_epi32(a, zero);
    a = _mm_min_epi32(a, max_val_vec);
    a = _mm_slli_epi32(a, 16);
    v = _mm_or_si128(v, a);               // v: c1 | c2 of pixels 4..7

    a = _mm_shuffle_epi8(t, m0);
    b = _mm_shuffle_epi8(u, m1);
    a = _mm_or_si128(a, b);
    _mm_storeu_si128((__m128i*)p, a);

    a = _mm_shuffle_epi8(t, m2);
    b = _mm_shuffle_epi8(u, m3);
    a = _mm_or_si128(a, b);
    b = _mm_shuffle_epi8(v, m4);
    a = _mm_or_si128(a, b);
    _mm_storeu_si128((__m128i*)p + 1, a);

    a = _mm_shuffle_epi8(u, m5);
    b = _mm_shuffle_epi8(v, m6);
    a = _mm_or_si128(a, b);
    _mm_storeu_si128((__m128i*)p + 2, a);
  }

  // scalar tail: clamp, then store each component as big-endian
  int max_val = (1 << bit_depth) - 1;
  for ( ; count > 0; --count)
  {
    int val;
    val = *sp0++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = be2le((ui16)val);

    val = *sp1++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = be2le((ui16)val);

    val = *sp2++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = be2le((ui16)val);
  }
}
#endif // OJPH_ARCH_I386 || OJPH_ARCH_X86_64 || OJPH_ENABLE_WASM_SIMD