#if defined(OJPH_ARCH_I386) || defined(OJPH_ARCH_X86_64)

#include <immintrin.h>  // AVX2 intrinsics

// The fixed-width types (si32, si64, ui8, ui16, ui32), line_buf, and the
// ojph_unused() macro are provided by the OpenJPH headers this file is
// built with.
// Swaps the two bytes of a 16-bit sample; used by the scalar tail of the
// big-endian converter below.
static ui16 be2le(const ui16 v)
{
  return (ui16)((v << 8) | (v >> 8));
}
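// Illustrative note (not part of the library): the swap is its own inverse,
// so the same helper converts in either direction, e.g.
//   be2le((ui16)0x1234) == (ui16)0x3412
//   be2le(be2le(x)) == x   for any ui16 x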
void avx2_cvrt_32b1c_to_8ub1c(const line_buf *ln0, const line_buf *ln1,
                              const line_buf *ln2, void *dp,
                              ui32 bit_depth, ui32 count)
{
  ojph_unused(ln1);
  ojph_unused(ln2);

  __m256i max_val_vec = _mm256_set1_epi32((1 << bit_depth) - 1);
  __m256i zero = _mm256_setzero_si256();
  __m256i mask = _mm256_set_epi64x(0x0F0B07030E0A0602, 0x0D0905010C080400,
                                   0x0F0B07030E0A0602, 0x0D0905010C080400);
  const si32 *sp = ln0->i32;
  ui8 *p = (ui8 *)dp;

  // Process 32 samples per iteration: clamp four registers of eight 32-bit
  // samples each to [0, 2^bit_depth - 1], pack them into one register of
  // 32 bytes, and restore source sample order with a byte shuffle.
  for ( ; count >= 32; count -= 32, sp += 32, p += 32)
  {
    __m256i a, t, u, v0, v1;
    a = _mm256_load_si256((__m256i*)sp);
    a = _mm256_max_epi32(a, zero);
    t = _mm256_min_epi32(a, max_val_vec);

    a = _mm256_load_si256((__m256i*)sp + 1);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 16);
    t = _mm256_or_si256(t, a);    // two 16-bit samples per 32-bit element

    a = _mm256_load_si256((__m256i*)sp + 2);
    a = _mm256_max_epi32(a, zero);
    u = _mm256_min_epi32(a, max_val_vec);

    a = _mm256_load_si256((__m256i*)sp + 3);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 16);
    u = _mm256_or_si256(u, a);

    v0 = _mm256_permute2x128_si256(t, u, 0x20);
    v1 = _mm256_permute2x128_si256(t, u, 0x31);
    v1 = _mm256_slli_epi32(v1, 8);
    v0 = _mm256_or_si256(v0, v1); // four sample bytes per 32-bit element

    v0 = _mm256_shuffle_epi8(v0, mask);  // back to source sample order
    _mm256_storeu_si256((__m256i*)p, v0);
  }

  // Scalar tail for the remaining (count % 32) samples.
  int max_val = (1 << bit_depth) - 1;
  for ( ; count > 0; --count)
  {
    int val = *sp++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui8)val;
  }
}
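// A minimal scalar sketch (hypothetical helper, not part of OpenJPH) of what
// one vector iteration above computes: the permute/shuffle pair only restores
// source order after the clamped registers are interleaved, so the net effect
// is a clamp plus a narrowing store of 32 samples.
static inline void cvrt_32b1c_to_8ub1c_ref(const si32 *sp, ui8 *p,
                                           ui32 bit_depth)
{
  si32 max_val = (si32)((1 << bit_depth) - 1);
  for (ui32 i = 0; i < 32; ++i) {          // 32 samples per vector iteration
    si32 val = sp[i];
    val = val < 0 ? 0 : val;               // as _mm256_max_epi32(a, zero)
    val = val > max_val ? max_val : val;   // as _mm256_min_epi32(a, max_val_vec)
    p[i] = (ui8)val;
  }
}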
void avx2_cvrt_32b3c_to_8ub3c(const line_buf *ln0, const line_buf *ln1,
                              const line_buf *ln2, void *dp,
                              ui32 bit_depth, ui32 count)
{
  int max_val = (1 << bit_depth) - 1;
  __m256i max_val_vec = _mm256_set1_epi32(max_val);
  __m256i zero = _mm256_setzero_si256();
  __m256i m0 = _mm256_set_epi64x((si64)0xFFFFFFFF0E0D0C0A,
                                 (si64)0x0908060504020100,
                                 (si64)0xFFFFFFFF0E0D0C0A,
                                 (si64)0x0908060504020100);
  const __m256i* sp0 = (__m256i*)ln0->i32;
  const __m256i* sp1 = (__m256i*)ln1->i32;
  const __m256i* sp2 = (__m256i*)ln2->i32;
  ui8 *p = (ui8 *)dp;

  // Process 32 pixels (96 output bytes) per iteration.  Each of t, u, v,
  // and w packs 8 pixels: one clamped component from each line buffer is
  // OR'ed into the low three bytes of every 32-bit element, then the m0
  // shuffle compacts them into 12 useful bytes per 128-bit lane.
  for ( ; count >= 32; count -= 32, sp0 += 4, sp1 += 4, sp2 += 4, p += 96)
  {
    __m256i a, t, u, v, w;

    a = _mm256_load_si256(sp0);
    a = _mm256_max_epi32(a, zero);
    t = _mm256_min_epi32(a, max_val_vec);

    a = _mm256_load_si256(sp1);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 8);
    t = _mm256_or_si256(t, a);

    a = _mm256_load_si256(sp2);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 16);
    t = _mm256_or_si256(t, a);
    t = _mm256_shuffle_epi8(t, m0);    // pixels 0..7

    a = _mm256_load_si256(sp0 + 1);
    a = _mm256_max_epi32(a, zero);
    u = _mm256_min_epi32(a, max_val_vec);

    a = _mm256_load_si256(sp1 + 1);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 8);
    u = _mm256_or_si256(u, a);

    a = _mm256_load_si256(sp2 + 1);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 16);
    u = _mm256_or_si256(u, a);
    u = _mm256_shuffle_epi8(u, m0);    // pixels 8..15

    a = _mm256_load_si256(sp0 + 2);
    a = _mm256_max_epi32(a, zero);
    v = _mm256_min_epi32(a, max_val_vec);

    a = _mm256_load_si256(sp1 + 2);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 8);
    v = _mm256_or_si256(v, a);

    a = _mm256_load_si256(sp2 + 2);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 16);
    v = _mm256_or_si256(v, a);
    v = _mm256_shuffle_epi8(v, m0);    // pixels 16..23

    a = _mm256_load_si256(sp0 + 3);
    a = _mm256_max_epi32(a, zero);
    w = _mm256_min_epi32(a, max_val_vec);

    a = _mm256_load_si256(sp1 + 3);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 8);
    w = _mm256_or_si256(w, a);

    a = _mm256_load_si256(sp2 + 3);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 16);
    w = _mm256_or_si256(w, a);
    w = _mm256_shuffle_epi8(w, m0);    // pixels 24..31

    // Each 128-bit lane carries 12 valid bytes, so lanes are stored at
    // 12-byte strides; every 16-byte store overwrites the 4 junk bytes of
    // its predecessor.  The final lane is stored piecewise to avoid
    // writing past the 96 output bytes.
    _mm_storeu_si128((__m128i*)(p     ), _mm256_castsi256_si128(t));
    _mm_storeu_si128((__m128i*)(p + 12), _mm256_extracti128_si256(t, 1));
    _mm_storeu_si128((__m128i*)(p + 24), _mm256_castsi256_si128(u));
    _mm_storeu_si128((__m128i*)(p + 36), _mm256_extracti128_si256(u, 1));
    _mm_storeu_si128((__m128i*)(p + 48), _mm256_castsi256_si128(v));
    _mm_storeu_si128((__m128i*)(p + 60), _mm256_extracti128_si256(v, 1));
    _mm_storeu_si128((__m128i*)(p + 72), _mm256_castsi256_si128(w));
#ifdef OJPH_ARCH_X86_64
    *((si64*)(p + 84)) = _mm256_extract_epi64(w, 2);
#elif (defined OJPH_ARCH_I386)
    *((si32*)(p + 84)) = _mm256_extract_epi32(w, 4);
    *((si32*)(p + 88)) = _mm256_extract_epi32(w, 5);
#else
#error unsupported architecture
#endif
    *((si32*)(p + 92)) = _mm256_extract_epi32(w, 6);
  }

  // Scalar tail for the remaining pixels.
  const si32 *s0 = (const si32 *)sp0;
  const si32 *s1 = (const si32 *)sp1;
  const si32 *s2 = (const si32 *)sp2;
  for ( ; count > 0; --count)
  {
    int val;
    val = *s0++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui8)val;
    val = *s1++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui8)val;
    val = *s2++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui8)val;
  }
}
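// A minimal scalar sketch (hypothetical helper, not part of OpenJPH) of one
// vector iteration above: three planar inputs are clamped and interleaved
// into 3-byte pixels, 32 pixels (96 bytes) per iteration.
static inline void cvrt_32b3c_to_8ub3c_ref(const si32 *c0p, const si32 *c1p,
                                           const si32 *c2p, ui8 *p,
                                           ui32 bit_depth)
{
  si32 max_val = (si32)((1 << bit_depth) - 1);
  for (ui32 i = 0; i < 32; ++i) {
    si32 c0 = c0p[i], c1 = c1p[i], c2 = c2p[i];
    c0 = c0 < 0 ? 0 : (c0 > max_val ? max_val : c0);
    c1 = c1 < 0 ? 0 : (c1 > max_val ? max_val : c1);
    c2 = c2 < 0 ? 0 : (c2 > max_val ? max_val : c2);
    p[3 * i + 0] = (ui8)c0;   // component from ln0
    p[3 * i + 1] = (ui8)c1;   // component from ln1
    p[3 * i + 2] = (ui8)c2;   // component from ln2
  }
}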
void avx2_cvrt_32b1c_to_16ub1c_le(const line_buf *ln0, const line_buf *ln1,
                                  const line_buf *ln2, void *dp,
                                  ui32 bit_depth, ui32 count)
{
  ojph_unused(ln1);
  ojph_unused(ln2);

  __m256i max_val_vec = _mm256_set1_epi32((1 << bit_depth) - 1);
  __m256i zero = _mm256_setzero_si256();
  __m256i mask = _mm256_set_epi64x(0x0F0E0B0A07060302, 0x0D0C090805040100,
                                   0x0F0E0B0A07060302, 0x0D0C090805040100);
  const si32 *sp = ln0->i32;
  ui16 *p = (ui16 *)dp;

  // Process 16 samples per iteration: clamp two registers of eight 32-bit
  // samples each, pack them into sixteen little-endian 16-bit words, and
  // restore source order with a byte shuffle and a 64-bit permute.
  for ( ; count >= 16; count -= 16, sp += 16, p += 16)
  {
    __m256i a, t;
    a = _mm256_load_si256((__m256i*)sp);
    a = _mm256_max_epi32(a, zero);
    t = _mm256_min_epi32(a, max_val_vec);

    a = _mm256_load_si256((__m256i*)sp + 1);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 16);
    t = _mm256_or_si256(t, a);

    t = _mm256_shuffle_epi8(t, mask);
    t = _mm256_permute4x64_epi64(t, 0xD8);
    _mm256_storeu_si256((__m256i*)p, t);
  }

  // Scalar tail for the remaining (count % 16) samples.
  int max_val = (1 << bit_depth) - 1;
  for ( ; count > 0; --count)
  {
    int val = *sp++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = (ui16)val;
  }
}
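// A minimal scalar sketch (hypothetical helper, not part of OpenJPH) of one
// vector iteration above: after the OR, each 32-bit element holds sample i
// in its low half and sample i+8 in its high half; the shuffle and the 0xD8
// permute only undo that interleaving, so the net effect is a clamp plus a
// narrowing 16-bit store in native byte order.
static inline void cvrt_32b1c_to_16ub1c_le_ref(const si32 *sp, ui16 *p,
                                               ui32 bit_depth)
{
  si32 max_val = (si32)((1 << bit_depth) - 1);
  for (ui32 i = 0; i < 16; ++i) {          // 16 samples per vector iteration
    si32 val = sp[i];
    val = val < 0 ? 0 : (val > max_val ? max_val : val);
    p[i] = (ui16)val;                      // native (little-endian) order
  }
}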
void avx2_cvrt_32b1c_to_16ub1c_be(const line_buf *ln0, const line_buf *ln1,
                                  const line_buf *ln2, void *dp,
                                  ui32 bit_depth, ui32 count)
{
  ojph_unused(ln1);
  ojph_unused(ln2);

  __m256i max_val_vec = _mm256_set1_epi32((1 << bit_depth) - 1);
  __m256i zero = _mm256_setzero_si256();
  __m256i mask = _mm256_set_epi64x(0x0E0F0A0B06070203, 0x0C0D080904050001,
                                   0x0E0F0A0B06070203, 0x0C0D080904050001);
  const si32 *sp = ln0->i32;
  ui16 *p = (ui16 *)dp;

  // Same as the little-endian converter above, except that the shuffle
  // mask also swaps the two bytes of every 16-bit output word.
  for ( ; count >= 16; count -= 16, sp += 16, p += 16)
  {
    __m256i a, t;
    a = _mm256_load_si256((__m256i*)sp);
    a = _mm256_max_epi32(a, zero);
    t = _mm256_min_epi32(a, max_val_vec);

    a = _mm256_load_si256((__m256i*)sp + 1);
    a = _mm256_max_epi32(a, zero);
    a = _mm256_min_epi32(a, max_val_vec);
    a = _mm256_slli_epi32(a, 16);
    t = _mm256_or_si256(t, a);

    t = _mm256_shuffle_epi8(t, mask);
    t = _mm256_permute4x64_epi64(t, 0xD8);
    _mm256_storeu_si256((__m256i*)p, t);
  }

  // Scalar tail for the remaining (count % 16) samples.
  int max_val = (1 << bit_depth) - 1;
  for ( ; count > 0; --count)
  {
    int val = *sp++;
    val = val >= 0 ? val : 0;
    val = val <= max_val ? val : max_val;
    *p++ = be2le((ui16)val);
  }
}
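// The big-endian variant differs from the _le one only in byte order: its
// shuffle mask is the vector counterpart of the scalar be2le() above.  A
// minimal scalar sketch (hypothetical helper, not part of OpenJPH):
static inline void cvrt_32b1c_to_16ub1c_be_ref(const si32 *sp, ui16 *p,
                                               ui32 bit_depth)
{
  si32 max_val = (si32)((1 << bit_depth) - 1);
  for (ui32 i = 0; i < 16; ++i) {
    si32 val = sp[i];
    val = val < 0 ? 0 : (val > max_val ? max_val : val);
    p[i] = be2le((ui16)val);               // store with bytes swapped
  }
}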
#endif // OJPH_ARCH_I386 || OJPH_ARCH_X86_64