FD.io VPP  v19.04.4-rc0-5-ge88582fac
Vector Packet Processing
vector_avx2.h
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef included_vector_avx2_h
17 #define included_vector_avx2_h
18 
19 #include <vppinfra/clib.h>
20 #include <x86intrin.h>
21 
22 /* *INDENT-OFF* */
23 #define foreach_avx2_vec256i \
24  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32) _(i,64,4,epi64)
25 #define foreach_avx2_vec256u \
26  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32) _(u,64,4,epi64)
27 #define foreach_avx2_vec256f \
28  _(f,32,8,ps) _(f,64,4,pd)
29 
30 #define _mm256_set1_epi64 _mm256_set1_epi64x
31 
32 /* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
33  is_all_equal, interleave_lo, interleave_hi */
34 #define _(t, s, c, i) \
35 static_always_inline t##s##x##c \
36 t##s##x##c##_splat (t##s x) \
37 { return (t##s##x##c) _mm256_set1_##i (x); } \
38 \
39 static_always_inline t##s##x##c \
40 t##s##x##c##_load_unaligned (void *p) \
41 { return (t##s##x##c) _mm256_loadu_si256 (p); } \
42 \
43 static_always_inline void \
44 t##s##x##c##_store_unaligned (t##s##x##c v, void *p) \
45 { _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); } \
46 \
47 static_always_inline int \
48 t##s##x##c##_is_all_zero (t##s##x##c x) \
49 { return _mm256_testz_si256 ((__m256i) x, (__m256i) x); } \
50 \
51 static_always_inline int \
52 t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
53 { return t##s##x##c##_is_all_zero (a ^ b); } \
54 \
55 static_always_inline int \
56 t##s##x##c##_is_all_equal (t##s##x##c v, t##s x) \
57 { return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); } \
58 \
59 static_always_inline t##s##x##c \
60 t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b) \
61 { return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
62 \
63 static_always_inline t##s##x##c \
64 t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b) \
65 { return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); } \
66 
67 
68 foreach_avx2_vec256i foreach_avx2_vec256u
69 #undef _
70 /* *INDENT-ON* */
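
/* Usage sketch: the block above generates, for each 256-bit integer type,
   helpers such as u32x8_splat, u32x8_load_unaligned and u32x8_is_all_equal.
   The helper below is hypothetical and assumes a VPP build where this
   header is reached through <vppinfra/vector.h>. */
static_always_inline int
example_u32_block_is_all (u32 *p, u32 value)
{
  /* unaligned 256-bit load of 8 x u32, then compare every lane to 'value' */
  u32x8 v = u32x8_load_unaligned (p);
  return u32x8_is_all_equal (v, value);
}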
71 
72 always_inline u32x8
73 u32x8_permute (u32x8 v, u32x8 idx)
74 {
75  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
76 }
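
/* Usage sketch (hypothetical helper): u32x8_permute takes per-lane indices
   from 'idx' and can move elements across the 128-bit lane boundary. */
static_always_inline u32x8
example_u32x8_reverse (u32x8 v)
{
  u32x8 reverse = { 7, 6, 5, 4, 3, 2, 1, 0 };
  /* result lane i receives v[reverse[i]] */
  return u32x8_permute (v, reverse);
}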
77 
78 /* _extract_lo, _extract_hi, _insert_lo, _insert_hi */
79 /* *INDENT-OFF* */
80 #define _(t1,t2) \
81 always_inline t1 \
82 t2##_extract_lo (t2 v) \
83 { return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); } \
84 \
85 always_inline t1 \
86 t2##_extract_hi (t2 v) \
87 { return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); } \
88 \
89 always_inline t2 \
90 t2##_insert_lo (t2 v1, t1 v2) \
91 { return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); }\
92 \
93 always_inline t2 \
94 t2##_insert_hi (t2 v1, t1 v2) \
95 { return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }\
96 
97 _(u8x16, u8x32)
98 _(u16x8, u16x16)
99 _(u32x4, u32x8)
100 _(u64x2, u64x4)
101 #undef _
102 /* *INDENT-ON* */
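
/* Usage sketch (hypothetical helper): splitting a 256-bit vector into its
   two 128-bit halves with the generated extract helpers. */
static_always_inline void
example_u32x8_split (u32x8 v, u32x4 *lo, u32x4 *hi)
{
  *lo = u32x8_extract_lo (v);   /* elements 0..3 */
  *hi = u32x8_extract_hi (v);   /* elements 4..7 */
}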
103 
104 
105 
106 
107 static_always_inline u32
108 u8x32_msb_mask (u8x32 v)
109 {
110  return _mm256_movemask_epi8 ((__m256i) v);
111 }
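
/* Usage sketch (hypothetical helper): u8x32_msb_mask turns a per-byte
   predicate (0xff/0x00 from a GCC vector-extension compare) into a 32-bit
   bitmap; __builtin_popcount then counts the matching bytes. */
static_always_inline u32
example_count_equal_bytes (u8x32 v, u8 x)
{
  u8x32 eq = (u8x32) (v == u8x32_splat (x));
  return __builtin_popcount (u8x32_msb_mask (eq));
}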
112 
113 /* _extend_to_ */
114 /* *INDENT-OFF* */
115 #define _(f,t,i) \
116 static_always_inline t \
117 f##_extend_to_##t (f x) \
118 { return (t) _mm256_cvt##i ((__m128i) x); }
119 
120 _(u16x8, u32x8, epu16_epi32)
121 _(u16x8, u64x4, epu16_epi64)
122 _(u32x4, u64x4, epu32_epi64)
123 _(u8x16, u16x16, epu8_epi16)
124 _(u8x16, u32x8, epu8_epi32)
125 _(u8x16, u64x4, epu8_epi64)
126 _(i16x8, i32x8, epi16_epi32)
127 _(i16x8, i64x4, epi16_epi64)
128 _(i32x4, i64x4, epi32_epi64)
129 _(i8x16, i16x16, epi8_epi16)
130 _(i8x16, i32x8, epi8_epi32)
131 _(i8x16, i64x4, epi8_epi64)
132 #undef _
133 /* *INDENT-ON* */
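
/* Usage sketch (hypothetical helper): the _extend_to_ conversions widen the
   low elements of a 128-bit vector into a full 256-bit vector, zero-extending
   for the u types and sign-extending for the i types. */
static_always_inline u32x8
example_widen_bytes (u8x16 b)
{
  /* zero-extends bytes 0..7 of 'b' to eight 32-bit lanes */
  return u8x16_extend_to_u32x8 (b);
}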
134 
135 static_always_inline u16x16
136 u16x16_byte_swap (u16x16 v)
137 {
138  u8x32 swap = {
139  1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
140  1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
141  };
142  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
143 }
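
/* Usage sketch (hypothetical helper): a common use of u16x16_byte_swap is
   converting 16 network-order (big-endian) u16 fields to host order on
   little-endian x86. */
static_always_inline u16x16
example_net_to_host_u16x16 (void *p)
{
  return u16x16_byte_swap (u16x16_load_unaligned (p));
}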
144 
145 static_always_inline u32x8
146 u32x8_hadd (u32x8 v1, u32x8 v2)
147 {
148  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
149 }
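
/* Usage sketch (hypothetical helper): _mm256_hadd_epi32 adds adjacent pairs
   within each 128-bit lane, so two hadd rounds plus the extract helpers
   reduce a u32x8 to a scalar sum. */
static_always_inline u32
example_u32x8_sum (u32x8 v)
{
  u32x4 lo, hi;
  v = u32x8_hadd (v, v);
  v = u32x8_hadd (v, v);
  lo = u32x8_extract_lo (v);
  hi = u32x8_extract_hi (v);
  return lo[0] + hi[0];
}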
150 
151 static_always_inline u16x16
152 u16x16_mask_last (u16x16 v, u8 n_last)
153 {
154  const u16x16 masks[17] = {
155  {0},
156  {-1},
157  {-1, -1},
158  {-1, -1, -1},
159  {-1, -1, -1, -1},
160  {-1, -1, -1, -1, -1},
161  {-1, -1, -1, -1, -1, -1},
162  {-1, -1, -1, -1, -1, -1, -1},
163  {-1, -1, -1, -1, -1, -1, -1, -1},
164  {-1, -1, -1, -1, -1, -1, -1, -1, -1},
165  {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
166  {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
167  {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
168  {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
169  {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
170  {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
171  {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
172  };
173 
174  ASSERT (n_last < 17);
175 
176  return v & masks[16 - n_last];
177 }
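
/* Usage sketch (hypothetical helper): u16x16_mask_last zeroes the last
   n_last elements, e.g. to drop the unused tail of a partially filled
   vector. */
static_always_inline u16x16
example_keep_first_13 (u16x16 v)
{
  return u16x16_mask_last (v, 3);   /* elements 13, 14 and 15 become 0 */
}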
178 
179 static_always_inline f32x8
180 f32x8_from_u32x8 (u32x8 v)
181 {
182  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
183 }
184 
185 static_always_inline u32x8
186 u32x8_from_f32x8 (f32x8 v)
187 {
188  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
189 }
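
/* Usage sketch (hypothetical helper): both conversions go through *signed*
   32-bit intrinsics (cvtepi32_ps / cvttps_epi32), so lanes at or above 2^31,
   and values beyond f32 precision (2^24), do not round-trip exactly. */
static_always_inline u32x8
example_f32_roundtrip (u32x8 v)
{
  f32x8 f = f32x8_from_u32x8 (v);
  return u32x8_from_f32x8 (f);   /* truncates toward zero */
}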
190 
191 #define u16x16_blend(v1, v2, mask) \
192  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)
193 
194 static_always_inline u64x4
195 u64x4_gather (void *p0, void *p1, void *p2, void *p3)
196 {
197  u64x4 r = {
198  *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
199  };
200  return r;
201 }
202 
203 static_always_inline u32x8
204 u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
205  void *p6, void *p7)
206 {
207  u32x8 r = {
208  *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
209  *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
210  };
211  return r;
212 }
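
/* Usage sketch: the gather helpers are plain scalar loads through eight
   unrelated pointers.  example_obj_t and the helper below are hypothetical. */
typedef struct { u32 counter; } example_obj_t;

static_always_inline u32x8
example_gather_counters (example_obj_t *o[8])
{
  return u32x8_gather (&o[0]->counter, &o[1]->counter, &o[2]->counter,
                       &o[3]->counter, &o[4]->counter, &o[5]->counter,
                       &o[6]->counter, &o[7]->counter);
}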
213 
214 
215 static_always_inline void
216 u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
217 {
218  *(u64 *) p0 = r[0];
219  *(u64 *) p1 = r[1];
220  *(u64 *) p2 = r[2];
221  *(u64 *) p3 = r[3];
222 }
223 
224 static_always_inline void
225 u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
226  void *p5, void *p6, void *p7)
227 {
228  *(u32 *) p0 = r[0];
229  *(u32 *) p1 = r[1];
230  *(u32 *) p2 = r[2];
231  *(u32 *) p3 = r[3];
232  *(u32 *) p4 = r[4];
233  *(u32 *) p5 = r[5];
234  *(u32 *) p6 = r[6];
235  *(u32 *) p7 = r[7];
236 }
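
/* Usage sketch (hypothetical helper): the matching scatter writes each lane
   back through its own pointer, reusing example_obj_t from the gather
   sketch above. */
static_always_inline void
example_scatter_counters (u32x8 v, example_obj_t *o[8])
{
  u32x8_scatter (v, &o[0]->counter, &o[1]->counter, &o[2]->counter,
                 &o[3]->counter, &o[4]->counter, &o[5]->counter,
                 &o[6]->counter, &o[7]->counter);
}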
237 
238 static_always_inline void
239 u64x4_scatter_one (u64x4 r, int index, void *p)
240 {
241  *(u64 *) p = r[index];
242 }
243 
244 static_always_inline void
245 u32x8_scatter_one (u32x8 r, int index, void *p)
246 {
247  *(u32 *) p = r[index];
248 }
249 
250 static_always_inline u8x32
251 u8x32_is_greater (u8x32 v1, u8x32 v2)
252 {
253  return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
254 }
255 
256 static_always_inline u8x32
257 u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
258 {
259  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
260  (__m256i) mask);
261 }
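
/* Usage sketch (hypothetical helper): combining u8x32_is_greater with
   u8x32_blend gives a per-byte maximum.  Note the underlying
   _mm256_cmpgt_epi8 compares bytes as *signed* values, so this is a signed
   max despite the u8 element type. */
static_always_inline u8x32
example_i8x32_max (u8x32 a, u8x32 b)
{
  /* where b > a the mask byte is 0xff and the blend picks b, otherwise a */
  return u8x32_blend (a, b, u8x32_is_greater (b, a));
}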
262 
263 #define u32x8_permute_lanes(a, b, m) \
264  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
265 #define u64x4_permute_lanes(a, b, m) \
266  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
267 
268 static_always_inline void
269 u32x8_transpose (u32x8 a[8])
270 {
271  u64x4 r[8], x, y;
272 
273  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
274  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
275  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
276  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
277  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
278  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
279  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
280  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);
281 
282  x = u64x4_interleave_lo (r[0], r[2]);
283  y = u64x4_interleave_lo (r[4], r[6]);
284  a[0] = u32x8_permute_lanes (x, y, 0x20);
285  a[4] = u32x8_permute_lanes (x, y, 0x31);
286 
287  x = u64x4_interleave_hi (r[0], r[2]);
288  y = u64x4_interleave_hi (r[4], r[6]);
289  a[1] = u32x8_permute_lanes (x, y, 0x20);
290  a[5] = u32x8_permute_lanes (x, y, 0x31);
291 
292  x = u64x4_interleave_lo (r[1], r[3]);
293  y = u64x4_interleave_lo (r[5], r[7]);
294  a[2] = u32x8_permute_lanes (x, y, 0x20);
295  a[6] = u32x8_permute_lanes (x, y, 0x31);
296 
297  x = u64x4_interleave_hi (r[1], r[3]);
298  y = u64x4_interleave_hi (r[5], r[7]);
299  a[3] = u32x8_permute_lanes (x, y, 0x20);
300  a[7] = u32x8_permute_lanes (x, y, 0x31);
301 }
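
/* Usage sketch (hypothetical helper): transposing an 8x8 u32 matrix stored
   row-major at 'm'; after u32x8_transpose, rows[i] holds what was column i. */
static_always_inline void
example_transpose_8x8 (u32 *m)
{
  u32x8 rows[8];
  int i;
  for (i = 0; i < 8; i++)
    rows[i] = u32x8_load_unaligned (m + 8 * i);
  u32x8_transpose (rows);
  for (i = 0; i < 8; i++)
    u32x8_store_unaligned (rows[i], m + 8 * i);
}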
302 
303 static_always_inline void
304 u64x4_transpose (u64x4 a[8])
305 {
306  u64x4 r[4];
307 
308  r[0] = u64x4_interleave_lo (a[0], a[1]);
309  r[1] = u64x4_interleave_hi (a[0], a[1]);
310  r[2] = u64x4_interleave_lo (a[2], a[3]);
311  r[3] = u64x4_interleave_hi (a[2], a[3]);
312 
313  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
314  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
315  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
316  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
317 }
318 
319 #endif /* included_vector_avx2_h */
320 
321 /*
322  * fd.io coding-style-patch-verification: ON
323  *
324  * Local Variables:
325  * eval: (c-set-style "gnu")
326  * End:
327  */