#ifndef included_vector_funcs_h
#define included_vector_funcs_h

#include <vppinfra/clib.h>

static_always_inline u64
clib_mask_compare_u16_x64 (u16 v, u16 *a, u32 n_elts)
{
  u64 mask = 0;
#if defined(CLIB_HAVE_VEC512)
  u16x32 v32 = u16x32_splat (v);
  u16x32u *av = (u16x32u *) a;
  mask = ((u64) u16x32_is_equal_mask (av[0], v32) |
          (u64) u16x32_is_equal_mask (av[1], v32) << 32);
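  /* u16x32_is_equal_mask returns one bit per 16-bit lane, so lane i of
     av[0] maps to mask bit i and lane i of av[1] to bit 32 + i; e.g. if
     only a[1] and a[35] equal v, mask ends up (1ULL << 35) | (1ULL << 1). */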
#elif defined(CLIB_HAVE_VEC256)
  u16x16 v16 = u16x16_splat (v);
  u16x16u *av = (u16x16u *) a;
  i8x32 x;

  x = i8x32_pack (v16 == av[0], v16 == av[1]);
  mask = i8x32_msb_mask ((i8x32) u64x4_permute ((u64x4) x, 0, 2, 1, 3));
  x = i8x32_pack (v16 == av[2], v16 == av[3]);
  mask |= (u64) i8x32_msb_mask ((i8x32) u64x4_permute ((u64x4) x, 0, 2, 1, 3)) << 32;
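  /* AVX2 packs operate within each 128-bit lane, so after i8x32_pack the
     four u64 chunks of x hold elements 0-7, 16-23, 8-15 and 24-31;
     u64x4_permute with indices 0, 2, 1, 3 restores linear element order
     before i8x32_msb_mask extracts one bit per byte. */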
#elif defined(CLIB_HAVE_VEC128) && defined(__ARM_NEON)
  u16x8 v8 = u16x8_splat (v);
  u16x8 m = { 1, 2, 4, 8, 16, 32, 64, 128 };
  u16x8u *av = (u16x8u *) a;

  for (int i = 0; i < 8; i++)
    mask |= (u64) vaddvq_u16 ((av[i] == v8) & m) << (i * 8);
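  /* each NEON compare lane is all-ones on a match; AND-ing with
     m = { 1, 2, 4, ..., 128 } keeps one distinct bit per matching lane
     and vaddvq_u16 sums the lanes into a u8 bitmap - e.g. matches in
     lanes 0 and 5 of av[i] contribute 1 + 32 = 0x21 to byte i of mask. */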
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u16x8 v8 = u16x8_splat (v);
  u16x8u *av = (u16x8u *) a;
  mask = ((u64) i8x16_msb_mask (i8x16_pack (v8 == av[0], v8 == av[1])) |
          (u64) i8x16_msb_mask (i8x16_pack (v8 == av[2], v8 == av[3])) << 16 |
          (u64) i8x16_msb_mask (i8x16_pack (v8 == av[4], v8 == av[5])) << 32 |
          (u64) i8x16_msb_mask (i8x16_pack (v8 == av[6], v8 == av[7])) << 48);
#else
  for (int i = 0; i < n_elts; i++)
    mask |= (u64) (a[i] == v) << i;
#endif
  return mask;
}
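A usage sketch, with illustrative values; the bit-iteration helpers
count_trailing_zeros and clear_lowest_set_bit are the ones listed in the
member index below:

  u16 nexts[64];
  /* ... filled by caller ... */
  u64 hits = clib_mask_compare_u16_x64 (7, nexts, 64);

  while (hits)
    {
      int i = count_trailing_zeros (hits); /* index of next match */
      /* process nexts[i] ... */
      hits = clear_lowest_set_bit (hits);
    }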
static_always_inline u64
clib_mask_compare_u32_x64 (u32 v, u32 *a, u32 n_elts)
{
  u64 mask = 0;
#if defined(CLIB_HAVE_VEC512)
  u32x16 v16 = u32x16_splat (v);
  u32x16u *av = (u32x16u *) a;
  mask = ((u64) u32x16_is_equal_mask (av[0], v16) |
          (u64) u32x16_is_equal_mask (av[1], v16) << 16 |
          (u64) u32x16_is_equal_mask (av[2], v16) << 32 |
          (u64) u32x16_is_equal_mask (av[3], v16) << 48);
#elif defined(CLIB_HAVE_VEC256)
  u32x8 v8 = u32x8_splat (v);
  u32x8u *av = (u32x8u *) a;
  u32x8 m = { 0, 4, 1, 5, 2, 6, 3, 7 };
  i8x32 c;

  c = i8x32_pack (i16x16_pack ((i32x8) (v8 == av[0]), (i32x8) (v8 == av[1])),
                  i16x16_pack ((i32x8) (v8 == av[2]), (i32x8) (v8 == av[3])));
  mask = i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m));
  c = i8x32_pack (i16x16_pack ((i32x8) (v8 == av[4]), (i32x8) (v8 == av[5])),
                  i16x16_pack ((i32x8) (v8 == av[6]), (i32x8) (v8 == av[7])));
  mask |= (u64) i8x32_msb_mask ((i8x32) u32x8_permute ((u32x8) c, m)) << 32;
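  /* two rounds of per-lane packs leave the eight 4-byte chunks of c in
     element order 0-3, 8-11, 16-19, 24-27, 4-7, 12-15, 20-23, 28-31;
     u32x8_permute with m = { 0, 4, 1, 5, 2, 6, 3, 7 } restores linear
     order before i8x32_msb_mask extracts one bit per byte. */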
#elif defined(CLIB_HAVE_VEC128) && defined(__ARM_NEON)
  u32x4 v4 = u32x4_splat (v);
  u32x4 m = { 1, 2, 4, 8 };
  u32x4u *av = (u32x4u *) a;

  for (int i = 0; i < 16; i++)
    mask |= (u64) vaddvq_u32 ((av[i] == v4) & m) << (i * 4);
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
  u32x4 v4 = u32x4_splat (v);
  u32x4u *av = (u32x4u *) a;

  for (int i = 0; i < 4; i++)
    {
      i16x8 p1 = i16x8_pack (v4 == av[0], v4 == av[1]);
      i16x8 p2 = i16x8_pack (v4 == av[2], v4 == av[3]);
      mask |= (u64) i8x16_msb_mask (i8x16_pack (p1, p2)) << (i * 16);
      av += 4;
    }
#else
  for (int i = 0; i < n_elts; i++)
    mask |= (u64) (a[i] == v) << i;
#endif
  return mask;
}
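The clib_mask_compare_u16/u32 wrappers listed in the member index below
split an arbitrary n_elts into 64-element blocks; judging by the
pow2_mask helper they reference, unused tail bits of the last bitmap word
come back cleared. A usage sketch with illustrative sizes:

  u32 a[100];
  u64 bitmap[2]; /* one u64 per 64 elements, rounded up */

  clib_mask_compare_u32 (42, a, bitmap, 100);
  /* bitmap[0] covers a[0..63]; only the low 36 bits of bitmap[1]
     are meaningful */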
static_always_inline u32 *
clib_compress_u32_x64 (u32 *dst, u32 *src, u64 mask)
{
#if defined(CLIB_HAVE_VEC512_COMPRESS)
  u32x16u *sv = (u32x16u *) src;
  for (int i = 0; i < 4; i++)
    {
      /* number of set bits in the 16 mask bits consumed by this vector */
      int cnt = _popcnt32 ((u16) mask);
      /* compressed store: lanes with their mask bit set are written
         contiguously at dst */
      u32x16_compress_store (sv[i], mask, dst);
      dst += cnt;
      mask >>= 16;
    }
#elif defined(CLIB_HAVE_VEC256_COMPRESS)
  u32x8u *sv = (u32x8u *) src;
  for (int i = 0; i < 8; i++)
    {
      int cnt = _popcnt32 ((u8) mask);
      u32x8_compress_store (sv[i], mask, dst);
      dst += cnt;
      mask >>= 8;
    }
#else
  while (mask)
    {
      u16 bit = count_trailing_zeros (mask);
      mask = clear_lowest_set_bit (mask);
      dst++[0] = src[bit];
    }
#endif
  return dst;
}
static_always_inline u32
clib_compress_u32 (u32 *dst, u32 *src, u64 *mask, u32 n_elts)
{
  u32 *dst0 = dst;
  while (n_elts >= 64)
    {
      if (mask[0] == ~0ULL)
        {
          /* all 64 mask bits set - plain copy is cheaper */
          clib_memcpy_u32 (dst, src, 64);
          dst += 64;
        }
      else
        dst = clib_compress_u32_x64 (dst, src, mask[0]);

      src += 64;
      mask++;
      n_elts -= 64;
    }

  if (n_elts)
    dst = clib_compress_u32_x64 (dst, src, mask[0] & pow2_mask (n_elts));

  return dst - dst0;
}

#endif /* included_vector_funcs_h */
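Continuing the sketch above: compressing the flagged elements out of the
same array. The names kept and n_kept are illustrative; the u32 return
value is the number of elements written, matching the signature below.

  u32 kept[100];
  u32 n_kept = clib_compress_u32 (kept, a, bitmap, 100);
  /* kept[0..n_kept-1] holds the elements of a[] equal to 42, in order */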
static_always_inline u32 clib_compress_u32(u32 *dst, u32 *src, u64 *mask, u32 n_elts)
Compress array of 32-bit elements into destination array based on mask.
#define u64x4_permute(v, m0, m1, m2, m3)
static u32x8 u32x8_permute(u32x8 v, u32x8 idx)
#define count_trailing_zeros(x)
#define static_always_inline
static uword pow2_mask(uword x)
static_always_inline void clib_mask_compare_u16(u16 v, u16 *a, u64 *mask, u32 n_elts)
Compare 16-bit elements with provided value and return bitmap.
static uword clear_lowest_set_bit(uword x)
static_always_inline void clib_memcpy_u32(u32 *dst, u32 *src, u32 n_left)
static_always_inline void clib_mask_compare_u32(u32 v, u32 *a, u64 *bitmap, u32 n_elts)
Compare 32-bit elements with provided value and return bitmap.
static_always_inline u64 clib_mask_compare_u32_x64(u32 v, u32 *a, u32 n_elts)
static_always_inline u16 i8x16_msb_mask(i8x16 v)
static_always_inline u64 clib_mask_compare_u16_x64(u16 v, u16 *a, u32 n_elts)
static_always_inline u32 i8x32_msb_mask(i8x32 v)
static_always_inline u32 * clib_compress_u32_x64(u32 *dst, u32 *src, u64 mask)