FD.io VPP  v20.05.1-5-g09f167997
Vector Packet Processing
vector.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2005 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef included_clib_vector_h
#define included_clib_vector_h

#include <vppinfra/clib.h>

/* Vector types. */

#if defined (__MMX__) || defined (__IWMMXT__) || defined (__aarch64__) \
  || defined (__i686__)
#define CLIB_HAVE_VEC64
#endif

#if defined (__aarch64__) && defined(__ARM_NEON) || defined (__i686__)
#define CLIB_HAVE_VEC128
#endif

#if defined (__SSE4_2__) && __GNUC__ >= 4
#define CLIB_HAVE_VEC128
#endif

#if defined (__ALTIVEC__)
#define CLIB_HAVE_VEC128
#endif

#if defined (__AVX2__)
#define CLIB_HAVE_VEC256
#if defined (__clang__) && __clang_major__ < 4
#undef CLIB_HAVE_VEC256
#endif
#endif

#if defined (__AVX512BITALG__)
#define CLIB_HAVE_VEC512
#endif

/* 128 implies 64 */
#ifdef CLIB_HAVE_VEC128
#define CLIB_HAVE_VEC64
#endif
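
/* Typical usage sketch (illustrative only, not a definition from this
   header): downstream code picks the widest implementation the target
   supports by testing these feature macros, e.g.

     #if defined (CLIB_HAVE_VEC256)
       ... process 32 bytes per iteration with u8x32 ...
     #elif defined (CLIB_HAVE_VEC128)
       ... process 16 bytes per iteration with u8x16 ...
     #else
       ... scalar fallback ...
     #endif

   The per-ISA operations themselves come from the vector_*.h headers
   included further below. */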

#define _vector_size(n) __attribute__ ((vector_size (n)))
#define _vector_size_unaligned(n) __attribute__ ((vector_size (n), __aligned__ (1)))

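/* Each _() entry below is (sign, bits, lanes): the scalar kind (i, u or f),
   the scalar width in bits and the lane count, so e.g. _(u,8,16) describes
   a 128-bit vector of sixteen u8 lanes. */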
#define foreach_vec64i _(i,8,8) _(i,16,4) _(i,32,2)
#define foreach_vec64u _(u,8,8) _(u,16,4) _(u,32,2)
#define foreach_vec64f _(f,32,2)
#define foreach_vec128i _(i,8,16) _(i,16,8) _(i,32,4) _(i,64,2)
#define foreach_vec128u _(u,8,16) _(u,16,8) _(u,32,4) _(u,64,2)
#define foreach_vec128f _(f,32,4) _(f,64,2)
#define foreach_vec256i _(i,8,32) _(i,16,16) _(i,32,8) _(i,64,4)
#define foreach_vec256u _(u,8,32) _(u,16,16) _(u,32,8) _(u,64,4)
#define foreach_vec256f _(f,32,8) _(f,64,4)
#define foreach_vec512i _(i,8,64) _(i,16,32) _(i,32,16) _(i,64,8)
#define foreach_vec512u _(u,8,64) _(u,16,32) _(u,32,16) _(u,64,8)
#define foreach_vec512f _(f,32,16) _(f,64,8)

#if defined (CLIB_HAVE_VEC512)
#define foreach_int_vec foreach_vec64i foreach_vec128i foreach_vec256i foreach_vec512i
#define foreach_uint_vec foreach_vec64u foreach_vec128u foreach_vec256u foreach_vec512u
#define foreach_float_vec foreach_vec64f foreach_vec128f foreach_vec256f foreach_vec512f
#elif defined (CLIB_HAVE_VEC256)
#define foreach_int_vec foreach_vec64i foreach_vec128i foreach_vec256i
#define foreach_uint_vec foreach_vec64u foreach_vec128u foreach_vec256u
#define foreach_float_vec foreach_vec64f foreach_vec128f foreach_vec256f
#else
#define foreach_int_vec foreach_vec64i foreach_vec128i
#define foreach_uint_vec foreach_vec64u foreach_vec128u
#define foreach_float_vec foreach_vec64f foreach_vec128f
#endif

#define foreach_vec foreach_int_vec foreach_uint_vec foreach_float_vec

/* *INDENT-OFF* */

/* Type Definitions */
#define _(t,s,c) \
typedef t##s t##s##x##c _vector_size (s/8*c); \
typedef t##s t##s##x##c##u _vector_size_unaligned (s/8*c); \
typedef union { \
  t##s##x##c as_##t##s##x##c; \
  t##s as_##t##s[c]; \
} t##s##x##c##_union_t;

foreach_vec64i foreach_vec64u foreach_vec64f
foreach_vec128i foreach_vec128u foreach_vec128f
foreach_vec256i foreach_vec256u foreach_vec256f
foreach_vec512i foreach_vec512u foreach_vec512f
#undef _
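
/* For example, the _(u,8,16) entry expands to roughly:

     typedef u8 u8x16 __attribute__ ((vector_size (16)));
     typedef u8 u8x16u __attribute__ ((vector_size (16), __aligned__ (1)));
     typedef union { u8x16 as_u8x16; u8 as_u8[16]; } u8x16_union_t;

   i.e. an aligned vector type, an unaligned variant for unaligned
   loads/stores, and a union giving per-lane scalar access. */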

/* Default vector word size, in bits. */
#ifndef CLIB_VECTOR_WORD_BITS
#ifdef CLIB_HAVE_VEC128
#define CLIB_VECTOR_WORD_BITS 128
#else
#define CLIB_VECTOR_WORD_BITS 64
#endif
#endif /* CLIB_VECTOR_WORD_BITS */

/* Vector word sized types. */
#if CLIB_VECTOR_WORD_BITS == 128
typedef i8 i8x _vector_size (16);
typedef i16 i16x _vector_size (16);
typedef i32 i32x _vector_size (16);
typedef i64 i64x _vector_size (16);
typedef u8 u8x _vector_size (16);
typedef u16 u16x _vector_size (16);
typedef u32 u32x _vector_size (16);
typedef u64 u64x _vector_size (16);
#endif
#if CLIB_VECTOR_WORD_BITS == 64
typedef i8 i8x _vector_size (8);
typedef i16 i16x _vector_size (8);
typedef i32 i32x _vector_size (8);
typedef i64 i64x _vector_size (8);
typedef u8 u8x _vector_size (8);
typedef u16 u16x _vector_size (8);
typedef u32 u32x _vector_size (8);
typedef u64 u64x _vector_size (8);
#endif

#undef _vector_size

#define VECTOR_WORD_TYPE(t) t##x
#define VECTOR_WORD_TYPE_LEN(t) (sizeof (VECTOR_WORD_TYPE(t)) / sizeof (t))
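
/* Example: with CLIB_VECTOR_WORD_BITS == 128, VECTOR_WORD_TYPE (u32) is
   u32x (a 128-bit vector) and VECTOR_WORD_TYPE_LEN (u32) evaluates to
   16 / 4 = 4 lanes. */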

#if defined (__SSE4_2__) && __GNUC__ >= 4
#include <vppinfra/vector_sse42.h>
#endif

#if defined (__AVX2__)
#include <vppinfra/vector_avx2.h>
#endif

#if defined (__AVX512BITALG__)
/* Due to power-level transition issues we prefer not to use AVX-512 on
   Skylake-X and Cascade Lake CPUs; AVX512BITALG was introduced with
   Ice Lake CPUs. */
#include <vppinfra/vector_avx512.h>
#endif

#if defined (__ALTIVEC__)
#include <vppinfra/vector_altivec.h>
#endif

#if defined (__aarch64__)
#include <vppinfra/vector_neon.h>
#endif

#if (defined(CLIB_HAVE_VEC128) || defined(CLIB_HAVE_VEC64))
#include <vppinfra/vector_funcs.h>
#endif

/* This macro generates _splat inline functions for each scalar vector type. */
#ifndef CLIB_VEC128_SPLAT_DEFINED
#define _(t, s, c) \
static_always_inline t##s##x##c \
t##s##x##c##_splat (t##s x) \
{ \
  t##s##x##c r; \
  int i; \
\
  for (i = 0; i < c; i++) \
    r[i] = x; \
\
  return r; \
}

foreach_vec128i foreach_vec128u
#undef _
#endif
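
/* Usage sketch (assuming a target with CLIB_HAVE_VEC128): whether a splat
   comes from the generic loop above or from an ISA-specific header, it
   broadcasts one scalar into every lane, and lanes can be read back with
   the GCC/clang vector-extension subscript syntax:

     u32x4 v = u32x4_splat (7);   // v == { 7, 7, 7, 7 }
     u32 first = v[0];            // per-lane access
*/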

/* *INDENT-ON* */

#endif /* included_clib_vector_h */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */