FD.io VPP  v19.04-6-g6f05f72
Vector Packet Processing
vnet_classify.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_vnet_classify_h__
#define __included_vnet_classify_h__

#include <stdarg.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/packet.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vlib/cli.h>
#include <vnet/api_errno.h>     /* for API error numbers */

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vppinfra/cache.h>
#include <vppinfra/xxhash.h>

extern vlib_node_registration_t ip4_classify_node;
extern vlib_node_registration_t ip6_classify_node;

#define CLASSIFY_TRACE 0

#define U32X4_ALIGNED(p) PREDICT_TRUE((((intptr_t)p) & 0xf) == 0)

/*
 * Classify table option to process packets
 *  CLASSIFY_FLAG_USE_CURR_DATA:
 *   - classify packets starting from VPP node's current data pointer
 */
#define CLASSIFY_FLAG_USE_CURR_DATA 1

/*
 * Classify session action
 *  CLASSIFY_ACTION_SET_IP4_FIB_INDEX:
 *   - Classified IP packets will be looked up
 *     from the specified ipv4 fib table
 *  CLASSIFY_ACTION_SET_IP6_FIB_INDEX:
 *   - Classified IP packets will be looked up
 *     from the specified ipv6 fib table
 */
typedef enum vnet_classify_action_t_
{
  CLASSIFY_ACTION_SET_IP4_FIB_INDEX = 1,
  CLASSIFY_ACTION_SET_IP6_FIB_INDEX = 2,
  CLASSIFY_ACTION_SET_METADATA = 3,
} __attribute__ ((packed)) vnet_classify_action_t;

struct _vnet_classify_main;
typedef struct _vnet_classify_main vnet_classify_main_t;

#define foreach_size_in_u32x4 \
_(1)                          \
_(2)                          \
_(3)                          \
_(4)                          \
_(5)

/* *INDENT-OFF* */
typedef CLIB_PACKED(struct _vnet_classify_entry {
  /* Graph node next index */
  u32 next_index;

  /* put into vnet_buffer(b)->l2_classify.opaque_index */
  union {
    struct {
      u32 opaque_index;
      /* advance on hit, note it's a signed quantity... */
      i32 advance;
    };
    u64 opaque_count;
  };

  /* Really only need 1 bit */
  u8 flags;
#define VNET_CLASSIFY_ENTRY_FREE (1<<0)

  vnet_classify_action_t action;
  u16 metadata;

  /* Hit counter, last heard time */
  union {
    u64 hits;
    struct _vnet_classify_entry * next_free;
  };

  f64 last_heard;

  /* Must be aligned to a 16-octet boundary */
  u32x4 key[0];
}) vnet_classify_entry_t;
/* *INDENT-ON* */
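
/*
 * Sizing sketch (illustrative only): the zero-length key[] member makes each
 * entry variable-sized, so the per-entry stride is the header size plus the
 * key size. This is the same arithmetic vnet_classify_entry_at_index() uses
 * below; "t" is assumed to be a vnet_classify_table_t pointer.
 *
 *   u32 entry_stride = sizeof (vnet_classify_entry_t)
 *     + t->match_n_vectors * sizeof (u32x4);
 *   // e.g. match_n_vectors == 2 appends a 32-octet key to each entry header
 */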

static inline int
vnet_classify_entry_is_free (vnet_classify_entry_t * e)
{
  return e->flags & VNET_CLASSIFY_ENTRY_FREE;
}

static inline int
vnet_classify_entry_is_busy (vnet_classify_entry_t * e)
{
  return ((e->flags & VNET_CLASSIFY_ENTRY_FREE) == 0);
}

/* Need these to con the vector allocator */
/* *INDENT-OFF* */
#define _(size)                      \
typedef CLIB_PACKED(struct {         \
  u32 pad0[4];                       \
  u64 pad1[2];                       \
  u32x4 key[size];                   \
}) vnet_classify_entry_##size##_t;
foreach_size_in_u32x4;
/* *INDENT-ON* */
#undef _

typedef struct
{
  union
  {
    struct
    {
      u32 offset;
      u8 linear_search;
      u8 pad[2];
      u8 log2_pages;
    };
    u64 as_u64;
  };
} vnet_classify_bucket_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* Mask to apply after skipping N vectors */
  u32x4 *mask;
  /* Buckets and entries */
  vnet_classify_bucket_t *buckets;
  vnet_classify_entry_t *entries;

  /* Config parameters */
  u32 match_n_vectors;
  u32 skip_n_vectors;
  u32 nbuckets;
  u32 log2_nbuckets;
  u32 linear_buckets;
  int entries_per_page;
  u32 active_elements;
  u32 current_data_flag;
  int current_data_offset;
  u32 data_offset;
  /* Index of next table to try */
  u32 next_table_index;

  /* Miss next index, return if next_table_index = 0 */
  u32 miss_next_index;

  /* Per-bucket working copies, one per thread */
  vnet_classify_entry_t **working_copies;
  int *working_copy_lengths;
  vnet_classify_bucket_t saved_bucket;

  /* Free entry freelists */
  vnet_classify_entry_t **freelists;

  u8 *name;

  /* Private allocation arena, protected by the writer lock */
  void *mheap;

  /* Writer (only) lock for this table */
  volatile u32 *writer_lock;

} vnet_classify_table_t;

struct _vnet_classify_main
{
  /* Table pool */
  vnet_classify_table_t *tables;

  /* Registered next-index, opaque unformat fcns */
  unformat_function_t **unformat_l2_next_index_fns;
  unformat_function_t **unformat_ip_next_index_fns;
  unformat_function_t **unformat_acl_next_index_fns;
  unformat_function_t **unformat_policer_next_index_fns;
  unformat_function_t **unformat_opaque_index_fns;

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
};

extern vnet_classify_main_t vnet_classify_main;

u8 *format_classify_table (u8 * s, va_list * args);

u64 vnet_classify_hash_packet (vnet_classify_table_t * t, u8 * h);

static inline u64
vnet_classify_hash_packet_inline (vnet_classify_table_t * t, u8 * h)
{
  u32x4 *mask;

  union
  {
    u32x4 as_u32x4;
    u64 as_u64[2];
  } xor_sum __attribute__ ((aligned (sizeof (u32x4))));

  ASSERT (t);
  mask = t->mask;
#ifdef CLIB_HAVE_VEC128
  if (U32X4_ALIGNED (h))
    { //SSE can't handle unaligned data
      u32x4 *data = (u32x4 *) h;
      xor_sum.as_u32x4 = data[0 + t->skip_n_vectors] & mask[0];
      switch (t->match_n_vectors)
        {
        case 5:
          xor_sum.as_u32x4 ^= data[4 + t->skip_n_vectors] & mask[4];
          /* FALLTHROUGH */
        case 4:
          xor_sum.as_u32x4 ^= data[3 + t->skip_n_vectors] & mask[3];
          /* FALLTHROUGH */
        case 3:
          xor_sum.as_u32x4 ^= data[2 + t->skip_n_vectors] & mask[2];
          /* FALLTHROUGH */
        case 2:
          xor_sum.as_u32x4 ^= data[1 + t->skip_n_vectors] & mask[1];
          /* FALLTHROUGH */
        case 1:
          break;
        default:
          abort ();
        }
    }
  else
#endif /* CLIB_HAVE_VEC128 */
    {
      u32 skip_u64 = t->skip_n_vectors * 2;
      u64 *data64 = (u64 *) h;
      xor_sum.as_u64[0] = data64[0 + skip_u64] & ((u64 *) mask)[0];
      xor_sum.as_u64[1] = data64[1 + skip_u64] & ((u64 *) mask)[1];
      switch (t->match_n_vectors)
        {
        case 5:
          xor_sum.as_u64[0] ^= data64[8 + skip_u64] & ((u64 *) mask)[8];
          xor_sum.as_u64[1] ^= data64[9 + skip_u64] & ((u64 *) mask)[9];
          /* FALLTHROUGH */
        case 4:
          xor_sum.as_u64[0] ^= data64[6 + skip_u64] & ((u64 *) mask)[6];
          xor_sum.as_u64[1] ^= data64[7 + skip_u64] & ((u64 *) mask)[7];
          /* FALLTHROUGH */
        case 3:
          xor_sum.as_u64[0] ^= data64[4 + skip_u64] & ((u64 *) mask)[4];
          xor_sum.as_u64[1] ^= data64[5 + skip_u64] & ((u64 *) mask)[5];
          /* FALLTHROUGH */
        case 2:
          xor_sum.as_u64[0] ^= data64[2 + skip_u64] & ((u64 *) mask)[2];
          xor_sum.as_u64[1] ^= data64[3 + skip_u64] & ((u64 *) mask)[3];
          /* FALLTHROUGH */
        case 1:
          break;

        default:
          abort ();
        }
    }

  return clib_xxhash (xor_sum.as_u64[0] ^ xor_sum.as_u64[1]);
}
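
/*
 * Usage sketch (illustrative only): hashing a packet held in a vlib buffer
 * "b" against table "t". The hash covers match_n_vectors 16-octet vectors of
 * (packet data & mask), starting skip_n_vectors vectors into the data; the
 * caller later folds it into a bucket index with hash & (t->nbuckets - 1).
 *
 *   u8 *h = vlib_buffer_get_current (b);
 *   u64 hash = vnet_classify_hash_packet_inline (t, h);
 */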

static inline void
vnet_classify_prefetch_bucket (vnet_classify_table_t * t, u64 hash)
{
  u32 bucket_index;

  ASSERT (is_pow2 (t->nbuckets));

  bucket_index = hash & (t->nbuckets - 1);

  CLIB_PREFETCH (&t->buckets[bucket_index], CLIB_CACHE_LINE_BYTES, LOAD);
}

static inline vnet_classify_entry_t *
vnet_classify_get_entry (vnet_classify_table_t * t, uword offset)
{
  u8 *hp = t->mheap;
  u8 *vp = hp + offset;

  return (void *) vp;
}

static inline uword
vnet_classify_get_offset (vnet_classify_table_t * t,
                          vnet_classify_entry_t * v)
{
  u8 *hp, *vp;

  hp = (u8 *) t->mheap;
  vp = (u8 *) v;

  ASSERT ((vp - hp) < 0x100000000ULL);
  return vp - hp;
}

static inline vnet_classify_entry_t *
vnet_classify_entry_at_index (vnet_classify_table_t * t,
                              vnet_classify_entry_t * e, u32 index)
{
  u8 *eu8;

  eu8 = (u8 *) e;

  eu8 += index * (sizeof (vnet_classify_entry_t) +
                  (t->match_n_vectors * sizeof (u32x4)));

  return (vnet_classify_entry_t *) eu8;
}

static inline void
vnet_classify_prefetch_entry (vnet_classify_table_t * t, u64 hash)
{
  u32 bucket_index;
  u32 value_index;
  vnet_classify_bucket_t *b;
  vnet_classify_entry_t *e;

  bucket_index = hash & (t->nbuckets - 1);

  b = &t->buckets[bucket_index];

  if (b->offset == 0)
    return;

  hash >>= t->log2_nbuckets;

  e = vnet_classify_get_entry (t, b->offset);
  value_index = hash & ((1 << b->log2_pages) - 1);

  e = vnet_classify_entry_at_index (t, e, value_index);

  CLIB_PREFETCH (e, CLIB_CACHE_LINE_BYTES, LOAD);
}
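
/*
 * Pipelining sketch (illustrative only): classify nodes typically stagger
 * these prefetches across the packet vector so that, by the time a packet is
 * matched, its bucket and entry are already in cache. The hash variables
 * below are placeholder names for hashes computed two packets and one packet
 * ahead of the one currently being looked up.
 *
 *   vnet_classify_prefetch_bucket (t, hash_two_ahead);
 *   vnet_classify_prefetch_entry (t, hash_one_ahead);
 *   e = vnet_classify_find_entry_inline (t, h_current, hash_current, now);
 */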

vnet_classify_entry_t *vnet_classify_find_entry (vnet_classify_table_t * t,
                                                 u8 * h, u64 hash, f64 now);

static inline vnet_classify_entry_t *
vnet_classify_find_entry_inline (vnet_classify_table_t * t,
                                 u8 * h, u64 hash, f64 now)
{
  vnet_classify_entry_t *v;
  u32x4 *mask, *key;
  union
  {
    u32x4 as_u32x4;
    u64 as_u64[2];
  } result __attribute__ ((aligned (sizeof (u32x4))));
  vnet_classify_bucket_t *b;
  u32 value_index;
  u32 bucket_index;
  u32 limit;
  int i;

  bucket_index = hash & (t->nbuckets - 1);
  b = &t->buckets[bucket_index];
  mask = t->mask;

  if (b->offset == 0)
    return 0;

  hash >>= t->log2_nbuckets;

  v = vnet_classify_get_entry (t, b->offset);
  value_index = hash & ((1 << b->log2_pages) - 1);
  limit = t->entries_per_page;
  if (PREDICT_FALSE (b->linear_search))
    {
      value_index = 0;
      limit *= (1 << b->log2_pages);
    }

  v = vnet_classify_entry_at_index (t, v, value_index);

#ifdef CLIB_HAVE_VEC128
  if (U32X4_ALIGNED (h))
    {
      u32x4 *data = (u32x4 *) h;
      for (i = 0; i < limit; i++)
        {
          key = v->key;
          result.as_u32x4 = (data[0 + t->skip_n_vectors] & mask[0]) ^ key[0];
          switch (t->match_n_vectors)
            {
            case 5:
              result.as_u32x4 |=
                (data[4 + t->skip_n_vectors] & mask[4]) ^ key[4];
              /* FALLTHROUGH */
            case 4:
              result.as_u32x4 |=
                (data[3 + t->skip_n_vectors] & mask[3]) ^ key[3];
              /* FALLTHROUGH */
            case 3:
              result.as_u32x4 |=
                (data[2 + t->skip_n_vectors] & mask[2]) ^ key[2];
              /* FALLTHROUGH */
            case 2:
              result.as_u32x4 |=
                (data[1 + t->skip_n_vectors] & mask[1]) ^ key[1];
              /* FALLTHROUGH */
            case 1:
              break;
            default:
              abort ();
            }

          if (u32x4_zero_byte_mask (result.as_u32x4) == 0xffff)
            {
              if (PREDICT_TRUE (now))
                {
                  v->hits++;
                  v->last_heard = now;
                }
              return (v);
            }
          v = vnet_classify_entry_at_index (t, v, 1);
        }
    }
  else
#endif /* CLIB_HAVE_VEC128 */
    {
      u32 skip_u64 = t->skip_n_vectors * 2;
      u64 *data64 = (u64 *) h;
      for (i = 0; i < limit; i++)
        {
          key = v->key;

          result.as_u64[0] =
            (data64[0 + skip_u64] & ((u64 *) mask)[0]) ^ ((u64 *) key)[0];
          result.as_u64[1] =
            (data64[1 + skip_u64] & ((u64 *) mask)[1]) ^ ((u64 *) key)[1];
          switch (t->match_n_vectors)
            {
            case 5:
              result.as_u64[0] |=
                (data64[8 + skip_u64] & ((u64 *) mask)[8]) ^ ((u64 *) key)[8];
              result.as_u64[1] |=
                (data64[9 + skip_u64] & ((u64 *) mask)[9]) ^ ((u64 *) key)[9];
              /* FALLTHROUGH */
            case 4:
              result.as_u64[0] |=
                (data64[6 + skip_u64] & ((u64 *) mask)[6]) ^ ((u64 *) key)[6];
              result.as_u64[1] |=
                (data64[7 + skip_u64] & ((u64 *) mask)[7]) ^ ((u64 *) key)[7];
              /* FALLTHROUGH */
            case 3:
              result.as_u64[0] |=
                (data64[4 + skip_u64] & ((u64 *) mask)[4]) ^ ((u64 *) key)[4];
              result.as_u64[1] |=
                (data64[5 + skip_u64] & ((u64 *) mask)[5]) ^ ((u64 *) key)[5];
              /* FALLTHROUGH */
            case 2:
              result.as_u64[0] |=
                (data64[2 + skip_u64] & ((u64 *) mask)[2]) ^ ((u64 *) key)[2];
              result.as_u64[1] |=
                (data64[3 + skip_u64] & ((u64 *) mask)[3]) ^ ((u64 *) key)[3];
              /* FALLTHROUGH */
            case 1:
              break;
            default:
              abort ();
            }

          if (result.as_u64[0] == 0 && result.as_u64[1] == 0)
            {
              if (PREDICT_TRUE (now))
                {
                  v->hits++;
                  v->last_heard = now;
                }
              return (v);
            }

          v = vnet_classify_entry_at_index (t, v, 1);
        }
    }
  return 0;
}
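
/*
 * Lookup sketch (illustrative only): walking a chain of classify tables,
 * following next_table_index on a miss until a session hits or the chain
 * ends. "cm", "table_index", "h" and "now" are placeholder names supplied by
 * the caller; error handling is omitted.
 *
 *   while (table_index != ~0)
 *     {
 *       vnet_classify_table_t *t =
 *         pool_elt_at_index (cm->tables, table_index);
 *       u64 hash = vnet_classify_hash_packet_inline (t, h);
 *       vnet_classify_entry_t *e =
 *         vnet_classify_find_entry_inline (t, h, hash, now);
 *       if (e)
 *         return e->next_index;   // hit: use the session's next node index
 *       table_index = t->next_table_index;
 *     }
 *   // miss in every table: fall back to the first table's miss_next_index
 */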
508 
509 vnet_classify_table_t *vnet_classify_new_table (vnet_classify_main_t * cm,
510  u8 * mask, u32 nbuckets,
511  u32 memory_size,
512  u32 skip_n_vectors,
513  u32 match_n_vectors);
514 
515 int vnet_classify_add_del_session (vnet_classify_main_t * cm,
516  u32 table_index,
517  u8 * match,
518  u32 hit_next_index,
519  u32 opaque_index,
520  i32 advance,
521  u8 action, u32 metadata, int is_add);
522 
523 int vnet_classify_add_del_table (vnet_classify_main_t * cm,
524  u8 * mask,
525  u32 nbuckets,
526  u32 memory_size,
527  u32 skip,
528  u32 match,
529  u32 next_table_index,
530  u32 miss_next_index,
531  u32 * table_index,
532  u8 current_data_flag,
533  i16 current_data_offset,
534  int is_add, int del_chain);
535 
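
/*
 * Configuration sketch (illustrative only, error handling omitted): create a
 * table whose key is three 16-octet vectors of masked packet data, then add
 * one session intended to send matching IP packets to ipv4 fib index 5 via
 * the SET_IP4_FIB_INDEX action (an assumed usage, not a recommendation).
 * "cm", "mask" and "match" are caller-supplied; mask and match are byte
 * vectors of match_n_vectors * sizeof (u32x4) octets, and the numeric
 * arguments are example values only.
 *
 *   u32 table_index = ~0;
 *   int rv = vnet_classify_add_del_table
 *     (cm, mask,
 *      32,                  // nbuckets
 *      2 << 20,             // memory_size
 *      0,                   // skip_n_vectors
 *      3,                   // match_n_vectors
 *      ~0,                  // next_table_index: end of chain
 *      ~0,                  // miss_next_index
 *      &table_index,
 *      0,                   // current_data_flag
 *      0,                   // current_data_offset
 *      1,                   // is_add
 *      0);                  // del_chain
 *
 *   if (rv == 0)
 *     rv = vnet_classify_add_del_session
 *       (cm, table_index, match,
 *        0,                 // hit_next_index
 *        0,                 // opaque_index
 *        0,                 // advance
 *        CLASSIFY_ACTION_SET_IP4_FIB_INDEX,
 *        5,                 // metadata: fib index used by the action
 *        1);                // is_add
 */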

unformat_function_t unformat_ip4_mask;
unformat_function_t unformat_ip6_mask;
unformat_function_t unformat_l3_mask;
unformat_function_t unformat_l2_mask;
unformat_function_t unformat_classify_mask;
unformat_function_t unformat_l2_next_index;
unformat_function_t unformat_ip_next_index;
unformat_function_t unformat_ip4_match;
unformat_function_t unformat_ip6_match;
unformat_function_t unformat_l3_match;
unformat_function_t unformat_l4_match;
unformat_function_t unformat_vlan_tag;
unformat_function_t unformat_l2_match;
unformat_function_t unformat_classify_match;

void vnet_classify_register_unformat_ip_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_l2_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_acl_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_policer_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_opaque_index_fn (unformat_function_t *
                                                      fn);

#endif /* __included_vnet_classify_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */