FD.io VPP v18.04-17-g3a0d853
Vector Packet Processing
hash_lookup.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stddef.h>
#include <netinet/in.h>

#include <vlibapi/api.h>
#include <vlibmemory/api.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
#include <vnet/plugin/plugin.h>
#include <acl/acl.h>
#include <vppinfra/bihash_48_8.h>

#include "hash_lookup.h"
#include "hash_lookup_private.h"

static applied_hash_ace_entry_t **get_applied_hash_aces(acl_main_t *am, u32 lc_index)
{
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);

/*is_input ? vec_elt_at_index(am->input_hash_entry_vec_by_sw_if_index, sw_if_index)
 *         : vec_elt_at_index(am->output_hash_entry_vec_by_sw_if_index, sw_if_index);
*/
  return applied_hash_aces;
}

static void
hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
{
  DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d",
      kv->key[0], kv->key[1], kv->key[2],
      kv->key[3], kv->key[4], kv->key[5], kv->value, is_add);
  BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
}

static void
fill_applied_hash_ace_kv(acl_main_t *am,
                         applied_hash_ace_entry_t **applied_hash_aces,
                         u32 lc_index,
                         u32 new_index, clib_bihash_kv_48_8_t *kv)
{
  fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
  hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value;
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);

  memcpy(kv_key, &(vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->match), sizeof(*kv_key));
  /* initialize the lookup context index */
  kv_key->pkt.lc_index = lc_index;
  kv_val->as_u64 = 0;
  kv_val->applied_entry_index = new_index;
  kv_val->need_portrange_check = vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->src_portrange_not_powerof2 ||
                                 vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->dst_portrange_not_powerof2;
  /* by default assume all values are shadowed -> check all mask types */
  kv_val->shadowed = 1;
}

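/*
 * A sketch of the resulting bihash key/value layout (as filled in above):
 * the 48-byte key is the ACE's pre-masked 5-tuple match value, with
 * pkt.lc_index overwritten by the lookup context index; the 8-byte value
 * carries applied_entry_index (an index into *applied_hash_aces),
 * need_portrange_check (set when either port range is not exactly
 * representable by a mask), and shadowed (pessimistically 1, so the
 * datapath checks all mask types).
 */
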
static void
add_del_hashtable_entry(acl_main_t *am,
                        u32 lc_index,
                        applied_hash_ace_entry_t **applied_hash_aces,
                        u32 index, int is_add)
{
  clib_bihash_kv_48_8_t kv;

  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, index, &kv);
  hashtable_add_del(am, &kv, is_add);
}


static void
activate_applied_ace_hash_entry(acl_main_t *am,
                                u32 lc_index,
                                applied_hash_ace_entry_t **applied_hash_aces,
                                u32 new_index)
{
  clib_bihash_kv_48_8_t kv;
  ASSERT(new_index != ~0);
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
  DBG("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);

  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, new_index, &kv);

  DBG("APPLY ADD KEY: %016llx %016llx %016llx %016llx %016llx %016llx",
      kv.key[0], kv.key[1], kv.key[2],
      kv.key[3], kv.key[4], kv.key[5]);

  clib_bihash_kv_48_8_t result;
  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
  int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
  ASSERT(new_index != ~0);
  ASSERT(new_index < vec_len((*applied_hash_aces)));
  if (res == 0) {
    /* One or more entries already exist with this key. Append at the end. */
    u32 first_index = result_val->applied_entry_index;
    ASSERT(first_index != ~0);
    DBG("A key already exists, with applied entry index: %d", first_index);
    applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
    u32 last_index = first_pae->tail_applied_entry_index;
    ASSERT(last_index != ~0);
    applied_hash_ace_entry_t *last_pae = vec_elt_at_index((*applied_hash_aces), last_index);
    DBG("...advance to chained entry index: %d", last_index);
    /* link ourselves in */
    last_pae->next_applied_entry_index = new_index;
    pae->prev_applied_entry_index = last_index;
    /* adjust the pointer to the new tail */
    first_pae->tail_applied_entry_index = new_index;
  } else {
    /* It's the very first entry */
    hashtable_add_del(am, &kv, 1);
    ASSERT(new_index != ~0);
    pae->tail_applied_entry_index = new_index;
  }
}

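/*
 * For example, after activating three entries that share one masked key,
 * the bihash value points at the first entry and the entries form a
 * doubly-linked chain whose head caches the tail:
 *
 *   bihash value -> [A: prev ~0, next B,  tail C]
 *                   [B: prev A,  next C,  tail ~0]
 *                   [C: prev B,  next ~0, tail ~0]
 *
 * so appending jumps straight to A->tail_applied_entry_index instead of
 * walking the whole chain.
 */
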
static void
applied_hash_entries_analyze(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces)
{
  /*
   * Go over the rules and check which ones are shadowed and which aren't.
   * Naive approach: try to match the match value from every ACE as if it
   * were a live packet, and see if the resulting match happens earlier in the list.
   * If it does not match, or matches later in the ACL, then the entry is not shadowed.
   *
   * This approach fails; an example:
   *   deny tcp 2001:db8::/32 2001:db8::/32
   *   permit ip 2001:db8::1/128 2001:db8::2/128
   */
}

static void *
hash_acl_set_heap(acl_main_t *am)
{
  if (0 == am->hash_lookup_mheap) {
    am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , am->hash_lookup_mheap_size);
    if (0 == am->hash_lookup_mheap) {
      clib_error("ACL plugin failed to allocate hash lookup heap of %U bytes, abort", format_memory_size, am->hash_lookup_mheap_size);
    }
    mheap_t *h = mheap_header (am->hash_lookup_mheap);
    h->flags |= MHEAP_FLAG_THREAD_SAFE;
  }
  void *oldheap = clib_mem_set_heap(am->hash_lookup_mheap);
  return oldheap;
}

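/*
 * The usual calling pattern around this helper, as used by the public
 * entry points below: switch to the private lookup heap, do the work,
 * then restore the caller's heap.
 *
 *   void *oldheap = hash_acl_set_heap(am);
 *   ... allocate/free vectors and bihash entries ...
 *   clib_mem_set_heap(oldheap);
 */
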
void
acl_plugin_hash_acl_set_validate_heap(int on)
{
  acl_main_t *am = &acl_main;
  clib_mem_set_heap(hash_acl_set_heap(am));
  mheap_t *h = mheap_header (am->hash_lookup_mheap);
  if (on) {
    h->flags |= MHEAP_FLAG_VALIDATE;
    h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE;
    mheap_validate(h);
  } else {
    h->flags &= ~MHEAP_FLAG_VALIDATE;
    h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
  }
}

void
acl_plugin_hash_acl_set_trace_heap(int on)
{
  acl_main_t *am = &acl_main;
  clib_mem_set_heap(hash_acl_set_heap(am));
  mheap_t *h = mheap_header (am->hash_lookup_mheap);
  if (on) {
    h->flags |= MHEAP_FLAG_TRACE;
  } else {
    h->flags &= ~MHEAP_FLAG_TRACE;
  }
}

void
hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
{
  int i;

  DBG0("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
  if (!am->acl_lookup_hash_initialized) {
    BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
                           am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
    am->acl_lookup_hash_initialized = 1;
  }

  void *oldheap = hash_acl_set_heap(am);
  vec_validate(am->hash_entry_vec_by_lc_index, lc_index);
  vec_validate(am->hash_acl_infos, acl_index);
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  int base_offset = vec_len(*applied_hash_aces);

  /* Update the bitmap of the mask types with which the lookup
     needs to happen for the ACLs applied to this lc_index */
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  vec_validate((*applied_hash_acls), lc_index);
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  /* ensure the list of applied hash ACLs is initialized and add this acl# to it */
  u32 index = vec_search(pal->applied_acls, acl_index);
  if (index != ~0) {
    clib_warning("BUG: trying to apply acl_index %d twice on lc_index %d, according to lc",
                 acl_index, lc_index);
    goto done;
  }
  vec_add1(pal->applied_acls, acl_index);
  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
  if (index2 != ~0) {
    clib_warning("BUG: trying to apply acl_index %d twice on lc_index %d, according to hash h-acl info",
                 acl_index, lc_index);
    goto done;
  }
  vec_add1((*hash_acl_applied_lc_index), lc_index);

  pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap,
                                               ha->mask_type_index_bitmap);
  /*
   * If the applied ACL is empty, the current code causes a different
   * behavior compared to the linear search: an empty ACL will simply fall
   * through to the next ACL, or to the default deny at the end.
   *
   * This is not a problem, because after a vpp-dev discussion the
   * consensus was that it should not be possible to apply a non-existent
   * ACL, so the change adding this code also takes care of that.
   */

  /* expand the applied aces vector by the necessary amount */
  vec_resize((*applied_hash_aces), vec_len(ha->rules));

  /* add the rules from the ACL to the hash table for lookup and append to the vector */
  for(i=0; i < vec_len(ha->rules); i++) {
    u32 new_index = base_offset + i;
    applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
    pae->acl_index = acl_index;
    pae->ace_index = ha->rules[i].ace_index;
    pae->acl_position = acl_position;
    pae->action = ha->rules[i].action;
    pae->hitcount = 0;
    pae->hash_ace_info_index = i;
    /* we might link it in later */
    pae->next_applied_entry_index = ~0;
    pae->prev_applied_entry_index = ~0;
    pae->tail_applied_entry_index = ~0;
    activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
  }
  applied_hash_entries_analyze(am, applied_hash_aces);
done:
  clib_mem_set_heap (oldheap);
}

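/*
 * A sketch of the per-lc_index applied-entries vector after applying an
 * ACL with N rules: the new entries land at base_offset (the previous
 * vector length), keeping the vector grouped by ACL in application order:
 *
 *   [ ... previously applied entries ... | base_offset .. base_offset+N-1 ]
 *                                          ^-- entries of the ACL just applied
 */
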
static u32
find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces, u32 curr_index)
{
  /*
   * Find back the first entry. Inefficient, so it might need to be a bit
   * cleverer if this proves to be a problem.
   */
  u32 an_index = curr_index;
  ASSERT(an_index != ~0);
  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
  while(head_pae->prev_applied_entry_index != ~0) {
    an_index = head_pae->prev_applied_entry_index;
    ASSERT(an_index != ~0);
    head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
  }
  return an_index;
}

static void
move_applied_ace_hash_entry(acl_main_t *am,
                            u32 lc_index,
                            applied_hash_ace_entry_t **applied_hash_aces,
                            u32 old_index, u32 new_index)
{
  ASSERT(old_index != ~0);
  ASSERT(new_index != ~0);
  /* move the entry */
  *vec_elt_at_index((*applied_hash_aces), new_index) = *vec_elt_at_index((*applied_hash_aces), old_index);

  /* update the linkage and hash table if necessary */
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);

  if (pae->prev_applied_entry_index != ~0) {
    applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
    ASSERT(prev_pae->next_applied_entry_index == old_index);
    prev_pae->next_applied_entry_index = new_index;
  } else {
    /* first entry - so the hash points to it, update */
    add_del_hashtable_entry(am, lc_index,
                            applied_hash_aces, new_index, 1);
    ASSERT(pae->tail_applied_entry_index != ~0);
  }
  if (pae->next_applied_entry_index != ~0) {
    applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
    ASSERT(next_pae->prev_applied_entry_index == old_index);
    next_pae->prev_applied_entry_index = new_index;
  } else {
    /*
     * Moving the very last entry, so we need to update the tail pointer in the first one.
     */
    u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
    ASSERT(head_index != ~0);
    applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);

    ASSERT(head_pae->tail_applied_entry_index == old_index);
    head_pae->tail_applied_entry_index = new_index;
  }
  /* invalidate the old entry */
  pae->prev_applied_entry_index = ~0;
  pae->next_applied_entry_index = ~0;
  pae->tail_applied_entry_index = ~0;
}

static void
deactivate_applied_ace_hash_entry(acl_main_t *am,
                                  u32 lc_index,
                                  applied_hash_ace_entry_t **applied_hash_aces,
                                  u32 old_index)
{
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
  DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);

  if (pae->prev_applied_entry_index != ~0) {
    DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
    applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
    ASSERT(prev_pae->next_applied_entry_index == old_index);
    prev_pae->next_applied_entry_index = pae->next_applied_entry_index;
    if (pae->next_applied_entry_index == ~0) {
      /* it was the last entry we removed, update the pointer in the first one */
      u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
      DBG("UNAPPLY = index %d head index to update %d", old_index, head_index);
      ASSERT(head_index != ~0);
      applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);

      ASSERT(head_pae->tail_applied_entry_index == old_index);
      head_pae->tail_applied_entry_index = pae->prev_applied_entry_index;
    } else {
      applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
      next_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
    }
  } else {
    /* It was the first entry. We need to either reset the hash entry or delete it */
    if (pae->next_applied_entry_index != ~0) {
      /* the next element becomes the new first one, so its tail pointer needs to be set */
      applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
      ASSERT(pae->tail_applied_entry_index != ~0);
      next_pae->tail_applied_entry_index = pae->tail_applied_entry_index;
      DBG("Resetting the hash table entry from %d to %d, setting tail index to %d", old_index, pae->next_applied_entry_index, pae->tail_applied_entry_index);
      /* unlink from the next element */
      next_pae->prev_applied_entry_index = ~0;
      add_del_hashtable_entry(am, lc_index,
                              applied_hash_aces, pae->next_applied_entry_index, 1);
    } else {
      /* no next entry, so just delete the entry in the hash table */
      add_del_hashtable_entry(am, lc_index,
                              applied_hash_aces, old_index, 0);
    }
  }
  /* invalidate the old entry */
  pae->prev_applied_entry_index = ~0;
  pae->next_applied_entry_index = ~0;
  pae->tail_applied_entry_index = ~0;
}


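/*
 * To summarize the unlinking above, for a chain A <-> B <-> C on one key:
 *   - deleting B (middle): A->next = C and C->prev = A;
 *   - deleting C (tail):   B->next = ~0 and the head A's
 *                          tail_applied_entry_index is rewound to B;
 *   - deleting A (head):   the bihash value is re-pointed at B, which
 *                          inherits the tail pointer; if A was the only
 *                          entry, the bihash entry is deleted instead.
 */
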
static void
hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 lc_index)
{
  int i;
  uword *new_lookup_bitmap = 0;

  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  vec_validate((*applied_hash_acls), lc_index);
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  for(i=0; i < vec_len(pal->applied_acls); i++) {
    u32 a_acl_index = *vec_elt_at_index((pal->applied_acls), i);
    hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, a_acl_index);
    DBG("Update bitmask = %U or %U (acl_index %d)\n", format_bitmap_hex, new_lookup_bitmap,
        format_bitmap_hex, ha->mask_type_index_bitmap, a_acl_index);
    new_lookup_bitmap = clib_bitmap_or(new_lookup_bitmap,
                                       ha->mask_type_index_bitmap);
  }
  uword *old_lookup_bitmap = pal->mask_type_index_bitmap;
  pal->mask_type_index_bitmap = new_lookup_bitmap;
  clib_bitmap_free(old_lookup_bitmap);
}

void
hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
{
  int i;

  DBG0("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  /* remove this acl# from the list of applied hash ACLs */
  u32 index = vec_search(pal->applied_acls, acl_index);
  if (index == ~0) {
    clib_warning("BUG: trying to unapply an unapplied acl_index %d on lc_index %d, according to lc",
                 acl_index, lc_index);
    return;
  }
  vec_del1(pal->applied_acls, index);

  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
  if (index2 == ~0) {
    clib_warning("BUG: trying to unapply acl_index %d twice on lc_index %d, according to h-acl info",
                 acl_index, lc_index);
    return;
  }
  vec_del1((*hash_acl_applied_lc_index), index2);

  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);

  for(i=0; i < vec_len((*applied_hash_aces)); i++) {
    if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) {
      DBG("Found applied ACL#%d at applied index %d", acl_index, i);
      break;
    }
  }
  if (vec_len((*applied_hash_aces)) <= i) {
    DBG("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
    /* we went all the way without finding any entries. Probably the list was empty. */
    return;
  }

  void *oldheap = hash_acl_set_heap(am);
  int base_offset = i;
  int tail_offset = base_offset + vec_len(ha->rules);
  int tail_len = vec_len((*applied_hash_aces)) - tail_offset;
  DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);

  for(i=0; i < vec_len(ha->rules); i ++) {
    deactivate_applied_ace_hash_entry(am, lc_index,
                                      applied_hash_aces, base_offset + i);
  }
  for(i=0; i < tail_len; i ++) {
    /* move the entry at tail offset to base offset */
    /* that is, from (tail_offset+i) -> (base_offset+i) */
    DBG("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
    move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
  }
  /* trim the end of the vector */
  _vec_len((*applied_hash_aces)) -= vec_len(ha->rules);

  applied_hash_entries_analyze(am, applied_hash_aces);

  /* After deletion we might not need some of the mask-types anymore... */
  hash_acl_build_applied_lookup_bitmap(am, lc_index);
  clib_mem_set_heap (oldheap);
}

/*
 * Create the applied ACEs and update the hash table,
 * taking into account that the ACL may not be the last
 * in the vector of applied ACLs.
 *
 * For now, walk from the end of the vector and unapply the ACLs,
 * then apply the one in question and reapply the rest.
 */

void
hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
{
  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
  u32 **applied_acls = &acontext->acl_indices;
  int i;
  int start_index = vec_search((*applied_acls), acl_index);

  DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
  /*
   * This function is called after we find out the lc_index where the ACL is applied.
   * If the by-lc_index vector does not have the ACL#, then it's a bug.
   */
  ASSERT(start_index < vec_len(*applied_acls));

  /* unapply all the ACLs at the tail side, up to the current one */
  for(i = vec_len(*applied_acls) - 1; i > start_index; i--) {
    hash_acl_unapply(am, lc_index, *vec_elt_at_index(*applied_acls, i));
  }
  for(i = start_index; i < vec_len(*applied_acls); i++) {
    hash_acl_apply(am, lc_index, *vec_elt_at_index(*applied_acls, i), i);
  }
}

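/*
 * For example, with acl_indices = [5, 7, 9] in this context, a reapply of
 * ACL 7 (start_index 1) first unapplies ACL 9, then applies ACLs 7 and 9
 * again in order, restoring their relative positions. Note that in the
 * acl_add_replace() flow the entries of ACL 7 itself have already been
 * removed by hash_acl_delete() before this routine runs.
 */
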
static void
make_address_mask(ip46_address_t *addr, u8 is_ipv6, u8 prefix_len)
{
  if (is_ipv6) {
    ip6_address_mask_from_width(&addr->ip6, prefix_len);
  } else {
    /* FIXME: this may not be the correct way */
    ip6_address_mask_from_width(&addr->ip6, prefix_len + 3*32);
    ip46_address_mask_ip4(addr);
  }
}

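/*
 * In the IPv4 case the address sits in the last 32 bits of the 128-bit
 * ip46_address_t, hence the 3*32-bit offset: e.g. a /24 prefix becomes a
 * mask of width 96+24 over the ip6 view of the union, after which
 * ip46_address_mask_ip4() clears the 96 pad bits so only the IPv4 part
 * of the mask remains set.
 */
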
static u8
make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
{
  if (port_first == port_last) {
    *portmask = 0xffff;
    /* single port is representable by masked value */
    return 0;
  }
  if ((port_first == 0) && (port_last == 65535)) {
    *portmask = 0;
    /* wildcard port is representable by a masked value */
    return 0;
  }

  /*
   * For now match all the ports, later
   * here might be a better optimization which would
   * pick out bitmaskable portranges.
   *
   * However, adding a new mask type potentially
   * adds a per-packet extra lookup, so the benefit is not clear.
   */
  *portmask = 0;
  /* This port range can't be represented via bitmask exactly. */
  return 1;
}

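/*
 * A few examples of what this yields:
 *   ports 80..80     -> mask 0xffff, exact-match on the port, returns 0;
 *   ports 0..65535   -> mask 0x0000, matches any port, returns 0;
 *   ports 1024..2047 -> mask 0x0000 (match-all) and returns 1, so the
 *                       datapath must do an explicit range check (the
 *                       need_portrange_check bit in the lookup value),
 *                       even though this particular range could in
 *                       principle be matched with mask 0xfc00.
 */
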
static void
make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi, int match_nonfirst_fragment)
{
  memset(mask, 0, sizeof(*mask));
  memset(&hi->match, 0, sizeof(hi->match));
  hi->action = r->is_permit;

  /* we will need to be matching based on lc_index and mask_type_index when applied */
  mask->pkt.lc_index = ~0;
  /* we will assign the match of mask_type_index later when we find it */
  mask->pkt.mask_type_index_lsb = ~0;

  mask->pkt.is_ip6 = 1;
  hi->match.pkt.is_ip6 = r->is_ipv6;

  make_address_mask(&mask->addr[0], r->is_ipv6, r->src_prefixlen);
  hi->match.addr[0] = r->src;
  make_address_mask(&mask->addr[1], r->is_ipv6, r->dst_prefixlen);
  hi->match.addr[1] = r->dst;

  if (r->proto != 0) {
    mask->l4.proto = ~0; /* L4 proto needs to be matched */
    hi->match.l4.proto = r->proto;
    if (match_nonfirst_fragment) {
      /* match the non-first fragments only */
      mask->pkt.is_nonfirst_fragment = 1;
      hi->match.pkt.is_nonfirst_fragment = 1;
    } else {
      /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
      hi->src_portrange_not_powerof2 = make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
      hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];
      hi->dst_portrange_not_powerof2 = make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
      hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
      /* L4 info must be valid in order to match */
      mask->pkt.l4_valid = 1;
      hi->match.pkt.l4_valid = 1;
      /* And we must set the mask to check that it is an initial fragment */
      mask->pkt.is_nonfirst_fragment = 1;
      hi->match.pkt.is_nonfirst_fragment = 0;
      if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) {
        /* if we want to match on TCP flags, they must be masked off as well */
        mask->pkt.tcp_flags = r->tcp_flags_mask;
        hi->match.pkt.tcp_flags = r->tcp_flags_value;
        /* and the flags need to be present within the packet being matched */
        mask->pkt.tcp_flags_valid = 1;
        hi->match.pkt.tcp_flags_valid = 1;
      }
    }
  }
  /* Sanitize the mask and the match */
  u64 *pmask = (u64 *)mask;
  u64 *pmatch = (u64 *)&hi->match;
  int j;
  for(j=0; j<6; j++) {
    pmatch[j] = pmatch[j] & pmask[j];
  }
}

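/*
 * Roughly, for a TCP rule "2001:db8::/32 -> any, dst port 80" this
 * produces:
 *   mask:  src addr /32, l4.proto 0xff, dst port 0xffff,
 *          l4_valid 1, is_nonfirst_fragment 1;
 *   match: src addr 2001:db8::, l4.proto 6, dst port 80,
 *          l4_valid 1, is_nonfirst_fragment 0
 * i.e. only initial fragments with valid L4 info can match. The final
 * sanitizing loop enforces the invariant match == (match & mask), which
 * the hash insertion in fill_applied_hash_ace_kv() relies upon.
 */
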
static u32
find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
{
  ace_mask_type_entry_t *mte;
  /* *INDENT-OFF* */
  pool_foreach(mte, am->ace_mask_type_pool,
  ({
    if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
      return (mte - am->ace_mask_type_pool);
  }));
  /* *INDENT-ON* */
  return ~0;
}

static u32
assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
{
  u32 mask_type_index = find_mask_type_index(am, mask);
  ace_mask_type_entry_t *mte;
  if(~0 == mask_type_index) {
    pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
    mask_type_index = mte - am->ace_mask_type_pool;
    clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
    mte->refcount = 0;
    /*
     * We can use only 16 bits, since in the match there is only a u16 field.
     * Realistically, once you get to 64K mask types, it is a huge
     * problem anyway, so we might as well stop halfway.
     */
    ASSERT(mask_type_index < 32768);
  }
  mte = am->ace_mask_type_pool + mask_type_index;
  mte->refcount++;
  return mask_type_index;
}

static void
release_mask_type_index(acl_main_t *am, u32 mask_type_index)
{
  ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
  mte->refcount--;
  if (mte->refcount == 0) {
    /* we are not using this entry anymore */
    pool_put(am->ace_mask_type_pool, mte);
  }
}

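/*
 * Mask types are thus reference-counted: every ACE sharing a mask bumps
 * the count via assign_mask_type_index(), and hash_acl_delete() releases
 * one reference per rule; once the count drops to zero the pool slot is
 * recycled and a later assign_mask_type_index() may reuse its index.
 */
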
int hash_acl_exists(acl_main_t *am, int acl_index)
{
  if (acl_index >= vec_len(am->hash_acl_infos))
    return 0;

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  return ha->hash_acl_exists;
}

void hash_acl_add(acl_main_t *am, int acl_index)
{
  void *oldheap = hash_acl_set_heap(am);
  DBG("HASH ACL add : %d", acl_index);
  int i;
  acl_list_t *a = &am->acls[acl_index];
  vec_validate(am->hash_acl_infos, acl_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  memset(ha, 0, sizeof(*ha));
  ha->hash_acl_exists = 1;

  /* walk the newly added ACL entries and ensure that for each of them there
     is a mask type, incrementing the reference count for that mask type */
  for(i=0; i < a->count; i++) {
    hash_ace_info_t ace_info;
    fa_5tuple_t mask;
    memset(&ace_info, 0, sizeof(ace_info));
    ace_info.acl_index = acl_index;
    ace_info.ace_index = i;

    make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info, 0);
    ace_info.mask_type_index = assign_mask_type_index(am, &mask);
    /* assign the mask type index for matching itself */
    ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index;
    DBG("ACE: %d mask_type_index: %d", i, ace_info.mask_type_index);
    /* Ensure the given index is set in the mask type index bitmap for this ACL */
    ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1);
    vec_add1(ha->rules, ace_info);
    if (am->l4_match_nonfirst_fragment) {
      /* add a second rule which matches the noninitial fragments with the respective mask */
      make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info, 1);
      ace_info.mask_type_index = assign_mask_type_index(am, &mask);
      ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index;
      DBG("ACE: %d (non-initial frags) mask_type_index: %d", i, ace_info.mask_type_index);
      /* Ensure the given index is set in the mask type index bitmap for this ACL */
      ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1);
      vec_add1(ha->rules, ace_info);
    }
  }
  /*
   * If an ACL is applied somewhere, fill the corresponding lookup data structures.
   * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
   */
  if (acl_index < vec_len(am->lc_index_vec_by_acl)) {
    u32 *lc_index;
    vec_foreach(lc_index, am->lc_index_vec_by_acl[acl_index]) {
      hash_acl_reapply(am, *lc_index, acl_index);
    }
  }
  clib_mem_set_heap (oldheap);
}

void hash_acl_delete(acl_main_t *am, int acl_index)
{
  void *oldheap = hash_acl_set_heap(am);
  DBG0("HASH ACL delete : %d", acl_index);
  /*
   * If the ACL is applied somewhere, remove the references to it (call hash_acl_unapply).
   * This is a different behavior from the linear lookup, where an empty ACL is "deny all".
   *
   * However, following the vpp-dev discussion, an ACL that is referenced elsewhere
   * should not be possible to delete, and the change adding this also adds
   * the safeguards to that effect, so this is not a problem.
   *
   * The part to remember is that this routine is called in the process of reapplication
   * during the acl_add_replace() API call - the old ACL ruleset is deleted, then
   * the new one is added, without a change in the applied ACLs - so this case
   * has to be handled.
   */
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 *lc_list_copy = 0;
  {
    u32 *lc_index;
    lc_list_copy = vec_dup(ha->lc_index_list);
    vec_foreach(lc_index, lc_list_copy) {
      hash_acl_unapply(am, *lc_index, acl_index);
    }
    vec_free(lc_list_copy);
  }

  /* walk the mask types for the ACL about-to-be-deleted, and decrease
   * the reference count, possibly freeing up some of them */
  int i;
  for(i=0; i < vec_len(ha->rules); i++) {
    release_mask_type_index(am, ha->rules[i].mask_type_index);
  }
  clib_bitmap_free(ha->mask_type_index_bitmap);
  ha->hash_acl_exists = 0;
  vec_free(ha->rules);
  clib_mem_set_heap (oldheap);
}


void
show_hash_acl_hash (vlib_main_t * vm, acl_main_t * am, u32 verbose)
{
  vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
                  BV (format_bihash), &am->acl_lookup_hash, verbose);
}

void
acl_plugin_show_tables_mask_type (void)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  ace_mask_type_entry_t *mte;

  vlib_cli_output (vm, "Mask-type entries:");
  /* *INDENT-OFF* */
  pool_foreach(mte, am->ace_mask_type_pool,
  ({
    vlib_cli_output(vm, "     %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
                    mte - am->ace_mask_type_pool,
                    mte->mask.kv.key[0], mte->mask.kv.key[1], mte->mask.kv.key[2],
                    mte->mask.kv.key[3], mte->mask.kv.key[4], mte->mask.kv.value, mte->refcount);
  }));
  /* *INDENT-ON* */
}

void
acl_plugin_show_tables_acl_hash_info (u32 acl_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 i, j;
  u64 *m;
  vlib_cli_output (vm, "Mask-ready ACL representations\n");
  for (i = 0; i < vec_len (am->hash_acl_infos); i++)
    {
      if ((acl_index != ~0) && (acl_index != i))
        {
          continue;
        }
      hash_acl_info_t *ha = &am->hash_acl_infos[i];
      vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
      vlib_cli_output (vm, "  applied lc_index list: %U\n",
                       format_vec32, ha->lc_index_list, "%d");
      vlib_cli_output (vm, "  mask type index bitmap: %U\n",
                       format_bitmap_hex, ha->mask_type_index_bitmap);
      for (j = 0; j < vec_len (ha->rules); j++)
        {
          hash_ace_info_t *pa = &ha->rules[j];
          m = (u64 *) & pa->match;
          vlib_cli_output (vm,
                           "    %4d: %016llx %016llx %016llx %016llx %016llx %016llx mask index %d acl %d rule %d action %d src/dst portrange not ^2: %d,%d\n",
                           j, m[0], m[1], m[2], m[3], m[4], m[5],
                           pa->mask_type_index, pa->acl_index, pa->ace_index,
                           pa->action, pa->src_portrange_not_powerof2,
                           pa->dst_portrange_not_powerof2);
        }
    }
}

void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
  vlib_cli_output (vm,
                   "    %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld",
                   j, pae->acl_index, pae->ace_index, pae->action,
                   pae->hash_ace_info_index, pae->next_applied_entry_index,
                   pae->prev_applied_entry_index,
                   pae->tail_applied_entry_index, pae->hitcount);
}

void
acl_plugin_show_tables_applied_info (u32 lc_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 lci, j;
  vlib_cli_output (vm, "Applied lookup entries for lookup contexts");

  for (lci = 0;
       (lci < vec_len(am->applied_hash_acl_info_by_lc_index)); lci++)
    {
      if ((lc_index != ~0) && (lc_index != lci))
        {
          continue;
        }
      vlib_cli_output (vm, "lc_index %d:", lci);
      if (lci < vec_len (am->applied_hash_acl_info_by_lc_index))
        {
          applied_hash_acl_info_t *pal =
            &am->applied_hash_acl_info_by_lc_index[lci];
          vlib_cli_output (vm, "  lookup mask_type_index_bitmap: %U",
                           format_bitmap_hex, pal->mask_type_index_bitmap);
          vlib_cli_output (vm, "  applied acls: %U", format_vec32,
                           pal->applied_acls, "%d");
        }
      if (lci < vec_len (am->hash_entry_vec_by_lc_index))
        {
          vlib_cli_output (vm, "  lookup applied entries:");
          for (j = 0;
               j < vec_len (am->hash_entry_vec_by_lc_index[lci]);
               j++)
            {
              acl_plugin_print_pae (vm, j,
                                    &am->hash_entry_vec_by_lc_index
                                    [lci][j]);
            }
        }
    }
}

void
acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  show_hash_acl_hash (vm, am, show_bihash_verbose);
}