FD.io VPP  v18.11-rc0-18-g2a3fb1a
Vector Packet Processing
hash_lookup.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2017 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <stddef.h>
19 #include <netinet/in.h>
20 
21 #include <vlibapi/api.h>
22 #include <vlibmemory/api.h>
23 
24 #include <vlib/vlib.h>
25 #include <vnet/vnet.h>
26 #include <vnet/pg/pg.h>
27 #include <vppinfra/error.h>
28 #include <vnet/plugin/plugin.h>
29 #include <acl/acl.h>
30 #include <vppinfra/bihash_48_8.h>
31 
32 #include "hash_lookup.h"
33 #include "hash_lookup_private.h"
34 
35 
36 static applied_hash_ace_entry_t **get_applied_hash_aces(acl_main_t *am, u32 lc_index)
37 {
38  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
39 
40 /*is_input ? vec_elt_at_index(am->input_hash_entry_vec_by_sw_if_index, sw_if_index)
41  : vec_elt_at_index(am->output_hash_entry_vec_by_sw_if_index, sw_if_index);
42 */
43  return applied_hash_aces;
44 }
45 
46 
47 static void
48 hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
49 {
50  DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d",
51  kv->key[0], kv->key[1], kv->key[2],
52  kv->key[3], kv->key[4], kv->key[5], kv->value, is_add);
53  BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
54 }
55 
56 /*
57  * TupleMerge
58  *
59  * Initial adaptation by Valerio Bruschi (valerio.bruschi@telecom-paristech.fr)
60  * based on the TupleMerge [1] simulator kindly made available
61  * by James Daly (dalyjamese@gmail.com) and Eric Torng (torng@cse.msu.edu)
62  * ( http://www.cse.msu.edu/~dalyjame/ or http://www.cse.msu.edu/~torng/ ),
63  * refactoring by Andrew Yourtchenko.
64  *
65  * [1] James Daly, Eric Torng "TupleMerge: Building Online Packet Classifiers
66  * by Omitting Bits", In Proc. IEEE ICCCN 2017, pp. 1-10
67  *
68  */
69 
70 static int
71 count_bits(u64 word)
72 {
73  int counter = 0;
74  while (word)
75  {
76  counter += word & 1;
77  word >>= 1;
78  }
79  return counter;
80 }
81 
82 /* check if mask2 can be contained by mask1 */
83 static u8
84 first_mask_contains_second_mask(int is_ip6, fa_5tuple_t *mask1, fa_5tuple_t *mask2)
85 {
86  int i;
87  if (is_ip6)
88  {
89  for (i = 0; i < 2; i++)
90  {
91  if ((mask1->ip6_addr[0].as_u64[i] & mask2->ip6_addr[0].as_u64[i]) !=
92  mask1->ip6_addr[0].as_u64[i])
93  return 0;
94  if ((mask1->ip6_addr[1].as_u64[i] & mask2->ip6_addr[1].as_u64[i]) !=
95  mask1->ip6_addr[1].as_u64[i])
96  return 0;
97  }
98  }
99  else
100  {
101  /* check the pads, both masks must have it 0 */
102  u32 padcheck = 0;
103  int i;
104  for (i=0; i<6; i++) {
105  padcheck |= mask1->l3_zero_pad[i];
106  padcheck |= mask2->l3_zero_pad[i];
107  }
108  if (padcheck != 0)
109  return 0;
110  if ((mask1->ip4_addr[0].as_u32 & mask2->ip4_addr[0].as_u32) !=
111  mask1->ip4_addr[0].as_u32)
112  return 0;
113  if ((mask1->ip4_addr[1].as_u32 & mask2->ip4_addr[1].as_u32) !=
114  mask1->ip4_addr[1].as_u32)
115  return 0;
116  }
117 
118  /* take care if ports are not exact-match */
119  if ((mask1->l4.as_u64 & mask2->l4.as_u64) != mask1->l4.as_u64)
120  return 0;
121 
122  if ((mask1->pkt.as_u64 & mask2->pkt.as_u64) != mask1->pkt.as_u64)
123  return 0;
124 
125  return 1;
126 }
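/*
 * Illustrative example (editor's note, not part of the original source):
 * an IPv4 source mask of /16 (0xffff0000) "contains" a /24 mask
 * (0xffffff00), because every bit set in the /16 is also set in the /24,
 * so the (mask1 & mask2) == mask1 check above passes; the L4 and pkt
 * fields are checked the same way. The reverse check fails, so a /24
 * table cannot accept a /16 rule.
 */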
127 
128 
129 
130 /*
131  * TupleMerge:
132  *
133  * Consider the situation when we have to create a new table
134  * T for a given rule R. This occurs for the first rule inserted and
135  * for later rules if it is incompatible with all existing tables.
136  * In this event, we need to determine mT for a new table.
137  * Setting mT = mR is not a good strategy; if another similar,
138  * but slightly less specific, rule appears we will be unable to
139  * add it to T and will thus have to create another new table. We
140  * thus consider two factors: is the rule more strongly aligned
141  * with source or destination addresses (usually the two most
142  * important fields) and how much slack needs to be given to
143  * allow for other rules. If the source and destination addresses
144  * are close together (within 4 bits for our experiments), we use
145  * both of them. Otherwise, we drop the smaller (less specific)
146  * address and its associated port field from consideration; R is
147  * predominantly aligned with one of the two fields and should
148  * be grouped with other similar rules. This is similar to TSS
149  * dropping port fields, but since it is based on observable rule
150  * characteristics it is more likely to keep important fields and
151  * discard less useful ones.
152  * We then look at the absolute lengths of the addresses. If
153  * the address is long, we are more likely to try to add shorter
154  * lengths and likewise the reverse. We thus remove a few bits
155  * from both address fields with more bits removed from longer
156  * addresses. For 32 bit addresses, we remove 4 bits, 3 for more
157  * than 24, 2 for more than 16, and so on (so 8 and fewer bits
158  * don’t have any removed). We only do this for prefix fields like
159  * addresses; both range fields (like ports) and exact match fields
160  * (like protocol) should remain as they are.
161  */
162 
163 
164 static u32
165 shift_ip4_if(u32 mask, u32 thresh, int numshifts, u32 else_val)
166 {
167  if (mask > thresh)
168  return clib_host_to_net_u32((clib_net_to_host_u32(mask) << numshifts) & 0xFFFFFFFF);
169  else
170  return else_val;
171 }
172 
173 static void
174 relax_ip4_addr(ip4_address_t *ip4_mask, int relax2) {
175  int shifts_per_relax[2][4] = { { 6, 5, 4, 2 }, { 3, 2, 1, 1 } };
176 
177  int *shifts = shifts_per_relax[relax2];
178  if(ip4_mask->as_u32 == 0xffffffff)
179  ip4_mask->as_u32 = clib_host_to_net_u32((clib_net_to_host_u32(ip4_mask->as_u32) << shifts[0])&0xFFFFFFFF);
180  else
181  ip4_mask->as_u32 = shift_ip4_if(ip4_mask->as_u32, 0xffffff00, shifts[1],
182  shift_ip4_if(ip4_mask->as_u32, 0xffff0000, shifts[2],
183  shift_ip4_if(ip4_mask->as_u32, 0xff000000, shifts[3], ip4_mask->as_u32)));
184 }
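/*
 * Worked example for relax_ip4_addr() above (illustrative, not part of the
 * original source), assuming relax2 == 0 so shifts_per_relax[0] = {6,5,4,2}:
 * a fully specific /32 mask (0xffffffff) is shifted left by 6 bits and
 * becomes 0xffffffc0, i.e. a /26, so up to 64 neighbouring hosts can share
 * the same hash table. Less specific masks go through the shift_ip4_if()
 * cascade, which compares the raw (network byte order) mask value against
 * the 0xffffff00 / 0xffff0000 / 0xff000000 thresholds before shifting.
 */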
185 
186 static void
187 relax_ip6_addr(ip6_address_t *ip6_mask, int relax2) {
188  /*
189  * This "better than nothing" relax logic is based on heuristics
190  * from IPv6 knowledge, and may not be optimal.
191  * Some further tuning may be needed in the future.
192  */
193  if (ip6_mask->as_u64[0] == 0xffffffffffffffffULL) {
194  if (ip6_mask->as_u64[1] == 0xffffffffffffffffULL) {
195  /* relax a /128 down to /64 - likely to have more hosts */
196  ip6_mask->as_u64[1] = 0;
197  } else if (ip6_mask->as_u64[1] == 0) {
198  /* relax a /64 down to /56 - likely to have more subnets */
199  ip6_mask->as_u64[0] = clib_host_to_net_u64(0xffffffffffffff00ULL);
200  }
201  }
202 }
203 
204 static void
205 relax_tuple(fa_5tuple_t *mask, int is_ip6, int relax2){
206  fa_5tuple_t save_mask = *mask;
207 
208  int counter_s = 0, counter_d = 0;
209  if (is_ip6) {
210  int i;
211  for(i=0; i<2; i++){
212  counter_s += count_bits(mask->ip6_addr[0].as_u64[i]);
213  counter_d += count_bits(mask->ip6_addr[1].as_u64[i]);
214  }
215  } else {
216  counter_s += count_bits(mask->ip4_addr[0].as_u32);
217  counter_d += count_bits(mask->ip4_addr[1].as_u32);
218  }
219 
220 /*
221  * is the rule more strongly aligned with source or destination addresses
222  * (usually the two most important fields) and how much slack needs to be
223  * given to allow for other rules. If the source and destination addresses
224  * are close together (within 4 bits for our experiments), we use both of them.
225  * Otherwise, we drop the smaller (less specific) address and its associated
226  * port field from consideration
227  */
228  const int deltaThreshold = 4;
229  /* const int deltaThreshold = 8; if IPV6? */
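 /*
  * Illustrative example (editor's note, not from the original source): an
  * IPv4 rule with a /32 source and a /24 destination gives counter_s = 32,
  * counter_d = 24, so delta = 8 > deltaThreshold and the branch below
  * wildcards the destination address and destination port, keeping only
  * the more specific source side in the relaxed mask.
  */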
230  int delta = counter_s - counter_d;
231  if (-delta > deltaThreshold) {
232  if (is_ip6)
233  mask->ip6_addr[0].as_u64[1] = mask->ip6_addr[0].as_u64[0] = 0;
234  else
235  mask->ip4_addr[0].as_u32 = 0;
236  mask->l4.port[0] = 0;
237  } else if (delta > deltaThreshold) {
238  if (is_ip6)
239  mask->ip6_addr[1].as_u64[1] = mask->ip6_addr[1].as_u64[0] = 0;
240  else
241  mask->ip4_addr[1].as_u32 = 0;
242  mask->l4.port[1] = 0;
243  }
244 
245  if (is_ip6) {
246  relax_ip6_addr(&mask->ip6_addr[0], relax2);
247  relax_ip6_addr(&mask->ip6_addr[1], relax2);
248  } else {
249  relax_ip4_addr(&mask->ip4_addr[0], relax2);
250  relax_ip4_addr(&mask->ip4_addr[1], relax2);
251  }
252  mask->pkt.is_nonfirst_fragment = 0;
253  mask->pkt.l4_valid = 0;
254  if(!first_mask_contains_second_mask(is_ip6, mask, &save_mask)){
255  DBG( "TM-relaxing-ERROR");
256  *mask = save_mask;
257  }
258  DBG( "TM-relaxing-end");
259 }
260 
261 
262 static u32
263 tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
264 {
265  u32 mask_type_index = ~0;
266  u32 for_mask_type_index = ~0;
267  ace_mask_type_entry_t *mte;
268  int order_index;
269  /* look for existing mask comparable with the one in input */
270 
271  hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
272  hash_applied_mask_info_t *minfo;
273 
274  if (vec_len(*hash_applied_mask_info_vec) > 0) {
275  for(order_index = vec_len((*hash_applied_mask_info_vec)) -1; order_index >= 0; order_index--) {
276  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), order_index);
277  for_mask_type_index = minfo->mask_type_index;
278  mte = vec_elt_at_index(am->ace_mask_type_pool, for_mask_type_index);
279  if(first_mask_contains_second_mask(is_ip6, &mte->mask, mask)){
280  mask_type_index = (mte - am->ace_mask_type_pool);
281  break;
282  }
283  }
284  }
285 
286  if(~0 == mask_type_index) {
287  /* if no mask is found, then let's use a relaxed version of the original one, in order to be used by new ace_entries */
288  DBG( "TM-assigning mask type index-new one");
289  pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
290  mask_type_index = mte - am->ace_mask_type_pool;
291 
292  hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
293 
294  int spot = vec_len((*hash_applied_mask_info_vec));
295  vec_validate((*hash_applied_mask_info_vec), spot);
296  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), spot);
297  minfo->mask_type_index = mask_type_index;
298  minfo->num_entries = 0;
299  minfo->max_collisions = 0;
300  minfo->first_rule_index = ~0;
301 
302  clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
303  relax_tuple(&mte->mask, is_ip6, 0);
304 
305  mte->refcount = 0;
306  /*
307  * We can use only 16 bits, since in the match there is only u16 field.
308  * Realistically, once you go to 64K of mask types, it is a huge
309  * problem anyway, so we might as well stop half way.
310  */
311  ASSERT(mask_type_index < 32768);
312  }
313  mte = am->ace_mask_type_pool + mask_type_index;
314  mte->refcount++;
315  return mask_type_index;
316 }
317 
318 
319 static void
320 fill_applied_hash_ace_kv(acl_main_t *am,
321  applied_hash_ace_entry_t **applied_hash_aces,
322  u32 lc_index,
323  u32 new_index, clib_bihash_kv_48_8_t *kv)
324 {
325  fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
326  hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value;
327  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
328  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
329 
330  /* apply the mask to ace key */
331  hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
332  ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, pae->mask_type_index);
333 
334  u64 *pmatch = (u64 *) &ace_info->match;
335  u64 *pmask = (u64 *)&mte->mask;
336  u64 *pkey = (u64 *)kv->key;
337 
338  *pkey++ = *pmatch++ & *pmask++;
339  *pkey++ = *pmatch++ & *pmask++;
340  *pkey++ = *pmatch++ & *pmask++;
341  *pkey++ = *pmatch++ & *pmask++;
342  *pkey++ = *pmatch++ & *pmask++;
343  *pkey++ = *pmatch++ & *pmask++;
344 
345  kv_key->pkt.mask_type_index_lsb = pae->mask_type_index;
346  kv_key->pkt.lc_index = lc_index;
347  kv_val->as_u64 = 0;
348  kv_val->applied_entry_index = new_index;
349 }
350 
351 static void
352 add_del_hashtable_entry(acl_main_t *am,
353  u32 lc_index,
354  applied_hash_ace_entry_t **applied_hash_aces,
355  u32 index, int is_add)
356 {
357  clib_bihash_kv_48_8_t kv;
358 
359  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, index, &kv);
360  hashtable_add_del(am, &kv, is_add);
361 }
362 
363 
364 static u32
365 find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
366 {
367  ace_mask_type_entry_t *mte;
368  /* *INDENT-OFF* */
369  pool_foreach(mte, am->ace_mask_type_pool,
370  ({
371  if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
372  return (mte - am->ace_mask_type_pool);
373  }));
374  /* *INDENT-ON* */
375  return ~0;
376 }
377 
378 static u32
379 assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
380 {
381  u32 mask_type_index = find_mask_type_index(am, mask);
382  ace_mask_type_entry_t *mte;
383  if(~0 == mask_type_index) {
384  pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
385  mask_type_index = mte - am->ace_mask_type_pool;
386  clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
387  mte->refcount = 0;
388  /*
389  * We can use only 16 bits, since in the match there is only u16 field.
390  * Realistically, once you go to 64K of mask types, it is a huge
391  * problem anyway, so we might as well stop half way.
392  */
393  ASSERT(mask_type_index < 32768);
394  }
395  mte = am->ace_mask_type_pool + mask_type_index;
396  mte->refcount++;
397  return mask_type_index;
398 }
399 
400 static void
401 release_mask_type_index(acl_main_t *am, u32 mask_type_index)
402 {
403  ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
404  mte->refcount--;
405  if (mte->refcount == 0) {
406  /* we are not using this entry anymore */
407  pool_put(am->ace_mask_type_pool, mte);
408  }
409 }
410 
411 static void
412 remake_hash_applied_mask_info_vec (acl_main_t * am,
413  applied_hash_ace_entry_t **
414  applied_hash_aces, u32 lc_index)
415 {
416  hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
417  vec_new (hash_applied_mask_info_t, 0);
418 
419  hash_applied_mask_info_t *minfo;
420  int i;
421  for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
422  {
423  applied_hash_ace_entry_t *pae =
424  vec_elt_at_index ((*applied_hash_aces), i);
425 
426  /* check if mask_type_index is already there */
427  u32 new_pointer = vec_len (new_hash_applied_mask_info_vec);
428  int search;
429  for (search = 0; search < vec_len (new_hash_applied_mask_info_vec);
430  search++)
431  {
432  minfo = vec_elt_at_index (new_hash_applied_mask_info_vec, search);
433  if (minfo->mask_type_index == pae->mask_type_index)
434  break;
435  }
436 
437  vec_validate ((new_hash_applied_mask_info_vec), search);
438  minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
439  if (search == new_pointer)
440  {
441  minfo->mask_type_index = pae->mask_type_index;
442  minfo->num_entries = 0;
443  minfo->max_collisions = 0;
444  minfo->first_rule_index = ~0;
445  }
446 
447  minfo->num_entries = minfo->num_entries + 1;
448 
449  if (vec_len (pae->colliding_rules) > minfo->max_collisions)
450  minfo->max_collisions = vec_len (pae->colliding_rules);
451 
452  if (minfo->first_rule_index > i)
453  minfo->first_rule_index = i;
454  }
455 
456  hash_applied_mask_info_t **hash_applied_mask_info_vec =
457  vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);
458 
459  vec_free ((*hash_applied_mask_info_vec));
460  (*hash_applied_mask_info_vec) = new_hash_applied_mask_info_vec;
461 }
462 
463 static void
464 vec_del_collision_rule (collision_match_rule_t ** pvec,
465  u32 applied_entry_index)
466 {
467  u32 i;
468  for (i = 0; i < vec_len ((*pvec)); i++)
469  {
470  collision_match_rule_t *cr = vec_elt_at_index ((*pvec), i);
471  if (cr->applied_entry_index == applied_entry_index)
472  {
473  vec_del1 ((*pvec), i);
474  }
475  }
476 }
477 
478 static void
479 del_colliding_rule (applied_hash_ace_entry_t ** applied_hash_aces,
480  u32 head_index, u32 applied_entry_index)
481 {
482  applied_hash_ace_entry_t *head_pae =
483  vec_elt_at_index ((*applied_hash_aces), head_index);
484  vec_del_collision_rule (&head_pae->colliding_rules, applied_entry_index);
485 }
486 
487 static void
488 add_colliding_rule (acl_main_t * am,
489  applied_hash_ace_entry_t ** applied_hash_aces,
490  u32 head_index, u32 applied_entry_index)
491 {
492  applied_hash_ace_entry_t *head_pae =
493  vec_elt_at_index ((*applied_hash_aces), head_index);
494  applied_hash_ace_entry_t *pae =
495  vec_elt_at_index ((*applied_hash_aces), applied_entry_index);
496 
497  collision_match_rule_t cr;
498 
499  cr.acl_index = pae->acl_index;
500  cr.ace_index = pae->ace_index;
501  cr.acl_position = pae->acl_position;
502  cr.applied_entry_index = applied_entry_index;
503  cr.rule = am->acls[pae->acl_index].rules[pae->ace_index];
504  vec_add1 (head_pae->colliding_rules, cr);
505 }
506 
507 static u32
508 activate_applied_ace_hash_entry(acl_main_t *am,
509  u32 lc_index,
510  applied_hash_ace_entry_t **applied_hash_aces,
511  u32 new_index)
512 {
513  clib_bihash_kv_48_8_t kv;
514  ASSERT(new_index != ~0);
515  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
516  DBG("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);
517 
518  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, new_index, &kv);
519 
520  DBG("APPLY ADD KY: %016llx %016llx %016llx %016llx %016llx %016llx",
521  kv.key[0], kv.key[1], kv.key[2],
522  kv.key[3], kv.key[4], kv.key[5]);
523 
524  clib_bihash_kv_48_8_t result;
525  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
526  int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
527  ASSERT(new_index != ~0);
528  ASSERT(new_index < vec_len((*applied_hash_aces)));
529  if (res == 0) {
530  /* There already exists an entry or more. Append at the end. */
531  u32 first_index = result_val->applied_entry_index;
532  ASSERT(first_index != ~0);
533  DBG("A key already exists, with applied entry index: %d", first_index);
534  applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
535  u32 last_index = first_pae->tail_applied_entry_index;
536  ASSERT(last_index != ~0);
537  applied_hash_ace_entry_t *last_pae = vec_elt_at_index((*applied_hash_aces), last_index);
538  DBG("...advance to chained entry index: %d", last_index);
539  /* link ourselves in */
540  last_pae->next_applied_entry_index = new_index;
541  pae->prev_applied_entry_index = last_index;
542  /* adjust the pointer to the new tail */
543  first_pae->tail_applied_entry_index = new_index;
544  add_colliding_rule(am, applied_hash_aces, first_index, new_index);
545  return first_index;
546  } else {
547  /* It's the very first entry */
548  hashtable_add_del(am, &kv, 1);
549  ASSERT(new_index != ~0);
550  pae->tail_applied_entry_index = new_index;
551  add_colliding_rule(am, applied_hash_aces, new_index, new_index);
552  return new_index;
553  }
554 }
555 
556 
557 static void *
558 hash_acl_set_heap(acl_main_t *am)
559 {
560  if (0 == am->hash_lookup_mheap) {
561  am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , am->hash_lookup_mheap_size);
562  if (0 == am->hash_lookup_mheap) {
563  clib_error("ACL plugin failed to allocate hash lookup heap of %U bytes, abort", format_memory_size, am->hash_lookup_mheap_size);
564  }
565  mheap_t *h = mheap_header (am->hash_lookup_mheap);
566  h->flags |= MHEAP_FLAG_THREAD_SAFE;
567  }
568  void *oldheap = clib_mem_set_heap(am->hash_lookup_mheap);
569  return oldheap;
570 }
571 
572 void
573 acl_plugin_hash_acl_set_validate_heap(int on)
574 {
575  acl_main_t *am = &acl_main;
576  clib_mem_set_heap(hash_acl_set_heap(am));
577  mheap_t *h = mheap_header (am->hash_lookup_mheap);
578  if (on) {
579  h->flags |= MHEAP_FLAG_VALIDATE;
580  h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE;
581  mheap_validate(h);
582  } else {
583  h->flags &= ~MHEAP_FLAG_VALIDATE;
584  h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
585  }
586 }
587 
588 void
589 acl_plugin_hash_acl_set_trace_heap(int on)
590 {
591  acl_main_t *am = &acl_main;
592  clib_mem_set_heap(hash_acl_set_heap(am));
593  mheap_t *h = mheap_header (am->hash_lookup_mheap);
594  if (on) {
595  h->flags |= MHEAP_FLAG_TRACE;
596  } else {
597  h->flags &= ~MHEAP_FLAG_TRACE;
598  }
599 }
600 
601 static void
602 assign_mask_type_index_to_pae(acl_main_t *am, u32 lc_index, int is_ip6, applied_hash_ace_entry_t *pae)
603 {
604  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
605  hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
606 
607  ace_mask_type_entry_t *mte;
608  fa_5tuple_t *mask;
609  /*
610  * Start taking base_mask associated to ace, and essentially copy it.
611  * With TupleMerge we will assign a relaxed mask here.
612  */
613  mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
614  mask = &mte->mask;
615  if (am->use_tuple_merge)
616  pae->mask_type_index = tm_assign_mask_type_index(am, mask, is_ip6, lc_index);
617  else
618  pae->mask_type_index = assign_mask_type_index(am, mask);
619 }
620 
621 static void
622 split_partition(acl_main_t *am, u32 first_index,
623  u32 lc_index, int is_ip6);
624 
625 
626 static void
627 check_collision_count_and_maybe_split(acl_main_t *am, u32 lc_index, int is_ip6, u32 first_index)
628 {
629  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
630  applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
631  if (vec_len(first_pae->colliding_rules) > am->tuple_merge_split_threshold) {
632  split_partition(am, first_index, lc_index, is_ip6);
633  }
634 }
635 
636 void
637 hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
638 {
639  int i;
640 
641  DBG0("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
642  if (!am->acl_lookup_hash_initialized) {
643  BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
644  am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
645  am->acl_lookup_hash_initialized = 1;
646  }
647 
648  void *oldheap = hash_acl_set_heap(am);
650  vec_validate(am->hash_acl_infos, acl_index);
651  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
652 
653  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
654  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
655 
656  int base_offset = vec_len(*applied_hash_aces);
657 
658  /* Update the bitmap of the mask types with which the lookup
659  needs to happen for the ACLs applied to this lc_index */
660  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
661  vec_validate((*applied_hash_acls), lc_index);
662  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
663 
664  /* ensure the list of applied hash acls is initialized and add this acl# to it */
665  u32 index = vec_search(pal->applied_acls, acl_index);
666  if (index != ~0) {
667  clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to lc",
668  acl_index, lc_index);
669  goto done;
670  }
671  vec_add1(pal->applied_acls, acl_index);
672  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
673  if (index2 != ~0) {
674  clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to hash h-acl info",
675  acl_index, lc_index);
676  goto done;
677  }
678  vec_add1((*hash_acl_applied_lc_index), lc_index);
679 
680  /*
681  * if the applied ACL is empty, the current code will cause a
682  * different behavior compared to current linear search: an empty ACL will
683  * simply fallthrough to the next ACL, or the default deny in the end.
684  *
685  * This is not a problem, because after vpp-dev discussion,
686  * the consensus was it should not be possible to apply the non-existent
687  * ACL, so the change adding this code also takes care of that.
688  */
689 
690  /* expand the applied aces vector by the necessary amount */
691  vec_resize((*applied_hash_aces), vec_len(ha->rules));
692 
694  /* add the rules from the ACL to the hash table for lookup and append to the vector*/
695  for(i=0; i < vec_len(ha->rules); i++) {
696  int is_ip6 = ha->rules[i].match.pkt.is_ip6;
697  u32 new_index = base_offset + i;
698  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
699  pae->acl_index = acl_index;
700  pae->ace_index = ha->rules[i].ace_index;
701  pae->acl_position = acl_position;
702  pae->action = ha->rules[i].action;
703  pae->hitcount = 0;
704  pae->hash_ace_info_index = i;
705  /* we might link it in later */
706  pae->next_applied_entry_index = ~0;
707  pae->prev_applied_entry_index = ~0;
708  pae->tail_applied_entry_index = ~0;
709  pae->colliding_rules = NULL;
710  pae->mask_type_index = ~0;
711  assign_mask_type_index_to_pae(am, lc_index, is_ip6, pae);
712  u32 first_index = activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
713  if (am->use_tuple_merge)
714  check_collision_count_and_maybe_split(am, lc_index, is_ip6, first_index);
715  }
716  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
717 done:
718  clib_mem_set_heap (oldheap);
719 }
720 
721 static u32
722 find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces, u32 curr_index)
723 {
724  /*
725  * find back the first entry. Inefficient so might need to be a bit cleverer
726  * if this proves to be a problem..
727  */
728  u32 an_index = curr_index;
729  ASSERT(an_index != ~0);
730  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
731  while(head_pae->prev_applied_entry_index != ~0) {
732  an_index = head_pae->prev_applied_entry_index;
733  ASSERT(an_index != ~0);
734  head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
735  }
736  return an_index;
737 }
738 
739 static void
740 move_applied_ace_hash_entry(acl_main_t *am,
741  u32 lc_index,
742  applied_hash_ace_entry_t **applied_hash_aces,
743  u32 old_index, u32 new_index)
744 {
745  ASSERT(old_index != ~0);
746  ASSERT(new_index != ~0);
747  /* move the entry */
748  *vec_elt_at_index((*applied_hash_aces), new_index) = *vec_elt_at_index((*applied_hash_aces), old_index);
749 
750  /* update the linkage and hash table if necessary */
751  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
752 
753  if (pae->prev_applied_entry_index != ~0) {
754  applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
755  ASSERT(prev_pae->next_applied_entry_index == old_index);
756  prev_pae->next_applied_entry_index = new_index;
757  } else {
758  /* first entry - so the hash points to it, update */
759  add_del_hashtable_entry(am, lc_index,
760  applied_hash_aces, new_index, 1);
761  ASSERT(pae->tail_applied_entry_index != ~0);
762  }
763  if (pae->next_applied_entry_index != ~0) {
764  applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
765  ASSERT(next_pae->prev_applied_entry_index == old_index);
766  next_pae->prev_applied_entry_index = new_index;
767  } else {
768  /*
769  * Moving the very last entry, so we need to update the tail pointer in the first one.
770  */
771  u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
772  ASSERT(head_index != ~0);
773  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
774 
775  ASSERT(head_pae->tail_applied_entry_index == old_index);
776  head_pae->tail_applied_entry_index = new_index;
777  }
778  /* invalidate the old entry */
779  pae->prev_applied_entry_index = ~0;
780  pae->next_applied_entry_index = ~0;
781  pae->tail_applied_entry_index = ~0;
782 }
783 
784 static void
785 deactivate_applied_ace_hash_entry(acl_main_t *am,
786  u32 lc_index,
787  applied_hash_ace_entry_t **applied_hash_aces,
788  u32 old_index)
789 {
790  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
791  DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);
792 
793  if (pae->prev_applied_entry_index != ~0) {
794  DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
795  applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
796  ASSERT(prev_pae->next_applied_entry_index == old_index);
797  prev_pae->next_applied_entry_index = pae->next_applied_entry_index;
798 
799  u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
800  ASSERT(head_index != ~0);
801  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
802  del_colliding_rule(applied_hash_aces, head_index, old_index);
803 
804  if (pae->next_applied_entry_index == ~0) {
805  /* it was a last entry we removed, update the pointer on the first one */
806  ASSERT(head_pae->tail_applied_entry_index == old_index);
807  head_pae->tail_applied_entry_index = pae->prev_applied_entry_index;
808  } else {
809  applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
810  next_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
811  }
812  } else {
813  /* It was the first entry. We need either to reset the hash entry or delete it */
814  if (pae->next_applied_entry_index != ~0) {
815  /* the next element becomes the new first one, so needs the tail pointer to be set */
816  applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
817  ASSERT(pae->tail_applied_entry_index != ~0);
818  next_pae->tail_applied_entry_index = pae->tail_applied_entry_index;
819  /* Remove ourselves and transfer the ownership of the colliding rules vector */
820  del_colliding_rule(applied_hash_aces, old_index, old_index);
821  next_pae->colliding_rules = pae->colliding_rules;
822  /* unlink from the next element */
823  next_pae->prev_applied_entry_index = ~0;
824  add_del_hashtable_entry(am, lc_index,
825  applied_hash_aces, pae->next_applied_entry_index, 1);
826  } else {
827  /* no next entry, so just delete the entry in the hash table */
828  add_del_hashtable_entry(am, lc_index,
829  applied_hash_aces, old_index, 0);
830  }
831  }
832 
833  release_mask_type_index(am, pae->mask_type_index);
834  /* invalidate the old entry */
835  pae->mask_type_index = ~0;
836  pae->prev_applied_entry_index = ~0;
837  pae->next_applied_entry_index = ~0;
838  pae->tail_applied_entry_index = ~0;
839  /* always has to be 0 */
840  pae->colliding_rules = NULL;
841 }
842 
843 
844 void
845 hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
846 {
847  int i;
848 
849  DBG0("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
850  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
851  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
852 
853  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
854  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;
855 
856  /* remove this acl# from the list of applied hash acls */
857  u32 index = vec_search(pal->applied_acls, acl_index);
858  if (index == ~0) {
859  clib_warning("BUG: trying to unapply unapplied acl_index %d on lc_index %d, according to lc",
860  acl_index, lc_index);
861  return;
862  }
863  vec_del1(pal->applied_acls, index);
864 
865  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
866  if (index2 == ~0) {
867  clib_warning("BUG: trying to unapply twice acl_index %d on lc_index %d, according to h-acl info",
868  acl_index, lc_index);
869  return;
870  }
871  vec_del1((*hash_acl_applied_lc_index), index2);
872 
873  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
874 
875  for(i=0; i < vec_len((*applied_hash_aces)); i++) {
876  if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) {
877  DBG("Found applied ACL#%d at applied index %d", acl_index, i);
878  break;
879  }
880  }
881  if (vec_len((*applied_hash_aces)) <= i) {
882  DBG("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
883  /* we went all the way without finding any entries. Probably a list was empty. */
884  return;
885  }
886 
887  void *oldheap = hash_acl_set_heap(am);
888  int base_offset = i;
889  int tail_offset = base_offset + vec_len(ha->rules);
890  int tail_len = vec_len((*applied_hash_aces)) - tail_offset;
891  DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);
892 
893  for(i=0; i < vec_len(ha->rules); i ++) {
894  deactivate_applied_ace_hash_entry(am, lc_index,
895  applied_hash_aces, base_offset + i);
896  }
897  for(i=0; i < tail_len; i ++) {
898  /* move the entry at tail offset to base offset */
899  /* that is, from (tail_offset+i) -> (base_offset+i) */
900  DBG("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
901  move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
902  }
903  /* trim the end of the vector */
904  _vec_len((*applied_hash_aces)) -= vec_len(ha->rules);
905 
906  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
907 
908  clib_mem_set_heap (oldheap);
909 }
910 
911 /*
912  * Create the applied ACEs and update the hash table,
913  * taking into account that the ACL may not be the last
914  * in the vector of applied ACLs.
915  *
916  * For now, walk from the end of the vector and unapply the ACLs,
917  * then apply the one in question and reapply the rest.
918  */
919 
920 void
921 hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
922 {
923  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
924  u32 **applied_acls = &acontext->acl_indices;
925  int i;
926  int start_index = vec_search((*applied_acls), acl_index);
927 
928  DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
929  /*
930  * This function is called after we find out the lc_index where ACL is applied.
931  * If the by-lc_index vector does not have the ACL#, then it's a bug.
932  */
933  ASSERT(start_index < vec_len(*applied_acls));
934 
935  /* unapply all the ACLs at the tail side, up to the current one */
936  for(i = vec_len(*applied_acls) - 1; i > start_index; i--) {
937  hash_acl_unapply(am, lc_index, *vec_elt_at_index(*applied_acls, i));
938  }
939  for(i = start_index; i < vec_len(*applied_acls); i++) {
940  hash_acl_apply(am, lc_index, *vec_elt_at_index(*applied_acls, i), i);
941  }
942 }
943 
944 static void
945 make_ip6_address_mask(ip6_address_t *addr, u8 prefix_len)
946 {
947  ip6_address_mask_from_width(addr, prefix_len);
948 }
949 
950 
951 /* Maybe should be moved into the core somewhere */
952 always_inline void
953 ip4_address_mask_from_width (ip4_address_t * a, u32 width)
954 {
955  int i, byte, bit, bitnum;
956  ASSERT (width <= 32);
957  memset (a, 0, sizeof (a[0]));
958  for (i = 0; i < width; i++)
959  {
960  bitnum = (7 - (i & 7));
961  byte = i / 8;
962  bit = 1 << bitnum;
963  a->as_u8[byte] |= bit;
964  }
965 }
966 
967 
968 static void
969 make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
970 {
971  ip4_address_mask_from_width(addr, prefix_len);
972 }
973 
974 static void
975 make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
976 {
977  if (port_first == port_last) {
978  *portmask = 0xffff;
979  /* single port is representable by masked value */
980  return;
981  }
982 
983  *portmask = 0;
984  return;
985 }
986 
987 static void
988 make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi)
989 {
990  memset(mask, 0, sizeof(*mask));
991  memset(&hi->match, 0, sizeof(hi->match));
992  hi->action = r->is_permit;
993 
994  /* we will need to be matching based on lc_index and mask_type_index when applied */
995  mask->pkt.lc_index = ~0;
996  /* we will assign the match of mask_type_index later when we find it*/
997  mask->pkt.mask_type_index_lsb = ~0;
998 
999  mask->pkt.is_ip6 = 1;
1000  hi->match.pkt.is_ip6 = r->is_ipv6;
1001  if (r->is_ipv6) {
1002  make_ip6_address_mask(&mask->ip6_addr[0], r->src_prefixlen);
1003  hi->match.ip6_addr[0] = r->src.ip6;
1004  make_ip6_address_mask(&mask->ip6_addr[1], r->dst_prefixlen);
1005  hi->match.ip6_addr[1] = r->dst.ip6;
1006  } else {
1007  memset(hi->match.l3_zero_pad, 0, sizeof(hi->match.l3_zero_pad));
1008  make_ip4_address_mask(&mask->ip4_addr[0], r->src_prefixlen);
1009  hi->match.ip4_addr[0] = r->src.ip4;
1010  make_ip4_address_mask(&mask->ip4_addr[1], r->dst_prefixlen);
1011  hi->match.ip4_addr[1] = r->dst.ip4;
1012  }
1013 
1014  if (r->proto != 0) {
1015  mask->l4.proto = ~0; /* L4 proto needs to be matched */
1016  hi->match.l4.proto = r->proto;
1017 
1018  /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
1019  make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
1020  hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];
1021 
1022  make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
1023  hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
1024  /* L4 info must be valid in order to match */
1025  mask->pkt.l4_valid = 1;
1026  hi->match.pkt.l4_valid = 1;
1027  /* And we must set the mask to check that it is an initial fragment */
1028  mask->pkt.is_nonfirst_fragment = 1;
1029  hi->match.pkt.is_nonfirst_fragment = 0;
1030  if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) {
1031  /* if we want to match on TCP flags, they must be masked off as well */
1032  mask->pkt.tcp_flags = r->tcp_flags_mask;
1033  hi->match.pkt.tcp_flags = r->tcp_flags_value;
1034  /* and the flags need to be present within the packet being matched */
1035  mask->pkt.tcp_flags_valid = 1;
1036  hi->match.pkt.tcp_flags_valid = 1;
1037  }
1038  }
1039  /* Sanitize the mask and the match */
1040  u64 *pmask = (u64 *)mask;
1041  u64 *pmatch = (u64 *)&hi->match;
1042  int j;
1043  for(j=0; j<6; j++) {
1044  pmatch[j] = pmatch[j] & pmask[j];
1045  }
1046 }
1047 
1048 
1049 int hash_acl_exists(acl_main_t *am, int acl_index)
1050 {
1051  if (acl_index >= vec_len(am->hash_acl_infos))
1052  return 0;
1053 
1054  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
1055  return ha->hash_acl_exists;
1056 }
1057 
1058 void hash_acl_add(acl_main_t *am, int acl_index)
1059 {
1060  void *oldheap = hash_acl_set_heap(am);
1061  DBG("HASH ACL add : %d", acl_index);
1062  int i;
1063  acl_list_t *a = &am->acls[acl_index];
1064  vec_validate(am->hash_acl_infos, acl_index);
1065  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
1066  memset(ha, 0, sizeof(*ha));
1067  ha->hash_acl_exists = 1;
1068 
1069  /* walk the newly added ACL entries and ensure that for each of them there
1070  is a mask type, increment a reference count for that mask type */
1071  for(i=0; i < a->count; i++) {
1072  hash_ace_info_t ace_info;
1073  fa_5tuple_t mask;
1074  memset(&ace_info, 0, sizeof(ace_info));
1075  ace_info.acl_index = acl_index;
1076  ace_info.ace_index = i;
1077 
1078  make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info);
1079  mask.pkt.flags_reserved = 0b000;
1080  ace_info.base_mask_type_index = assign_mask_type_index(am, &mask);
1081  /* assign the mask type index for matching itself */
1082  ace_info.match.pkt.mask_type_index_lsb = ace_info.base_mask_type_index;
1083  DBG("ACE: %d mask_type_index: %d", i, ace_info.base_mask_type_index);
1084  vec_add1(ha->rules, ace_info);
1085  }
1086  /*
1087  * if an ACL is applied somewhere, fill the corresponding lookup data structures.
1088  * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
1089  */
1090  if (acl_index < vec_len(am->lc_index_vec_by_acl)) {
1091  u32 *lc_index;
1092  vec_foreach(lc_index, am->lc_index_vec_by_acl[acl_index]) {
1093  hash_acl_reapply(am, *lc_index, acl_index);
1094  }
1095  }
1096  clib_mem_set_heap (oldheap);
1097 }
1098 
1099 void hash_acl_delete(acl_main_t *am, int acl_index)
1100 {
1101  void *oldheap = hash_acl_set_heap(am);
1102  DBG0("HASH ACL delete : %d", acl_index);
1103  /*
1104  * If the ACL is applied somewhere, remove the references of it (call hash_acl_unapply)
1105  * this is a different behavior from the linear lookup where an empty ACL is "deny all",
1106  *
1107  * However, following vpp-dev discussion the ACL that is referenced elsewhere
1108  * should not be possible to delete, and the change adding this also adds
1109  * the safeguards to that respect, so this is not a problem.
1110  *
1111  * The part to remember is that this routine is called in the process of reapplication
1112  * during the acl_add_replace() API call - the old acl ruleset is deleted, then
1113  * the new one is added, without the change in the applied ACLs - so this case
1114  * has to be handled.
1115  */
1116  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
1117  u32 *lc_list_copy = 0;
1118  {
1119  u32 *lc_index;
1120  lc_list_copy = vec_dup(ha->lc_index_list);
1121  vec_foreach(lc_index, lc_list_copy) {
1122  hash_acl_unapply(am, *lc_index, acl_index);
1123  }
1124  vec_free(lc_list_copy);
1125  }
1126 
1127  /* walk the mask types for the ACL about-to-be-deleted, and decrease
1128  * the reference count, possibly freeing up some of them */
1129  int i;
1130  for(i=0; i < vec_len(ha->rules); i++) {
1131  release_mask_type_index(am, ha->rules[i].base_mask_type_index);
1132  }
1133  ha->hash_acl_exists = 0;
1134  vec_free(ha->rules);
1135  clib_mem_set_heap (oldheap);
1136 }
1137 
1138 
1139 void
1140 show_hash_acl_hash (vlib_main_t *vm, acl_main_t *am, u32 verbose)
1141 {
1142  vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
1143  BV (format_bihash), &am->acl_lookup_hash, verbose);
1144 }
1145 
1146 void
1147 acl_plugin_show_tables_mask_type (void)
1148 {
1149  acl_main_t *am = &acl_main;
1150  vlib_main_t *vm = am->vlib_main;
1151  ace_mask_type_entry_t *mte;
1152 
1153  vlib_cli_output (vm, "Mask-type entries:");
1154  /* *INDENT-OFF* */
1155  pool_foreach(mte, am->ace_mask_type_pool,
1156  ({
1157  vlib_cli_output(vm, " %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
1158  mte - am->ace_mask_type_pool,
1159  mte->mask.kv_40_8.key[0], mte->mask.kv_40_8.key[1], mte->mask.kv_40_8.key[2],
1160  mte->mask.kv_40_8.key[3], mte->mask.kv_40_8.key[4], mte->mask.kv_40_8.value, mte->refcount);
1161  }));
1162  /* *INDENT-ON* */
1163 }
1164 
1165 void
1166 acl_plugin_show_tables_acl_hash_info (u32 acl_index)
1167 {
1168  acl_main_t *am = &acl_main;
1169  vlib_main_t *vm = am->vlib_main;
1170  u32 i, j;
1171  u64 *m;
1172  vlib_cli_output (vm, "Mask-ready ACL representations\n");
1173  for (i = 0; i < vec_len (am->hash_acl_infos); i++)
1174  {
1175  if ((acl_index != ~0) && (acl_index != i))
1176  {
1177  continue;
1178  }
1179  hash_acl_info_t *ha = &am->hash_acl_infos[i];
1180  vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
1181  vlib_cli_output (vm, " applied lc_index list: %U\n",
1182  format_vec32, ha->lc_index_list, "%d");
1183  for (j = 0; j < vec_len (ha->rules); j++)
1184  {
1185  hash_ace_info_t *pa = &ha->rules[j];
1186  m = (u64 *) & pa->match;
1187  vlib_cli_output (vm,
1188  " %4d: %016llx %016llx %016llx %016llx %016llx %016llx base mask index %d acl %d rule %d action %d\n",
1189  j, m[0], m[1], m[2], m[3], m[4], m[5],
1190  pa->base_mask_type_index, pa->acl_index, pa->ace_index,
1191  pa->action);
1192  }
1193  }
1194 }
1195 
1196 static void
1197 acl_plugin_print_colliding_rule (vlib_main_t * vm, int j, collision_match_rule_t * cr) {
1198  vlib_cli_output(vm,
1199  " %4d: acl %d ace %d acl pos %d pae index: %d",
1200  j, cr->acl_index, cr->ace_index, cr->acl_position, cr->applied_entry_index);
1201 }
1202 
1203 static void
1204 acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
1205 {
1206  vlib_cli_output (vm,
1207  " %4d: acl %d rule %d action %d bitmask-ready rule %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
1208  j, pae->acl_index, pae->ace_index, pae->action,
1209  pae->hash_ace_info_index, vec_len(pae->colliding_rules),
1210  pae->next_applied_entry_index, pae->prev_applied_entry_index,
1211  pae->tail_applied_entry_index, pae->hitcount, pae->acl_position);
1212  int jj;
1213  for(jj=0; jj<vec_len(pae->colliding_rules); jj++)
1214  acl_plugin_print_colliding_rule(vm, jj, vec_elt_at_index(pae->colliding_rules, jj));
1215 }
1216 
1217 static void
1218 acl_plugin_print_applied_mask_info (vlib_main_t * vm, int j, hash_applied_mask_info_t *mi)
1219 {
1220  vlib_cli_output (vm,
1221  " %4d: mask type index %d first rule index %d num_entries %d max_collisions %d",
1222  j, mi->mask_type_index, mi->first_rule_index, mi->num_entries, mi->max_collisions);
1223 }
1224 
1225 void
1226 acl_plugin_show_tables_applied_info (u32 lc_index)
1227 {
1228  acl_main_t *am = &acl_main;
1229  vlib_main_t *vm = am->vlib_main;
1230  u32 lci, j;
1231  vlib_cli_output (vm, "Applied lookup entries for lookup contexts");
1232 
1233  for (lci = 0;
1234  (lci < vec_len(am->applied_hash_acl_info_by_lc_index)); lci++)
1235  {
1236  if ((lc_index != ~0) && (lc_index != lci))
1237  {
1238  continue;
1239  }
1240  vlib_cli_output (vm, "lc_index %d:", lci);
1241  if (lci < vec_len (am->applied_hash_acl_info_by_lc_index))
1242  {
1243  applied_hash_acl_info_t *pal =
1244  &am->applied_hash_acl_info_by_lc_index[lci];
1245  vlib_cli_output (vm, " applied acls: %U", format_vec32,
1246  pal->applied_acls, "%d");
1247  }
1248  if (lci < vec_len (am->hash_applied_mask_info_vec_by_lc_index))
1249  {
1250  vlib_cli_output (vm, " applied mask info entries:");
1251  for (j = 0;
1252  j < vec_len (am->hash_applied_mask_info_vec_by_lc_index[lci]);
1253  j++)
1254  {
1255  acl_plugin_print_applied_mask_info (vm, j,
1256  &am->hash_applied_mask_info_vec_by_lc_index
1257  [lci][j]);
1258  }
1259  }
1260  if (lci < vec_len (am->hash_entry_vec_by_lc_index))
1261  {
1262  vlib_cli_output (vm, " lookup applied entries:");
1263  for (j = 0;
1264  j < vec_len (am->hash_entry_vec_by_lc_index[lci]);
1265  j++)
1266  {
1267  acl_plugin_print_pae (vm, j,
1268  &am->hash_entry_vec_by_lc_index
1269  [lci][j]);
1270  }
1271  }
1272  }
1273 }
1274 
1275 void
1276 acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
1277 {
1278  acl_main_t *am = &acl_main;
1279  vlib_main_t *vm = am->vlib_main;
1280  show_hash_acl_hash (vm, am, show_bihash_verbose);
1281 }
1282 
1283 /*
1284  * Split of the partition needs to happen when the collision count
1285  * goes over a specified threshold.
1286  *
1287  * This is a signal that we ignored too many bits in
1288  * mT and we need to split the table into two tables. We select
1289  * all of the colliding rules L and find their maximum common
1290  * tuple mL. Normally mL is specific enough to hash L with few
1291  * or no collisions. We then create a new table T2 with tuple mL
1292  * and transfer all compatible rules from T to T2. If mL is not
1293  * specific enough, we find the field with the biggest difference
1294  * between the minimum and maximum tuple lengths for all of
1295  * the rules in L and set that field to be the average of those two
1296  * values. We then transfer all compatible rules as before. This
1297  * guarantees that some rules from L will move and that T2 will
1298  * have a smaller number of collisions than T did.
1299  */
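/*
 * Illustrative example (editor's note, not from the original source): if the
 * colliding rules carry IPv4 source masks ranging from /16 (min_tuple) to
 * /32 (max_tuple), the source-address dimension has a 16-bit spread; when
 * that is the largest spread, DIM_SRC_ADDR is selected below and the new
 * table mask becomes the /32 shifted left by best_delta/2 = 8 bits, i.e.
 * a /24 source mask for the new partition.
 */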
1300 
1301 
1302 static void
1303 ensure_ip6_min_addr (ip6_address_t * min_addr, ip6_address_t * mask_addr)
1304 {
1305  int update =
1306  (clib_net_to_host_u64 (mask_addr->as_u64[0]) <
1307  clib_net_to_host_u64 (min_addr->as_u64[0]))
1308  ||
1309  ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
1310  clib_net_to_host_u64 (min_addr->as_u64[0]))
1311  && (clib_net_to_host_u64 (mask_addr->as_u64[1]) <
1312  clib_net_to_host_u64 (min_addr->as_u64[1])));
1313  if (update)
1314  {
1315  min_addr->as_u64[0] = mask_addr->as_u64[0];
1316  min_addr->as_u64[1] = mask_addr->as_u64[1];
1317  }
1318 }
1319 
1320 static void
1321 ensure_ip6_max_addr (ip6_address_t * max_addr, ip6_address_t * mask_addr)
1322 {
1323  int update =
1324  (clib_net_to_host_u64 (mask_addr->as_u64[0]) >
1325  clib_net_to_host_u64 (max_addr->as_u64[0]))
1326  ||
1327  ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
1328  clib_net_to_host_u64 (max_addr->as_u64[0]))
1329  && (clib_net_to_host_u64 (mask_addr->as_u64[1]) >
1330  clib_net_to_host_u64 (max_addr->as_u64[1])));
1331  if (update)
1332  {
1333  max_addr->as_u64[0] = mask_addr->as_u64[0];
1334  max_addr->as_u64[1] = mask_addr->as_u64[1];
1335  }
1336 }
1337 
1338 static void
1339 ensure_ip4_min_addr (ip4_address_t * min_addr, ip4_address_t * mask_addr)
1340 {
1341  int update =
1342  (clib_net_to_host_u32 (mask_addr->as_u32) <
1343  clib_net_to_host_u32 (min_addr->as_u32));
1344  if (update)
1345  min_addr->as_u32 = mask_addr->as_u32;
1346 }
1347 
1348 static void
1349 ensure_ip4_max_addr (ip4_address_t * max_addr, ip4_address_t * mask_addr)
1350 {
1351  int update =
1352  (clib_net_to_host_u32 (mask_addr->as_u32) >
1353  clib_net_to_host_u32 (max_addr->as_u32));
1354  if (update)
1355  max_addr->as_u32 = mask_addr->as_u32;
1356 }
1357 
1358 enum {
1359  DIM_SRC_ADDR,
1360  DIM_DST_ADDR,
1361  DIM_SRC_PORT,
1362  DIM_DST_PORT,
1363  DIM_PROTO
1364 };
1365 
1366 
1367 
1368 static void
1369 split_partition(acl_main_t *am, u32 first_index,
1370  u32 lc_index, int is_ip6){
1371  DBG( "TM-split_partition - first_entry:%d", first_index);
1372  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
1373  ace_mask_type_entry_t *mte;
1374  fa_5tuple_t the_min_tuple, *min_tuple = &the_min_tuple;
1375  fa_5tuple_t the_max_tuple, *max_tuple = &the_max_tuple;
1376  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), first_index);
1377  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
1378  hash_ace_info_t *ace_info;
1379  u32 coll_mask_type_index = pae->mask_type_index;
1380  memset(&the_min_tuple, 0, sizeof(the_min_tuple));
1381  memset(&the_max_tuple, 0, sizeof(the_max_tuple));
1382 
1383  int i=0;
1384  u64 collisions = vec_len(pae->colliding_rules);
1385 // while(pae->next_applied_entry_index == ~0){
1386  for(i=0; i<collisions; i++){
1387 
1388  DBG( "TM-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
1389  pae->ace_index, pae->mask_type_index, coll_mask_type_index);
1390 
1391  ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
1392  mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
1393  fa_5tuple_t *mask = &mte->mask;
1394 
1395  if(pae->mask_type_index != coll_mask_type_index) continue;
1396  /* Computing min_mask and max_mask for colliding rules */
1397  if(i==0){
1398  clib_memcpy(min_tuple, mask, sizeof(fa_5tuple_t));
1399  clib_memcpy(max_tuple, mask, sizeof(fa_5tuple_t));
1400  }else{
1401  int j;
1402  for(j=0; j<2; j++){
1403  if (is_ip6)
1404  ensure_ip6_min_addr(&min_tuple->ip6_addr[j], &mask->ip6_addr[j]);
1405  else
1406  ensure_ip4_min_addr(&min_tuple->ip4_addr[j], &mask->ip4_addr[j]);
1407 
1408  if ((mask->l4.port[j] < min_tuple->l4.port[j]))
1409  min_tuple->l4.port[j] = mask->l4.port[j];
1410  }
1411 
1412  if ((mask->l4.proto < min_tuple->l4.proto))
1413  min_tuple->l4.proto = mask->l4.proto;
1414 
1415  if(mask->pkt.as_u64 < min_tuple->pkt.as_u64)
1416  min_tuple->pkt.as_u64 = mask->pkt.as_u64;
1417 
1418 
1419  for(j=0; j<2; j++){
1420  if (is_ip6)
1421  ensure_ip6_max_addr(&max_tuple->ip6_addr[j], &mask->ip6_addr[j]);
1422  else
1423  ensure_ip4_max_addr(&max_tuple->ip4_addr[j], &mask->ip4_addr[j]);
1424 
1425  if ((mask->l4.port[j] > max_tuple->l4.port[j]))
1426  max_tuple->l4.port[j] = mask->l4.port[j];
1427  }
1428 
1429  if ((mask->l4.proto < max_tuple->l4.proto))
1430  max_tuple->l4.proto = mask->l4.proto;
1431 
1432  if(mask->pkt.as_u64 > max_tuple->pkt.as_u64)
1433  max_tuple->pkt.as_u64 = mask->pkt.as_u64;
1434  }
1435 
1436  pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
1437  }
1438 
1439  /* Computing field with max difference between (min/max)_mask */
1440  int best_dim=-1, best_delta=0, delta=0;
1441 
1442  /* SRC_addr dimension */
1443  if (is_ip6) {
1444  int i;
1445  for(i=0; i<2; i++){
1446  delta += count_bits(max_tuple->ip6_addr[0].as_u64[i]) - count_bits(min_tuple->ip6_addr[0].as_u64[i]);
1447  }
1448  } else {
1449  delta += count_bits(max_tuple->ip4_addr[0].as_u32) - count_bits(min_tuple->ip4_addr[0].as_u32);
1450  }
1451  if(delta > best_delta){
1452  best_delta = delta;
1453  best_dim = DIM_SRC_ADDR;
1454  }
1455 
1456  /* DST_addr dimension */
1457  delta = 0;
1458  if (is_ip6) {
1459  int i;
1460  for(i=0; i<2; i++){
1461  delta += count_bits(max_tuple->ip6_addr[1].as_u64[i]) - count_bits(min_tuple->ip6_addr[1].as_u64[i]);
1462  }
1463  } else {
1464  delta += count_bits(max_tuple->ip4_addr[1].as_u32) - count_bits(min_tuple->ip4_addr[1].as_u32);
1465  }
1466  if(delta > best_delta){
1467  best_delta = delta;
1468  best_dim = DIM_DST_ADDR;
1469  }
1470 
1471  /* SRC_port dimension */
1472  delta = count_bits(max_tuple->l4.port[0]) - count_bits(min_tuple->l4.port[0]);
1473  if(delta > best_delta){
1474  best_delta = delta;
1475  best_dim = DIM_SRC_PORT;
1476  }
1477 
1478  /* DST_port dimension */
1479  delta = count_bits(max_tuple->l4.port[1]) - count_bits(min_tuple->l4.port[1]);
1480  if(delta > best_delta){
1481  best_delta = delta;
1482  best_dim = DIM_DST_PORT;
1483  }
1484 
1485  /* Proto dimension */
1486  delta = count_bits(max_tuple->l4.proto) - count_bits(min_tuple->l4.proto);
1487  if(delta > best_delta){
1488  best_delta = delta;
1489  best_dim = DIM_PROTO;
1490  }
1491 
1492  int shifting = 0; //, ipv4_block = 0;
1493  switch(best_dim){
1494  case DIM_SRC_ADDR:
1495  shifting = (best_delta)/2; // FIXME IPV4-only
1496  // ipv4_block = count_bits(max_tuple->ip4_addr[0].as_u32);
1497  min_tuple->ip4_addr[0].as_u32 =
1498  clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[0].as_u32) << (shifting))&0xFFFFFFFF);
1499 
1500  break;
1501  case DIM_DST_ADDR:
1502  shifting = (best_delta)/2;
1503 /*
1504  ipv4_block = count_bits(max_tuple->addr[1].as_u64[1]);
1505  if(ipv4_block > shifting)
1506  min_tuple->addr[1].as_u64[1] =
1507  clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[1]) << (shifting))&0xFFFFFFFF);
1508  else{
1509  shifting = shifting - ipv4_block;
1510  min_tuple->addr[1].as_u64[1] = 0;
1511  min_tuple->addr[1].as_u64[0] =
1512  clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[0]) << (shifting))&0xFFFFFFFF);
1513  }
1514 */
1515  min_tuple->ip4_addr[1].as_u32 =
1516  clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[1].as_u32) << (shifting))&0xFFFFFFFF);
1517 
1518  break;
1519  case DIM_SRC_PORT: min_tuple->l4.port[0] = max_tuple->l4.port[0] << (best_delta)/2;
1520  break;
1521  case DIM_DST_PORT: min_tuple->l4.port[1] = max_tuple->l4.port[1] << (best_delta)/2;
1522  break;
1523  case DIM_PROTO: min_tuple->l4.proto = max_tuple->l4.proto << (best_delta)/2;
1524  break;
1525  default: relax_tuple(min_tuple, is_ip6, 1);
1526  break;
1527  }
1528 
1529  min_tuple->pkt.is_nonfirst_fragment = 0;
1530  u32 new_mask_type_index = assign_mask_type_index(am, min_tuple);
1531 
1532  hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
1533 
1534  hash_applied_mask_info_t *minfo;
1535  //search in order pool if mask_type_index is already there
1536  int search;
1537  for (search=0; search < vec_len((*hash_applied_mask_info_vec)); search++){
1538  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
1539  if(minfo->mask_type_index == new_mask_type_index)
1540  break;
1541  }
1542 
1543  vec_validate((*hash_applied_mask_info_vec), search);
1544  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
1545  minfo->mask_type_index = new_mask_type_index;
1546  minfo->num_entries = 0;
1547  minfo->max_collisions = 0;
1548  minfo->first_rule_index = ~0;
1549 
1550  DBG( "TM-split_partition - mask type index-assigned!! -> %d", new_mask_type_index);
1551 
1552  if(coll_mask_type_index == new_mask_type_index){
1553  //vlib_cli_output(vm, "TM-There are collisions over threshold, but i'm not able to split! %d %d", coll_mask_type_index, new_mask_type_index);
1554  return;
1555  }
1556 
1557 
1558  /* populate new partition */
1559  DBG( "TM-Populate new partition");
1560  u32 r_ace_index = first_index;
1561 
1562 // for(i=0; i<collisions; i++){
1563  for(r_ace_index=0; r_ace_index < vec_len((*applied_hash_aces)); r_ace_index++) {
1564 
1565  applied_hash_ace_entry_t *pop_pae = vec_elt_at_index((*applied_hash_aces), r_ace_index);
1566  DBG( "TM-Population-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
1567  pop_pae->ace_index, pop_pae->mask_type_index, coll_mask_type_index);
1568 
1569  if(pop_pae->mask_type_index != coll_mask_type_index) continue;
1570  u32 next_index = pop_pae->next_applied_entry_index;
1571 
1572  ace_info = vec_elt_at_index(ha->rules, pop_pae->hash_ace_info_index);
1573  mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
1574  //can insert rule?
1575  //mte = vec_elt_at_index(am->ace_mask_type_pool, pop_pae->mask_type_index);
1576  fa_5tuple_t *pop_mask = &mte->mask;
1577 
1578  if(!first_mask_contains_second_mask(is_ip6, min_tuple, pop_mask)) continue;
1579  DBG( "TM-new partition can insert -> applied_ace:%d", r_ace_index);
1580 
1581  //delete and insert in new format
1582  deactivate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
1583 
1584  /* insert the new entry */
1585  pop_pae->mask_type_index = new_mask_type_index;
1586 
1587  activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
1588 
1589  r_ace_index = next_index;
1590  }
1591 
1592  DBG( "TM-Populate new partition-END");
1593  DBG( "TM-split_partition - END");
1594 
1595 }
1596 