FD.io VPP  v18.04-17-g3a0d853
Vector Packet Processing
public_inlines.h
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_acl_inlines_h
#define included_acl_inlines_h

#include <stdint.h>

#include <plugins/acl/acl.h>
#include <plugins/acl/fa_node.h>


/* check if a given ACL exists */

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS

/*
 * Define a pointer to the acl_main, which will be filled in during initialization.
 */
static acl_main_t *p_acl_main = 0;

static u8 (*acl_plugin_acl_exists) (u32 acl_index);
#else
u8 acl_plugin_acl_exists (u32 acl_index);
#endif


/*
 * If you are using the ACL plugin, get this unique ID first,
 * so you can identify yourself when creating the lookup contexts.
 */

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
static u32 (*acl_plugin_register_user_module) (char *caller_module_string, char *val1_label, char *val2_label);
#else
u32 acl_plugin_register_user_module (char *caller_module_string, char *val1_label, char *val2_label);
#endif

/*
 * Allocate a new lookup context index.
 * Supply the id assigned to your module during registration,
 * and two values of your choice identifying instances
 * of use within your module. They are useful for debugging.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
static int (*acl_plugin_get_lookup_context_index) (u32 acl_user_id, u32 val1, u32 val2);
#else
int acl_plugin_get_lookup_context_index (u32 acl_user_id, u32 val1, u32 val2);
#endif

/*
 * Release the lookup context index and destroy
 * any associated data structures.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
static void (*acl_plugin_put_lookup_context_index) (u32 lc_index);
#else
void acl_plugin_put_lookup_context_index (u32 lc_index);
#endif

/*
 * Prepare the sequential vector of ACL#s to look up within a given context.
 * Any existing list will be overwritten. acl_list is a vector.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
static int (*acl_plugin_set_acl_vec_for_context) (u32 lc_index, u32 *acl_list);
#else
int acl_plugin_set_acl_vec_for_context (u32 lc_index, u32 *acl_list);
#endif
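
/*
 * Example (illustrative sketch only, not compiled): a typical control-plane
 * flow for a consumer of the API above. The module name, label strings and
 * the my_acl_index / sw_if_index variables are hypothetical placeholders;
 * registration would normally happen once, at module init.
 */
#if 0
static clib_error_t *
my_module_attach_acl (u32 my_acl_index, u32 sw_if_index)
{
  /* identify this module to the ACL plugin (normally done once) */
  u32 my_user_id =
    acl_plugin_register_user_module ("my-module", "sw_if_index", "unused");

  /* allocate a lookup context for this instance */
  int lc_index =
    acl_plugin_get_lookup_context_index (my_user_id, sw_if_index, 0);
  if (lc_index < 0)
    return clib_error_return (0, "could not allocate lookup context");

  /* attach a vector with a single ACL to the context */
  u32 *acl_vec = 0;
  vec_add1 (acl_vec, my_acl_index);
  acl_plugin_set_acl_vec_for_context (lc_index, acl_vec);
  vec_free (acl_vec);

  /* ... later, when the feature is disabled:
     acl_plugin_put_lookup_context_index (lc_index); */
  return 0;
}
#endif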

/* Fill the 5-tuple from the packet */

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
static void (*acl_plugin_fill_5tuple) (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                                       int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
#else
void acl_plugin_fill_5tuple (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                             int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
#endif

#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
static inline
void acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                                    int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt) {
  /* FIXME: this should be the inlined version of filling in the 5-tuple; for now just call the non-inlined version */
  acl_plugin_fill_5tuple(lc_index, b0, is_ip6, is_input, is_l2_path, p5tuple_pkt);
}
#endif


#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
static int (*acl_plugin_match_5tuple) (u32 lc_index,
                                       fa_5tuple_opaque_t * pkt_5tuple,
                                       int is_ip6, u8 * r_action,
                                       u32 * r_acl_pos_p,
                                       u32 * r_acl_match_p,
                                       u32 * r_rule_match_p,
                                       u32 * trace_bitmap);
#else
int acl_plugin_match_5tuple (u32 lc_index,
                             fa_5tuple_opaque_t * pkt_5tuple,
                             int is_ip6, u8 * r_action,
                             u32 * r_acl_pos_p,
                             u32 * r_acl_match_p,
                             u32 * r_rule_match_p,
                             u32 * trace_bitmap);
#endif

#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
static inline int
acl_plugin_match_5tuple_inline (u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap) {
  return acl_plugin_match_5tuple(lc_index, pkt_5tuple, is_ip6, r_action, r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
}
#endif
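
/*
 * Example (illustrative sketch only, not compiled): how an external consumer
 * might call the fill/match pair per packet. The lc_index and b0 arguments
 * are assumed to come from the caller's own state; is_input / is_l2_path
 * describe the caller's position in the datapath.
 */
#if 0
static int
my_module_check_packet (u32 lc_index, vlib_buffer_t * b0, int is_ip6)
{
  fa_5tuple_opaque_t pkt_5tuple;
  u8 action = 0;
  u32 acl_pos, acl_index, rule_index, trace_bitmap = 0;

  acl_plugin_fill_5tuple (lc_index, b0, is_ip6, 1 /* is_input */ ,
                          0 /* is_l2_path */ , &pkt_5tuple);
  if (acl_plugin_match_5tuple (lc_index, &pkt_5tuple, is_ip6, &action,
                               &acl_pos, &acl_index, &rule_index,
                               &trace_bitmap))
    return action;   /* the ACL plugin's is_permit value for the matched rule */
  return 0;          /* no match: deny by default in this sketch */
}
#endif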

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS

#define LOAD_SYMBOL_FROM_PLUGIN_TO(p, s, st)                           \
({                                                                     \
  st = vlib_get_plugin_symbol(p, #s);                                  \
  if (!st)                                                             \
    return clib_error_return(0,                                        \
                             "Plugin %s and/or symbol %s not found.", p, #s); \
})

#define LOAD_SYMBOL(s) LOAD_SYMBOL_FROM_PLUGIN_TO("acl_plugin.so", s, s)

static inline clib_error_t * acl_plugin_exports_init (void)
{
  LOAD_SYMBOL_FROM_PLUGIN_TO("acl_plugin.so", acl_main, p_acl_main);
  LOAD_SYMBOL(acl_plugin_acl_exists);
  LOAD_SYMBOL(acl_plugin_register_user_module);
  LOAD_SYMBOL(acl_plugin_get_lookup_context_index);
  LOAD_SYMBOL(acl_plugin_put_lookup_context_index);
  LOAD_SYMBOL(acl_plugin_set_acl_vec_for_context);
  LOAD_SYMBOL(acl_plugin_fill_5tuple);
  LOAD_SYMBOL(acl_plugin_match_5tuple);
  return 0;
}

#endif
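
/*
 * Example (illustrative sketch only, not compiled): a consumer built with
 * ACL_PLUGIN_EXTERNAL_EXPORTS resolves the symbols once at init time, before
 * using p_acl_main or any of the function pointers above.
 */
#if 0
static clib_error_t *
my_module_init (vlib_main_t * vm)
{
  clib_error_t *error = acl_plugin_exports_init ();
  if (error)
    return error;   /* acl_plugin.so not loaded or a symbol is missing */
  /* from here on, p_acl_main and the function pointers are usable */
  return 0;
}
VLIB_INIT_FUNCTION (my_module_init);
#endif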



always_inline void *
get_ptr_to_offset (vlib_buffer_t * b0, int offset)
{
  u8 *p = vlib_buffer_get_current (b0) + offset;
  return p;
}

always_inline int
offset_within_packet (vlib_buffer_t * b0, int offset)
{
  /* For the purposes of this code, "within" means we have at least 8 bytes after it */
  return (offset <= (b0->current_length - 8));
}
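
/*
 * Worked example (illustrative): if b0->current_length is 60 bytes, then
 * offset_within_packet (b0, offset) is true for offset <= 52, i.e. the
 * caller is guaranteed at least 8 readable bytes starting at "offset".
 */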

always_inline void
acl_fill_5tuple (acl_main_t * am, vlib_buffer_t * b0, int is_ip6,
                 int is_input, int is_l2_path, fa_5tuple_t * p5tuple_pkt)
{
  /* IP4 and IP6 protocol numbers of ICMP */
  static u8 icmp_protos_v4v6[] = { IP_PROTOCOL_ICMP, IP_PROTOCOL_ICMP6 };

  int l3_offset;
  int l4_offset;
  u16 ports[2];
  u16 proto;

  if (is_l2_path)
    {
      l3_offset = ethernet_buffer_header_size(b0);
    }
  else
    {
      if (is_input)
        l3_offset = 0;
      else
        l3_offset = vnet_buffer(b0)->ip.save_rewrite_length;
    }

  /* key[0..3] contains src/dst address and is cleared/set below */
  /* Remainder of the key and per-packet non-key data */
  p5tuple_pkt->kv.key[4] = 0;
  p5tuple_pkt->kv.value = 0;
  p5tuple_pkt->pkt.is_ip6 = is_ip6;

  if (is_ip6)
    {
      clib_memcpy (&p5tuple_pkt->addr,
                   get_ptr_to_offset (b0,
                                      offsetof (ip6_header_t,
                                                src_address) + l3_offset),
                   sizeof (p5tuple_pkt->addr));
      proto =
        *(u8 *) get_ptr_to_offset (b0,
                                   offsetof (ip6_header_t,
                                             protocol) + l3_offset);
      l4_offset = l3_offset + sizeof (ip6_header_t);
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG: proto: %d, l4_offset: %d", proto,
                    l4_offset);
#endif
      /* IP6 extension header handling is here: advance l4_offset if needed and update proto */
      int need_skip_eh = clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
      if (PREDICT_FALSE (need_skip_eh))
        {
          while (need_skip_eh && offset_within_packet (b0, l4_offset))
            {
              /* Fragment header needs special handling */
              if (PREDICT_FALSE(ACL_EH_FRAGMENT == proto))
                {
                  proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
                  u16 frag_offset;
                  clib_memcpy (&frag_offset, get_ptr_to_offset (b0, 2 + l4_offset), sizeof(frag_offset));
                  frag_offset = clib_net_to_host_u16(frag_offset) >> 3;
                  if (frag_offset)
                    {
                      p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
                      /* invalidate L4 offset so we don't try to find L4 info */
                      l4_offset += b0->current_length;
                    }
                  else
                    {
                      /* First fragment: skip the frag header and move on. */
                      l4_offset += 8;
                    }
                }
              else
                {
                  u8 nwords = *(u8 *) get_ptr_to_offset (b0, 1 + l4_offset);
                  proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
                  l4_offset += 8 * (1 + (u16) nwords);
                }
#ifdef FA_NODE_VERBOSE_DEBUG
              clib_warning ("ACL_FA_NODE_DBG: new proto: %d, new offset: %d",
                            proto, l4_offset);
#endif
              need_skip_eh =
                clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
            }
        }
    }
  else
    {
      p5tuple_pkt->kv.key[0] = 0;
      p5tuple_pkt->kv.key[1] = 0;
      p5tuple_pkt->kv.key[2] = 0;
      p5tuple_pkt->kv.key[3] = 0;
      clib_memcpy (&p5tuple_pkt->addr[0].ip4,
                   get_ptr_to_offset (b0,
                                      offsetof (ip4_header_t,
                                                src_address) + l3_offset),
                   sizeof (p5tuple_pkt->addr[0].ip4));
      clib_memcpy (&p5tuple_pkt->addr[1].ip4,
                   get_ptr_to_offset (b0,
                                      offsetof (ip4_header_t,
                                                dst_address) + l3_offset),
                   sizeof (p5tuple_pkt->addr[1].ip4));
      proto =
        *(u8 *) get_ptr_to_offset (b0,
                                   offsetof (ip4_header_t,
                                             protocol) + l3_offset);
      l4_offset = l3_offset + sizeof (ip4_header_t);
      u16 flags_and_fragment_offset;
      clib_memcpy (&flags_and_fragment_offset,
                   get_ptr_to_offset (b0,
                                      offsetof (ip4_header_t,
                                                flags_and_fragment_offset)) + l3_offset,
                   sizeof(flags_and_fragment_offset));
      flags_and_fragment_offset = clib_net_to_host_u16 (flags_and_fragment_offset);

      /* non-initial fragments have non-zero offset */
      if ((PREDICT_FALSE(0xfff & flags_and_fragment_offset)))
        {
          p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
          /* invalidate L4 offset so we don't try to find L4 info */
          l4_offset += b0->current_length;
        }

    }
  p5tuple_pkt->l4.proto = proto;
  if (PREDICT_TRUE (offset_within_packet (b0, l4_offset)))
    {
      p5tuple_pkt->pkt.l4_valid = 1;
      if (icmp_protos_v4v6[is_ip6] == proto)
        {
          /* type */
          p5tuple_pkt->l4.port[0] =
            *(u8 *) get_ptr_to_offset (b0,
                                       l4_offset + offsetof (icmp46_header_t,
                                                             type));
          /* code */
          p5tuple_pkt->l4.port[1] =
            *(u8 *) get_ptr_to_offset (b0,
                                       l4_offset + offsetof (icmp46_header_t,
                                                             code));
        }
      else if ((IP_PROTOCOL_TCP == proto) || (IP_PROTOCOL_UDP == proto))
        {
          clib_memcpy (&ports,
                       get_ptr_to_offset (b0,
                                          l4_offset + offsetof (tcp_header_t,
                                                                src_port)),
                       sizeof (ports));
          p5tuple_pkt->l4.port[0] = clib_net_to_host_u16 (ports[0]);
          p5tuple_pkt->l4.port[1] = clib_net_to_host_u16 (ports[1]);

          p5tuple_pkt->pkt.tcp_flags =
            *(u8 *) get_ptr_to_offset (b0,
                                       l4_offset + offsetof (tcp_header_t,
                                                             flags));
          p5tuple_pkt->pkt.tcp_flags_valid = (proto == IP_PROTOCOL_TCP);
        }
      /*
       * FIXME: rather than the above conditional, here could
       * be a nice generic mechanism to extract two L4 values:
       *
       * have a per-protocol array of 4 elements like this:
       *   u8 offset; to take the byte from, off L4 header
       *   u8 mask; to mask it with, before storing
       *
       * this way we can describe UDP, TCP and ICMP[46] semantics,
       * and add a sort of FPM-type behavior for other protocols.
       *
       * Of course, is it faster ? and is it needed ?
       *
       */
    }
}
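
/*
 * Illustrative sketch only (not part of the plugin): the table-driven L4
 * extraction idea from the FIXME above. A per-protocol descriptor names the
 * byte to take from the L4 header and a mask to apply before storing; this
 * covers ICMP type/code directly, while 16-bit TCP/UDP ports would need a
 * slightly wider descriptor. All identifiers below are hypothetical.
 */
#if 0
typedef struct
{
  u8 offset;   /* byte offset from the start of the L4 header */
  u8 mask;     /* mask to apply before storing the byte */
} l4_byte_extract_t;

/* two extracted values per protocol, feeding l4.port[0] and l4.port[1] */
static const l4_byte_extract_t icmp_extract[2] = {
  { offsetof (icmp46_header_t, type), 0xff },
  { offsetof (icmp46_header_t, code), 0xff },
};
#endif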

always_inline void
acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6,
                               int is_input, int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt)
{
  acl_main_t *am = p_acl_main;
  acl_fill_5tuple(am, b0, is_ip6, is_input, is_l2_path, (fa_5tuple_t *)p5tuple_pkt);
}



always_inline int
fa_acl_match_addr (ip46_address_t * addr1, ip46_address_t * addr2,
                   int prefixlen, int is_ip6)
{
  if (prefixlen == 0)
    {
      /* match any always succeeds */
      return 1;
    }
  if (is_ip6)
    {
      if (memcmp (addr1, addr2, prefixlen / 8))
        {
          /* If the starting full bytes do not match, no point in bit-twiddling further */
          return 0;
        }
      if (prefixlen % 8)
        {
          u8 b1 = *((u8 *) addr1 + 1 + prefixlen / 8);
          u8 b2 = *((u8 *) addr2 + 1 + prefixlen / 8);
          u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1));
          return (b1 & mask0) == b2;
        }
      else
        {
          /* The prefix fits into an integer number of bytes, so nothing left to do */
          return 1;
        }
    }
  else
    {
      uint32_t a1 = clib_net_to_host_u32 (addr1->ip4.as_u32);
      uint32_t a2 = clib_net_to_host_u32 (addr2->ip4.as_u32);
      uint32_t mask0 = 0xffffffff - ((1 << (32 - prefixlen)) - 1);
      return (a1 & mask0) == a2;
    }
}
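
/*
 * Worked example (illustrative): for an IPv4 /25 prefix, mask0 becomes
 * 0xffffffff - ((1 << 7) - 1) = 0xffffff80, so packet address 192.0.2.129
 * matches a rule for 192.0.2.128/25 because
 * (0xc0000281 & 0xffffff80) == 0xc0000280, the rule's (pre-masked) address.
 */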

always_inline int
fa_acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6)
{
  return ((port >= port_first) && (port <= port_last));
}

always_inline int
single_acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
                         int is_ip6, u8 * r_action, u32 * r_acl_match_p,
                         u32 * r_rule_match_p, u32 * trace_bitmap)
{
  int i;
  acl_list_t *a;
  acl_rule_t *r;

  if (pool_is_free_index (am->acls, acl_index))
    {
      if (r_acl_match_p)
        *r_acl_match_p = acl_index;
      if (r_rule_match_p)
        *r_rule_match_p = -1;
      /* the ACL does not exist but is used for policy. Block traffic. */
      return 0;
    }
  a = am->acls + acl_index;
  for (i = 0; i < a->count; i++)
    {
      r = a->rules + i;
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning("ACL_FA_NODE_DBG acl %d rule %d tag %s", acl_index, i, a->tag);
#endif
      if (is_ip6 != r->is_ipv6)
        {
          continue;
        }
      if (!fa_acl_match_addr
          (&pkt_5tuple->addr[1], &r->dst, r->dst_prefixlen, is_ip6))
        continue;

#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning
        ("ACL_FA_NODE_DBG acl %d rule %d pkt dst addr %U match rule addr %U/%d",
         acl_index, i, format_ip46_address, &pkt_5tuple->addr[1],
         r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
         &r->dst, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
         r->dst_prefixlen);
#endif

      if (!fa_acl_match_addr
          (&pkt_5tuple->addr[0], &r->src, r->src_prefixlen, is_ip6))
        continue;

#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning
        ("ACL_FA_NODE_DBG acl %d rule %d pkt src addr %U match rule addr %U/%d",
         acl_index, i, format_ip46_address, &pkt_5tuple->addr[0],
         r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
         &r->src, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
         r->src_prefixlen);
      clib_warning
        ("ACL_FA_NODE_DBG acl %d rule %d trying to match pkt proto %d with rule %d",
         acl_index, i, pkt_5tuple->l4.proto, r->proto);
#endif
      if (r->proto)
        {
          if (pkt_5tuple->l4.proto != r->proto)
            continue;

          if (PREDICT_FALSE (pkt_5tuple->pkt.is_nonfirst_fragment &&
                             am->l4_match_nonfirst_fragment))
            {
              /* non-initial fragment with frag match configured - match this rule */
              *trace_bitmap |= 0x80000000;
              *r_action = r->is_permit;
              if (r_acl_match_p)
                *r_acl_match_p = acl_index;
              if (r_rule_match_p)
                *r_rule_match_p = i;
              return 1;
            }

          /* A sanity check just to ensure we are about to match the ports extracted from the packet */
          if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt proto %d match rule %d",
             acl_index, i, pkt_5tuple->l4.proto, r->proto);
#endif

          if (!fa_acl_match_port
              (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
               r->src_port_or_type_last, is_ip6))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt sport %d match rule [%d..%d]",
             acl_index, i, pkt_5tuple->l4.port[0], r->src_port_or_type_first,
             r->src_port_or_type_last);
#endif

          if (!fa_acl_match_port
              (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
               r->dst_port_or_code_last, is_ip6))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt dport %d match rule [%d..%d]",
             acl_index, i, pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
             r->dst_port_or_code_last);
#endif
          if (pkt_5tuple->pkt.tcp_flags_valid
              && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
                  r->tcp_flags_value))
            continue;
        }
      /* everything matches! */
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG acl %d rule %d FULL-MATCH, action %d",
                    acl_index, i, r->is_permit);
#endif
      *r_action = r->is_permit;
      if (r_acl_match_p)
        *r_acl_match_p = acl_index;
      if (r_rule_match_p)
        *r_rule_match_p = i;
      return 1;
    }
  return 0;
}

always_inline int
acl_plugin_single_acl_match_5tuple (u32 acl_index, fa_5tuple_t * pkt_5tuple,
                                    int is_ip6, u8 * r_action, u32 * r_acl_match_p,
                                    u32 * r_rule_match_p, u32 * trace_bitmap)
{
  acl_main_t * am = p_acl_main;
  return single_acl_match_5tuple(am, acl_index, pkt_5tuple, is_ip6, r_action,
                                 r_acl_match_p, r_rule_match_p, trace_bitmap);
}

always_inline int
linear_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
                               int is_ip6, u8 *r_action, u32 *acl_pos_p, u32 * acl_match_p,
                               u32 * rule_match_p, u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  int i;
  u32 *acl_vector;
  u8 action = 0;
  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);

  acl_vector = acontext->acl_indices;

  for (i = 0; i < vec_len (acl_vector); i++)
    {
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG: Trying to match ACL: %d",
                    acl_vector[i]);
#endif
      if (single_acl_match_5tuple
          (am, acl_vector[i], pkt_5tuple, is_ip6, &action,
           acl_match_p, rule_match_p, trace_bitmap))
        {
          *r_action = action;
          *acl_pos_p = i;
          return 1;
        }
    }
  if (vec_len (acl_vector) > 0)
    {
      return 0;
    }
#ifdef FA_NODE_VERBOSE_DEBUG
  clib_warning ("ACL_FA_NODE_DBG: No ACL on lc_index %d", lc_index);
#endif
  /* If there are no ACLs defined we should not be here. */
  return 0;
}


/*
 * This returns true if there is indeed a match on the portranges.
 * With all these levels of indirection this is not going to be very fast,
 * so it is best to use individual ports or wildcard ports for performance.
 */
always_inline int
match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index)
{

  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), index);

  acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]);

#ifdef FA_NODE_VERBOSE_DEBUG
  clib_warning("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?",
               r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last,
               r->dst_port_or_code_first, match->l4.port[1], r->dst_port_or_code_last);
#endif

  return ( ((r->src_port_or_type_first <= match->l4.port[0]) && r->src_port_or_type_last >= match->l4.port[0]) &&
           ((r->dst_port_or_code_first <= match->l4.port[1]) && r->dst_port_or_code_last >= match->l4.port[1]) );
}

always_inline u32
multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match)
{
  clib_bihash_kv_48_8_t kv;
  clib_bihash_kv_48_8_t result;
  fa_5tuple_t *kv_key = (fa_5tuple_t *)kv.key;
  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
  u64 *pmatch = (u64 *)match;
  u64 *pmask;
  u64 *pkey;
  int mask_type_index;
  u32 curr_match_index = ~0;

  u32 lc_index = match->pkt.lc_index;
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;

  DBG("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
      pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);

  for(mask_type_index=0; mask_type_index < pool_len(am->ace_mask_type_pool); mask_type_index++) {
    if (!clib_bitmap_get(vec_elt_at_index((*applied_hash_acls), lc_index)->mask_type_index_bitmap, mask_type_index)) {
      /* This bit is not set. Avoid trying to match */
      continue;
    }
    ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, mask_type_index);
    pmatch = (u64 *)match;
    pmask = (u64 *)&mte->mask;
    pkey = (u64 *)kv.key;
    /*
     * unrolling the below loop results in a noticeable performance increase.
    int i;
    for(i=0; i<6; i++) {
      kv.key[i] = pmatch[i] & pmask[i];
    }
    */

    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;

    kv_key->pkt.mask_type_index_lsb = mask_type_index;
    DBG("  KEY %3d: %016llx %016llx %016llx %016llx %016llx %016llx", mask_type_index,
        kv.key[0], kv.key[1], kv.key[2], kv.key[3], kv.key[4], kv.key[5]);
    int res = clib_bihash_search_48_8 (&am->acl_lookup_hash, &kv, &result);
    if (res == 0) {
      DBG("ACL-MATCH! result_val: %016llx", result_val->as_u64);
      if (result_val->applied_entry_index < curr_match_index) {
        if (PREDICT_FALSE(result_val->need_portrange_check)) {
          /*
           * This is going to be slow, since we can have multiple superset
           * entries for narrow-ish portranges, e.g.:
           * 0..42 100..400, 230..60000,
           * so we need to walk linearly and check if they match.
           */

          u32 curr_index = result_val->applied_entry_index;
          while ((curr_index != ~0) && !match_portranges(am, match, curr_index)) {
            /* while no match and there are more entries, walk... */
            applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), curr_index);
            DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index);
            curr_index = pae->next_applied_entry_index;
          }
          if (curr_index < curr_match_index) {
            DBG("The index %d is the new candidate in portrange matches.", curr_index);
            curr_match_index = curr_index;
          } else {
            DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index);
          }
        } else {
          /* The usual path is here. Found an entry in front of the current candidate - so it's a new one */
          DBG("This match is the new candidate");
          curr_match_index = result_val->applied_entry_index;
          if (!result_val->shadowed) {
            /* new result is known to not be shadowed, so no point in looking up further */
            break;
          }
        }
      }
    }
  }
  DBG("MATCH-RESULT: %d", curr_match_index);
  return curr_match_index;
}
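
/*
 * Worked example (illustrative): suppose one mask type covers only the
 * destination port, i.e. pmask is all zeroes except 0xffff in the bit
 * positions that hold l4.port[1]. Then kv.key = pmatch & pmask keeps just
 * that port, mask_type_index_lsb tags the key with the mask id, and the
 * bihash search can only hit entries that were installed under that same
 * mask - which is why every applicable mask type index must be tried.
 */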

always_inline int
hash_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
                             int is_ip6, u8 *action, u32 *acl_pos_p, u32 * acl_match_p,
                             u32 * rule_match_p, u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
  u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple);
  if (match_index < vec_len((*applied_hash_aces))) {
    applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), match_index);
    pae->hitcount++;
    *acl_pos_p = pae->acl_position;
    *acl_match_p = pae->acl_index;
    *rule_match_p = pae->ace_index;
    *action = pae->action;
    return 1;
  }
  return 0;
}



always_inline int
acl_plugin_match_5tuple_inline (u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  fa_5tuple_t * pkt_5tuple_internal = (fa_5tuple_t *)pkt_5tuple;
  pkt_5tuple_internal->pkt.lc_index = lc_index;
  if (am->use_hash_acl_matching) {
    return hash_multi_acl_match_5tuple(lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                       r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
  } else {
    return linear_multi_acl_match_5tuple(lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                         r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
  }
}
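
/*
 * Example (illustrative sketch only, not compiled): inside the plugin a node
 * would typically use the _inline variants above. The next-node values and
 * the helper name are hypothetical.
 */
#if 0
static_always_inline u32
my_node_classify_one (u32 lc_index, vlib_buffer_t * b0, int is_ip6)
{
  fa_5tuple_opaque_t p5tuple;
  u8 action = 0;
  u32 acl_pos, acl_index, rule_index, trace_bitmap = 0;

  acl_plugin_fill_5tuple_inline (lc_index, b0, is_ip6, 1 /* is_input */ ,
                                 0 /* is_l2_path */ , &p5tuple);
  acl_plugin_match_5tuple_inline (lc_index, &p5tuple, is_ip6, &action,
                                  &acl_pos, &acl_index, &rule_index,
                                  &trace_bitmap);
  return action ? MY_NEXT_PERMIT : MY_NEXT_DROP;   /* hypothetical next nodes */
}
#endif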


#endif