FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
public_inlines.h
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_acl_inlines_h
#define included_acl_inlines_h

#include <stdint.h>

#include <vlib/unix/plugin.h>
#include <plugins/acl/acl.h>
#include <plugins/acl/fa_node.h>
#include <plugins/acl/hash_lookup_private.h>

#include <plugins/acl/exported_types.h>

#define LOAD_SYMBOL_FROM_PLUGIN_TO(p, s, st)                              \
({                                                                        \
    st = vlib_get_plugin_symbol(p, #s);                                   \
    if (!st)                                                              \
        return clib_error_return(0,                                       \
                "Plugin %s and/or symbol %s not found.", p, #s);          \
})

#define LOAD_SYMBOL(s) LOAD_SYMBOL_FROM_PLUGIN_TO("acl_plugin.so", s, s)

static inline clib_error_t * acl_plugin_exports_init (acl_plugin_methods_t *m)
{
  acl_plugin_methods_vtable_init_fn_t mvi;

  LOAD_SYMBOL_FROM_PLUGIN_TO("acl_plugin.so", acl_plugin_methods_vtable_init, mvi);

  return (mvi(m));
}

always_inline void *
get_ptr_to_offset (vlib_buffer_t * b0, int offset)
{
  u8 *p = vlib_buffer_get_current (b0) + offset;
  return p;
}

always_inline int
offset_within_packet (vlib_buffer_t * b0, int offset)
{
  /* For the purposes of this code, "within" means we have at least 8 bytes after it */
  return (offset <= (b0->current_length - 8));
}

always_inline int
offset_beyond_packet (vlib_buffer_t * b0, int offset)
{
  /* For the purposes of this code, "within" means we have at least 8 bytes after it */
  return (offset > (b0->current_length - 8));
}


always_inline void
acl_fill_5tuple_l3_data (acl_main_t * am, vlib_buffer_t * b0, int is_ip6,
                         int l3_offset, fa_5tuple_t * p5tuple_pkt)
{
  if (is_ip6)
    {
      ip6_header_t *ip6 = vlib_buffer_get_current (b0) + l3_offset;
      p5tuple_pkt->ip6_addr[0] = ip6->src_address;
      p5tuple_pkt->ip6_addr[1] = ip6->dst_address;
    }
  else
    {
      int ii;
      for (ii = 0; ii < 6; ii++) {
        p5tuple_pkt->l3_zero_pad[ii] = 0;
      }
      ip4_header_t *ip4 = vlib_buffer_get_current (b0) + l3_offset;
      p5tuple_pkt->ip4_addr[0] = ip4->src_address;
      p5tuple_pkt->ip4_addr[1] = ip4->dst_address;
    }
}

always_inline void
acl_fill_5tuple_l4_and_pkt_data (acl_main_t * am, u32 sw_if_index0, vlib_buffer_t * b0, int is_ip6, int is_input,
                                 int l3_offset, fa_session_l4_key_t *p5tuple_l4, fa_packet_info_t *p5tuple_pkt)
{
  /* IP4 and IP6 protocol numbers of ICMP */
  static u8 icmp_protos_v4v6[] = { IP_PROTOCOL_ICMP, IP_PROTOCOL_ICMP6 };

  int l4_offset;
  u16 ports[2] = { 0 };
  u8 proto;

  u8 tmp_l4_flags = 0;
  fa_packet_info_t tmp_pkt = { .is_ip6 = is_ip6, .mask_type_index_lsb = ~0 };

  if (is_ip6)
    {
      ip6_header_t *ip6 = vlib_buffer_get_current (b0) + l3_offset;
      proto = ip6->protocol;

      l4_offset = l3_offset + sizeof (ip6_header_t);

      /* IP6 EH handling is here, increment l4_offset if needs to, update the proto */
      int need_skip_eh = clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
      if (PREDICT_FALSE (need_skip_eh))
        {
          while (need_skip_eh && offset_within_packet (b0, l4_offset))
            {
              /* Fragment header needs special handling */
              if (PREDICT_FALSE (ACL_EH_FRAGMENT == proto))
                {
                  proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
                  u16 frag_offset = *(u16 *) get_ptr_to_offset (b0, 2 + l4_offset);
                  frag_offset = clib_net_to_host_u16 (frag_offset) >> 3;
                  if (frag_offset)
                    {
                      tmp_pkt.is_nonfirst_fragment = 1;
                      /* invalidate L4 offset so we don't try to find L4 info */
                      l4_offset += b0->current_length;
                    }
                  else
                    {
                      /* First fragment: skip the frag header and move on. */
                      l4_offset += 8;
                    }
                }
              else
                {
                  u8 nwords = *(u8 *) get_ptr_to_offset (b0, 1 + l4_offset);
                  proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
                  l4_offset += 8 * (1 + (u16) nwords);
                }
              need_skip_eh =
                clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
            }
        }
    }
  else
    {
      ip4_header_t *ip4 = vlib_buffer_get_current (b0) + l3_offset;
      proto = ip4->protocol;
      l4_offset = l3_offset + ip4_header_bytes (ip4);

      /* non-initial fragments have non-zero offset */
      if (PREDICT_FALSE (ip4_get_fragment_offset (ip4)))
        {
          tmp_pkt.is_nonfirst_fragment = 1;
          /* invalidate L4 offset so we don't try to find L4 info */
          l4_offset += b0->current_length;
        }

    }
  tmp_l4_flags |= is_input ? FA_SK_L4_FLAG_IS_INPUT : 0;

  if (PREDICT_TRUE (offset_within_packet (b0, l4_offset)))
    {
      tcp_header_t *tcph = vlib_buffer_get_current (b0) + l4_offset;
      udp_header_t *udph = vlib_buffer_get_current (b0) + l4_offset;
      tmp_pkt.l4_valid = 1;

      if (PREDICT_FALSE (icmp_protos_v4v6[is_ip6] == proto))
        {
          icmp46_header_t *icmph = vlib_buffer_get_current (b0) + l4_offset;
          ports[0] = icmph->type;
          ports[1] = icmph->code;
          /* ICMP needs special handling */
          tmp_l4_flags |= FA_SK_L4_FLAG_IS_SLOWPATH;
        }
      else if (IP_PROTOCOL_TCP == proto)
        {
          ports[0] = clib_net_to_host_u16 (tcph->src_port);
          ports[1] = clib_net_to_host_u16 (tcph->dst_port);
          tmp_pkt.tcp_flags = tcph->flags;
          tmp_pkt.tcp_flags_valid = 1;
        }
      else if (IP_PROTOCOL_UDP == proto)
        {
          ports[0] = clib_net_to_host_u16 (udph->src_port);
          ports[1] = clib_net_to_host_u16 (udph->dst_port);
        }
      else
        {
          tmp_l4_flags |= FA_SK_L4_FLAG_IS_SLOWPATH;
        }
    }

  p5tuple_pkt->as_u64 = tmp_pkt.as_u64;

  fa_session_l4_key_t tmp_l4 = { .port = { ports[0], ports[1] },
                                 .proto = proto,
                                 .l4_flags = tmp_l4_flags,
                                 .lsb_of_sw_if_index = sw_if_index0 & 0xffff };

  p5tuple_l4->as_u64 = tmp_l4.as_u64;
}

always_inline void
acl_fill_5tuple (acl_main_t * am, u32 sw_if_index0, vlib_buffer_t * b0, int is_ip6,
                 int is_input, int is_l2_path, fa_5tuple_t * p5tuple_pkt)
{
  int l3_offset;

  if (is_l2_path)
    {
      l3_offset = ethernet_buffer_header_size(b0);
    }
  else
    {
      if (is_input)
        l3_offset = 0;
      else
        l3_offset = vnet_buffer(b0)->ip.save_rewrite_length;
    }

  /* key[0..3] contains src/dst address and is cleared/set below */
  /* Remainder of the key and per-packet non-key data */
  acl_fill_5tuple_l3_data(am, b0, is_ip6, l3_offset, p5tuple_pkt);
  acl_fill_5tuple_l4_and_pkt_data(am, sw_if_index0, b0, is_ip6, is_input, l3_offset, &p5tuple_pkt->l4, &p5tuple_pkt->pkt);
}

always_inline void
acl_plugin_fill_5tuple_inline (void *p_acl_main, u32 lc_index, vlib_buffer_t * b0, int is_ip6,
                               int is_input, int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt)
{
  acl_main_t *am = p_acl_main;
  acl_fill_5tuple(am, 0, b0, is_ip6, is_input, is_l2_path, (fa_5tuple_t *)p5tuple_pkt);
}


always_inline int
fa_acl_match_ip4_addr (ip4_address_t * addr1, ip4_address_t * addr2,
                       int prefixlen)
{
  if (prefixlen == 0)
    {
      /* match any always succeeds */
      return 1;
    }
  uint32_t a1 = clib_net_to_host_u32 (addr1->as_u32);
  uint32_t a2 = clib_net_to_host_u32 (addr2->as_u32);
  uint32_t mask0 = 0xffffffff - ((1 << (32 - prefixlen)) - 1);
  return (a1 & mask0) == a2;
}

always_inline int
fa_acl_match_ip6_addr (ip6_address_t * addr1, ip6_address_t * addr2,
                       int prefixlen)
{
  if (prefixlen == 0)
    {
      /* match any always succeeds */
      return 1;
    }
  if (memcmp (addr1, addr2, prefixlen / 8))
    {
      /* If the starting full bytes do not match, no point in bit-twiddling further */
      return 0;
    }
  if (prefixlen % 8)
    {
      u8 b1 = *((u8 *) addr1 + 1 + prefixlen / 8);
      u8 b2 = *((u8 *) addr2 + 1 + prefixlen / 8);
      u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1));
      return (b1 & mask0) == b2;
    }
  else
    {
      /* The prefix fits into integer number of bytes, so nothing left to do */
      return 1;
    }
}

always_inline int
fa_acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6)
{
  return ((port >= port_first) && (port <= port_last));
}

always_inline int
single_acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
                         int is_ip6, u8 * r_action, u32 * r_acl_match_p,
                         u32 * r_rule_match_p, u32 * trace_bitmap)
{
  int i;
  acl_rule_t *r;
  acl_rule_t *acl_rules;

  if (pool_is_free_index (am->acls, acl_index))
    {
      if (r_acl_match_p)
        *r_acl_match_p = acl_index;
      if (r_rule_match_p)
        *r_rule_match_p = -1;
      /* the ACL does not exist but is used for policy. Block traffic. */
      return 0;
    }
  acl_rules = am->acls[acl_index].rules;
  for (i = 0; i < vec_len (acl_rules); i++)
    {
      r = &acl_rules[i];
      if (is_ip6 != r->is_ipv6)
        {
          continue;
        }
      if (is_ip6) {
        if (!fa_acl_match_ip6_addr
            (&pkt_5tuple->ip6_addr[1], &r->dst.ip6, r->dst_prefixlen))
          continue;
        if (!fa_acl_match_ip6_addr
            (&pkt_5tuple->ip6_addr[0], &r->src.ip6, r->src_prefixlen))
          continue;
      } else {
        if (!fa_acl_match_ip4_addr
            (&pkt_5tuple->ip4_addr[1], &r->dst.ip4, r->dst_prefixlen))
          continue;
        if (!fa_acl_match_ip4_addr
            (&pkt_5tuple->ip4_addr[0], &r->src.ip4, r->src_prefixlen))
          continue;
      }

      if (r->proto)
        {
          if (pkt_5tuple->l4.proto != r->proto)
            continue;

          if (PREDICT_FALSE (pkt_5tuple->pkt.is_nonfirst_fragment &&
                             am->l4_match_nonfirst_fragment))
            {
              /* non-initial fragment with frag match configured - match this rule */
              *trace_bitmap |= 0x80000000;
              *r_action = r->is_permit;
              if (r_acl_match_p)
                *r_acl_match_p = acl_index;
              if (r_rule_match_p)
                *r_rule_match_p = i;
              return 1;
            }

          /* A sanity check just to ensure we are about to match the ports extracted from the packet */
          if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt proto %d match rule %d",
             acl_index, i, pkt_5tuple->l4.proto, r->proto);
#endif

          if (!fa_acl_match_port
              (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
               r->src_port_or_type_last, is_ip6))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt sport %d match rule [%d..%d]",
             acl_index, i, pkt_5tuple->l4.port[0], r->src_port_or_type_first,
             r->src_port_or_type_last);
#endif

          if (!fa_acl_match_port
              (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
               r->dst_port_or_code_last, is_ip6))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt dport %d match rule [%d..%d]",
             acl_index, i, pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
             r->dst_port_or_code_last);
#endif
          if (pkt_5tuple->pkt.tcp_flags_valid
              && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
                  r->tcp_flags_value))
            continue;
        }
      /* everything matches! */
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG acl %d rule %d FULL-MATCH, action %d",
                    acl_index, i, r->is_permit);
#endif
      *r_action = r->is_permit;
      if (r_acl_match_p)
        *r_acl_match_p = acl_index;
      if (r_rule_match_p)
        *r_rule_match_p = i;
      return 1;
    }
  return 0;
}

always_inline int
acl_plugin_single_acl_match_5tuple (void *p_acl_main, u32 acl_index, fa_5tuple_t * pkt_5tuple,
                                    int is_ip6, u8 * r_action, u32 * r_acl_match_p,
                                    u32 * r_rule_match_p, u32 * trace_bitmap)
{
  acl_main_t * am = p_acl_main;
  return single_acl_match_5tuple(am, acl_index, pkt_5tuple, is_ip6, r_action,
                                 r_acl_match_p, r_rule_match_p, trace_bitmap);
}

always_inline int
linear_multi_acl_match_5tuple (void *p_acl_main, u32 lc_index, fa_5tuple_t * pkt_5tuple,
                               int is_ip6, u8 *r_action, u32 *acl_pos_p, u32 * acl_match_p,
                               u32 * rule_match_p, u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  int i;
  u32 *acl_vector;
  u8 action = 0;
  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);

  acl_vector = acontext->acl_indices;

  for (i = 0; i < vec_len (acl_vector); i++)
    {
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG: Trying to match ACL: %d",
                    acl_vector[i]);
#endif
      if (single_acl_match_5tuple
          (am, acl_vector[i], pkt_5tuple, is_ip6, &action,
           acl_match_p, rule_match_p, trace_bitmap))
        {
          *r_action = action;
          *acl_pos_p = i;
          return 1;
        }
    }
  if (vec_len (acl_vector) > 0)
    {
      return 0;
    }
#ifdef FA_NODE_VERBOSE_DEBUG
  clib_warning ("ACL_FA_NODE_DBG: No ACL on lc_index %d", lc_index);
#endif
  /* If there are no ACLs defined we should not be here. */
  return 0;
}


/*
 * This returns true if there is indeed a match on the portranges.
 * With all these levels of indirections, this is not going to be very fast,
 * so, best use the individual ports or wildcard ports for performance.
 */
always_inline int
match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index)
{

  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), index);

  acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]);

#ifdef FA_NODE_VERBOSE_DEBUG
  clib_warning("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?",
               r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last,
               r->dst_port_or_code_first, match->l4.port[1], r->dst_port_or_code_last);
#endif

  return ( ((r->src_port_or_type_first <= match->l4.port[0]) && r->src_port_or_type_last >= match->l4.port[0]) &&
           ((r->dst_port_or_code_first <= match->l4.port[1]) && r->dst_port_or_code_last >= match->l4.port[1]) );
}

always_inline int
single_rule_match_5tuple (acl_rule_t * r, int is_ip6, fa_5tuple_t * pkt_5tuple)
{
  if (is_ip6 != r->is_ipv6)
    {
      return 0;
    }

  if (is_ip6)
    {
      if (!fa_acl_match_ip6_addr
          (&pkt_5tuple->ip6_addr[1], &r->dst.ip6, r->dst_prefixlen))
        return 0;
      if (!fa_acl_match_ip6_addr
          (&pkt_5tuple->ip6_addr[0], &r->src.ip6, r->src_prefixlen))
        return 0;
    }
  else
    {
      if (!fa_acl_match_ip4_addr
          (&pkt_5tuple->ip4_addr[1], &r->dst.ip4, r->dst_prefixlen))
        return 0;
      if (!fa_acl_match_ip4_addr
          (&pkt_5tuple->ip4_addr[0], &r->src.ip4, r->src_prefixlen))
        return 0;
    }

  if (r->proto)
    {
      if (pkt_5tuple->l4.proto != r->proto)
        return 0;

      /* A sanity check just to ensure we are about to match the ports extracted from the packet */
      if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
        return 0;


      if (!fa_acl_match_port
          (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
           r->src_port_or_type_last, pkt_5tuple->pkt.is_ip6))
        return 0;


      if (!fa_acl_match_port
          (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
           r->dst_port_or_code_last, pkt_5tuple->pkt.is_ip6))
        return 0;

      if (pkt_5tuple->pkt.tcp_flags_valid
          && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
              r->tcp_flags_value))
        return 0;
    }
  /* everything matches! */
  return 1;
}

always_inline u32
multi_acl_match_get_applied_ace_index (acl_main_t * am, int is_ip6, fa_5tuple_t * match)
{
  clib_bihash_kv_48_8_t kv;
  clib_bihash_kv_48_8_t result;
  fa_5tuple_t *kv_key = (fa_5tuple_t *) kv.key;
  hash_acl_lookup_value_t *result_val =
    (hash_acl_lookup_value_t *) & result.value;
  u64 *pmatch = (u64 *) match;
  u64 *pmask;
  u64 *pkey;
  int mask_type_index, order_index;
  u32 curr_match_index = (~0 - 1);


  u32 lc_index = match->pkt.lc_index;
  applied_hash_ace_entry_t **applied_hash_aces =
    vec_elt_at_index (am->hash_entry_vec_by_lc_index, lc_index);

  hash_applied_mask_info_t **hash_applied_mask_info_vec =
    vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);

  hash_applied_mask_info_t *minfo;

  DBG ("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
       pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);

  for (order_index = 0; order_index < vec_len ((*hash_applied_mask_info_vec));
       order_index++)
    {
      minfo = vec_elt_at_index ((*hash_applied_mask_info_vec), order_index);
      if (minfo->first_rule_index > curr_match_index)
        {
          /* Index in this and following (by construction) partitions are greater than our candidate, Avoid trying to match! */
          break;
        }

      mask_type_index = minfo->mask_type_index;
      ace_mask_type_entry_t *mte =
        vec_elt_at_index (am->ace_mask_type_pool, mask_type_index);
      pmatch = (u64 *) match;
      pmask = (u64 *) & mte->mask;
      pkey = (u64 *) kv.key;
      /*
       * unrolling the below loop results in a noticeable performance increase.
       int i;
       for(i=0; i<6; i++) {
         kv.key[i] = pmatch[i] & pmask[i];
       }
       */

      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;
      *pkey++ = *pmatch++ & *pmask++;

      /*
       * The use of temporary variable convinces the compiler
       * to make a u64 write, avoiding the stall on crc32 operation
       * just a bit later.
       */
      fa_packet_info_t tmp_pkt = kv_key->pkt;
      tmp_pkt.mask_type_index_lsb = mask_type_index;
      kv_key->pkt.as_u64 = tmp_pkt.as_u64;

      int res =
        clib_bihash_search_inline_2_48_8 (&am->acl_lookup_hash, &kv, &result);

      if (res == 0)
        {
          /* There is a hit in the hash, so check the collision vector */
          u32 curr_index = result_val->applied_entry_index;
          applied_hash_ace_entry_t *pae =
            vec_elt_at_index ((*applied_hash_aces), curr_index);
          collision_match_rule_t *crs = pae->colliding_rules;
          int i;
          for (i = 0; i < vec_len (crs); i++)
            {
              if (crs[i].applied_entry_index >= curr_match_index)
                {
                  continue;
                }
              if (single_rule_match_5tuple (&crs[i].rule, is_ip6, match))
                {
                  curr_match_index = crs[i].applied_entry_index;
                }
            }
        }
    }
  DBG ("MATCH-RESULT: %d", curr_match_index);
  return curr_match_index;
}

always_inline int
hash_multi_acl_match_5tuple (void *p_acl_main, u32 lc_index, fa_5tuple_t * pkt_5tuple,
                             int is_ip6, u8 *action, u32 *acl_pos_p, u32 * acl_match_p,
                             u32 * rule_match_p, u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
  u32 match_index = multi_acl_match_get_applied_ace_index(am, is_ip6, pkt_5tuple);
  if (match_index < vec_len((*applied_hash_aces))) {
    applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), match_index);
    pae->hitcount++;
    *acl_pos_p = pae->acl_position;
    *acl_match_p = pae->acl_index;
    *rule_match_p = pae->ace_index;
    *action = pae->action;
    return 1;
  }
  return 0;
}


always_inline int
acl_plugin_match_5tuple_inline (void *p_acl_main, u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  fa_5tuple_t * pkt_5tuple_internal = (fa_5tuple_t *)pkt_5tuple;
  pkt_5tuple_internal->pkt.lc_index = lc_index;
  if (PREDICT_TRUE(am->use_hash_acl_matching)) {
    if (PREDICT_FALSE(pkt_5tuple_internal->pkt.is_nonfirst_fragment)) {
      /*
       * tuplemerge does not take fragments into account,
       * and in general making fragments first class citizens has
       * proved more overhead than it's worth - so just fall back to linear
       * matching in that case.
       */
      return linear_multi_acl_match_5tuple(p_acl_main, lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                           r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
    } else {
      return hash_multi_acl_match_5tuple(p_acl_main, lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                         r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
    }
  } else {
    return linear_multi_acl_match_5tuple(p_acl_main, lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                         r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
  }
}


always_inline int
acl_plugin_match_5tuple_inline_and_count (void *p_acl_main, u32 lc_index,
                                          fa_5tuple_opaque_t * pkt_5tuple,
                                          int is_ip6, u8 * r_action,
                                          u32 * r_acl_pos_p,
                                          u32 * r_acl_match_p,
                                          u32 * r_rule_match_p,
                                          u32 * trace_bitmap,
                                          u32 packet_size)
{
  acl_main_t *am = p_acl_main;
  int ret = 0;
  fa_5tuple_t * pkt_5tuple_internal = (fa_5tuple_t *)pkt_5tuple;
  pkt_5tuple_internal->pkt.lc_index = lc_index;
  if (PREDICT_TRUE(am->use_hash_acl_matching)) {
    if (PREDICT_FALSE(pkt_5tuple_internal->pkt.is_nonfirst_fragment)) {
      /*
       * tuplemerge does not take fragments into account,
       * and in general making fragments first class citizens has
       * proved more overhead than it's worth - so just fall back to linear
       * matching in that case.
       */
      ret = linear_multi_acl_match_5tuple(p_acl_main, lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                          r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
    } else {
      ret = hash_multi_acl_match_5tuple(p_acl_main, lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                        r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
    }
  } else {
    ret = linear_multi_acl_match_5tuple(p_acl_main, lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                        r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
  }
  if (PREDICT_TRUE(ret)) {
    u16 thread_index = os_get_thread_index ();
    vlib_increment_combined_counter(am->combined_acl_counters + *r_acl_match_p, thread_index, *r_rule_match_p, 1, packet_size);
  }
  return ret;
}


#endif
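
Taken together, this header gives a consumer plugin two things: a one-time acl_plugin_exports_init() call that resolves the ACL plugin's method vtable from acl_plugin.so, and the always_inline fill/match helpers that run per packet. The following is a minimal sketch of how another plugin might wire these up; it is illustrative only and not part of public_inlines.h. The p_acl_main field of acl_plugin_methods_t and the my_lc_index value are assumptions here (obtaining a lookup-context index goes through the vtable and is not shown in this header), so treat it as a shape rather than a drop-in implementation.

/* Hypothetical consumer sketch - not part of public_inlines.h.
 * Assumes acl_plugin_methods_t exposes p_acl_main (assumption), and that
 * my_lc_index was obtained earlier through the ACL plugin's lookup-context API. */
#include <plugins/acl/public_inlines.h>

static acl_plugin_methods_t acl_plugin;   /* filled once at init time */

static clib_error_t *
my_plugin_acl_init (vlib_main_t * vm)
{
  /* Loads acl_plugin_methods_vtable_init from acl_plugin.so and lets it
   * populate the vtable; returns a clib_error_t on failure. */
  return acl_plugin_exports_init (&acl_plugin);
}

always_inline int
my_plugin_check_packet (vlib_buffer_t * b0, u32 my_lc_index, int is_ip6)
{
  fa_5tuple_opaque_t tuple;
  u8 action = 0;
  u32 acl_pos, acl_index, rule_index, trace_bitmap = 0;

  /* Extract the 5-tuple from the buffer (L3 path, input direction here). */
  acl_plugin_fill_5tuple_inline (acl_plugin.p_acl_main, my_lc_index, b0,
                                 is_ip6, 1 /* is_input */, 0 /* is_l2_path */,
                                 &tuple);

  /* Hash (tuplemerge) matching with linear fallback, as implemented above. */
  if (acl_plugin_match_5tuple_inline (acl_plugin.p_acl_main, my_lc_index,
                                      &tuple, is_ip6, &action, &acl_pos,
                                      &acl_index, &rule_index, &trace_bitmap))
    return action;   /* permit/deny value of the matching ACE */

  return 0;          /* no ACL matched */
}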