FD.io VPP  v21.06-1-gbb7418cf9
Vector Packet Processing
decap.c
Go to the documentation of this file.
1 /*
2  * decap.c: vxlan tunnel decap packet processing
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/vxlan/vxlan.h>
20 #include <vnet/udp/udp_local.h>
21 
22 #ifndef CLIB_MARCH_VARIANT
25 #endif
26 
27 typedef struct
28 {
34 
35 static u8 *
36 format_vxlan_rx_trace (u8 * s, va_list * args)
37 {
38  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
39  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
40  vxlan_rx_trace_t *t = va_arg (*args, vxlan_rx_trace_t *);
41 
42  if (t->tunnel_index == ~0)
43  return format (s, "VXLAN decap error - tunnel for vni %d does not exist",
44  t->vni);
45  return format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
46  t->tunnel_index, t->vni, t->next_index, t->error);
47 }
48 
50 
/* Canned lookup result returned when no tunnel matches the packet:
 * no tunnel sw_if_index, send to the drop node, and charge the
 * "no such tunnel" error counter. */
static const vxlan_decap_info_t decap_not_found = {
  .sw_if_index = ~0,
  .next_index = VXLAN_INPUT_NEXT_DROP,
  .error = VXLAN_ERROR_NO_SUCH_TUNNEL
};
56 
/* Canned lookup result for a malformed VXLAN header (flags other than
 * the mandatory I bit set): drop and charge the bad-flags counter. */
static const vxlan_decap_info_t decap_bad_flags = {
  .sw_if_index = ~0,
  .next_index = VXLAN_INPUT_NEXT_DROP,
  .error = VXLAN_ERROR_BAD_FLAGS
};
62 
		    u32 fib_index, ip4_header_t * ip4_0,
		    vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
{
  /* RFC 7348: only the I (valid-VNI) bit may be set in the flags octet;
   * anything else is rejected up front. */
  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
    return decap_bad_flags;

  /* Make sure VXLAN tunnel exist according to packet S/D IP, UDP port, VRF,
   * and VNI */
  u32 dst = ip4_0->dst_address.as_u32;
  u32 src = ip4_0->src_address.as_u32;
  udp_header_t *udp = ip4_next_header (ip4_0);
  /* 128-bit bihash key: key[0] = dst|src addresses,
   * key[1] = udp dst port | fib index | vni (fields kept in wire order). */
  vxlan4_tunnel_key_t key4 = {
    .key[0] = ((u64) dst << 32) | src,
    .key[1] = ((u64) udp->dst_port << 48) | ((u64) fib_index << 32) |
      vxlan0->vni_reserved,
  };

  if (PREDICT_TRUE
      (key4.key[0] == cache->key[0] && key4.key[1] == cache->key[1]))
    {
      /* cache hit */
      vxlan_decap_info_t di = {.as_u64 = cache->value };
      *stats_sw_if_index = di.sw_if_index;
      return di;
    }

  int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
  if (PREDICT_TRUE (rv == 0))
    {
      /* unicast match - remember it for subsequent packets of the flow */
      *cache = key4;
      vxlan_decap_info_t di = {.as_u64 = key4.value };
      *stats_sw_if_index = di.sw_if_index;
      return di;
    }

  /* try multicast */
  /* NOTE(review): the guard that takes this early return only for
   * non-multicast destinations appears to be missing from this extract;
   * as written the multicast path below is unreachable - confirm against
   * upstream before relying on it. */
  return decap_not_found;

  /* search for mcast decap info by mcast address */
  key4.key[0] = dst;
  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
  if (rv != 0)
    return decap_not_found;

  /* search for unicast tunnel using the mcast tunnel local(src) ip */
  vxlan_decap_info_t mdi = {.as_u64 = key4.value };
  key4.key[0] = ((u64) mdi.local_ip.as_u32 << 32) | src;
  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
  if (PREDICT_FALSE (rv != 0))
    return decap_not_found;

  /* mcast traffic does not update the cache */
  *stats_sw_if_index = mdi.sw_if_index;
  vxlan_decap_info_t di = {.as_u64 = key4.value };
  return di;
}
122 
124 
		    u32 fib_index, ip6_header_t * ip6_0,
		    vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
{
  /* RFC 7348: only the I (valid-VNI) flag is legal. */
  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
    return decap_bad_flags;

  /* Make sure VXLAN tunnel exist according to packet SIP, UDP port, VRF, and
   * VNI */
  udp_header_t *udp = ip6_next_header (ip6_0);
  /* 192-bit bihash key: 128-bit source address plus
   * (udp dst port | fib index | vni). */
  vxlan6_tunnel_key_t key6 = {
    .key[0] = ip6_0->src_address.as_u64[0],
    .key[1] = ip6_0->src_address.as_u64[1],
    .key[2] = ((u64) udp->dst_port << 48) | ((u64) fib_index << 32) |
      vxlan0->vni_reserved,
  };

  /* On cache miss, do the full bihash lookup and refresh the cache. */
  if (PREDICT_FALSE
      (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
    {
      int rv =
	clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
      if (PREDICT_FALSE (rv != 0))
	return decap_not_found;

      *cache = key6;
    }
  vxlan_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate VXLAN tunnel SIP against packet DIP */
  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
    *stats_sw_if_index = t0->sw_if_index;
  else
    {
      /* try multicast */
      /* NOTE(review): the multicast-address guard appears to be missing
       * from this extract; as written the block below is unreachable -
       * confirm against upstream. */
      return decap_not_found;

      /* Make sure mcast VXLAN tunnel exist by packet DIP and VNI */
      key6.key[0] = ip6_0->dst_address.as_u64[0];
      key6.key[1] = ip6_0->dst_address.as_u64[1];
      int rv =
	clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
      if (PREDICT_FALSE (rv != 0))
	return decap_not_found;

      vxlan_tunnel_t *mcast_t0 = pool_elt_at_index (vxm->tunnels, key6.value);
      *stats_sw_if_index = mcast_t0->sw_if_index;
    }

  /* NOTE(review): the declaration line of the result appears truncated in
   * this extract (upstream builds a vxlan_decap_info_t named di here). */
    .sw_if_index = t0->sw_if_index,
    .next_index = t0->decap_next_index,
  };
  return di;
}
182 
		       vlib_frame_t * from_frame, u32 is_ip4)
{
  /* Shared IPv4/IPv6 VXLAN decap worker: pops the VXLAN header, resolves
   * the tunnel for each packet, rewrites the RX interface for bridging /
   * learning, and bumps per-tunnel combined counters.
   * NOTE(review): several declaration lines (rx_counter initializer,
   * bufs/b/next/thread_index, prefetch pair, trace ternary arms) appear
   * truncated in this extract - compare with upstream before editing. */
  vxlan_main_t *vxm = &vxlan_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vlib_combined_counter_main_t *rx_counter =
  last_tunnel_cache4 last4;	/* one-entry IPv4 tunnel lookup cache */
  last_tunnel_cache6 last6;	/* one-entry IPv6 tunnel lookup cache */
  u32 pkts_dropped = 0;

  /* Invalidate the lookup cache for the address family we will use. */
  if (is_ip4)
    clib_memset (&last4, 0xff, sizeof last4);
  else
    clib_memset (&last6, 0xff, sizeof last6);

  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;

  vlib_get_buffers (vm, from, bufs, n_left_from);

  u32 stats_if0 = ~0, stats_if1 = ~0;
  /* Dual loop: process two packets per iteration while at least four
   * remain so the next pair can be prefetched. */
  while (n_left_from >= 4)
    {
      /* Prefetch next iteration. */
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);

      /* udp leaves current_data pointing at the vxlan header */
      void *cur0 = vlib_buffer_get_current (b[0]);
      void *cur1 = vlib_buffer_get_current (b[1]);
      vxlan_header_t *vxlan0 = cur0;
      vxlan_header_t *vxlan1 = cur1;


      /* Recover the outer IP header by stepping back over udp + ip. */
      ip4_header_t *ip4_0, *ip4_1;
      ip6_header_t *ip6_0, *ip6_1;
      if (is_ip4)
	{
	  ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	  ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	}
      else
	{
	  ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
	  ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
	}

      /* pop vxlan */
      vlib_buffer_advance (b[0], sizeof *vxlan0);
      vlib_buffer_advance (b[1], sizeof *vxlan1);

      u32 fi0 = vlib_buffer_get_ip_fib_index (b[0], is_ip4);
      u32 fi1 = vlib_buffer_get_ip_fib_index (b[1], is_ip4);

      vxlan_decap_info_t di0 = is_ip4 ?
	vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
	vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);
      vxlan_decap_info_t di1 = is_ip4 ?
	vxlan4_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan1, &stats_if1) :
	vxlan6_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan1, &stats_if1);

      /* Prefetch next iteration. */

      u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
      u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);

      next[0] = di0.next_index;
      next[1] = di1.next_index;

      /* Fast path: both lookups succeeded. */
      u8 any_error = di0.error | di1.error;
      if (PREDICT_TRUE (any_error == 0))
	{
	  /* Required to make the l2 tag push / pop code work on l2 subifs */
	  vnet_update_l2_len (b[0]);
	  vnet_update_l2_len (b[1]);
	  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
	  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
	  vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
	  vlib_increment_combined_counter (rx_counter, thread_index,
					   stats_if0, 1, len0);
	  vlib_increment_combined_counter (rx_counter, thread_index,
					   stats_if1, 1, len1);
	}
      else
	{
	  /* Slow path: handle each packet of the pair independently. */
	  if (di0.error == 0)
	    {
	      vnet_update_l2_len (b[0]);
	      vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
	      vlib_increment_combined_counter (rx_counter, thread_index,
					       stats_if0, 1, len0);
	    }
	  else
	    {
	      b[0]->error = node->errors[di0.error];
	      pkts_dropped++;
	    }

	  if (di1.error == 0)
	    {
	      vnet_update_l2_len (b[1]);
	      vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
	      vlib_increment_combined_counter (rx_counter, thread_index,
					       stats_if1, 1, len1);
	    }
	  else
	    {
	      b[1]->error = node->errors[di1.error];
	      pkts_dropped++;
	    }
	}

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vxlan_rx_trace_t *tr =
	    vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  tr->next_index = next[0];
	  tr->error = di0.error;
	  /* NOTE(review): ternary arms appear truncated in this extract */
	  tr->tunnel_index = di0.sw_if_index == ~0 ?
	  tr->vni = vnet_get_vni (vxlan0);
	}
      if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vxlan_rx_trace_t *tr =
	    vlib_add_trace (vm, node, b[1], sizeof (*tr));
	  tr->next_index = next[1];
	  tr->error = di1.error;
	  /* NOTE(review): ternary arms appear truncated in this extract */
	  tr->tunnel_index = di1.sw_if_index == ~0 ?
	  tr->vni = vnet_get_vni (vxlan1);
	}
      b += 2;
      next += 2;
      n_left_from -= 2;
    }

  /* Single-packet tail loop for the remainder. */
  while (n_left_from > 0)
    {
      /* udp leaves current_data pointing at the vxlan header */
      void *cur0 = vlib_buffer_get_current (b[0]);
      vxlan_header_t *vxlan0 = cur0;
      ip4_header_t *ip4_0;
      ip6_header_t *ip6_0;
      if (is_ip4)
	ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
      else
	ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);

      /* pop (ip, udp, vxlan) */
      vlib_buffer_advance (b[0], sizeof (*vxlan0));

      u32 fi0 = vlib_buffer_get_ip_fib_index (b[0], is_ip4);

      vxlan_decap_info_t di0 = is_ip4 ?
	vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
	vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);

      uword len0 = vlib_buffer_length_in_chain (vm, b[0]);

      next[0] = di0.next_index;

      /* Validate VXLAN tunnel encap-fib index against packet */
      if (di0.error == 0)
	{
	  /* Required to make the l2 tag push / pop code work on l2 subifs */
	  vnet_update_l2_len (b[0]);

	  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
	  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;

	  vlib_increment_combined_counter (rx_counter, thread_index,
					   stats_if0, 1, len0);
	}
      else
	{
	  b[0]->error = node->errors[di0.error];
	  pkts_dropped++;
	}

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vxlan_rx_trace_t *tr
	    = vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  tr->next_index = next[0];
	  tr->error = di0.error;
	  /* NOTE(review): ternary arms appear truncated in this extract */
	  tr->tunnel_index = di0.sw_if_index == ~0 ?
	  tr->vni = vnet_get_vni (vxlan0);
	}
      b += 1;
      next += 1;
      n_left_from -= 1;
    }
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, from_frame->n_vectors);
  /* Do we still need this now that tunnel tx stats is kept? */
  u32 node_idx = is_ip4 ? vxlan4_input_node.index : vxlan6_input_node.index;
  vlib_node_increment_counter (vm, node_idx, VXLAN_ERROR_DECAPSULATED,
			       from_frame->n_vectors - pkts_dropped);

  return from_frame->n_vectors;
}
394 
/* IPv4 VXLAN input node entry point: thin wrapper dispatching into the
 * shared vxlan_input worker with is_ip4 = 1. */
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 1);
}
401 
/* IPv6 VXLAN input node entry point: thin wrapper dispatching into the
 * shared vxlan_input worker with is_ip4 = 0. */
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 0);
}
408 
/* Human-readable strings for the vxlan input node error counters.
 * NOTE(review): the foreach_vxlan_error expansion line appears truncated
 * in this extract. */
static char *vxlan_error_strings[] = {
#define vxlan_error(n,s) s,
#undef vxlan_error
};
414 
/* *INDENT-OFF* */
/* Graph node registrations for the IPv4 and IPv6 VXLAN decap nodes.
 * NOTE(review): the VLIB_REGISTER_NODE (...) header lines appear
 * truncated in this extract. */
{
  .name = "vxlan4-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,
  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .format_trace = format_vxlan_rx_trace,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
#undef _
  },
};

{
  .name = "vxlan6-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,
  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
#undef _
  },
  .format_trace = format_vxlan_rx_trace,
};
/* *INDENT-ON* */
446 
447 typedef enum
448 {
453 
		       vlib_frame_t * frame, u32 is_ip4)
{
  /* Bypass feature worker: for UDP packets addressed to a local VTEP and
   * matching a configured VXLAN tunnel, verify UDP length/checksum and
   * steer the packet straight to the vxlan input node, skipping the rest
   * of the local IP stack; everything else continues to the next feature.
   * NOTE(review): several lines in this function (error_node initializer,
   * bufs/b declarations, next0/next1 ternaries and the
   * vlib_buffer_advance calls) appear truncated in this extract - compare
   * against upstream before editing. */
  vxlan_main_t *vxm = &vxlan_main;
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t *error_node =
  vtep4_key_t last_vtep4;	/* last IPv4 address / fib index
				   matching a local VTEP address */
  vtep6_key_t last_vtep6;	/* last IPv6 address / fib index
				   matching a local VTEP address */

  last_tunnel_cache4 last4;
  last_tunnel_cache6 last6;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  vlib_get_buffers (vm, from, bufs, n_left_from);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  /* Invalidate the VTEP and tunnel caches for the family in use. */
  if (is_ip4)
    {
      vtep4_key_init (&last_vtep4);
      clib_memset (&last4, 0xff, sizeof last4);
    }
  else
    {
      vtep6_key_init (&last_vtep6);
      clib_memset (&last6, 0xff, sizeof last6);
    }

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual loop: two packets per iteration with prefetch of the next
       * pair's headers and data. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  vlib_buffer_t *b0, *b1;
	  ip4_header_t *ip40, *ip41;
	  ip6_header_t *ip60, *ip61;
	  udp_header_t *udp0, *udp1;
	  vxlan_header_t *vxlan0, *vxlan1;
	  u32 bi0, ip_len0, udp_len0, flags0, next0;
	  u32 bi1, ip_len1, udp_len1, flags1, next1;
	  i32 len_diff0, len_diff1;
	  u8 error0, good_udp0, proto0;
	  u8 error1, good_udp1, proto1;
	  u32 stats_if0 = ~0, stats_if1 = ~0;

	  /* Prefetch next iteration. */
	  {
	    vlib_prefetch_buffer_header (b[2], LOAD);
	    vlib_prefetch_buffer_header (b[3], LOAD);

	    CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = to_next[0] = from[0];
	  bi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  b0 = b[0];
	  b1 = b[1];
	  b += 2;
	  if (is_ip4)
	    {
	      ip40 = vlib_buffer_get_current (b0);
	      ip41 = vlib_buffer_get_current (b1);
	    }
	  else
	    {
	      ip60 = vlib_buffer_get_current (b0);
	      ip61 = vlib_buffer_get_current (b1);
	    }

	  /* Setup packet for next IP feature */
	  vnet_feature_next (&next0, b0);
	  vnet_feature_next (&next1, b1);

	  if (is_ip4)
	    {
	      /* Treat IP frag packets as "experimental" protocol for now
	         until support of IP frag reassembly is implemented */
	      proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
	      proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
	    }
	  else
	    {
	      proto0 = ip60->protocol;
	      proto1 = ip61->protocol;
	    }

	  /* Process packet 0 */
	  if (proto0 != IP_PROTOCOL_UDP)
	    goto exit0;		/* not UDP packet */

	  if (is_ip4)
	    udp0 = ip4_next_header (ip40);
	  else
	    udp0 = ip6_next_header (ip60);

	  u32 fi0 = vlib_buffer_get_ip_fib_index (b0, is_ip4);
	  vxlan0 = vlib_buffer_get_current (b0) + sizeof (udp_header_t) +
	    sizeof (ip4_header_t);

	  vxlan_decap_info_t di0 =
	    is_ip4 ?
	    vxlan4_find_tunnel (vxm, &last4, fi0, ip40, vxlan0, &stats_if0) :
	    vxlan6_find_tunnel (vxm, &last6, fi0, ip60, vxlan0, &stats_if0);

	  if (PREDICT_FALSE (di0.sw_if_index == ~0))
	    goto exit0;		/* unknown interface */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
#ifdef CLIB_HAVE_VEC512
	      if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
				       &vxm->vtep4_u512))
#else
	      if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
		goto exit0;	/* no local VTEP for VXLAN packet */
	    }
	  else
	    {
	      if (!vtep6_check (&vxm->vtep_table, b0, ip60, &last_vtep6))
		goto exit0;	/* no local VTEP for VXLAN packet */
	    }

	  flags0 = b0->flags;
	  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp0 |= udp0->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len0 = clib_net_to_host_u16 (ip40->length);
	  else
	    ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
	  udp_len0 = clib_net_to_host_u16 (udp0->length);
	  len_diff0 = ip_len0 - udp_len0;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp0))
	    {
	      if (is_ip4)
		flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
	      else
		flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
	      good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
	    }

	  if (is_ip4)
	    {
	      error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
	    }

	  /* NOTE(review): the next0 ternary arms appear truncated here */
	  next0 = error0 ?
	  b0->error = error0 ? error_node->errors[error0] : 0;

	  /* vxlan-input node expect current at VXLAN header */
	  /* NOTE(review): the vlib_buffer_advance calls appear truncated */
	  if (is_ip4)
	    sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit0:
	  /* Process packet 1 */
	  if (proto1 != IP_PROTOCOL_UDP)
	    goto exit1;		/* not UDP packet */

	  if (is_ip4)
	    udp1 = ip4_next_header (ip41);
	  else
	    udp1 = ip6_next_header (ip61);

	  u32 fi1 = vlib_buffer_get_ip_fib_index (b1, is_ip4);
	  vxlan1 = vlib_buffer_get_current (b1) + sizeof (udp_header_t) +
	    sizeof (ip4_header_t);

	  vxlan_decap_info_t di1 =
	    is_ip4 ?
	    vxlan4_find_tunnel (vxm, &last4, fi1, ip41, vxlan1, &stats_if1) :
	    vxlan6_find_tunnel (vxm, &last6, fi1, ip61, vxlan1, &stats_if1);

	  if (PREDICT_FALSE (di1.sw_if_index == ~0))
	    goto exit1;		/* unknown interface */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
#ifdef CLIB_HAVE_VEC512
	      if (!vtep4_check_vector (&vxm->vtep_table, b1, ip41, &last_vtep4,
				       &vxm->vtep4_u512))
#else
	      if (!vtep4_check (&vxm->vtep_table, b1, ip41, &last_vtep4))
#endif
		goto exit1;	/* no local VTEP for VXLAN packet */
	    }
	  else
	    {
	      if (!vtep6_check (&vxm->vtep_table, b1, ip61, &last_vtep6))
		goto exit1;	/* no local VTEP for VXLAN packet */
	    }

	  flags1 = b1->flags;
	  good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp1 |= udp1->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len1 = clib_net_to_host_u16 (ip41->length);
	  else
	    ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
	  udp_len1 = clib_net_to_host_u16 (udp1->length);
	  len_diff1 = ip_len1 - udp_len1;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp1))
	    {
	      if (is_ip4)
		flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
	      else
		flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
	      good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
	    }

	  if (is_ip4)
	    {
	      error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
	    }

	  /* NOTE(review): the next1 ternary arms appear truncated here */
	  next1 = error1 ?
	  b1->error = error1 ? error_node->errors[error1] : 0;

	  /* vxlan-input node expect current at VXLAN header */
	  /* NOTE(review): the vlib_buffer_advance calls appear truncated */
	  if (is_ip4)
	    sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit1:
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      /* Single-packet tail loop. */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  vlib_buffer_t *b0;
	  ip4_header_t *ip40;
	  ip6_header_t *ip60;
	  udp_header_t *udp0;
	  vxlan_header_t *vxlan0;
	  u32 bi0, ip_len0, udp_len0, flags0, next0;
	  i32 len_diff0;
	  u8 error0, good_udp0, proto0;
	  u32 stats_if0 = ~0;

	  bi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  b0 = b[0];
	  b++;
	  if (is_ip4)
	    ip40 = vlib_buffer_get_current (b0);
	  else
	    ip60 = vlib_buffer_get_current (b0);

	  /* Setup packet for next IP feature */
	  vnet_feature_next (&next0, b0);

	  if (is_ip4)
	    /* Treat IP4 frag packets as "experimental" protocol for now
	       until support of IP frag reassembly is implemented */
	    proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
	  else
	    proto0 = ip60->protocol;

	  if (proto0 != IP_PROTOCOL_UDP)
	    goto exit;		/* not UDP packet */

	  if (is_ip4)
	    udp0 = ip4_next_header (ip40);
	  else
	    udp0 = ip6_next_header (ip60);

	  u32 fi0 = vlib_buffer_get_ip_fib_index (b0, is_ip4);
	  vxlan0 = vlib_buffer_get_current (b0) + sizeof (udp_header_t) +
	    sizeof (ip4_header_t);

	  vxlan_decap_info_t di0 =
	    is_ip4 ?
	    vxlan4_find_tunnel (vxm, &last4, fi0, ip40, vxlan0, &stats_if0) :
	    vxlan6_find_tunnel (vxm, &last6, fi0, ip60, vxlan0, &stats_if0);

	  if (PREDICT_FALSE (di0.sw_if_index == ~0))
	    goto exit;		/* unknown interface */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
#ifdef CLIB_HAVE_VEC512
	      if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
				       &vxm->vtep4_u512))
#else
	      if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
		goto exit;	/* no local VTEP for VXLAN packet */
	    }
	  else
	    {
	      if (!vtep6_check (&vxm->vtep_table, b0, ip60, &last_vtep6))
		goto exit;	/* no local VTEP for VXLAN packet */
	    }

	  flags0 = b0->flags;
	  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp0 |= udp0->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len0 = clib_net_to_host_u16 (ip40->length);
	  else
	    ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
	  udp_len0 = clib_net_to_host_u16 (udp0->length);
	  len_diff0 = ip_len0 - udp_len0;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp0))
	    {
	      if (is_ip4)
		flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
	      else
		flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
	      good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
	    }

	  if (is_ip4)
	    {
	      error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
	    }

	  /* NOTE(review): the next0 ternary arms appear truncated here */
	  next0 = error0 ?
	  b0->error = error0 ? error_node->errors[error0] : 0;

	  /* vxlan-input node expect current at VXLAN header */
	  /* NOTE(review): the vlib_buffer_advance calls appear truncated */
	  if (is_ip4)
	    sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit:
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
870 
/* IPv4 vxlan-bypass feature node: dispatch into the shared worker. */
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}
877 
/* *INDENT-OFF* */
/* Registration for the ip4-vxlan-bypass feature node.
 * NOTE(review): the VLIB_REGISTER_NODE (...) header line appears
 * truncated in this extract. */
{
  .name = "ip4-vxlan-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
  },
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

/* *INDENT-ON* */
893 
/* Dummy init function to get us linked in. */
static clib_error_t *
/* NOTE(review): the function name line appears truncated in this
 * extract (upstream: ip4_vxlan_bypass_init (vlib_main_t * vm)). */
{
  return 0;
}
900 
902 
/* IPv6 vxlan-bypass feature node: dispatch into the shared worker. */
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}
909 
/* *INDENT-OFF* */
/* Registration for the ip6-vxlan-bypass feature node.
 * NOTE(review): the VLIB_REGISTER_NODE (...) header line appears
 * truncated in this extract. */
{
  .name = "ip6-vxlan-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
  },
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

/* *INDENT-ON* */
925 
/* Dummy init function to get us linked in. */
static clib_error_t *
/* NOTE(review): the function name line appears truncated in this
 * extract (upstream: ip6_vxlan_bypass_init (vlib_main_t * vm)). */
{
  return 0;
}
932 
934 
/* Dispositions for packets leaving the vxlan flow-offload input node. */
#define foreach_vxlan_flow_input_next \
_(DROP, "error-drop") \
_(L2_INPUT, "l2-input")
938 
939 typedef enum
940 {
941 #define _(s,n) VXLAN_FLOW_NEXT_##s,
943 #undef _
946 
/* Error counters for the vxlan flow-offload input node. */
#define foreach_vxlan_flow_error \
  _(NONE, "no error") \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
  _(IP_HEADER_ERROR, "Rx ip header errors") \
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
  _(UDP_LENGTH_ERROR, "Rx udp length errors")
953 
954 typedef enum
955 {
956 #define _(f,s) VXLAN_FLOW_ERROR_##f,
958 #undef _
961 
/* Human-readable strings for the flow-offload error counters.
 * NOTE(review): the foreach_vxlan_flow_error expansion line appears
 * truncated in this extract. */
static char *vxlan_flow_error_strings[] = {
#define _(n,s) s,
#undef _
};
967 
968 
/* Fully validate the UDP checksum of a flow-offloaded VXLAN packet when
 * the NIC has not already computed it; returns non-zero when correct.
 * NOTE(review): the signature and the vlib_buffer_advance rewind/restore
 * lines around the validation call appear truncated in this extract
 * (upstream rewinds the buffer by `offset` before validating and then
 * restores it) - confirm against upstream. */
{
  u32 flags = b->flags;
  /* Distance from current_data (inner payload) back to the outer IP
   * header of the offloaded packet. */
  enum
  { offset =
      sizeof (ip4_header_t) + sizeof (udp_header_t) + sizeof (vxlan_header_t),
  };

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      flags = ip4_tcp_udp_validate_checksum (vm, b);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}
988 
/* Quick UDP checksum screen for a flow-offloaded packet: returns
 * non-zero ("suspect, needs full validation") unless hardware already
 * marked the checksum correct or the sender used the explicit zero
 * checksum.  NOTE(review): signature truncated in this extract. */
{
  /* The outer headers sit immediately before current_data. */
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  udp_header_t *udp = &hdr->udp;
  /* Don't verify UDP checksum for packets with explicit zero checksum. */
  u8 good_csum = (b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0 ||
    udp->checksum == 0;

  return !good_csum;
}
1000 
/* Sanity-check the outer IPv4 header of a flow-offloaded packet:
 * reject when the IP total length exceeds what the buffer holds, the
 * TTL is zero, or the header is not a plain 20-byte IPv4 header
 * (version 4, IHL 5).  Returns non-zero on error.
 * NOTE(review): signature truncated in this extract. */
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
  u16 expected = payload_len + sizeof *hdr;
  return ip_len > expected || hdr->ip4.ttl == 0
    || hdr->ip4.ip_version_and_header_length != 0x45;
}
1010 
/* Verify the outer UDP length is consistent with the outer IP length;
 * returns non-zero when the UDP length claims more bytes than IP carries.
 * NOTE(review): signature truncated in this extract. */
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
  u16 udp_len = clib_net_to_host_u16 (hdr->udp.length);
  return udp_len > ip_len;
}
1019 
1021 vxlan_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
1022 {
1023  u8 error0 = VXLAN_FLOW_ERROR_NONE;
1024  if (ip_err0)
1025  error0 = VXLAN_FLOW_ERROR_IP_HEADER_ERROR;
1026  if (udp_err0)
1027  error0 = VXLAN_FLOW_ERROR_UDP_LENGTH_ERROR;
1028  if (csum_err0)
1029  error0 = VXLAN_FLOW_ERROR_UDP_CHECKSUM_ERROR;
1030  return error0;
1031 }
1032 
1035  vlib_frame_t * f)
1036 {
1037  enum
1038  { payload_offset = sizeof (ip4_vxlan_header_t) };
1039 
1040  vxlan_main_t *vxm = &vxlan_main;
1043  [VXLAN_FLOW_NEXT_DROP] =
1045  [VXLAN_FLOW_NEXT_L2_INPUT] =
1047  };
1049 
1052  u32 next_index = VXLAN_FLOW_NEXT_L2_INPUT;
1053 
1054  while (n_left_from > 0)
1055  {
1056  u32 n_left_to_next, *to_next;
1057 
1058  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1059 
1060  while (n_left_from > 3 && n_left_to_next > 3)
1061  {
1062  u32 bi0 = to_next[0] = from[0];
1063  u32 bi1 = to_next[1] = from[1];
1064  u32 bi2 = to_next[2] = from[2];
1065  u32 bi3 = to_next[3] = from[3];
1066  from += 4;
1067  n_left_from -= 4;
1068  to_next += 4;
1069  n_left_to_next -= 4;
1070 
1071  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
1072  vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
1073  vlib_buffer_t *b2 = vlib_get_buffer (vm, bi2);
1074  vlib_buffer_t *b3 = vlib_get_buffer (vm, bi3);
1075 
1076  vlib_buffer_advance (b0, payload_offset);
1077  vlib_buffer_advance (b1, payload_offset);
1078  vlib_buffer_advance (b2, payload_offset);
1079  vlib_buffer_advance (b3, payload_offset);
1080 
1081  u16 len0 = vlib_buffer_length_in_chain (vm, b0);
1082  u16 len1 = vlib_buffer_length_in_chain (vm, b1);
1083  u16 len2 = vlib_buffer_length_in_chain (vm, b2);
1084  u16 len3 = vlib_buffer_length_in_chain (vm, b3);
1085 
1086  u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT, next1 =
1087  VXLAN_FLOW_NEXT_L2_INPUT, next2 =
1088  VXLAN_FLOW_NEXT_L2_INPUT, next3 = VXLAN_FLOW_NEXT_L2_INPUT;
1089 
1090  u8 ip_err0 = vxlan_check_ip (b0, len0);
1091  u8 ip_err1 = vxlan_check_ip (b1, len1);
1092  u8 ip_err2 = vxlan_check_ip (b2, len2);
1093  u8 ip_err3 = vxlan_check_ip (b3, len3);
1094  u8 ip_err = ip_err0 | ip_err1 | ip_err2 | ip_err3;
1095 
1096  u8 udp_err0 = vxlan_check_ip_udp_len (b0);
1097  u8 udp_err1 = vxlan_check_ip_udp_len (b1);
1098  u8 udp_err2 = vxlan_check_ip_udp_len (b2);
1099  u8 udp_err3 = vxlan_check_ip_udp_len (b3);
1100  u8 udp_err = udp_err0 | udp_err1 | udp_err2 | udp_err3;
1101 
1102  u8 csum_err0 = vxlan_check_udp_csum (vm, b0);
1103  u8 csum_err1 = vxlan_check_udp_csum (vm, b1);
1104  u8 csum_err2 = vxlan_check_udp_csum (vm, b2);
1105  u8 csum_err3 = vxlan_check_udp_csum (vm, b3);
1106  u8 csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
1107 
1108  if (PREDICT_FALSE (csum_err))
1109  {
1110  if (csum_err0)
1111  csum_err0 = !vxlan_validate_udp_csum (vm, b0);
1112  if (csum_err1)
1113  csum_err1 = !vxlan_validate_udp_csum (vm, b1);
1114  if (csum_err2)
1115  csum_err2 = !vxlan_validate_udp_csum (vm, b2);
1116  if (csum_err3)
1117  csum_err3 = !vxlan_validate_udp_csum (vm, b3);
1118  csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
1119  }
1120 
1121  if (PREDICT_FALSE (ip_err || udp_err || csum_err))
1122  {
1123  if (ip_err0 || udp_err0 || csum_err0)
1124  {
1125  next0 = VXLAN_FLOW_NEXT_DROP;
1126  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1127  b0->error = node->errors[error0];
1128  }
1129  if (ip_err1 || udp_err1 || csum_err1)
1130  {
1131  next1 = VXLAN_FLOW_NEXT_DROP;
1132  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
1133  b1->error = node->errors[error1];
1134  }
1135  if (ip_err2 || udp_err2 || csum_err2)
1136  {
1137  next2 = VXLAN_FLOW_NEXT_DROP;
1138  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
1139  b2->error = node->errors[error2];
1140  }
1141  if (ip_err3 || udp_err3 || csum_err3)
1142  {
1143  next3 = VXLAN_FLOW_NEXT_DROP;
1144  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
1145  b3->error = node->errors[error3];
1146  }
1147  }
1148 
1149  vnet_update_l2_len (b0);
1150  vnet_update_l2_len (b1);
1151  vnet_update_l2_len (b2);
1152  vnet_update_l2_len (b3);
1153 
1154  ASSERT (b0->flow_id != 0);
1155  ASSERT (b1->flow_id != 0);
1156  ASSERT (b2->flow_id != 0);
1157  ASSERT (b3->flow_id != 0);
1158 
1159  u32 t_index0 = b0->flow_id - vxm->flow_id_start;
1160  u32 t_index1 = b1->flow_id - vxm->flow_id_start;
1161  u32 t_index2 = b2->flow_id - vxm->flow_id_start;
1162  u32 t_index3 = b3->flow_id - vxm->flow_id_start;
1163 
1164  vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
1165  vxlan_tunnel_t *t1 = &vxm->tunnels[t_index1];
1166  vxlan_tunnel_t *t2 = &vxm->tunnels[t_index2];
1167  vxlan_tunnel_t *t3 = &vxm->tunnels[t_index3];
1168 
1169  /* flow id consumed */
1170  b0->flow_id = 0;
1171  b1->flow_id = 0;
1172  b2->flow_id = 0;
1173  b3->flow_id = 0;
1174 
1175  u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1176  t0->sw_if_index;
1177  u32 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX] =
1178  t1->sw_if_index;
1179  u32 sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX] =
1180  t2->sw_if_index;
1181  u32 sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX] =
1182  t3->sw_if_index;
1183 
1184  vlib_increment_combined_counter (rx_counter[next0], thread_index,
1185  sw_if_index0, 1, len0);
1186  vlib_increment_combined_counter (rx_counter[next1], thread_index,
1187  sw_if_index1, 1, len1);
1188  vlib_increment_combined_counter (rx_counter[next2], thread_index,
1189  sw_if_index2, 1, len2);
1190  vlib_increment_combined_counter (rx_counter[next3], thread_index,
1191  sw_if_index3, 1, len3);
1192 
1193  u32 flags = b0->flags | b1->flags | b2->flags | b3->flags;
1194 
1195  if (PREDICT_FALSE (flags & VLIB_BUFFER_IS_TRACED))
1196  {
1197  if (b0->flags & VLIB_BUFFER_IS_TRACED)
1198  {
1199  vxlan_rx_trace_t *tr =
1200  vlib_add_trace (vm, node, b0, sizeof *tr);
1201  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1202  tr->next_index = next0;
1203  tr->error = error0;
1204  tr->tunnel_index = t_index0;
1205  tr->vni = t0->vni;
1206  }
1207  if (b1->flags & VLIB_BUFFER_IS_TRACED)
1208  {
1209  vxlan_rx_trace_t *tr =
1210  vlib_add_trace (vm, node, b1, sizeof *tr);
1211  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
1212  tr->next_index = next1;
1213  tr->error = error1;
1214  tr->tunnel_index = t_index1;
1215  tr->vni = t1->vni;
1216  }
1217  if (b2->flags & VLIB_BUFFER_IS_TRACED)
1218  {
1219  vxlan_rx_trace_t *tr =
1220  vlib_add_trace (vm, node, b2, sizeof *tr);
1221  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
1222  tr->next_index = next2;
1223  tr->error = error2;
1224  tr->tunnel_index = t_index2;
1225  tr->vni = t2->vni;
1226  }
1227  if (b3->flags & VLIB_BUFFER_IS_TRACED)
1228  {
1229  vxlan_rx_trace_t *tr =
1230  vlib_add_trace (vm, node, b3, sizeof *tr);
1231  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
1232  tr->next_index = next3;
1233  tr->error = error3;
1234  tr->tunnel_index = t_index3;
1235  tr->vni = t3->vni;
1236  }
 1237  }
 1238  vlib_validate_buffer_enqueue_x4
 1239  (vm, node, next_index, to_next, n_left_to_next,
1240  bi0, bi1, bi2, bi3, next0, next1, next2, next3);
1241  }
1242  while (n_left_from > 0 && n_left_to_next > 0)
1243  {
1244  u32 bi0 = to_next[0] = from[0];
1245  from++;
1246  n_left_from--;
1247  to_next++;
1248  n_left_to_next--;
1249 
1250  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
1251  vlib_buffer_advance (b0, payload_offset);
1252 
1253  u16 len0 = vlib_buffer_length_in_chain (vm, b0);
1254  u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT;
1255 
1256  u8 ip_err0 = vxlan_check_ip (b0, len0);
1257  u8 udp_err0 = vxlan_check_ip_udp_len (b0);
1258  u8 csum_err0 = vxlan_check_udp_csum (vm, b0);
1259 
1260  if (csum_err0)
1261  csum_err0 = !vxlan_validate_udp_csum (vm, b0);
1262  if (ip_err0 || udp_err0 || csum_err0)
1263  {
1264  next0 = VXLAN_FLOW_NEXT_DROP;
1265  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1266  b0->error = node->errors[error0];
1267  }
1268 
1269  vnet_update_l2_len (b0);
1270 
1271  ASSERT (b0->flow_id != 0);
1272  u32 t_index0 = b0->flow_id - vxm->flow_id_start;
1273  vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
1274  b0->flow_id = 0;
1275 
1276  u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1277  t0->sw_if_index;
1278  vlib_increment_combined_counter (rx_counter[next0], thread_index,
1279  sw_if_index0, 1, len0);
1280 
1281  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1282  {
1283  vxlan_rx_trace_t *tr =
1284  vlib_add_trace (vm, node, b0, sizeof *tr);
1285  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1286  tr->next_index = next0;
1287  tr->error = error0;
1288  tr->tunnel_index = t_index0;
1289  tr->vni = t0->vni;
1290  }
1291  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1292  to_next, n_left_to_next,
1293  bi0, next0);
1294  }
1295 
1296  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1297  }
1298 
1299  return f->n_vectors;
1300 }
1301 
1302 /* *INDENT-OFF* */
 1303 #ifndef CLIB_MULTIARCH_VARIANT
 1304 VLIB_REGISTER_NODE (vxlan4_flow_input_node) = {
 1305  .name = "vxlan-flow-input",
1306  .type = VLIB_NODE_TYPE_INTERNAL,
1307  .vector_size = sizeof (u32),
1308 
1309  .format_trace = format_vxlan_rx_trace,
1310 
1311  .n_errors = VXLAN_FLOW_N_ERROR,
1312  .error_strings = vxlan_flow_error_strings,
1313 
1314  .n_next_nodes = VXLAN_FLOW_N_NEXT,
1315  .next_nodes = {
 1316 #define _(s,n) [VXLAN_FLOW_NEXT_##s] = n,
 1317  foreach_vxlan_flow_input_next
 1318 #undef _
1319  },
1320 };
1321 #endif
1322 /* *INDENT-ON* */
1323 
1324 /*
1325  * fd.io coding-style-patch-verification: ON
1326  *
1327  * Local Variables:
1328  * eval: (c-set-style "gnu")
1329  * End:
1330  */
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:133
vlib_node_registration_t vxlan6_input_node
(constructor) VLIB_REGISTER_NODE (vxlan6_input_node)
Definition: decap.c:24
static uword ip_vxlan_bypass_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u32 is_ip4)
Definition: decap.c:455
static uword vxlan_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, u32 is_ip4)
Definition: decap.c:184
#define CLIB_UNUSED(x)
Definition: clib.h:90
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
clib_bihash_24_8_t vxlan6_tunnel_by_key
Definition: vxlan.h:169
ip4_address_t src_address
Definition: ip4_packet.h:125
vlib_main_t vlib_node_runtime_t vlib_frame_t * from_frame
Definition: esp_encrypt.c:1328
static_always_inline u8 vxlan_check_ip(vlib_buffer_t *b, u16 payload_len)
Definition: decap.c:1002
static vxlan_decap_info_t vxlan6_find_tunnel(vxlan_main_t *vxm, last_tunnel_cache6 *cache, u32 fib_index, ip6_header_t *ip6_0, vxlan_header_t *vxlan0, u32 *stats_sw_if_index)
Definition: decap.c:126
vnet_interface_main_t interface_main
Definition: vnet.h:81
format_function_t format_ip4_header
Definition: format.h:81
u32 thread_index
#define foreach_vxlan_input_next
Definition: vxlan.h:140
#define PREDICT_TRUE(x)
Definition: clib.h:125
unsigned long u64
Definition: types.h:89
u16 nexts[VLIB_FRAME_SIZE]
u32 flow_id_start
Definition: vxlan.h:191
vlib_increment_combined_counter(ccm, ti, sw_if_index, n_buffers, n_bytes)
vlib_node_registration_t vxlan4_input_node
(constructor) VLIB_REGISTER_NODE (vxlan4_input_node)
Definition: decap.c:23
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: nat44_ei.c:3048
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
Definition: buffer_node.h:140
vl_api_address_t src
Definition: gre.api:54
static u16 vnet_update_l2_len(vlib_buffer_t *b)
Definition: l2_input.h:298
#define VLIB_NODE_FN(node)
Definition: node.h:202
static uword ip4_address_is_multicast(const ip4_address_t *a)
Definition: ip4_packet.h:446
vlib_node_registration_t ip4_vxlan_bypass_node
(constructor) VLIB_REGISTER_NODE (ip4_vxlan_bypass_node)
Definition: decap.c:879
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:461
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:433
vlib_node_registration_t ip6_vxlan_bypass_node
(constructor) VLIB_REGISTER_NODE (ip6_vxlan_bypass_node)
Definition: decap.c:911
static const vxlan_decap_info_t decap_not_found
Definition: decap.c:51
ip6_address_t src_address
Definition: ip6_packet.h:310
static u32 vlib_buffer_get_ip_fib_index(vlib_buffer_t *b, u8 is_ip4)
Definition: ip.h:292
unsigned char u8
Definition: types.h:56
vlib_buffer_t ** b
u8 data[128]
Definition: ipsec_types.api:92
static int ip4_is_fragment(const ip4_header_t *i)
Definition: ip4_packet.h:168
unsigned int u32
Definition: types.h:88
vlib_frame_t * f
static int clib_bihash_key_compare_24_8(u64 *a, u64 *b)
Definition: bihash_24_8.h:77
#define VXLAN_FLAGS_I
Definition: vxlan_packet.h:53
vnet_main_t * vnet_main
Definition: vxlan.h:187
#define static_always_inline
Definition: clib.h:112
u32 tunnel_index
Definition: decap.c:30
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:172
vlib_get_buffers(vm, from, b, n_left_from)
ip4_address_t dst_address
Definition: ip4_packet.h:125
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:1023
description fragment has unexpected format
Definition: map.api:433
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:231
static vxlan_decap_info_t vxlan4_find_tunnel(vxlan_main_t *vxm, last_tunnel_cache4 *cache, u32 fib_index, ip4_header_t *ip4_0, vxlan_header_t *vxlan0, u32 *stats_sw_if_index)
Definition: decap.c:64
static void * ip4_next_header(ip4_header_t *i)
Definition: ip4_packet.h:196
vlib_buffer_enqueue_to_next(vm, node, from,(u16 *) nexts, frame->n_vectors)
static_always_inline u8 vxlan_err_code(u8 ip_err0, u8 udp_err0, u8 csum_err0)
Definition: decap.c:1021
int __clib_unused rv
Definition: application.c:491
static void vtep4_key_init(vtep4_key_t *k4)
Definition: vtep.h:80
#define VLIB_FRAME_SIZE
Definition: node.h:369
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:145
u16 * next
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:553
vlib_node_registration_t ip4_input_node
Global ip4 input node.
Definition: ip4_input.c:385
ip_vxlan_bypass_next_t
Definition: decap.c:447
ip4_address_t local_ip
Definition: vxlan.h:75
unsigned short u16
Definition: types.h:57
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:257
void di(unformat_input_t *i)
Definition: unformat.c:163
vxlan_main_t vxlan_main
Definition: vxlan.c:46
#define PREDICT_FALSE(x)
Definition: clib.h:124
vnet_main_t vnet_main
Definition: misc.c:43
static u8 * format_vxlan_rx_trace(u8 *s, va_list *args)
Definition: decap.c:36
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:395
vlib_node_registration_t vxlan4_flow_input_node
(constructor) VLIB_REGISTER_NODE (vxlan4_flow_input_node)
Definition: decap.c:1304
static u8 vtep6_check(vtep_table_t *t, vlib_buffer_t *b0, ip6_header_t *ip60, vtep6_key_t *last_k6)
Definition: vtep.h:155
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1244
static u8 vtep4_check(vtep_table_t *t, vlib_buffer_t *b0, ip4_header_t *ip40, vtep4_key_t *last_k4)
Definition: vtep.h:100
static const vxlan_decap_info_t decap_bad_flags
Definition: decap.c:57
vxlan_flow_input_next_t
Definition: decap.c:939
vtep_table_t vtep_table
Definition: vxlan.h:173
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static u32 vnet_get_vni(vxlan_header_t *h)
Definition: vxlan_packet.h:56
u16 n_vectors
Definition: node.h:388
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:208
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
vxlan6_tunnel_key_t last_tunnel_cache6
Definition: decap.c:123
u32 flow_id
Generic flow identifier.
Definition: buffer.h:136
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:322
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:116
vnet_interface_main_t * im
#define foreach_vxlan_flow_error
Definition: decap.c:947
static clib_error_t * ip6_vxlan_bypass_init(vlib_main_t *vm)
Definition: decap.c:928
static void * ip6_next_header(ip6_header_t *i)
Definition: ip6_packet.h:407
static char * vxlan_flow_error_strings[]
Definition: decap.c:962
signed int i32
Definition: types.h:77
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:498
#define ASSERT(truth)
static clib_error_t * ip4_vxlan_bypass_init(vlib_main_t *vm)
Definition: decap.c:896
#define always_inline
Definition: rdma_mlx5dv.h:23
static void vtep6_key_init(vtep6_key_t *k6)
Definition: vtep.h:86
vlib_put_next_frame(vm, node, next_index, 0)
u32 sw_if_index
Definition: vxlan.h:110
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:276
u32 ip4_tcp_udp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
static_always_inline u8 vxlan_validate_udp_csum(vlib_main_t *vm, vlib_buffer_t *b)
Definition: decap.c:970
format_function_t format_ip6_header
Definition: format.h:95
nat44_ei_hairpin_src_next_t next_index
static uword ip6_address_is_equal(const ip6_address_t *a, const ip6_address_t *b)
Definition: ip6_packet.h:167
static uword ip6_address_is_multicast(const ip6_address_t *a)
Definition: ip6_packet.h:121
struct _vlib_node_registration vlib_node_registration_t
template key/value backing page structure
Definition: bihash_doc.h:44
Definition: defs.h:47
u16 payload_length
Definition: ip6_packet.h:301
u32 ip6_tcp_udp_icmp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
Definition: ip6_forward.c:1163
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
void ip4_forward_next_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vlib_rx_or_tx_t which_adj_index)
Definition: ip4_forward.c:1240
VLIB buffer representation.
Definition: buffer.h:111
u64 uword
Definition: types.h:112
vxlan4_tunnel_key_t last_tunnel_cache4
Definition: decap.c:49
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
u8 * format_ip4_forward_next_trace(u8 *s, va_list *args)
Definition: ip4_forward.c:1190
A collection of combined counters.
Definition: counter.h:203
static_always_inline u8 vxlan_check_ip_udp_len(vlib_buffer_t *b)
Definition: decap.c:1012
u16 decap_next_index
Definition: vxlan.h:104
u32 * tunnel_index_by_sw_if_index
Definition: vxlan.h:179
#define vnet_buffer(b)
Definition: buffer.h:437
vl_api_ip4_address_t dst
Definition: pnat.api:41
#define foreach_vxlan_flow_input_next
Definition: decap.c:935
clib_bihash_16_8_t vxlan4_tunnel_by_key
Definition: vxlan.h:167
static_always_inline u8 vxlan_check_udp_csum(vlib_main_t *vm, vlib_buffer_t *b)
Definition: decap.c:990
u16 flags
Copy of main node flags.
Definition: node.h:492
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:628
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:292
u32 next_index
Definition: decap.c:29
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
static char * vxlan_error_strings[]
Definition: decap.c:409
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
vxlan_tunnel_t * tunnels
Definition: vxlan.h:163
vxlan_flow_error_t
Definition: decap.c:954
ip46_address_t src
Definition: vxlan.h:93
Definition: defs.h:46
ip6_address_t dst_address
Definition: ip6_packet.h:310
u8 * format_ip6_forward_next_trace(u8 *s, va_list *args)
Definition: ip6_forward.c:951