FD.io VPP  v19.01.1-17-ge106252
Vector Packet Processing
encap.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char * vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum {
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum {
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct {
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

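/* Render the per-packet trace record (tunnel index and VNI) that the
 * "show trace" CLI displays for packets passing through the encap nodes
 * registered below. */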
u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t * t
      = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}
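
/* Shared implementation of the vxlan4-encap and vxlan6-encap nodes: prepend
 * the tunnel's precomputed IP/UDP/VXLAN header rewrite to each packet, fix
 * up lengths and checksums, and hand the packet to the tunnel's next DPO. */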
always_inline uword
vxlan_encap_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame,
                    u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  vlib_combined_counter_main_t * tx_counter =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_tunnel_t * t0 = NULL, * t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

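  /* Sanity-check the underlay header sizes this node assumes:
   * IPv4 (20) + UDP (8) + VXLAN (8) = 36 bytes,
   * IPv6 (40) + UDP (8) + VXLAN (8) = 56 bytes. */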
  STATIC_ASSERT_SIZEOF(ip6_vxlan_header_t, 56);
  STATIC_ASSERT_SIZEOF(ip4_vxlan_header_t, 36);

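  /* Hoist everything that depends only on the address family out of the
   * packet loop: the underlay header length, the offset of that header
   * within the tunnel's rewrite storage, the L3 header length used to
   * derive the UDP payload length, and the checksum-offload flags (IPv4
   * requests IP-header plus UDP checksum offload; IPv6 has no header
   * checksum, so UDP only). */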
  u8 const underlay_hdr_len = is_ip4 ?
    sizeof(ip4_vxlan_header_t) : sizeof(ip6_vxlan_header_t);
  u8 const rw_hdr_offset = sizeof t0->rewrite_data - underlay_hdr_len;
  u16 const l3_len = is_ip4 ? sizeof(ip4_header_t) : sizeof(ip6_header_t);
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM :
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

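      /* Dual loop: encapsulate two packets per iteration while prefetching
       * the following pair; the single-packet loop below handles whatever
       * is left over. */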
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t * b1 = vlib_get_buffer (vm, bi1);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

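          /* The tunnel and its DPO are re-resolved only when the TX
           * sw_if_index changes; consecutive packets for the same tunnel
           * reuse the cached t0/t1, next index and DPO index. */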
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer(b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }

          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

          ASSERT(t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT(t1->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void * underlay0 = vlib_buffer_get_current(b0);
          void * underlay1 = vlib_buffer_get_current(b1);

          /* vnet_rewrite_two_headers writes only in (uword) 8-byte chunks
           * and discards the first 4 bytes of the (36-byte ip4 underlay)
           * rewrite; use memcpy as a workaround */
          clib_memcpy_fast(underlay0, t0->rewrite_header.data + rw_hdr_offset, underlay_hdr_len);
          clib_memcpy_fast(underlay1, t1->rewrite_header.data + rw_hdr_offset, underlay_hdr_len);

          ip4_header_t * ip4_0, * ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t * ip6_0, * ip6_1;
          udp_header_t * udp0, * udp1;
          u8 * l3_0, * l3_1;
          if (is_ip4)
            {
              ip4_vxlan_header_t * hdr0 = underlay0;
              ip4_vxlan_header_t * hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

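              /* If the QoS feature has marked this buffer, propagate the
               * stored bits into the outer IPv4 ToS byte. */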
              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *)ip4_0;
              l3_1 = (u8 *)ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_header_t * hdr0 = underlay0;
              ip6_vxlan_header_t * hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *)ip6_0;
              l3_1 = (u8 *)ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }

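          /* The inner flow hash becomes the outer UDP source port so that
           * underlay ECMP/LAG hashing can spread individual tunneled flows
           * (see RFC 7348). */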
          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

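          /* Checksums: either hand the work to hardware (csum_offload),
           * patch the IPv4 header checksum incrementally in software, or
           * compute the mandatory IPv6 UDP checksum in software. */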
          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
          /* IPv4: the UDP checksum is produced only when checksum offload
           * is used; otherwise just patch the IP header checksum */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */);
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */);
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
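          /* A transmitted UDP checksum of zero means "no checksum", so a
           * computed zero is sent as all-ones instead (RFC 768 / RFC 8200). */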
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT(bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT(bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

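          /* Per-tunnel-interface TX counters: a single batched increment
           * when both packets left via the same sw_if_index. */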
          if (sw_if_index0 == sw_if_index1)
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index0, 2, len0 + len1);
            }
          else
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index0, 1, len0);
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index1, 1, len1);
            }
          pkts_encapsulated += 2;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

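      /* Single-packet loop: same per-packet work as above, without the
       * pairwise prefetching. */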
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
          u32 flow_hash0 = vnet_l2_compute_flow_hash(b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT(t0->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void * underlay0 = vlib_buffer_get_current(b0);

          /* vnet_rewrite_one_header writes only in (uword) 8-byte chunks
           * and discards the first 4 bytes of the (36-byte ip4 underlay)
           * rewrite; use memcpy as a workaround */
          clib_memcpy_fast(underlay0, t0->rewrite_header.data + rw_hdr_offset, underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          udp_header_t * udp0;
          ip4_header_t * ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t * ip6_0;
          u8 * l3_0;
          if (is_ip4)
            {
              ip4_vxlan_header_t * hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8*)ip4_0;
              udp0 = &hdr->udp;
            }
          else /* ip6 path */
            {
              ip6_vxlan_header_t * hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *)ip6_0;
              udp0 = &hdr->udp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* IPv4: the UDP checksum is produced only when checksum offload
           * is used; otherwise just patch the IP header checksum */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT(bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated ++;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

static uword
vxlan4_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  /* Disable checksum offload, as the setup overhead in the tx node is not
     worthwhile for the ip4 header checksum alone, unless the udp checksum
     is also required */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                             /* csum_offload */ 0);
}

static uword
vxlan6_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6, as the udp checksum is mandatory */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                             /* csum_offload */ 1);
}

VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .function = vxlan4_encap,
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_encap_node, vxlan4_encap)

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .function = vxlan6_encap,
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_encap_node, vxlan6_encap)