FD.io VPP  v19.04.4-rc0-5-ge88582fac
Vector Packet Processing
encap.c
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

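/*
 * VXLAN-GBP encapsulation path.  Two graph nodes are defined here,
 * "vxlan4-gbp-encap" and "vxlan6-gbp-encap", both thin wrappers around
 * vxlan_gbp_encap_inline (): they prepend the tunnel's precomputed
 * IPv4/IPv6 + UDP + VXLAN-GBP underlay header onto each frame and hand
 * the result to the tunnel's next DPO.
 */
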
/* Statistics (not all errors) */
#define foreach_vxlan_gbp_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_gbp_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gbp_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,
  foreach_vxlan_gbp_encap_error
#undef _
    VXLAN_GBP_ENCAP_N_ERROR,
} vxlan_gbp_encap_error_t;

typedef enum
{
  VXLAN_GBP_ENCAP_NEXT_DROP,
  VXLAN_GBP_ENCAP_N_NEXT,
} vxlan_gbp_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
  u16 sclass;
  u8 flags;
} vxlan_gbp_encap_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 *
format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_encap_trace_t *t = va_arg (*args, vxlan_gbp_encap_trace_t *);

  s =
    format (s,
            "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %U",
            t->tunnel_index, t->vni, t->sclass,
            format_vxlan_gbp_header_gpflags, t->flags);
  return s;
}
#endif /* CLIB_MARCH_VARIANT */

always_inline uword
vxlan_gbp_encap_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  STATIC_ASSERT_SIZEOF (ip6_vxlan_gbp_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_gbp_header_t, 36);

  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM : VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }
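
          /*
           * The two buffer indices are copied to the next frame
           * speculatively, assuming next0/next1 match next_index;
           * vlib_validate_buffer_enqueue_x2 () at the bottom of the loop
           * repairs the frames if that speculation turns out to be wrong.
           */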

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }
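
          /*
           * The tunnel, next index and DPO index are cached keyed on the
           * TX sw_if_index, so the sup-hw-interface -> tunnel lookup above
           * is skipped while consecutive packets leave through the same
           * vxlan-gbp tunnel interface.
           */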

          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b0),
                                    vlib_buffer_get_current (b1),
                                    underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

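          /*
           * vnet_rewrite_two_headers () copied each tunnel's precomputed
           * rewrite (the underlay IP + UDP + VXLAN-GBP template) into the
           * space just before the current data, and the negative
           * vlib_buffer_advance () moved current_data back so the new
           * underlay header is now part of the packet.
           */
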
          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b0);
          void *underlay1 = vlib_buffer_get_current (b1);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          vxlan_gbp_header_t *vxlan_gbp0, *vxlan_gbp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr0 = underlay0;
              ip4_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_gbp_header_t *hdr0 = underlay0;
              ip6_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
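
          /*
           * payload_l0/payload_l1 are the chain lengths minus the L3
           * header, i.e. exactly the UDP datagram length: the same value
           * serves as the IPv6 payload_length above and as the UDP length
           * below.
           */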

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp1->gpflags = vnet_buffer2 (b1)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);
          vxlan_gbp1->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b1)->gbp.sclass);

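          /*
           * Three checksum strategies follow: with csum_offload the buffer
           * is only flagged and the l3/l4 header offsets recorded for the
           * driver to finish; for IPv4 without offload the header checksum
           * is patched incrementally (the rewrite template appears to have
           * been checksummed with a zero length field, so only the new
           * length, plus TOS when QoS rewrote it, needs folding in) and the
           * UDP checksum is left optional; for IPv6 the UDP checksum is
           * mandatory, so it is always computed here when not offloaded.
           */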
          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index1, 1, len1);
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
              tr->sclass = vnet_buffer2 (b1)->gbp.sclass;
              tr->flags = vnet_buffer2 (b1)->gbp.flags;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
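
      /*
       * Single-buffer loop: the same processing as above, one packet at a
       * time, for whatever remains of the frame once fewer than four
       * packets (or two next-frame slots) are left.
       */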

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b0),
                                   underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b0);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          vxlan_gbp_header_t *vxlan_gbp0;
          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }
          else /* ip6 path */
            {
              ip6_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.sclass);

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b0)->gbp.sclass;
              tr->flags = vnet_buffer2 (b0)->gbp.flags;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}
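
/*
 * VLIB_NODE_FN expands to the per-CPU-architecture (multiarch) variants of
 * each node function, so vxlan_gbp_encap_inline is compiled once per march
 * variant and the best variant for the running CPU is selected at startup.
 */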

VLIB_NODE_FN (vxlan4_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Disable chksum offload as setup overhead in tx node is not worthwhile
     for ip4 header checksum only, unless udp checksum is also required */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                                 /* csum_offload */ 0);
}

VLIB_NODE_FN (vxlan6_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as udp checksum is mandatory */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                                 /* csum_offload */ 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
{
  .name = "vxlan4-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
{
  .name = "vxlan6-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
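
/*
 * Only error-drop is registered statically as a next node; the actual
 * encap destination is reached through each tunnel's next_dpo, whose
 * dpoi_next_node index is what vxlan_gbp_encap_inline uses as next0/next1.
 */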

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */