FD.io VPP v21.06 - Vector Packet Processing
node.c
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>

typedef struct
{
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
  generic_header_offset_t gho;
} gso_trace_t;

static u8 *
format_gso_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gso_trace_t *t = va_arg (*args, gso_trace_t *);

  if (t->flags & VNET_BUFFER_F_GSO)
    {
      s = format (s, "gso_sz %d gso_l4_hdr_sz %d\n%U",
                  t->gso_size, t->gso_l4_hdr_sz,
                  format_generic_header_offset, &t->gho);
    }
  else
    {
      s = format (s, "non-gso buffer\n%U", format_generic_header_offset,
                  &t->gho);
    }

  return s;
}

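/*
 * Post-segmentation fixup for GSO buffers that carried an IP-in-IP
 * encapsulation: walk the freshly created split_buffers and rewrite the
 * outer IPv4 length/checksum or outer IPv6 payload length of each segment.
 * Returns the number of outer header bytes added across all segments.
 */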
static_always_inline u16
tso_segment_ipip_tunnel_fixup (vlib_main_t * vm,
                               vnet_interface_per_thread_data_t * ptd,
                               vlib_buffer_t * sb0,
                               generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);

      ip4_header_t *ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);
      ip6_header_t *ip6 =
        (ip6_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);

      if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          ip4->length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l3_hdr_offset);
          ip4->checksum = ip4_header_checksum (ip4);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          ip6->payload_length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l4_hdr_offset);
        }

      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

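/*
 * Rewrite the outer headers of a single VXLAN-encapsulated segment:
 * refresh the outer IPv4/IPv6 length fields and recompute the outer IPv4
 * header checksum and the outer UDP length/checksum.
 */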
static_always_inline void
tso_segment_vxlan_tunnel_headers_fixup (vlib_main_t * vm, vlib_buffer_t * b,
                                        generic_header_offset_t * gho)
{
  u8 proto = 0;
  ip4_header_t *ip4 = 0;
  ip6_header_t *ip6 = 0;
  udp_header_t *udp = 0;

  ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  udp =
    (udp_header_t *) (vlib_buffer_get_current (b) + gho->outer_l4_hdr_offset);

  if (gho->gho_flags & GHO_F_OUTER_IP4)
    {
      proto = ip4->protocol;
      ip4->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (gho->gho_flags & GHO_F_OUTER_IP6)
    {
      proto = ip6->protocol;
      ip6->payload_length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
    }
  if (proto == IP_PROTOCOL_UDP)
    {
      int bogus;
      udp->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
      udp->checksum = 0;
      if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          udp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          udp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
        }
      /* FIXME: it should be OUTER_UDP_CKSUM */
      vnet_buffer_offload_flags_clear (b, VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
    }
}

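/*
 * Post-segmentation fixup for VXLAN-encapsulated GSO buffers: apply the
 * outer header fixup to every buffer in split_buffers and return the
 * number of outer header bytes added across all segments.
 */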
static_always_inline u16
tso_segment_vxlan_tunnel_fixup (vlib_main_t * vm,
                                vnet_interface_per_thread_data_t * ptd,
                                vlib_buffer_t * sb0,
                                generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);

      tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

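/*
 * Compute how many segments the GSO buffer will produce and allocate that
 * many buffers into ptd->split_buffers. Returns the number of buffers
 * allocated, or 0 (after freeing any partial allocation) on failure.
 */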
static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
                   vnet_interface_per_thread_data_t * ptd,
                   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
                   u16 gso_size, u16 first_data_size,
                   generic_header_offset_t * gho)
{
  u16 n_alloc, size;
  u16 first_packet_length = l234_sz + first_data_size;

  /*
   * size is the amount of data per segmented buffer except the 1st
   * segmented buffer.
   * l2_hdr_offset is an offset == current_data of vlib_buffer_t.
   * l234_sz is hdr_sz from l2_hdr_offset.
   */
  size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
              - gho->l2_hdr_offset);

  /*
   * First segmented buffer length is calculated separately.
   * As it may contain less data than gso_size (when gso_size is
   * greater than current_length of 1st buffer from GSO chained
   * buffers) and/or size calculated above.
   */
  u16 n_bufs = 1;

  /*
   * Total packet length minus first packet length including l234 header.
   * rounded-up division
   */
  ASSERT (n_bytes_b0 > first_packet_length);
  n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);

  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}

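/*
 * Initialize a freshly allocated segment buffer from the original buffer:
 * copy the relevant buffer metadata and the first 'length' bytes of data
 * (the L2/L3/L4 header template plus the first chunk of payload).
 */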
static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
                                 u32 flags, u16 length)
{
  /* copying objects from cacheline 0 */
  nb0->current_data = b0->current_data;
  nb0->current_length = length;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  nb0->flow_id = b0->flow_id;
  nb0->error = b0->error;
  nb0->current_config_index = b0->current_config_index;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));

  /* copying objects from cacheline 1 */
  nb0->trace_handle = b0->trace_handle;
  nb0->total_length_not_including_first_buffer = 0;

  /* copying data */
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
                    vlib_buffer_get_current (b0), length);
}

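/*
 * Prepare the next segment buffer: copy the header template from b0,
 * work out how much payload fits into this segment and where it starts,
 * and stamp the per-segment TCP sequence number.
 */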
static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
                            vlib_buffer_t * b0, u16 template_data_sz,
                            u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
                            u32 next_tcp_seq, u32 flags,
                            generic_header_offset_t * gho)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
              vlib_buffer_get_default_data_size (vm) - (template_data_sz +
                                                        nb0->current_data));
  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (nb0) + gho->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}

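/*
 * Finalize one segment: set the TCP flags, rewrite the inner IPv4/IPv6
 * length fields, compute the inner IP/TCP checksums in software and clear
 * the corresponding offload flags, and, for non-tunneled L3 buffers, run
 * the midchain adjacency fixup if one is attached.
 */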
static_always_inline void
tso_fixup_segmented_buf (vlib_main_t * vm, vlib_buffer_t * b0, u8 tcp_flags,
                         int is_l2, int is_ip6,
                         generic_header_offset_t * gho)
{
  ip4_header_t *ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  ip6_header_t *ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    {
      ip6->payload_length =
        clib_host_to_net_u16 (b0->current_length - gho->l4_hdr_offset);
      if (gho->gho_flags & GHO_F_TCP)
        {
          int bogus = 0;
          tcp->checksum = 0;
          tcp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6, &bogus);
          vnet_buffer_offload_flags_clear (b0,
                                           VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
        }
    }
  else
    {
      ip4->length =
        clib_host_to_net_u16 (b0->current_length - gho->l3_hdr_offset);
      if (gho->gho_flags & GHO_F_IP4)
        ip4->checksum = ip4_header_checksum (ip4);
      if (gho->gho_flags & GHO_F_TCP)
        {
          tcp->checksum = 0;
          tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
        }
      vnet_buffer_offload_flags_clear (b0, (VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
                                            VNET_BUFFER_OFFLOAD_F_TCP_CKSUM));
    }

  if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
    {
      u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

      ip_adjacency_t *adj0 = adj_get (adj_index0);

      if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
          adj0->sub_type.midchain.fixup_func)
        /* calls e.g. ipip44_fixup */
        adj0->sub_type.midchain.fixup_func
          (vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
    }
}

/**
 * Allocate the necessary number of ptd->split_buffers,
 * and segment the possibly chained buffer(s) from b0 into
 * there.
 *
 * Return the cumulative number of bytes sent or zero
 * if allocation failed.
 */

static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
                    u32 sbi0, vlib_buffer_t * sb0,
                    generic_header_offset_t * gho, u32 n_bytes_b0, int is_l2,
                    int is_ip6)
{
  u32 n_tx_bytes = 0;
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (sb0) + gho->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = gho->hdr_sz;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE
      (0 == tso_alloc_tx_bufs
       (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
    return 0;

  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
                                   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;              /* the buffer [0] is b0 */

      src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;

      tso_fixup_segmented_buf (vm, b0, tcp_flags_no_fin_psh, is_l2, is_ip6,
                               gho);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
                                  &dst_left, next_tcp_seq, default_bflags,
                                  gho);

      /* an arbitrary large number to catch the runaway loops */
      int nloops = 2000;
      while (total_src_left)
        {
          if (nloops-- <= 0)
            clib_panic ("infinite loop detected");
          u16 bytes_to_copy = clib_min (src_left, dst_left);

          clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

          src_left -= bytes_to_copy;
          src_ptr += bytes_to_copy;
          total_src_left -= bytes_to_copy;
          dst_left -= bytes_to_copy;
          dst_ptr += bytes_to_copy;
          next_tcp_seq += bytes_to_copy;
          cdb0->current_length += bytes_to_copy;

          if (0 == src_left)
            {
              int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
              u32 next_bi = csb0->next_buffer;

              /* init src to the next buffer in chain */
              if (has_next)
                {
                  csbi0 = next_bi;
                  csb0 = vlib_get_buffer (vm, csbi0);
                  src_left = csb0->current_length;
                  src_ptr = vlib_buffer_get_current (csb0);
                }
              else
                {
                  ASSERT (total_src_left == 0);
                  break;
                }
            }
          if (0 == dst_left && total_src_left)
            {
              n_tx_bytes += cdb0->current_length;
              tso_fixup_segmented_buf (vm, cdb0, tcp_flags_no_fin_psh, is_l2,
                                       is_ip6, gho);
              ASSERT (dbi < vec_len (ptd->split_buffers));
              cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
              tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
                                          gso_size, &dst_ptr, &dst_left,
                                          next_tcp_seq, default_bflags, gho);
            }
        }

      tso_fixup_segmented_buf (vm, cdb0, save_tcp_flags, is_l2, is_ip6, gho);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}

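/*
 * Drop a single buffer: bump the TX error counter of the interface it was
 * destined to and hand it to the error-drop node with the given error code.
 */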
static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
                           vlib_node_runtime_t * node, u32 * pbi0,
                           u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
                      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
                           /* buffer stride */ 1,
                           /* n_buffers */ 1,
                           VNET_INTERFACE_OUTPUT_NEXT_DROP,
                           node->node_index, drop_error_code);
}

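/*
 * Main worker for the gso-* nodes. Buffers whose TX interface supports
 * TCP GSO in hardware are passed through unchanged; otherwise GSO buffers
 * are segmented in software into ptd->split_buffers, the segments are
 * enqueued to the next feature node, and the original buffer is freed.
 * GRE/GENEVE encapsulated GSO buffers are not supported and are dropped.
 */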
static_always_inline uword
vnet_gso_node_inline (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame,
                      vnet_main_t * vnm,
                      vnet_hw_interface_t * hi,
                      int is_l2, int is_ip4, int is_ip6, int do_segmentation)
{
  u32 *to_next;
  u32 next_index = node->cached_next_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 *from_end = from + n_left_from;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      if (!do_segmentation)
        while (from + 8 <= from_end && n_left_to_next >= 4)
          {
            u32 bi0, bi1, bi2, bi3;
            u32 next0, next1, next2, next3;
            u32 swif0, swif1, swif2, swif3;
            gso_trace_t *t0, *t1, *t2, *t3;
            vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;

            /* Prefetch next iteration. */
            vlib_prefetch_buffer_header (b[4], LOAD);
            vlib_prefetch_buffer_header (b[5], LOAD);
            vlib_prefetch_buffer_header (b[6], LOAD);
            vlib_prefetch_buffer_header (b[7], LOAD);

            bi0 = from[0];
            bi1 = from[1];
            bi2 = from[2];
            bi3 = from[3];
            to_next[0] = bi0;
            to_next[1] = bi1;
            to_next[2] = bi2;
            to_next[3] = bi3;

            swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
            swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
            swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
            swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

            if (PREDICT_FALSE (hi->sw_if_index != swif0))
              {
                hi0 = vnet_get_sup_hw_interface (vnm, swif0);
                if ((hi0->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO) == 0
                    && (b[0]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif1))
              {
                hi1 = vnet_get_sup_hw_interface (vnm, swif1);
                if ((hi1->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO) == 0
                    && (b[1]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif2))
              {
                hi2 = vnet_get_sup_hw_interface (vnm, swif2);
                if ((hi2->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO) == 0
                    && (b[2]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif3))
              {
                hi3 = vnet_get_sup_hw_interface (vnm, swif3);
                if ((hi3->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO) == 0
                    && (b[3]->flags & VNET_BUFFER_F_GSO))
                  break;
              }

            if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
                t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
                t0->gso_size = vnet_buffer2 (b[0])->gso_size;
                t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
                t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
                t1->gso_size = vnet_buffer2 (b[1])->gso_size;
                t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[1], &t1->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
                t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
                t2->gso_size = vnet_buffer2 (b[2])->gso_size;
                t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[2], &t2->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
                t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
                t3->gso_size = vnet_buffer2 (b[3])->gso_size;
                t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[3], &t3->gho, is_l2,
                                                   is_ip4, is_ip6);
              }

            from += 4;
            to_next += 4;
            n_left_to_next -= 4;
            n_left_from -= 4;

            next0 = next1 = 0;
            next2 = next3 = 0;
            vnet_feature_next (&next0, b[0]);
            vnet_feature_next (&next1, b[1]);
            vnet_feature_next (&next2, b[2]);
            vnet_feature_next (&next3, b[3]);
            vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, bi1, bi2,
                                             bi3, next0, next1, next2, next3);
            b += 4;
          }

      while (from + 1 <= from_end && n_left_to_next > 0)
        {
          u32 bi0, swif0;
          gso_trace_t *t0;
          vnet_hw_interface_t *hi0;
          u32 next0 = 0;
          u32 do_segmentation0 = 0;

          swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
          if (PREDICT_FALSE (hi->sw_if_index != swif0))
            {
              hi0 = vnet_get_sup_hw_interface (vnm, swif0);
              if ((hi0->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO) == 0 &&
                  (b[0]->flags & VNET_BUFFER_F_GSO))
                do_segmentation0 = 1;
            }
          else
            do_segmentation0 = do_segmentation;

          /* speculatively enqueue b0 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next += 1;
          n_left_to_next -= 1;
          from += 1;
          n_left_from -= 1;

          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
              t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
              t0->gso_size = vnet_buffer2 (b[0])->gso_size;
              t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
              vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                 is_ip4, is_ip6);
            }

          if (do_segmentation0)
            {
              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
                {
                  /*
                   * Undo the enqueue of the b0 - it is not going anywhere,
                   * and will be freed either after it's segmented or
                   * when dropped, if there is no buffers to segment into.
                   */
                  to_next -= 1;
                  n_left_to_next += 1;
                  /* undo the counting. */
                  generic_header_offset_t gho = { 0 };
                  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
                  u32 n_tx_bytes = 0;
                  u32 inner_is_ip6 = is_ip6;

                  vnet_generic_header_offset_parser (b[0], &gho, is_l2,
                                                     is_ip4, is_ip6);

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_TUNNEL))
                    {
                      if (PREDICT_FALSE
                          (gho.gho_flags & (GHO_F_GRE_TUNNEL |
                                            GHO_F_GENEVE_TUNNEL)))
                        {
                          /* not supported yet */
                          drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                     hi->sw_if_index,
                                                     VNET_INTERFACE_OUTPUT_ERROR_UNHANDLED_GSO_TYPE);
                          b += 1;
                          continue;
                        }

                      vnet_get_inner_header (b[0], &gho);

                      n_bytes_b0 -= gho.outer_hdr_sz;
                      inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
                    }

                  n_tx_bytes =
                    tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
                                        is_l2, inner_is_ip6);

                  if (PREDICT_FALSE (n_tx_bytes == 0))
                    {
                      drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                 hi->sw_if_index,
                                                 VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
                      b += 1;
                      continue;
                    }

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
                    }
                  else if (PREDICT_FALSE
                           (gho.gho_flags & (GHO_F_IPIP_TUNNEL |
                                             GHO_F_IPIP6_TUNNEL)))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
                    }

                  u16 n_tx_bufs = vec_len (ptd->split_buffers);
                  u32 *from_seg = ptd->split_buffers;

                  while (n_tx_bufs > 0)
                    {
                      u32 sbi0;
                      vlib_buffer_t *sb0;
                      while (n_tx_bufs > 0 && n_left_to_next > 0)
                        {
                          sbi0 = to_next[0] = from_seg[0];
                          sb0 = vlib_get_buffer (vm, sbi0);
                          ASSERT (sb0->current_length > 0);
                          to_next += 1;
                          from_seg += 1;
                          n_left_to_next -= 1;
                          n_tx_bufs -= 1;
                          next0 = 0;
                          vnet_feature_next (&next0, sb0);
                          vlib_validate_buffer_enqueue_x1 (vm, node,
                                                           next_index,
                                                           to_next,
                                                           n_left_to_next,
                                                           sbi0, next0);
                        }
                      vlib_put_next_frame (vm, node, next_index,
                                           n_left_to_next);
                      if (n_tx_bufs > 0)
                        vlib_get_next_frame (vm, node, next_index,
                                             to_next, n_left_to_next);
                    }
                  /* The buffers were enqueued. Reset the length */
                  _vec_len (ptd->split_buffers) = 0;
                  /* Free the now segmented buffer */
                  vlib_buffer_free_one (vm, bi0);
                  b += 1;
                  continue;
                }
            }

          vnet_feature_next (&next0, b[0]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
          b += 1;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

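/*
 * Per-frame entry point shared by the four gso nodes: the capabilities of
 * the first buffer's TX interface decide whether the frame takes the
 * pass-through path or the software segmentation path.
 */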
static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
                 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_l2,
                 int is_ip4, int is_ip6)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;

  if (frame->n_vectors > 0)
    {
      u32 *from = vlib_frame_vector_args (frame);
      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
      hi = vnet_get_sup_hw_interface (vnm,
                                      vnet_buffer (b)->sw_if_index[VLIB_TX]);

      if (hi->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO)
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 0);
      else
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 1);
    }
  return 0;
}

VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

/* *INDENT-OFF* */

VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip6",
};

VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
  .runs_before = VNET_FEATURES ("ipsec4-output-feature"),
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
  .runs_before = VNET_FEATURES ("ipsec6-output-feature"),
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */