FD.io VPP v19.04-6-g6f05f72
interface_output.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * interface_output.c: interface output node
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vnet/vnet.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/feature/feature.h>

typedef struct
{
  u32 sw_if_index;
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
  u8 data[128 - 3 * sizeof (u32)];
}
interface_output_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 *
format_vnet_interface_output_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  vlib_node_t *node = va_arg (*va, vlib_node_t *);
  interface_output_trace_t *t = va_arg (*va, interface_output_trace_t *);
  vnet_main_t *vnm = vnet_get_main ();
  vnet_sw_interface_t *si;
  u32 indent;

  if (t->sw_if_index != (u32) ~ 0)
    {
      indent = format_get_indent (s);

      if (pool_is_free_index
          (vnm->interface_main.sw_interfaces, t->sw_if_index))
        {
          /* the interface may have been deleted by the time the trace is printed */
          s = format (s, "sw_if_index: %d ", t->sw_if_index);
        }
      else
        {
          si = vnet_get_sw_interface (vnm, t->sw_if_index);
          s =
            format (s, "%U ", format_vnet_sw_interface_name, vnm, si,
                    t->flags);
        }
#define _(bit, name, v, x) \
  if (v && (t->flags & VNET_BUFFER_F_##name)) \
    s = format (s, "%s ", v);
      foreach_vnet_buffer_flag
#undef _
      if (t->flags & VNET_BUFFER_F_GSO)
        {
          s = format (s, "\n%Ugso_sz %d gso_l4_hdr_sz %d",
                      format_white_space, indent + 2, t->gso_size,
                      t->gso_l4_hdr_sz);
        }
      s =
        format (s, "\n%U%U", format_white_space, indent,
                node->format_buffer ? node->format_buffer : format_hex_bytes,
                t->data, sizeof (t->data));
    }
  return s;
}

static void
vnet_interface_output_trace (vlib_main_t * vm,
                             vlib_node_runtime_t * node,
                             vlib_frame_t * frame, uword n_buffers)
{
  u32 n_left, *from;

  n_left = n_buffers;
  from = vlib_frame_vector_args (frame);

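  /*
   * Typical VPP dual-loop: trace two buffers per iteration while
   * prefetching the metadata of the next two, so the buffer headers are
   * already in cache when the following iteration dereferences them.
   */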
  while (n_left >= 4)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      interface_output_trace_t *t0, *t1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, from[3], LOAD);

      bi0 = from[0];
      bi1 = from[1];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
          t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
          t0->flags = b0->flags;
          t0->gso_size = vnet_buffer2 (b0)->gso_size;
          t0->gso_l4_hdr_sz = vnet_buffer2 (b0)->gso_l4_hdr_sz;
          clib_memcpy_fast (t0->data, vlib_buffer_get_current (b0),
                            sizeof (t0->data));
        }
      if (b1->flags & VLIB_BUFFER_IS_TRACED)
        {
          t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
          t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
          t1->flags = b1->flags;
          t1->gso_size = vnet_buffer2 (b1)->gso_size;
          t1->gso_l4_hdr_sz = vnet_buffer2 (b1)->gso_l4_hdr_sz;
          clib_memcpy_fast (t1->data, vlib_buffer_get_current (b1),
                            sizeof (t1->data));
        }
      from += 2;
      n_left -= 2;
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      interface_output_trace_t *t0;

      bi0 = from[0];

      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
          t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
          t0->flags = b0->flags;
          t0->gso_size = vnet_buffer2 (b0)->gso_size;
          t0->gso_l4_hdr_sz = vnet_buffer2 (b0)->gso_l4_hdr_sz;
          clib_memcpy_fast (t0->data, vlib_buffer_get_current (b0),
                            sizeof (t0->data));
        }
      from += 1;
      n_left -= 1;
    }
}

static_always_inline void
calc_checksums (vlib_main_t * vm, vlib_buffer_t * b)
{
  ip4_header_t *ip4;
  ip6_header_t *ip6;
  tcp_header_t *th;
  udp_header_t *uh;

  int is_ip4 = (b->flags & VNET_BUFFER_F_IS_IP4) != 0;
  int is_ip6 = (b->flags & VNET_BUFFER_F_IS_IP6) != 0;

  ASSERT (!(is_ip4 && is_ip6));

  ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
  ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
  th = (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
  uh = (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);

  if (is_ip4)
    {
      ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        {
          th->checksum = 0;
          th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
        }
      if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        uh->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
    }
  if (is_ip6)
    {
      int bogus;
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        {
          th->checksum = 0;
          th->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
        }
      if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        {
          uh->checksum = 0;
          uh->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
        }
    }

  b->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
  b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
  b->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
}

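/*
 * Illustrative sketch (not part of the original file): an upstream node
 * that wants calc_checksums () above to fill in the IP and TCP checksums
 * of an IPv4 packet in software would set the metadata it consumes,
 * roughly:
 *
 *   vnet_buffer (b)->l3_hdr_offset = l3_off;   // start of the ip4 header
 *   vnet_buffer (b)->l4_hdr_offset = l4_off;   // start of the tcp header
 *   b->flags |= VNET_BUFFER_F_IS_IP4
 *     | VNET_BUFFER_F_L3_HDR_OFFSET_VALID
 *     | VNET_BUFFER_F_L4_HDR_OFFSET_VALID
 *     | VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
 *
 * The offload flags are cleared again once the checksums are computed.
 */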

static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
                   vnet_interface_per_thread_data_t * ptd,
                   vlib_buffer_t * b0, u16 l4_hdr_sz)
{
  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
  u16 gso_size = vnet_buffer2 (b0)->gso_size;
  u16 l234_sz = vnet_buffer (b0)->l4_hdr_offset + l4_hdr_sz;
  /* rounded-up division */
  u16 n_bufs = (n_bytes_b0 - l234_sz + (gso_size - 1)) / gso_size;
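  /*
   * (x + gso_size - 1) / gso_size is the usual integer ceiling division:
   * e.g. 4000 payload bytes with gso_size 1448 gives
   * (4000 + 1447) / 1448 = 3 segment buffers.
   */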
  u16 n_alloc;

  ASSERT (n_bufs > 0);
  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return 1;
}
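/*
 * Note: all destination buffers are allocated up front, so the copy loop
 * in tso_segment_buffer () can never fail half-way through a packet; on a
 * buffer shortage everything is freed here and the caller drops the
 * original packet instead.
 */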

static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
                                 u32 flags, u16 length)
{
  nb0->current_data = 0;
  nb0->total_length_not_including_first_buffer = 0;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));
  clib_memcpy_fast (nb0->data, b0->data, length);
  nb0->current_length = length;
}

static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
                            vlib_buffer_t * b0, u16 template_data_sz,
                            u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
                            u32 next_tcp_seq, u32 flags)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
              vlib_buffer_get_default_data_size (vm) - template_data_sz);
  *p_dst_ptr = nb0->data + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (nb0->data + vnet_buffer (nb0)->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}

static_always_inline void
tso_fixup_segmented_buf (vlib_buffer_t * b0, u8 tcp_flags, int is_ip6)
{
  u16 l3_hdr_offset = vnet_buffer (b0)->l3_hdr_offset;
  u16 l4_hdr_offset = vnet_buffer (b0)->l4_hdr_offset;
  ip4_header_t *ip4 = (ip4_header_t *) (b0->data + l3_hdr_offset);
  ip6_header_t *ip6 = (ip6_header_t *) (b0->data + l3_hdr_offset);
  tcp_header_t *tcp = (tcp_header_t *) (b0->data + l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    ip6->payload_length =
      clib_host_to_net_u16 (b0->current_length -
                            vnet_buffer (b0)->l4_hdr_offset);
  else
    ip4->length =
      clib_host_to_net_u16 (b0->current_length -
                            vnet_buffer (b0)->l3_hdr_offset);
}

/**
 * Allocate the necessary number of ptd->split_buffers,
 * and segment the possibly chained buffer(s) from b0 into
 * there.
 *
 * Return the cumulative number of bytes sent or zero
 * if allocation failed.
 */
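/*
 * Worked example (illustrative): a 4096-byte TCP payload behind a 54-byte
 * l2+l3+l4 header, with gso_size 1448, becomes three segments carrying
 * 1448, 1448 and 1200 payload bytes; each segment gets a copy of the
 * original headers, an adjusted tcp seq_number and ip length, and FIN/PSH
 * only on the last segment (see tso_fixup_segmented_buf () above).
 */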

static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
                    int do_tx_offloads, u32 sbi0, vlib_buffer_t * sb0,
                    u32 n_bytes_b0)
{
  u32 n_tx_bytes = 0;
  int is_ip4 = sb0->flags & VNET_BUFFER_F_IS_IP4;
  int is_ip6 = sb0->flags & VNET_BUFFER_F_IS_IP6;
  ASSERT (is_ip4 || is_ip6);
  ASSERT (sb0->flags & VNET_BUFFER_F_L2_HDR_OFFSET_VALID);
  ASSERT (sb0->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID);
  ASSERT (sb0->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  int l4_hdr_sz = vnet_buffer2 (sb0)->gso_l4_hdr_sz;
  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (sb0->data + vnet_buffer (sb0)->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = vnet_buffer (sb0)->l4_hdr_offset + l4_hdr_sz;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE (!tso_alloc_tx_bufs (vm, ptd, sb0, l4_hdr_sz)))
    return 0;

  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
                                   l4_hdr_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;              /* the buffer [0] is b0 */

      src_ptr = sb0->data + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;
      b0->current_length = l234_sz + first_data_size;

      tso_fixup_segmented_buf (b0, tcp_flags_no_fin_psh, is_ip6);
      if (do_tx_offloads)
        calc_checksums (vm, b0);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
                                  &dst_left, next_tcp_seq, default_bflags);

      /* an arbitrary large number to catch the runaway loops */
      int nloops = 2000;
      while (total_src_left)
        {
          if (nloops-- <= 0)
            clib_panic ("infinite loop detected");
          u16 bytes_to_copy = clib_min (src_left, dst_left);

          clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

          src_left -= bytes_to_copy;
          src_ptr += bytes_to_copy;
          total_src_left -= bytes_to_copy;
          dst_left -= bytes_to_copy;
          dst_ptr += bytes_to_copy;
          next_tcp_seq += bytes_to_copy;
          cdb0->current_length += bytes_to_copy;

          if (0 == src_left)
            {
              int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
              u32 next_bi = csb0->next_buffer;

              /* init src to the next buffer in chain */
              if (has_next)
                {
                  csbi0 = next_bi;
                  csb0 = vlib_get_buffer (vm, csbi0);
                  src_left = csb0->current_length;
                  src_ptr = csb0->data;
                }
              else
                {
                  ASSERT (total_src_left == 0);
                  break;
                }
            }
          if (0 == dst_left && total_src_left)
            {
              if (do_tx_offloads)
                calc_checksums (vm, cdb0);
              n_tx_bytes += cdb0->current_length;
              ASSERT (dbi < vec_len (ptd->split_buffers));
              cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
              tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
                                          gso_size, &dst_ptr, &dst_left,
                                          next_tcp_seq, default_bflags);
            }
        }

      tso_fixup_segmented_buf (cdb0, save_tcp_flags, is_ip6);
      if (do_tx_offloads)
        calc_checksums (vm, cdb0);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}

static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
                           vlib_node_runtime_t * node, u32 * pbi0,
                           u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;
  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
                      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, rt->sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
                           /* buffer stride */ 1,
                           /* n_buffers */ 1,
                           VNET_INTERFACE_OUTPUT_NEXT_DROP,
                           node->node_index, drop_error_code);
}

static_always_inline uword
vnet_interface_output_node_inline_gso (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * frame,
                                       vnet_main_t * vnm,
                                       vnet_hw_interface_t * hi,
                                       int do_tx_offloads,
                                       int do_segmentation)
{
  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
  vnet_sw_interface_t *si;
  u32 n_left_to_tx, *from, *from_end, *to_tx;
  u32 n_bytes, n_buffers, n_packets;
  u32 n_bytes_b0, n_bytes_b1, n_bytes_b2, n_bytes_b3;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
  u32 current_config_index = ~0;
  u8 arc = im->output_feature_arc_index;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);

  n_buffers = frame->n_vectors;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    vnet_interface_output_trace (vm, node, frame, n_buffers);

  from = vlib_frame_vector_args (frame);

  if (rt->is_deleted)
    return vlib_error_drop_buffers (vm, node, from,
                                    /* buffer stride */ 1,
                                    n_buffers,
                                    VNET_INTERFACE_OUTPUT_NEXT_DROP,
                                    node->node_index,
                                    VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DELETED);

  si = vnet_get_sw_interface (vnm, rt->sw_if_index);
  hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
  if (!(si->flags & (VNET_SW_INTERFACE_FLAG_ADMIN_UP |
                     VNET_SW_INTERFACE_FLAG_BOND_SLAVE)) ||
      !(hi->flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
    {
      vlib_simple_counter_main_t *cm;

      cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
                             VNET_INTERFACE_COUNTER_TX_ERROR);
      vlib_increment_simple_counter (cm, thread_index,
                                     rt->sw_if_index, n_buffers);

      return vlib_error_drop_buffers (vm, node, from,
                                      /* buffer stride */ 1,
                                      n_buffers,
                                      VNET_INTERFACE_OUTPUT_NEXT_DROP,
                                      node->node_index,
                                      VNET_INTERFACE_OUTPUT_ERROR_INTERFACE_DOWN);
    }

  from_end = from + n_buffers;

  /* Total byte count of all buffers. */
  n_bytes = 0;
  n_packets = 0;

  /* interface-output feature arc handling */
  if (PREDICT_FALSE (vnet_have_features (arc, rt->sw_if_index)))
    {
      vnet_feature_config_main_t *fcm;
      fcm = vnet_feature_get_config_main (arc);
      current_config_index = vnet_get_feature_config_index (arc,
                                                            rt->sw_if_index);
      vnet_get_config_data (&fcm->config_main, &current_config_index,
                            &next_index, 0);
    }

  while (from < from_end)
    {
      /* Get new next frame since previous incomplete frame may have less
         than VNET_FRAME_SIZE vectors in it. */
      vlib_get_new_next_frame (vm, node, next_index, to_tx, n_left_to_tx);

      while (from + 8 <= from_end && n_left_to_tx >= 4)
        {
          u32 bi0, bi1, bi2, bi3;
          vlib_buffer_t *b0, *b1, *b2, *b3;
          u32 tx_swif0, tx_swif1, tx_swif2, tx_swif3;
          u32 or_flags;

          /* Prefetch next iteration. */
          vlib_prefetch_buffer_with_index (vm, from[4], LOAD);
          vlib_prefetch_buffer_with_index (vm, from[5], LOAD);
          vlib_prefetch_buffer_with_index (vm, from[6], LOAD);
          vlib_prefetch_buffer_with_index (vm, from[7], LOAD);

          bi0 = from[0];
          bi1 = from[1];
          bi2 = from[2];
          bi3 = from[3];
          to_tx[0] = bi0;
          to_tx[1] = bi1;
          to_tx[2] = bi2;
          to_tx[3] = bi3;
          if (!do_segmentation)
            {
              from += 4;
              to_tx += 4;
              n_left_to_tx -= 4;
            }

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);

          if (do_segmentation)
            {
              or_flags = b0->flags | b1->flags | b2->flags | b3->flags;

              /* go to single loop if we need TSO segmentation */
              if (PREDICT_FALSE (or_flags & VNET_BUFFER_F_GSO))
                break;
              from += 4;
              to_tx += 4;
              n_left_to_tx -= 4;
            }

          /* Be grumpy about zero length buffers for benefit of
             driver tx function. */
          ASSERT (b0->current_length > 0);
          ASSERT (b1->current_length > 0);
          ASSERT (b2->current_length > 0);
          ASSERT (b3->current_length > 0);

          n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
          n_bytes_b1 = vlib_buffer_length_in_chain (vm, b1);
          n_bytes_b2 = vlib_buffer_length_in_chain (vm, b2);
          n_bytes_b3 = vlib_buffer_length_in_chain (vm, b3);
          tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
          tx_swif1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
          tx_swif2 = vnet_buffer (b2)->sw_if_index[VLIB_TX];
          tx_swif3 = vnet_buffer (b3)->sw_if_index[VLIB_TX];

          n_bytes += n_bytes_b0 + n_bytes_b1;
          n_bytes += n_bytes_b2 + n_bytes_b3;
          n_packets += 4;

          if (PREDICT_FALSE (current_config_index != ~0))
            {
              vnet_buffer (b0)->feature_arc_index = arc;
              vnet_buffer (b1)->feature_arc_index = arc;
              vnet_buffer (b2)->feature_arc_index = arc;
              vnet_buffer (b3)->feature_arc_index = arc;
              b0->current_config_index = current_config_index;
              b1->current_config_index = current_config_index;
              b2->current_config_index = current_config_index;
              b3->current_config_index = current_config_index;
            }

          /* update vlan subif tx counts, if required */
          if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
            {
              vlib_increment_combined_counter (im->combined_sw_if_counters +
                                               VNET_INTERFACE_COUNTER_TX,
                                               thread_index, tx_swif0, 1,
                                               n_bytes_b0);
            }

          if (PREDICT_FALSE (tx_swif1 != rt->sw_if_index))
            {

              vlib_increment_combined_counter (im->combined_sw_if_counters +
                                               VNET_INTERFACE_COUNTER_TX,
                                               thread_index, tx_swif1, 1,
                                               n_bytes_b1);
            }

          if (PREDICT_FALSE (tx_swif2 != rt->sw_if_index))
            {

              vlib_increment_combined_counter (im->combined_sw_if_counters +
                                               VNET_INTERFACE_COUNTER_TX,
                                               thread_index, tx_swif2, 1,
                                               n_bytes_b2);
            }
          if (PREDICT_FALSE (tx_swif3 != rt->sw_if_index))
            {

              vlib_increment_combined_counter (im->combined_sw_if_counters +
                                               VNET_INTERFACE_COUNTER_TX,
                                               thread_index, tx_swif3, 1,
                                               n_bytes_b3);
            }

          if (!do_segmentation)
            or_flags = b0->flags | b1->flags | b2->flags | b3->flags;

          if (do_tx_offloads)
            {
              if (or_flags &
                  (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
                   VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
                   VNET_BUFFER_F_OFFLOAD_IP_CKSUM))
                {
                  calc_checksums (vm, b0);
                  calc_checksums (vm, b1);
                  calc_checksums (vm, b2);
                  calc_checksums (vm, b3);
                }
            }
        }

      while (from + 1 <= from_end && n_left_to_tx >= 1)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 tx_swif0;

          bi0 = from[0];
          to_tx[0] = bi0;
          from += 1;
          to_tx += 1;
          n_left_to_tx -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* Be grumpy about zero length buffers for benefit of
             driver tx function. */
          ASSERT (b0->current_length > 0);

          n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
          tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
          n_bytes += n_bytes_b0;
          n_packets += 1;

          if (PREDICT_FALSE (current_config_index != ~0))
            {
              vnet_buffer (b0)->feature_arc_index = arc;
              b0->current_config_index = current_config_index;
            }

          if (do_segmentation)
            {
              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
                {
                  /*
                   * Undo the enqueue of the b0 - it is not going anywhere,
                   * and will be freed either after it's segmented or
                   * when dropped, if there is no buffers to segment into.
                   */
                  to_tx -= 1;
                  n_left_to_tx += 1;
                  /* undo the counting. */
                  n_bytes -= n_bytes_b0;
                  n_packets -= 1;

                  u32 n_tx_bytes = 0;

                  n_tx_bytes =
                    tso_segment_buffer (vm, ptd, do_tx_offloads, bi0, b0,
                                        n_bytes_b0);

                  if (PREDICT_FALSE (n_tx_bytes == 0))
                    {
                      drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                 VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
                      continue;
                    }

                  u16 n_tx_bufs = vec_len (ptd->split_buffers);
                  u32 *from_tx_seg = ptd->split_buffers;

                  while (n_tx_bufs > 0)
                    {
                      if (n_tx_bufs >= n_left_to_tx)
                        {
                          while (n_left_to_tx > 0)
                            {
                              to_tx[0] = from_tx_seg[0];
                              to_tx += 1;
                              from_tx_seg += 1;
                              n_left_to_tx -= 1;
                              n_tx_bufs -= 1;
                              n_packets += 1;
                            }
                          vlib_put_next_frame (vm, node, next_index,
                                               n_left_to_tx);
                          vlib_get_new_next_frame (vm, node, next_index,
                                                   to_tx, n_left_to_tx);
                        }
                      else
                        {
                          while (n_tx_bufs > 0)
                            {
                              to_tx[0] = from_tx_seg[0];
                              to_tx += 1;
                              from_tx_seg += 1;
                              n_left_to_tx -= 1;
                              n_tx_bufs -= 1;
                              n_packets += 1;
                            }
                        }
                    }
                  n_bytes += n_tx_bytes;
                  if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
                    {

                      vlib_increment_combined_counter
                        (im->combined_sw_if_counters +
                         VNET_INTERFACE_COUNTER_TX, thread_index, tx_swif0,
                         _vec_len (ptd->split_buffers), n_tx_bytes);
                    }
                  /* The buffers were enqueued. Reset the length */
                  _vec_len (ptd->split_buffers) = 0;
                  /* Free the now segmented buffer */
                  vlib_buffer_free_one (vm, bi0);
                  continue;
                }
            }

          if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
            {

              vlib_increment_combined_counter (im->combined_sw_if_counters +
                                               VNET_INTERFACE_COUNTER_TX,
                                               thread_index, tx_swif0, 1,
                                               n_bytes_b0);
            }

          if (do_tx_offloads)
            calc_checksums (vm, b0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_tx);
    }

  /* Update main interface stats. */
  vlib_increment_combined_counter (im->combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_TX,
                                   thread_index,
                                   rt->sw_if_index, n_packets, n_bytes);
  return n_buffers;
}
#endif /* CLIB_MARCH_VARIANT */

static_always_inline void
vnet_interface_pcap_tx_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                              vlib_frame_t * frame,
                              int sw_if_index_from_buffer)
{
  u32 n_left_from, *from;
  u32 sw_if_index;

  if (PREDICT_TRUE (vm->pcap[VLIB_TX].pcap_enable == 0))
    return;

  if (sw_if_index_from_buffer == 0)
    {
      vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
      sw_if_index = rt->sw_if_index;
    }
  else
    sw_if_index = ~0;

  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);

  while (n_left_from > 0)
    {
      u32 bi0 = from[0];
      vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);

      if (sw_if_index_from_buffer)
        sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];

      if (vm->pcap[VLIB_TX].pcap_sw_if_index == 0 ||
          vm->pcap[VLIB_TX].pcap_sw_if_index == sw_if_index)
        pcap_add_buffer (&vm->pcap[VLIB_TX].pcap_main, vm, bi0, 512);
      from++;
      n_left_from--;
    }
}

#ifndef CLIB_MARCH_VARIANT
static_always_inline uword
vnet_interface_output_node_inline (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame, vnet_main_t * vnm,
                                   vnet_hw_interface_t * hi,
                                   int do_tx_offloads)
{
  vnet_interface_pcap_tx_trace (vm, node, frame,
                                0 /* sw_if_index_from_buffer */ );

  /*
   * The 3-headed "if" is here because we want to err on the side
   * of not impacting the non-GSO performance - so for the more
   * common case of no GSO interfaces we want to prevent the
   * segmentation codepath from being there altogether.
   */
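  /*
   * Concretely: either (1) no GSO interfaces are configured, or (2) the
   * egress interface itself handles GSO packets - both take the
   * do_segmentation == 0 variant; only (3) a GSO buffer headed to an
   * interface without that support takes the software segmentation path.
   */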
  if (PREDICT_TRUE (vnm->interface_main.gso_interface_count == 0))
    return vnet_interface_output_node_inline_gso (vm, node, frame, vnm, hi,
                                                  do_tx_offloads,
                                                  /* do_segmentation */ 0);
  else if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
    return vnet_interface_output_node_inline_gso (vm, node, frame, vnm, hi,
                                                  do_tx_offloads,
                                                  /* do_segmentation */ 0);
  else
    return vnet_interface_output_node_inline_gso (vm, node, frame, vnm, hi,
                                                  do_tx_offloads,
                                                  /* do_segmentation */ 1);
}

uword
vnet_interface_output_node (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;
  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
  hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);

  vnet_interface_pcap_tx_trace (vm, node, frame,
                                0 /* sw_if_index_from_buffer */ );

  if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)
    return vnet_interface_output_node_inline (vm, node, frame, vnm, hi,
                                              /* do_tx_offloads */ 0);
  else
    return vnet_interface_output_node_inline (vm, node, frame, vnm, hi,
                                              /* do_tx_offloads */ 1);
}
#endif /* CLIB_MARCH_VARIANT */

/* Use buffer's sw_if_index[VNET_TX] to choose output interface. */
VLIB_NODE_FN (vnet_per_buffer_interface_output_node) (vlib_main_t * vm,
                                                      vlib_node_runtime_t *
                                                      node,
                                                      vlib_frame_t * frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 n_left_to_next, *from, *to_next;
  u32 n_left_from, next_index;

  vnet_interface_pcap_tx_trace (vm, node, frame,
                                1 /* sw_if_index_from_buffer */ );

  n_left_from = frame->n_vectors;

  from = vlib_frame_vector_args (frame);
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1, next0, next1;
          vlib_buffer_t *b0, *b1;
          vnet_hw_interface_t *hi0, *hi1;

          /* Prefetch next iteration. */
          vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
          vlib_prefetch_buffer_with_index (vm, from[3], LOAD);

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          hi0 =
            vnet_get_sup_hw_interface (vnm,
                                       vnet_buffer (b0)->sw_if_index
                                       [VLIB_TX]);
          hi1 =
            vnet_get_sup_hw_interface (vnm,
                                       vnet_buffer (b1)->sw_if_index
                                       [VLIB_TX]);

          next0 = hi0->output_node_next_index;
          next1 = hi1->output_node_next_index;

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0,
                                           next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0;
          vlib_buffer_t *b0;
          vnet_hw_interface_t *hi0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_to_next -= 1;
          n_left_from -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          hi0 =
            vnet_get_sup_hw_interface (vnm,
                                       vnet_buffer (b0)->sw_if_index
                                       [VLIB_TX]);

          next0 = hi0->output_node_next_index;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

typedef struct vnet_error_trace_t_
{
  u32 sw_if_index;
} vnet_error_trace_t;


static u8 *
format_vnet_error_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  vnet_error_trace_t *t = va_arg (*va, vnet_error_trace_t *);

  s = format (s, "rx:%U", format_vnet_sw_if_index_name,
              vnet_get_main (), t->sw_if_index);

  return s;
}

static void
interface_trace_buffers (vlib_main_t * vm,
                         vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left, *buffers;

  buffers = vlib_frame_vector_args (frame);
  n_left = frame->n_vectors;

  while (n_left >= 4)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      vnet_error_trace_t *t0, *t1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);

      bi0 = buffers[0];
      bi1 = buffers[1];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
          t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
        }
      if (b1->flags & VLIB_BUFFER_IS_TRACED)
        {
          t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
          t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
        }
      buffers += 2;
      n_left -= 2;
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      vnet_error_trace_t *t0;

      bi0 = buffers[0];

      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
          t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
        }
      buffers += 1;
      n_left -= 1;
    }
}

typedef enum
{
  VNET_ERROR_DISPOSITION_DROP,
  VNET_ERROR_DISPOSITION_PUNT,
  VNET_ERROR_N_DISPOSITION,
} vnet_error_disposition_t;

static_always_inline uword
interface_drop_punt (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * frame,
                     vnet_error_disposition_t disposition)
{
  u32 *from, n_left, thread_index, *sw_if_index;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u32 sw_if_indices[VLIB_FRAME_SIZE];
  vlib_simple_counter_main_t *cm;
  u16 nexts[VLIB_FRAME_SIZE];
  vnet_main_t *vnm;

  vnm = vnet_get_main ();
  thread_index = vm->thread_index;
  from = vlib_frame_vector_args (frame);
  n_left = frame->n_vectors;
  b = bufs;
  sw_if_index = sw_if_indices;

  vlib_get_buffers (vm, from, bufs, n_left);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    interface_trace_buffers (vm, node, frame);

  /* All going to drop regardless, this is just a counting exercise */
  clib_memset (nexts, 0, sizeof (nexts));

  cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
                         (disposition == VNET_ERROR_DISPOSITION_PUNT
                          ? VNET_INTERFACE_COUNTER_PUNT
                          : VNET_INTERFACE_COUNTER_DROP));

  /* collect the array of interfaces first ... */
  while (n_left >= 4)
    {
      if (n_left >= 12)
        {
          /* Prefetch 8 ahead - there's not much going on in each iteration */
          vlib_prefetch_buffer_header (b[4], LOAD);
          vlib_prefetch_buffer_header (b[5], LOAD);
          vlib_prefetch_buffer_header (b[6], LOAD);
          vlib_prefetch_buffer_header (b[7], LOAD);
        }
      sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      sw_if_index += 4;
      n_left -= 4;
      b += 4;
    }
  while (n_left)
    {
      sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];

      sw_if_index += 1;
      n_left -= 1;
      b += 1;
    }

  /* ... then count against them in blocks */
  n_left = frame->n_vectors;

  while (n_left)
    {
      vnet_sw_interface_t *sw_if0;
      u16 off, count;

      off = frame->n_vectors - n_left;

      sw_if_index = sw_if_indices + off;

      count = clib_count_equal_u32 (sw_if_index, n_left);
      n_left -= count;

      vlib_increment_simple_counter (cm, thread_index, sw_if_index[0], count);

      /* Increment super-interface drop/punt counters for
         sub-interfaces. */
      sw_if0 = vnet_get_sw_interface (vnm, sw_if_index[0]);
      if (sw_if0->sup_sw_if_index != sw_if_index[0])
        vlib_increment_simple_counter
          (cm, thread_index, sw_if0->sup_sw_if_index, count);
    }
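  /*
   * Example: if the frame carries sw_if_index values 5,5,5,7,7 then
   * clib_count_equal_u32 () returns 3 and then 2, so the per-interface
   * counters are bumped twice rather than once per packet.
   */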

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}

static inline void
pcap_drop_trace (vlib_main_t * vm,
                 vnet_interface_main_t * im, vlib_frame_t * f)
{
  u32 *from;
  u32 n_left = f->n_vectors;
  vlib_buffer_t *b0, *p1;
  u32 bi0;
  i16 save_current_data;
  u16 save_current_length;

  from = vlib_frame_vector_args (f);

  while (n_left > 0)
    {
      if (PREDICT_TRUE (n_left > 1))
        {
          p1 = vlib_get_buffer (vm, from[1]);
          vlib_prefetch_buffer_header (p1, LOAD);
        }

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);
      from++;
      n_left--;

      /* See if we're pointedly ignoring this specific error */
      if (im->pcap_drop_filter_hash
          && hash_get (im->pcap_drop_filter_hash, b0->error))
        continue;

      /* Trace all drops, or drops received on a specific interface */
      if (im->pcap_sw_if_index == 0 ||
          im->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
        {
          save_current_data = b0->current_data;
          save_current_length = b0->current_length;

          /*
           * Typically, we'll need to rewind the buffer
           */
          if (b0->current_data > 0)
            vlib_buffer_advance (b0, (word) - b0->current_data);

          pcap_add_buffer (&im->pcap_main, vm, bi0, 512);

          b0->current_data = save_current_data;
          b0->current_length = save_current_length;
        }
    }
}

#ifndef CLIB_MARCH_VARIANT
void
vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add)
{
  vnet_interface_main_t *im = &vnet_get_main ()->interface_main;

  if (im->pcap_drop_filter_hash == 0)
    im->pcap_drop_filter_hash = hash_create (0, sizeof (uword));

  if (is_add)
    hash_set (im->pcap_drop_filter_hash, error_index, 1);
  else
    hash_unset (im->pcap_drop_filter_hash, error_index);
}
#endif /* CLIB_MARCH_VARIANT */

VLIB_NODE_FN (interface_drop) (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * frame)
{
  vnet_interface_main_t *im = &vnet_get_main ()->interface_main;

  if (PREDICT_FALSE (im->drop_pcap_enable))
    pcap_drop_trace (vm, im, frame);

  return interface_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_DROP);
}

VLIB_NODE_FN (interface_punt) (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * frame)
{
  return interface_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_PUNT);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (interface_drop) = {
  .name = "error-drop",
  .vector_size = sizeof (u32),
  .format_trace = format_vnet_error_trace,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (interface_punt) = {
  .name = "error-punt",
  .vector_size = sizeof (u32),
  .format_trace = format_vnet_error_trace,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "punt",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vnet_per_buffer_interface_output_node) = {
  .name = "interface-output",
  .vector_size = sizeof (u32),
};
/* *INDENT-ON* */

static uword
interface_tx_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vlib_frame_t * from_frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 last_sw_if_index = ~0;
  vlib_frame_t *to_frame = 0;
  vnet_hw_interface_t *hw = 0;
  u32 *from, *to_next = 0;
  u32 n_left_from;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  while (n_left_from > 0)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      u32 sw_if_index0;

      bi0 = from[0];
      from++;
      n_left_from--;
      b0 = vlib_get_buffer (vm, bi0);
      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];

      if (PREDICT_FALSE ((last_sw_if_index != sw_if_index0) || to_frame == 0))
        {
          if (to_frame)
            {
              hw = vnet_get_sup_hw_interface (vnm, last_sw_if_index);
              vlib_put_frame_to_node (vm, hw->tx_node_index, to_frame);
            }
          last_sw_if_index = sw_if_index0;
          hw = vnet_get_sup_hw_interface (vnm, sw_if_index0);
          to_frame = vlib_get_frame_to_node (vm, hw->tx_node_index);
          to_next = vlib_frame_vector_args (to_frame);
        }

      to_next[0] = bi0;
      to_next++;
      to_frame->n_vectors++;
    }
  vlib_put_frame_to_node (vm, hw->tx_node_index, to_frame);
  return from_frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (interface_tx, static) = {
  .function = interface_tx_node_fn,
  .name = "interface-tx",
  .vector_size = sizeof (u32),
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};

VNET_FEATURE_ARC_INIT (interface_output, static) =
{
  .arc_name = "interface-output",
  .start_nodes = VNET_FEATURES (0),
  .last_in_arc = "interface-tx",
  .arc_index_ptr = &vnet_main.interface_main.output_feature_arc_index,
};

VNET_FEATURE_INIT (span_tx, static) = {
  .arc_name = "interface-output",
  .node_name = "span-output",
  .runs_before = VNET_FEATURES ("interface-tx"),
};

VNET_FEATURE_INIT (ipsec_if_tx, static) = {
  .arc_name = "interface-output",
  .node_name = "ipsec-if-output",
  .runs_before = VNET_FEATURES ("interface-tx"),
};

VNET_FEATURE_INIT (interface_tx, static) = {
  .arc_name = "interface-output",
  .node_name = "interface-tx",
  .runs_before = 0,
};
/* *INDENT-ON* */
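
/*
 * Net effect of the registrations above: on the "interface-output" feature
 * arc, features such as "span-output" and "ipsec-if-output" run before
 * "interface-tx", and "interface-tx" is always the last node in the arc.
 */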

#ifndef CLIB_MARCH_VARIANT
clib_error_t *
vnet_per_buffer_interface_output_hw_interface_add_del (vnet_main_t * vnm,
                                                        u32 hw_if_index,
                                                        u32 is_create)
{
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  u32 next_index;

  if (hi->output_node_index == 0)
    return 0;

  next_index = vlib_node_add_next
    (vnm->vlib_main, vnet_per_buffer_interface_output_node.index,
     hi->output_node_index);
  hi->output_node_next_index = next_index;

  return 0;
}

VNET_HW_INTERFACE_ADD_DEL_FUNCTION
  (vnet_per_buffer_interface_output_hw_interface_add_del);

void
vnet_set_interface_output_node (vnet_main_t * vnm,
                                u32 hw_if_index, u32 node_index)
{
  ASSERT (node_index);
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  u32 next_index = vlib_node_add_next
    (vnm->vlib_main, vnet_per_buffer_interface_output_node.index, node_index);
  hi->output_node_next_index = next_index;
  hi->output_node_index = node_index;
}
#endif /* CLIB_MARCH_VARIANT */

static clib_error_t *
pcap_drop_trace_command_fn (vlib_main_t * vm,
                            unformat_input_t * input,
                            vlib_cli_command_t * cmd)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;
  u8 *filename;
  u32 max;
  int matched = 0;
  clib_error_t *error = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "on"))
        {
          if (im->drop_pcap_enable == 0)
            {
              if (im->pcap_filename == 0)
                im->pcap_filename = format (0, "/tmp/drop.pcap%c", 0);

              clib_memset (&im->pcap_main, 0, sizeof (im->pcap_main));
              im->pcap_main.file_name = (char *) im->pcap_filename;
              im->pcap_main.n_packets_to_capture = 100;
              if (im->pcap_pkts_to_capture)
                im->pcap_main.n_packets_to_capture = im->pcap_pkts_to_capture;

              im->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
              im->drop_pcap_enable = 1;
              matched = 1;
              vlib_cli_output (vm, "pcap drop capture on...");
            }
          else
            {
              vlib_cli_output (vm, "pcap drop capture already on...");
            }
          matched = 1;
        }
      else if (unformat (input, "off"))
        {
          matched = 1;

          if (im->drop_pcap_enable)
            {
              vlib_cli_output (vm, "captured %d pkts...",
                               im->pcap_main.n_packets_captured);
              if (im->pcap_main.n_packets_captured)
                {
                  im->pcap_main.n_packets_to_capture =
                    im->pcap_main.n_packets_captured;
                  error = pcap_write (&im->pcap_main);
                  if (error)
                    clib_error_report (error);
                  else
                    vlib_cli_output (vm, "saved to %s...", im->pcap_filename);
                }
            }
          else
            {
              vlib_cli_output (vm, "pcap drop capture already off...");
            }

          im->drop_pcap_enable = 0;
        }
      else if (unformat (input, "max %d", &max))
        {
          im->pcap_pkts_to_capture = max;
          matched = 1;
        }

      else if (unformat (input, "intfc %U",
                         unformat_vnet_sw_interface, vnm,
                         &im->pcap_sw_if_index))
        matched = 1;
      else if (unformat (input, "intfc any"))
        {
          im->pcap_sw_if_index = 0;
          matched = 1;
        }
      else if (unformat (input, "file %s", &filename))
        {
          u8 *chroot_filename;
          /* Brain-police user path input */
          if (strstr ((char *) filename, "..")
              || index ((char *) filename, '/'))
            {
              vlib_cli_output (vm, "illegal characters in filename '%s'",
                               filename);
              continue;
            }

          chroot_filename = format (0, "/tmp/%s%c", filename, 0);
          vec_free (filename);

          if (im->pcap_filename)
            vec_free (im->pcap_filename);
          im->pcap_filename = chroot_filename;
          im->pcap_main.file_name = (char *) im->pcap_filename;
          matched = 1;
        }
      else if (unformat (input, "status"))
        {
          if (im->drop_pcap_enable == 0)
            {
              vlib_cli_output (vm, "pcap drop capture is off...");
              continue;
            }

          vlib_cli_output (vm, "pcap drop capture: %d of %d pkts...",
                           im->pcap_main.n_packets_captured,
                           im->pcap_main.n_packets_to_capture);
          matched = 1;
        }

      else
        break;
    }

  if (matched == 0)
    return clib_error_return (0, "unknown input `%U'",
                              format_unformat_error, input);

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (pcap_trace_command, static) = {
  .path = "pcap drop trace",
  .short_help =
  "pcap drop trace on off max <nn> intfc <intfc> file <name> status",
  .function = pcap_drop_trace_command_fn,
};
/* *INDENT-ON* */
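
/*
 * Illustrative usage (not part of the original file), following the parsing
 * logic above: "pcap drop trace on max 1000 intfc GigabitEthernet0/8/0
 * file drops.pcap" starts capturing up to 1000 dropped packets received on
 * that interface into /tmp/drops.pcap, and "pcap drop trace off" flushes
 * the capture to disk. The interface name here is only an example.
 */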

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */