/* FD.io VPP v19.08-24-ge6a5712 — Vector Packet Processing
 * interface_output.c (Doxygen source-listing extraction; original line
 * numbers are embedded in each line and some lines were dropped). */
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * interface_output.c: interface output node
17  *
18  * Copyright (c) 2008 Eliot Dresselhaus
19  *
20  * Permission is hereby granted, free of charge, to any person obtaining
21  * a copy of this software and associated documentation files (the
22  * "Software"), to deal in the Software without restriction, including
23  * without limitation the rights to use, copy, modify, merge, publish,
24  * distribute, sublicense, and/or sell copies of the Software, and to
25  * permit persons to whom the Software is furnished to do so, subject to
26  * the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be
29  * included in all copies or substantial portions of the Software.
30  *
31  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38  */
39 
40 #include <vnet/vnet.h>
41 #include <vnet/ip/icmp46_packet.h>
42 #include <vnet/ip/ip4.h>
43 #include <vnet/ip/ip6.h>
44 #include <vnet/udp/udp_packet.h>
45 #include <vnet/feature/feature.h>
46 
47 typedef struct
48 {
  /* NOTE(review): the leading members (orig. lines 49-52 — judging by the
     trace-fill code below, at least sw_if_index, flags, gso_size and
     gso_l4_hdr_sz) and the typedef-name line (orig. 55, presumably
     interface_output_trace_t) were dropped by the extraction. */
53  u8 data[128 - 3 * sizeof (u32)];
54 }
56 
57 #ifndef CLIB_MARCH_VARIANT
u8 *
/* Packet-trace formatter for the interface-output node: prints the TX
 * interface (name, or raw sw_if_index if the interface was deleted), any
 * buffer-flag names, GSO metadata when VNET_BUFFER_F_GSO is set, and the
 * captured packet bytes.
 * NOTE(review): the function-name line (orig. 59) and several interior
 * lines (65, 72-73, 88, 98) were dropped by the extraction; code below is
 * reproduced exactly as found. */
60 {
61  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
62  vlib_node_t *node = va_arg (*va, vlib_node_t *);
63  interface_output_trace_t *t = va_arg (*va, interface_output_trace_t *);
64  vnet_main_t *vnm = vnet_get_main ();
66  u32 indent;
67 
68  if (t->sw_if_index != (u32) ~ 0)
69  {
70  indent = format_get_indent (s);
71 
      /* NOTE(review): the condition guarding this branch (orig. 72-73,
         presumably an interface-pool validity check) is missing. */
74  {
75  /* the interface may have been deleted by the time the trace is printed */
76  s = format (s, "sw_if_index: %d ", t->sw_if_index);
77  }
78  else
79  {
80  si = vnet_get_sw_interface (vnm, t->sw_if_index);
81  s =
82  format (s, "%U ", format_vnet_sw_interface_name, vnm, si,
83  t->flags);
84  }
85 #define _(bit, name, v, x) \
86  if (v && (t->flags & VNET_BUFFER_F_##name)) \
87  s = format (s, "%s ", v);
  /* NOTE(review): the foreach-style macro invocation that expands _()
     (orig. 88) is missing from the extraction. */
89 #undef _
90  if (t->flags & VNET_BUFFER_F_GSO)
91  {
92  s = format (s, "\n%Ugso_sz %d gso_l4_hdr_sz %d",
93  format_white_space, indent + 2, t->gso_size,
94  t->gso_l4_hdr_sz);
95  }
96  s =
97  format (s, "\n%U%U", format_white_space, indent,
99  t->data, sizeof (t->data))
100  }
101  return s;
102 }
103 
104 static void
/* Record an interface_output_trace_t for every traced buffer in FRAME:
 * TX sw_if_index, buffer flags and GSO metadata.  Processes two buffers
 * per iteration (prefetching the next two), then one at a time.
 * NOTE(review): the function-name line (orig. 105) and the clib_memcpy
 * lines that copy packet bytes into t->data (orig. 137, 147, 171) were
 * dropped by the extraction. */
106  vlib_node_runtime_t * node,
107  vlib_frame_t * frame, uword n_buffers)
108 {
109  u32 n_left, *from;
110 
111  n_left = n_buffers;
112  from = vlib_frame_vector_args (frame);
113 
   /* >= 4 (not >= 2) so the two-ahead prefetches below stay in range. */
114  while (n_left >= 4)
115  {
116  u32 bi0, bi1;
117  vlib_buffer_t *b0, *b1;
118  interface_output_trace_t *t0, *t1;
119 
120  /* Prefetch next iteration. */
121  vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
122  vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
123 
124  bi0 = from[0];
125  bi1 = from[1];
126 
127  b0 = vlib_get_buffer (vm, bi0);
128  b1 = vlib_get_buffer (vm, bi1);
129 
130  if (b0->flags & VLIB_BUFFER_IS_TRACED)
131  {
132  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
133  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
134  t0->flags = b0->flags;
135  t0->gso_size = vnet_buffer2 (b0)->gso_size;
136  t0->gso_l4_hdr_sz = vnet_buffer2 (b0)->gso_l4_hdr_sz;
138  sizeof (t0->data));
139  }
140  if (b1->flags & VLIB_BUFFER_IS_TRACED)
141  {
142  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
143  t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
144  t1->flags = b1->flags;
145  t1->gso_size = vnet_buffer2 (b1)->gso_size;
146  t1->gso_l4_hdr_sz = vnet_buffer2 (b1)->gso_l4_hdr_sz;
148  sizeof (t1->data));
149  }
150  from += 2;
151  n_left -= 2;
152  }
153 
   /* Single-buffer cleanup loop for the remaining 0-3 buffers. */
154  while (n_left >= 1)
155  {
156  u32 bi0;
157  vlib_buffer_t *b0;
158  interface_output_trace_t *t0;
159 
160  bi0 = from[0];
161 
162  b0 = vlib_get_buffer (vm, bi0);
163 
164  if (b0->flags & VLIB_BUFFER_IS_TRACED)
165  {
166  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
167  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
168  t0->flags = b0->flags;
169  t0->gso_size = vnet_buffer2 (b0)->gso_size;
170  t0->gso_l4_hdr_sz = vnet_buffer2 (b0)->gso_l4_hdr_sz;
172  sizeof (t0->data));
173  }
174  from += 1;
175  n_left -= 1;
176  }
177 }
178 
/* Software checksum fixup for a buffer that requested checksum offload:
 * computes the IPv4 header checksum and/or the TCP/UDP checksum in place
 * (IPv4 and IPv6 paths), then clears all three offload flags so the
 * driver does not redo the work.
 * NOTE(review): the signature lines (orig. 179-180) were dropped by the
 * extraction; call sites use calc_checksums (vm, b). */
181 {
182  tcp_header_t *th;
183  udp_header_t *uh;
184 
185  int is_ip4 = (b->flags & VNET_BUFFER_F_IS_IP4) != 0;
186  int is_ip6 = (b->flags & VNET_BUFFER_F_IS_IP6) != 0;
187 
188  ASSERT (!(is_ip4 && is_ip6));
189 
   /* Both header views alias the same L4 offset; only one is used. */
190  th = (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
191  uh = (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
192 
193  if (is_ip4)
194  {
195  ip4_header_t *ip4;
196 
197  ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
198  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
199  ip4->checksum = ip4_header_checksum (ip4);
200  if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
201  {
202  th->checksum = 0;
203  th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
204  }
205  else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
206  uh->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
207  }
208  else if (is_ip6)
209  {
210  int bogus;
211  ip6_header_t *ip6;
212 
213  ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
214  if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
215  {
216  th->checksum = 0;
217  th->checksum =
218  ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
219  }
220  else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
221  {
222  uh->checksum = 0;
223  uh->checksum =
224  ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
225  }
226  }
   /* Offloads are now done in software; clear the request flags. */
227  b->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
228  b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
229  b->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
230 }
231 
/* Allocate enough buffers into ptd->split_buffers to hold a segmented
 * copy of b0: (n_bytes_b0 - l234_sz) payload bytes at up to gso_size
 * payload per segment (capped by buffer space after the l234_sz headers).
 * Returns the number allocated, or 0 after freeing a partial allocation.
 * NOTE(review): the signature lines (orig. 232-234; tso_alloc_tx_bufs per
 * its call site) were dropped by the extraction. */
235  vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
236  u16 gso_size)
237 {
238  u16 size =
239  clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz);
240 
241  /* rounded-up division */
242  u16 n_bufs = (n_bytes_b0 - l234_sz + (size - 1)) / size;
243  u16 n_alloc;
244 
245  ASSERT (n_bufs > 0);
246  vec_validate (ptd->split_buffers, n_bufs - 1);
247 
248  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
249  if (n_alloc < n_bufs)
250  {
   /* A partial allocation cannot hold the segments: free and fail. */
251  vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
252  return 0;
253  }
254  return n_alloc;
255 }
256 
259  u32 flags, u16 length)
260 {
261  nb0->current_data = b0->current_data;
263  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
264  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));
266  vlib_buffer_get_current (b0), length);
267  nb0->current_length = length;
268 }
269 
/* Prepare the next segment buffer: copy the first template_data_sz bytes
 * (the L2/L3/L4 headers) from template b0, report how much payload fits
 * (*p_dst_left) and where it starts (*p_dst_ptr), and stamp the segment's
 * TCP sequence number.
 * NOTE(review): the signature lines (orig. 270-271;
 * tso_init_buf_from_template per its call sites) were dropped by the
 * extraction. */
272  vlib_buffer_t * b0, u16 template_data_sz,
273  u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
274  u32 next_tcp_seq, u32 flags)
275 {
276  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);
277 
   /* Payload room = min(gso_size, buffer space left after the headers). */
278  *p_dst_left =
279  clib_min (gso_size,
280  vlib_buffer_get_default_data_size (vm) - (template_data_sz +
281  nb0->current_data));
282  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;
283 
284  tcp_header_t *tcp =
285  (tcp_header_t *) (nb0->data + vnet_buffer (nb0)->l4_hdr_offset);
286  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
287 }
288 
/* Final fixup of one finished TSO segment: set the TCP flags (the caller
 * passes flags with FIN/PSH masked off for all but the last segment) and
 * patch the IPv4 total-length or IPv6 payload-length field to match the
 * segment's actual size.
 * NOTE(review): the storage-class/return-type line (orig. 289) was
 * dropped by the extraction. */
290 tso_fixup_segmented_buf (vlib_buffer_t * b0, u8 tcp_flags, int is_ip6)
291 {
292  u16 l3_hdr_offset = vnet_buffer (b0)->l3_hdr_offset;
293  u16 l4_hdr_offset = vnet_buffer (b0)->l4_hdr_offset;
   /* Both L3 views alias the same offset; only one is dereferenced. */
294  ip4_header_t *ip4 = (ip4_header_t *) (b0->data + l3_hdr_offset);
295  ip6_header_t *ip6 = (ip6_header_t *) (b0->data + l3_hdr_offset);
296  tcp_header_t *tcp = (tcp_header_t *) (b0->data + l4_hdr_offset);
297 
298  tcp->flags = tcp_flags;
299 
   /* IPv6 payload length excludes the IPv6 header (count from L4);
      IPv4 total length includes the IPv4 header (count from L3). */
300  if (is_ip6)
301  ip6->payload_length =
302  clib_host_to_net_u16 (b0->current_length -
303  (l4_hdr_offset - b0->current_data));
304  else
305  ip4->length =
306  clib_host_to_net_u16 (b0->current_length -
307  (l3_hdr_offset - b0->current_data));
308 }
309 
310 /**
311  * Allocate the necessary number of ptd->split_buffers,
312  * and segment the possibly chained buffer(s) from b0 into
313  * there.
314  *
315  * Return the cumulative number of bytes sent or zero
316  * if allocation failed.
317  */
318 
/* Segment the (possibly chained) GSO buffer sb0 into gso_size-payload TCP
 * segments built in ptd->split_buffers.  The first segment reuses sb0's
 * headers plus first_data_size bytes of payload; subsequent segments are
 * filled by a copy loop that walks the source chain.  FIN/PSH TCP flags
 * are suppressed on all but the last segment; sequence numbers advance by
 * the bytes placed in each segment.  Returns the cumulative bytes queued,
 * or 0 if buffer allocation failed (see the doc comment above).
 * NOTE(review): the signature lines (orig. 319-320; tso_segment_buffer
 * per its call site) were dropped by the extraction. */
321  int do_tx_offloads, u32 sbi0, vlib_buffer_t * sb0,
322  u32 n_bytes_b0)
323 {
324  u32 n_tx_bytes = 0;
325  int is_ip4 = sb0->flags & VNET_BUFFER_F_IS_IP4;
326  int is_ip6 = sb0->flags & VNET_BUFFER_F_IS_IP6;
327  ASSERT (is_ip4 || is_ip6);
328  ASSERT (sb0->flags & VNET_BUFFER_F_L2_HDR_OFFSET_VALID);
329  ASSERT (sb0->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID);
330  ASSERT (sb0->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
331  u16 gso_size = vnet_buffer2 (sb0)->gso_size;
332 
333  int l4_hdr_sz = vnet_buffer2 (sb0)->gso_l4_hdr_sz;
334  u8 save_tcp_flags = 0;
335  u8 tcp_flags_no_fin_psh = 0;
336  u32 next_tcp_seq = 0;
337 
338  tcp_header_t *tcp =
339  (tcp_header_t *) (sb0->data + vnet_buffer (sb0)->l4_hdr_offset);
340  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
341  /* store original flags for last packet and reset FIN and PSH */
342  save_tcp_flags = tcp->flags;
343  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
344  tcp->checksum = 0;
345 
   /* Segments must not inherit the GSO mark or the source's chaining. */
346  u32 default_bflags =
347  sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
348  u16 l234_sz = vnet_buffer (sb0)->l4_hdr_offset + l4_hdr_sz
349  - sb0->current_data;
350  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
351  next_tcp_seq += first_data_size;
352 
353  if (PREDICT_FALSE
354  (!tso_alloc_tx_bufs (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size)))
355  return 0;
356 
357  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
358  tso_init_buf_from_template_base (b0, sb0, default_bflags,
359  l234_sz + first_data_size);
360 
361  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
362  if (total_src_left)
363  {
364  /* Need to copy more segments */
365  u8 *src_ptr, *dst_ptr;
366  u16 src_left, dst_left;
367  /* current source buffer */
368  vlib_buffer_t *csb0 = sb0;
369  u32 csbi0 = sbi0;
370  /* current dest buffer */
371  vlib_buffer_t *cdb0;
372  u16 dbi = 1; /* the buffer [0] is b0 */
373 
374  src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
375  src_left = sb0->current_length - l234_sz - first_data_size;
376 
377  tso_fixup_segmented_buf (b0, tcp_flags_no_fin_psh, is_ip6);
378  if (do_tx_offloads)
379  calc_checksums (vm, b0);
380 
381  /* grab a second buffer and prepare the loop */
382  ASSERT (dbi < vec_len (ptd->split_buffers));
383  cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
384  tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
385  &dst_left, next_tcp_seq, default_bflags);
386 
387  /* an arbitrary large number to catch the runaway loops */
388  int nloops = 2000;
389  while (total_src_left)
390  {
391  if (nloops-- <= 0)
392  clib_panic ("infinite loop detected");
393  u16 bytes_to_copy = clib_min (src_left, dst_left);
394 
395  clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);
396 
397  src_left -= bytes_to_copy;
398  src_ptr += bytes_to_copy;
399  total_src_left -= bytes_to_copy;
400  dst_left -= bytes_to_copy;
401  dst_ptr += bytes_to_copy;
402  next_tcp_seq += bytes_to_copy;
403  cdb0->current_length += bytes_to_copy;
404 
   /* Source buffer exhausted: advance along the source chain. */
405  if (0 == src_left)
406  {
407  int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
408  u32 next_bi = csb0->next_buffer;
409 
410  /* init src to the next buffer in chain */
411  if (has_next)
412  {
413  csbi0 = next_bi;
414  csb0 = vlib_get_buffer (vm, csbi0);
415  src_left = csb0->current_length;
416  src_ptr = vlib_buffer_get_current (csb0);
417  }
418  else
419  {
420  ASSERT (total_src_left == 0);
421  break;
422  }
423  }
   /* Segment full and more payload remains: finish it, start the next. */
424  if (0 == dst_left && total_src_left)
425  {
426  if (do_tx_offloads)
427  calc_checksums (vm, cdb0);
428  n_tx_bytes += cdb0->current_length;
429  ASSERT (dbi < vec_len (ptd->split_buffers));
430  cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
431  tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
432  gso_size, &dst_ptr, &dst_left,
433  next_tcp_seq, default_bflags);
434  }
435  }
436 
   /* Last segment keeps the original TCP flags (FIN/PSH restored). */
437  tso_fixup_segmented_buf (cdb0, save_tcp_flags, is_ip6);
438  if (do_tx_offloads)
439  calc_checksums (vm, cdb0);
440 
441  n_tx_bytes += cdb0->current_length;
442  }
443  n_tx_bytes += b0->current_length;
444  return n_tx_bytes;
445 }
446 
/* Count one TX drop against this runtime's sw_if_index and send the single
 * buffer *pbi0 to the error-drop path with drop_error_code.
 * NOTE(review): the signature line (orig. 448; drop_one_buffer_and_count
 * per its call site), the counter-selection lines completing "cm ="
 * (orig. 455, 457-458) and the next-node argument line of
 * vlib_error_drop_buffers (orig. 464) were dropped by the extraction. */
449  vlib_node_runtime_t * node, u32 * pbi0,
450  u32 drop_error_code)
451 {
452  u32 thread_index = vm->thread_index;
453  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
454 
456  cm =
459  vlib_increment_simple_counter (cm, thread_index, rt->sw_if_index, 1);
460 
461  vlib_error_drop_buffers (vm, node, pbi0,
462  /* buffer stride */ 1,
463  /* n_buffers */ 1,
465  node->node_index, drop_error_code);
466 }
467 
/* Core interface-output worker: validates the interface state, walks the
 * frame (4-wide fast path, then singles), copies buffer indices into the
 * TX next frame, accumulates per-interface packet/byte counters, applies
 * the interface-output feature arc when configured, optionally computes
 * checksum offloads in software (do_tx_offloads), and — when
 * do_segmentation is set — diverts GSO-marked buffers through
 * tso_segment_buffer, enqueueing the resulting segments instead.
 * Returns the number of buffers processed.
 * NOTE(review): this Doxygen extraction dropped the signature lines
 * (orig. 468-469, 473) and many interior lines (478, 483, 487, 503, 505,
 * 510-511, 513, 515-516, 523, 525, 535, 537, 620, 629, 638, 646, 718,
 * 758, 776, 792) — mostly declarations (si, hi, cm, ptd), if-conditions
 * and counter-index arguments.  Code is reproduced exactly as found. */
470  vlib_node_runtime_t * node,
471  vlib_frame_t * frame,
472  vnet_main_t * vnm,
474  int do_tx_offloads,
475  int do_segmentation)
476 {
477  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
479  u32 n_left_to_tx, *from, *from_end, *to_tx;
480  u32 n_bytes, n_buffers, n_packets;
481  u32 n_bytes_b0, n_bytes_b1, n_bytes_b2, n_bytes_b3;
482  u32 thread_index = vm->thread_index;
484  u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX;
485  u32 current_config_index = ~0;
486  u8 arc = im->output_feature_arc_index;
488  vec_elt_at_index (im->per_thread_data, thread_index);
489  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
490 
491  n_buffers = frame->n_vectors;
492 
493  if (node->flags & VLIB_NODE_FLAG_TRACE)
494  vnet_interface_output_trace (vm, node, frame, n_buffers);
495 
496  from = vlib_frame_vector_args (frame);
497  vlib_get_buffers (vm, from, bufs, n_buffers);
498 
   /* Interface deleted under us: drop the whole frame. */
499  if (rt->is_deleted)
500  return vlib_error_drop_buffers (vm, node, from,
501  /* buffer stride */ 1,
502  n_buffers,
504  node->node_index,
506 
507  si = vnet_get_sw_interface (vnm, rt->sw_if_index);
508  hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
   /* Interface admin-down (condition partially elided): count TX drops
      and drop the whole frame. */
509  if (!(si->flags & (VNET_SW_INTERFACE_FLAG_ADMIN_UP |
512  {
514 
517  vlib_increment_simple_counter (cm, thread_index,
518  rt->sw_if_index, n_buffers);
519 
520  return vlib_error_drop_buffers (vm, node, from,
521  /* buffer stride */ 1,
522  n_buffers,
524  node->node_index,
526  }
527 
528  from_end = from + n_buffers;
529 
530  /* Total byte count of all buffers. */
531  n_bytes = 0;
532  n_packets = 0;
533 
534  /* interface-output feature arc handling */
536  {
538  fcm = vnet_feature_get_config_main (arc);
539  current_config_index = vnet_get_feature_config_index (arc,
540  rt->sw_if_index);
541  vnet_get_config_data (&fcm->config_main, &current_config_index,
542  &next_index, 0);
543  }
544 
545  while (from < from_end)
546  {
547  /* Get new next frame since previous incomplete frame may have less
548  than VNET_FRAME_SIZE vectors in it. */
549  vlib_get_new_next_frame (vm, node, next_index, to_tx, n_left_to_tx);
550 
   /* Quad loop; "+ 8" keeps the four-ahead header prefetches in range. */
551  while (from + 8 <= from_end && n_left_to_tx >= 4)
552  {
553  u32 bi0, bi1, bi2, bi3;
554  u32 tx_swif0, tx_swif1, tx_swif2, tx_swif3;
555  u32 or_flags;
556 
557  /* Prefetch next iteration. */
558  vlib_prefetch_buffer_header (b[4], LOAD);
559  vlib_prefetch_buffer_header (b[5], LOAD);
560  vlib_prefetch_buffer_header (b[6], LOAD);
561  vlib_prefetch_buffer_header (b[7], LOAD);
562 
563  bi0 = from[0];
564  bi1 = from[1];
565  bi2 = from[2];
566  bi3 = from[3];
567  to_tx[0] = bi0;
568  to_tx[1] = bi1;
569  to_tx[2] = bi2;
570  to_tx[3] = bi3;
571 
572  or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
573 
574  if (do_segmentation)
575  {
576  /* go to single loop if we need TSO segmentation */
577  if (PREDICT_FALSE (or_flags & VNET_BUFFER_F_GSO))
578  break;
579  }
580  from += 4;
581  to_tx += 4;
582  n_left_to_tx -= 4;
583 
584  /* Be grumpy about zero length buffers for benefit of
585  driver tx function. */
586  ASSERT (b[0]->current_length > 0);
587  ASSERT (b[1]->current_length > 0);
588  ASSERT (b[2]->current_length > 0);
589  ASSERT (b[3]->current_length > 0);
590 
591  n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
592  n_bytes_b1 = vlib_buffer_length_in_chain (vm, b[1]);
593  n_bytes_b2 = vlib_buffer_length_in_chain (vm, b[2]);
594  n_bytes_b3 = vlib_buffer_length_in_chain (vm, b[3]);
595  tx_swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
596  tx_swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
597  tx_swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
598  tx_swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];
599 
600  n_bytes += n_bytes_b0 + n_bytes_b1;
601  n_bytes += n_bytes_b2 + n_bytes_b3;
602  n_packets += 4;
603 
   /* Feature arc is active: steer buffers into it. */
604  if (PREDICT_FALSE (current_config_index != ~0))
605  {
606  vnet_buffer (b[0])->feature_arc_index = arc;
607  vnet_buffer (b[1])->feature_arc_index = arc;
608  vnet_buffer (b[2])->feature_arc_index = arc;
609  vnet_buffer (b[3])->feature_arc_index = arc;
610  b[0]->current_config_index = current_config_index;
611  b[1]->current_config_index = current_config_index;
612  b[2]->current_config_index = current_config_index;
613  b[3]->current_config_index = current_config_index;
614  }
615 
616  /* update vlan subif tx counts, if required */
617  if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
618  {
619  vlib_increment_combined_counter (im->combined_sw_if_counters +
621  thread_index, tx_swif0, 1,
622  n_bytes_b0);
623  }
624 
625  if (PREDICT_FALSE (tx_swif1 != rt->sw_if_index))
626  {
627 
628  vlib_increment_combined_counter (im->combined_sw_if_counters +
630  thread_index, tx_swif1, 1,
631  n_bytes_b1);
632  }
633 
634  if (PREDICT_FALSE (tx_swif2 != rt->sw_if_index))
635  {
636 
637  vlib_increment_combined_counter (im->combined_sw_if_counters +
639  thread_index, tx_swif2, 1,
640  n_bytes_b2);
641  }
642  if (PREDICT_FALSE (tx_swif3 != rt->sw_if_index))
643  {
644 
645  vlib_increment_combined_counter (im->combined_sw_if_counters +
647  thread_index, tx_swif3, 1,
648  n_bytes_b3);
649  }
650 
651  if (do_tx_offloads)
652  {
653  if (or_flags &
654  (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
655  VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
656  VNET_BUFFER_F_OFFLOAD_IP_CKSUM))
657  {
658  calc_checksums (vm, b[0]);
659  calc_checksums (vm, b[1]);
660  calc_checksums (vm, b[2]);
661  calc_checksums (vm, b[3]);
662  }
663  }
664  b += 4;
665 
666  }
667 
   /* Single loop: also handles GSO buffers when do_segmentation is set. */
668  while (from + 1 <= from_end && n_left_to_tx >= 1)
669  {
670  u32 bi0;
671  u32 tx_swif0;
672 
673  bi0 = from[0];
674  to_tx[0] = bi0;
675  from += 1;
676  to_tx += 1;
677  n_left_to_tx -= 1;
678 
679  /* Be grumpy about zero length buffers for benefit of
680  driver tx function. */
681  ASSERT (b[0]->current_length > 0);
682 
683  n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
684  tx_swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
685  n_bytes += n_bytes_b0;
686  n_packets += 1;
687 
688  if (PREDICT_FALSE (current_config_index != ~0))
689  {
690  vnet_buffer (b[0])->feature_arc_index = arc;
691  b[0]->current_config_index = current_config_index;
692  }
693 
694  if (do_segmentation)
695  {
696  if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
697  {
698  /*
699  * Undo the enqueue of the b0 - it is not going anywhere,
700  * and will be freed either after it's segmented or
701  * when dropped, if there is no buffers to segment into.
702  */
703  to_tx -= 1;
704  n_left_to_tx += 1;
705  /* undo the counting. */
706  n_bytes -= n_bytes_b0;
707  n_packets -= 1;
708 
709  u32 n_tx_bytes = 0;
710 
711  n_tx_bytes =
712  tso_segment_buffer (vm, ptd, do_tx_offloads, bi0, b[0],
713  n_bytes_b0);
714 
   /* Segmentation failed (no buffers): drop the original. */
715  if (PREDICT_FALSE (n_tx_bytes == 0))
716  {
717  drop_one_buffer_and_count (vm, vnm, node, from - 1,
719  b += 1;
720  continue;
721  }
722 
723  u16 n_tx_bufs = vec_len (ptd->split_buffers);
724  u32 *from_tx_seg = ptd->split_buffers;
725 
   /* Enqueue the segments, opening fresh next-frames as the
      current one fills up. */
726  while (n_tx_bufs > 0)
727  {
728  if (n_tx_bufs >= n_left_to_tx)
729  {
730  while (n_left_to_tx > 0)
731  {
732  to_tx[0] = from_tx_seg[0];
733  to_tx += 1;
734  from_tx_seg += 1;
735  n_left_to_tx -= 1;
736  n_tx_bufs -= 1;
737  n_packets += 1;
738  }
739  vlib_put_next_frame (vm, node, next_index,
740  n_left_to_tx);
741  vlib_get_new_next_frame (vm, node, next_index,
742  to_tx, n_left_to_tx);
743  }
744  while (n_tx_bufs > 0)
745  {
746  to_tx[0] = from_tx_seg[0];
747  to_tx += 1;
748  from_tx_seg += 1;
749  n_left_to_tx -= 1;
750  n_tx_bufs -= 1;
751  n_packets += 1;
752  }
753  }
754  n_bytes += n_tx_bytes;
755  if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
756  {
757 
759  (im->combined_sw_if_counters +
760  VNET_INTERFACE_COUNTER_TX, thread_index, tx_swif0,
761  _vec_len (ptd->split_buffers), n_tx_bytes);
762  }
763  /* The buffers were enqueued. Reset the length */
764  _vec_len (ptd->split_buffers) = 0;
765  /* Free the now segmented buffer */
766  vlib_buffer_free_one (vm, bi0);
767  b += 1;
768  continue;
769  }
770  }
771 
772  if (PREDICT_FALSE (tx_swif0 != rt->sw_if_index))
773  {
774 
775  vlib_increment_combined_counter (im->combined_sw_if_counters +
777  thread_index, tx_swif0, 1,
778  n_bytes_b0);
779  }
780 
781  if (do_tx_offloads)
782  calc_checksums (vm, b[0]);
783 
784  b += 1;
785  }
786 
787  vlib_put_next_frame (vm, node, next_index, n_left_to_tx);
788  }
789 
790  /* Update main interface stats. */
791  vlib_increment_combined_counter (im->combined_sw_if_counters
793  thread_index,
794  rt->sw_if_index, n_packets, n_bytes);
795  return n_buffers;
796 }
797 #endif /* CLIB_MARCH_VARIANT */
798 
/* Add each buffer of FRAME to the pcap TX capture.  The interface is
 * either taken from the node runtime (sw_if_index_from_buffer == 0) or
 * per buffer from its TX sw_if_index.
 * NOTE(review): the signature lines (orig. 799-800;
 * vnet_interface_pcap_tx_trace per its call sites), the sw_if_index
 * declaration (orig. 804), the early-return guard condition (orig. 806)
 * and the capture-call lines completed by orig. 831 below (orig. 828-830)
 * were dropped by the extraction. */
801  int sw_if_index_from_buffer)
802 {
803  u32 n_left_from, *from;
805 
807  return;
808 
809  if (sw_if_index_from_buffer == 0)
810  {
811  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
812  sw_if_index = rt->sw_if_index;
813  }
814  else
815  sw_if_index = ~0;
816 
817  n_left_from = frame->n_vectors;
818  from = vlib_frame_vector_args (frame);
819 
820  while (n_left_from > 0)
821  {
822  u32 bi0 = from[0];
823  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
824 
825  if (sw_if_index_from_buffer)
826  sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
827 
831  512);
832  from++;
833  n_left_from--;
834  }
835 }
836 
837 #ifndef CLIB_MARCH_VARIANT
/* Dispatch into vnet_interface_output_node_inline_gso with
 * do_segmentation chosen at compile-site; see the 3-headed-if comment
 * below.
 * NOTE(review): the signature lines (orig. 838-839, 842;
 * vnet_interface_output_node_inline per its call sites) and — crucially —
 * the two branch conditions (orig. 851, 855) were dropped by the
 * extraction, which is why the first two, textually identical,
 * do_segmentation==0 calls appear unguarded below. */
840  vlib_node_runtime_t * node,
841  vlib_frame_t * frame, vnet_main_t * vnm,
843  int do_tx_offloads)
844 {
845  /*
846  * The 3-headed "if" is here because we want to err on the side
847  * of not impacting the non-GSO performance - so for the more
848  * common case of no GSO interfaces we want to prevent the
849  * segmentation codepath from being there altogether.
850  */
852  return vnet_interface_output_node_inline_gso (vm, node, frame, vnm, hi,
853  do_tx_offloads,
854  /* do_segmentation */ 0);
856  return vnet_interface_output_node_inline_gso (vm, node, frame, vnm, hi,
857  do_tx_offloads,
858  /* do_segmentation */ 0);
859  else
860  return vnet_interface_output_node_inline_gso (vm, node, frame, vnm, hi,
861  do_tx_offloads,
862  /* do_segmentation */ 1);
863 }
864 
865 uword
/* Public entry point for the interface output node: pcap-trace the frame,
 * then dispatch to the inline worker with do_tx_offloads chosen by an
 * interface-capability test.
 * NOTE(review): the function-name/signature line (orig. 866), the hi
 * declaration (orig. 870) and the if-condition selecting the
 * do_tx_offloads==0 branch (orig. 877) were dropped by the extraction. */
867  vlib_frame_t * frame)
868 {
869  vnet_main_t *vnm = vnet_get_main ();
871  vnet_interface_output_runtime_t *rt = (void *) node->runtime_data;
872  hi = vnet_get_sup_hw_interface (vnm, rt->sw_if_index);
873 
874  vnet_interface_pcap_tx_trace (vm, node, frame,
875  0 /* sw_if_index_from_buffer */ );
876 
878  return vnet_interface_output_node_inline (vm, node, frame, vnm, hi,
879  /* do_tx_offloads */ 0);
880  else
881  return vnet_interface_output_node_inline (vm, node, frame, vnm, hi,
882  /* do_tx_offloads */ 1);
883 }
884 #endif /* CLIB_MARCH_VARIANT */
885 
886 /* Use buffer's sw_if_index[VNET_TX] to choose output interface. */
/* Use buffer's sw_if_index[VLIB_TX] to choose output interface (see the
 * comment above): for each buffer, look up the TX interface's hw
 * interface and enqueue the buffer to that interface's registered output
 * next index.  Dual loop (2 at a time with 2-ahead prefetch) + singles.
 * NOTE(review): the extraction dropped parts of the signature (orig. 888)
 * and the vnet_get_sup_hw_interface(...) lookup lines (orig. 930-933 /
 * 934-937 / 962-965), of which only fragments remain below. */
887 VLIB_NODE_FN (vnet_per_buffer_interface_output_node) (vlib_main_t * vm,
889  node,
890  vlib_frame_t * frame)
891 {
892  vnet_main_t *vnm = vnet_get_main ();
893  u32 n_left_to_next, *from, *to_next;
894  u32 n_left_from, next_index;
895 
896  vnet_interface_pcap_tx_trace (vm, node, frame,
897  1 /* sw_if_index_from_buffer */ );
898 
899  n_left_from = frame->n_vectors;
900 
901  from = vlib_frame_vector_args (frame);
902  next_index = node->cached_next_index;
903 
904  while (n_left_from > 0)
905  {
906  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
907 
   /* >= 4 keeps the two-ahead prefetches in range while consuming 2. */
908  while (n_left_from >= 4 && n_left_to_next >= 2)
909  {
910  u32 bi0, bi1, next0, next1;
911  vlib_buffer_t *b0, *b1;
912  vnet_hw_interface_t *hi0, *hi1;
913 
914  /* Prefetch next iteration. */
915  vlib_prefetch_buffer_with_index (vm, from[2], LOAD);
916  vlib_prefetch_buffer_with_index (vm, from[3], LOAD);
917 
918  bi0 = from[0];
919  bi1 = from[1];
920  to_next[0] = bi0;
921  to_next[1] = bi1;
922  from += 2;
923  to_next += 2;
924  n_left_to_next -= 2;
925  n_left_from -= 2;
926 
927  b0 = vlib_get_buffer (vm, bi0);
928  b1 = vlib_get_buffer (vm, bi1);
929 
930  hi0 =
933  [VLIB_TX]);
934  hi1 =
937  [VLIB_TX]);
938 
939  next0 = hi0->output_node_next_index;
940  next1 = hi1->output_node_next_index;
941 
942  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
943  n_left_to_next, bi0, bi1, next0,
944  next1);
945  }
946 
947  while (n_left_from > 0 && n_left_to_next > 0)
948  {
949  u32 bi0, next0;
950  vlib_buffer_t *b0;
951  vnet_hw_interface_t *hi0;
952 
953  bi0 = from[0];
954  to_next[0] = bi0;
955  from += 1;
956  to_next += 1;
957  n_left_to_next -= 1;
958  n_left_from -= 1;
959 
960  b0 = vlib_get_buffer (vm, bi0);
961 
962  hi0 =
965  [VLIB_TX]);
966 
967  next0 = hi0->output_node_next_index;
968 
969  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
970  n_left_to_next, bi0, next0);
971  }
972 
973  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
974  }
975 
976  return frame->n_vectors;
977 }
978 
979 typedef struct vnet_error_trace_t_
980 {
  /* NOTE(review): the struct members (orig. lines 981-982 — at least the
     sw_if_index field read by format_vnet_error_trace and the trace-fill
     code below) and the closing "} vnet_error_trace_t;" line were dropped
     by the extraction. */
983 
984 
985 static u8 *
986 format_vnet_error_trace (u8 * s, va_list * va)
987 {
988  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
989  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
990  vnet_error_trace_t *t = va_arg (*va, vnet_error_trace_t *);
991 
992  s = format (s, "rx:%U", format_vnet_sw_if_index_name,
993  vnet_get_main (), t->sw_if_index);
994 
995  return s;
996 }
997 
998 static void
/* Record a vnet_error_trace_t (the RX sw_if_index) for every traced
 * buffer in FRAME; two at a time with two-ahead prefetch, then singles.
 * NOTE(review): the function-name/parameter line (orig. 999;
 * interface_trace_buffers per its call site) was dropped by the
 * extraction. */
1000  vlib_node_runtime_t * node, vlib_frame_t * frame)
1001 {
1002  u32 n_left, *buffers;
1003 
1004  buffers = vlib_frame_vector_args (frame);
1005  n_left = frame->n_vectors;
1006 
   /* >= 4 keeps the two-ahead prefetches in range while consuming 2. */
1007  while (n_left >= 4)
1008  {
1009  u32 bi0, bi1;
1010  vlib_buffer_t *b0, *b1;
1011  vnet_error_trace_t *t0, *t1;
1012 
1013  /* Prefetch next iteration. */
1014  vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
1015  vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
1016 
1017  bi0 = buffers[0];
1018  bi1 = buffers[1];
1019 
1020  b0 = vlib_get_buffer (vm, bi0);
1021  b1 = vlib_get_buffer (vm, bi1);
1022 
1023  if (b0->flags & VLIB_BUFFER_IS_TRACED)
1024  {
1025  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
1026  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1027  }
1028  if (b1->flags & VLIB_BUFFER_IS_TRACED)
1029  {
1030  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
1031  t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1032  }
1033  buffers += 2;
1034  n_left -= 2;
1035  }
1036 
1037  while (n_left >= 1)
1038  {
1039  u32 bi0;
1040  vlib_buffer_t *b0;
1041  vnet_error_trace_t *t0;
1042 
1043  bi0 = buffers[0];
1044 
1045  b0 = vlib_get_buffer (vm, bi0);
1046 
1047  if (b0->flags & VLIB_BUFFER_IS_TRACED)
1048  {
1049  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
1050  t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1051  }
1052  buffers += 1;
1053  n_left -= 1;
1054  }
1055 }
1056 
1057 typedef enum
1058 {
  /* NOTE(review): the enumerators (orig. lines 1059-1061 — at least
     VNET_ERROR_DISPOSITION_DROP and VNET_ERROR_DISPOSITION_PUNT, per
     their use below) and the closing "} vnet_error_disposition_t;" line
     (per interface_drop_punt's parameter) were dropped by the extraction. */
1063 
/* Shared worker for the error-drop and error-punt nodes: optionally trace,
 * collect each buffer's RX sw_if_index, count drops/punts per interface in
 * runs of equal indices (also crediting super-interfaces for
 * sub-interface traffic), then enqueue everything to next[0]
 * ("drop" or "punt" per the node registration).
 * NOTE(review): the signature lines (orig. 1064-1065; interface_drop_punt
 * per its call sites), a declaration line (orig. 1073, completing "cm"),
 * the disposition-based counter-selection lines (orig. 1092, 1094-1095)
 * and the super-interface counter call head (orig. 1147) were dropped by
 * the extraction. */
1066  vlib_node_runtime_t * node,
1067  vlib_frame_t * frame,
1068  vnet_error_disposition_t disposition)
1069 {
1070  u32 *from, n_left, thread_index, *sw_if_index;
1071  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1072  u32 sw_if_indices[VLIB_FRAME_SIZE];
1074  u16 nexts[VLIB_FRAME_SIZE];
1075  vnet_main_t *vnm;
1076 
1077  vnm = vnet_get_main ();
1078  thread_index = vm->thread_index;
1079  from = vlib_frame_vector_args (frame);
1080  n_left = frame->n_vectors;
1081  b = bufs;
1082  sw_if_index = sw_if_indices;
1083 
1084  vlib_get_buffers (vm, from, bufs, n_left);
1085 
1086  if (node->flags & VLIB_NODE_FLAG_TRACE)
1087  interface_trace_buffers (vm, node, frame);
1088 
1089  /* All going to drop regardless, this is just a counting exercise */
1090  clib_memset (nexts, 0, sizeof (nexts));
1091 
1093  (disposition == VNET_ERROR_DISPOSITION_PUNT
1096 
1097  /* collect the array of interfaces first ... */
1098  while (n_left >= 4)
1099  {
1100  if (n_left >= 12)
1101  {
1102  /* Prefetch 8 ahead - there's not much going on in each iteration */
1103  vlib_prefetch_buffer_header (b[4], LOAD);
1104  vlib_prefetch_buffer_header (b[5], LOAD);
1105  vlib_prefetch_buffer_header (b[6], LOAD);
1106  vlib_prefetch_buffer_header (b[7], LOAD);
1107  }
1108  sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
1109  sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
1110  sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
1111  sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
1112 
1113  sw_if_index += 4;
1114  n_left -= 4;
1115  b += 4;
1116  }
1117  while (n_left)
1118  {
1119  sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
1120 
1121  sw_if_index += 1;
1122  n_left -= 1;
1123  b += 1;
1124  }
1125 
1126  /* ... then count against them in blocks */
1127  n_left = frame->n_vectors;
1128 
1129  while (n_left)
1130  {
1131  vnet_sw_interface_t *sw_if0;
1132  u16 off, count;
1133 
1134  off = frame->n_vectors - n_left;
1135 
1136  sw_if_index = sw_if_indices + off;
1137 
   /* Count the run of consecutive equal sw_if_index values in one shot. */
1138  count = clib_count_equal_u32 (sw_if_index, n_left);
1139  n_left -= count;
1140 
1141  vlib_increment_simple_counter (cm, thread_index, sw_if_index[0], count);
1142 
1143  /* Increment super-interface drop/punt counters for
1144  sub-interfaces. */
1145  sw_if0 = vnet_get_sw_interface (vnm, sw_if_index[0]);
1146  if (sw_if0->sup_sw_if_index != sw_if_index[0])
1148  (cm, thread_index, sw_if0->sup_sw_if_index, count);
1149  }
1150 
1151  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
1152 
1153  return frame->n_vectors;
1154 }
1155 
1156 static inline void
/* Add dropped packets to the drop pcap capture: skip errors present in
 * the pcap_drop_filter_hash, filter by im->pcap_sw_if_index when nonzero,
 * rewind each buffer to the start of packet data before capturing, and
 * restore its current_data/current_length afterwards.
 * NOTE(review): the function-name/parameter lines (orig. 1157-1158;
 * pcap_drop_trace per its call site in the interface_drop node) and the
 * second half of the interface-filter condition (orig. 1189) were dropped
 * by the extraction. */
1159 {
1160  u32 *from;
1161  u32 n_left = f->n_vectors;
1162  vlib_buffer_t *b0, *p1;
1163  u32 bi0;
1164  i16 save_current_data;
1165  u16 save_current_length;
1166 
1167  from = vlib_frame_vector_args (f);
1168 
1169  while (n_left > 0)
1170  {
1171  if (PREDICT_TRUE (n_left > 1))
1172  {
1173  p1 = vlib_get_buffer (vm, from[1]);
1174  vlib_prefetch_buffer_header (p1, LOAD);
1175  }
1176 
1177  bi0 = from[0];
1178  b0 = vlib_get_buffer (vm, bi0);
1179  from++;
1180  n_left--;
1181 
1182  /* See if we're pointedly ignoring this specific error */
1183  if (im->pcap_drop_filter_hash
1184  && hash_get (im->pcap_drop_filter_hash, b0->error))
1185  continue;
1186 
1187  /* Trace all drops, or drops received on a specific interface */
1188  if (im->pcap_sw_if_index == 0 ||
1190  {
1191  save_current_data = b0->current_data;
1192  save_current_length = b0->current_length;
1193 
1194  /*
1195  * Typically, we'll need to rewind the buffer
1196  */
1197  if (b0->current_data > 0)
1198  vlib_buffer_advance (b0, (word) - b0->current_data);
1199 
1200  pcap_add_buffer (&im->pcap_main, vm, bi0, 512);
1201 
1202  b0->current_data = save_current_data;
1203  b0->current_length = save_current_length;
1204  }
1205  }
1206 }
1207 
1208 #ifndef CLIB_MARCH_VARIANT
1209 void
1210 vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add)
1211 {
1213 
1214  if (im->pcap_drop_filter_hash == 0)
1215  im->pcap_drop_filter_hash = hash_create (0, sizeof (uword));
1216 
1217  if (is_add)
1218  hash_set (im->pcap_drop_filter_hash, error_index, 1);
1219  else
1220  hash_unset (im->pcap_drop_filter_hash, error_index);
1221 }
1222 #endif /* CLIB_MARCH_VARIANT */
1223 
1224 VLIB_NODE_FN (interface_drop) (vlib_main_t * vm,
1225  vlib_node_runtime_t * node,
1226  vlib_frame_t * frame)
1227 {
1229 
1230  if (PREDICT_FALSE (im->drop_pcap_enable))
1231  pcap_drop_trace (vm, im, frame);
1232 
1233  return interface_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_DROP);
1234 }
1235 
/* "error-punt" node function: count punted packets per interface and
   hand them to the punt disposition of the common drop/punt path.
   Unlike error-drop, there is no pcap capture hook here. */
VLIB_NODE_FN (interface_punt) (vlib_main_t * vm,
			       vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  return interface_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_PUNT);
}
1242 
/* *INDENT-OFF* */
/* Node registration for "error-drop": buffers with errors funnel
   through here (counted, optionally pcap-captured) before being
   handed to the "drop" node for freeing. */
VLIB_REGISTER_NODE (interface_drop) = {
  .name = "error-drop",
  .vector_size = sizeof (u32),
  .format_trace = format_vnet_error_trace,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "drop",
  },
};
/* *INDENT-ON* */
1254 
/* *INDENT-OFF* */
/* Node registration for "error-punt": buffers to be punted are
   counted here and then handed to the "punt" node. */
VLIB_REGISTER_NODE (interface_punt) = {
  .name = "error-punt",
  .vector_size = sizeof (u32),
  .format_trace = format_vnet_error_trace,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "punt",
  },
};
/* *INDENT-ON* */
1266 
/* *INDENT-OFF* */
/* Per-buffer "interface-output" node: next indices are added
   dynamically (one per hardware interface output node) by the
   add/del callback and vnet_set_interface_output_node() below. */
VLIB_REGISTER_NODE (vnet_per_buffer_interface_output_node) = {
  .name = "interface-output",
  .vector_size = sizeof (u32),
};
/* *INDENT-ON* */
1273 
1274 static uword
1276  vlib_frame_t * from_frame)
1277 {
1278  vnet_main_t *vnm = vnet_get_main ();
1279  u32 last_sw_if_index = ~0;
1280  vlib_frame_t *to_frame = 0;
1281  vnet_hw_interface_t *hw = 0;
1282  u32 *from, *to_next = 0;
1283  u32 n_left_from;
1284 
1285  from = vlib_frame_vector_args (from_frame);
1286  n_left_from = from_frame->n_vectors;
1287  while (n_left_from > 0)
1288  {
1289  u32 bi0;
1290  vlib_buffer_t *b0;
1291  u32 sw_if_index0;
1292 
1293  bi0 = from[0];
1294  from++;
1295  n_left_from--;
1296  b0 = vlib_get_buffer (vm, bi0);
1297  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
1298 
1299  if (PREDICT_FALSE ((last_sw_if_index != sw_if_index0) || to_frame == 0))
1300  {
1301  if (to_frame)
1302  {
1303  hw = vnet_get_sup_hw_interface (vnm, last_sw_if_index);
1304  vlib_put_frame_to_node (vm, hw->tx_node_index, to_frame);
1305  }
1306  last_sw_if_index = sw_if_index0;
1307  hw = vnet_get_sup_hw_interface (vnm, sw_if_index0);
1308  to_frame = vlib_get_frame_to_node (vm, hw->tx_node_index);
1309  to_next = vlib_frame_vector_args (to_frame);
1310  }
1311 
1312  to_next[0] = bi0;
1313  to_next++;
1314  to_frame->n_vectors++;
1315  }
1316  vlib_put_frame_to_node (vm, hw->tx_node_index, to_frame);
1317  return from_frame->n_vectors;
1318 }
1319 
/* *INDENT-OFF* */
/* Node registration for "interface-tx"; the error-drop next is only
   a placeholder, buffers are normally re-framed to per-interface TX
   nodes by interface_tx_node_fn(). */
VLIB_REGISTER_NODE (interface_tx, static) = {
  .function = interface_tx_node_fn,
  .name = "interface-tx",
  .vector_size = sizeof (u32),
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};
1330 
1331 VNET_FEATURE_ARC_INIT (interface_output, static) =
1332 {
1333  .arc_name = "interface-output",
1334  .start_nodes = VNET_FEATURES (0),
1335  .last_in_arc = "interface-tx",
1337 };
1338 
/* SPAN (port mirroring) output feature: runs before interface-tx. */
VNET_FEATURE_INIT (span_tx, static) = {
  .arc_name = "interface-output",
  .node_name = "span-output",
  .runs_before = VNET_FEATURES ("interface-tx"),
};
1344 
/* IPsec interface output feature: runs before interface-tx. */
VNET_FEATURE_INIT (ipsec_if_tx, static) = {
  .arc_name = "interface-output",
  .node_name = "ipsec-if-output",
  .runs_before = VNET_FEATURES ("interface-tx"),
};
1350 
/* interface-tx is the terminal feature of the arc (runs_before = 0). */
VNET_FEATURE_INIT (interface_tx, static) = {
  .arc_name = "interface-output",
  .node_name = "interface-tx",
  .runs_before = 0,
};
1356 /* *INDENT-ON* */
1357 
1358 #ifndef CLIB_MARCH_VARIANT
1359 clib_error_t *
1361  u32 hw_if_index,
1362  u32 is_create)
1363 {
1364  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1365  u32 next_index;
1366 
1367  if (hi->output_node_index == 0)
1368  return 0;
1369 
1370  next_index = vlib_node_add_next
1371  (vnm->vlib_main, vnet_per_buffer_interface_output_node.index,
1372  hi->output_node_index);
1373  hi->output_node_next_index = next_index;
1374 
1375  return 0;
1376 }
1377 
1380 
1381 void
1383  u32 hw_if_index, u32 node_index)
1384 {
1385  ASSERT (node_index);
1386  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1387  u32 next_index = vlib_node_add_next
1388  (vnm->vlib_main, vnet_per_buffer_interface_output_node.index, node_index);
1389  hi->output_node_next_index = next_index;
1390  hi->output_node_index = node_index;
1391 }
1392 #endif /* CLIB_MARCH_VARIANT */
1393 
1394 static clib_error_t *
1396  unformat_input_t * input,
1397  vlib_cli_command_t * cmd)
1398 {
1399  vnet_main_t *vnm = vnet_get_main ();
1401  u8 *filename;
1402  u32 max;
1403  int matched = 0;
1404  clib_error_t *error = 0;
1405 
1406  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1407  {
1408  if (unformat (input, "on"))
1409  {
1410  if (im->drop_pcap_enable == 0)
1411  {
1412  if (im->pcap_filename == 0)
1413  im->pcap_filename = format (0, "/tmp/drop.pcap%c", 0);
1414 
1415  clib_memset (&im->pcap_main, 0, sizeof (im->pcap_main));
1416  im->pcap_main.file_name = (char *) im->pcap_filename;
1418  if (im->pcap_pkts_to_capture)
1420 
1421  im->pcap_main.packet_type = PCAP_PACKET_TYPE_ethernet;
1422  im->drop_pcap_enable = 1;
1423  matched = 1;
1424  vlib_cli_output (vm, "pcap drop capture on...");
1425  }
1426  else
1427  {
1428  vlib_cli_output (vm, "pcap drop capture already on...");
1429  }
1430  matched = 1;
1431  }
1432  else if (unformat (input, "off"))
1433  {
1434  matched = 1;
1435 
1436  if (im->drop_pcap_enable)
1437  {
1438  vlib_cli_output (vm, "captured %d pkts...",
1440  if (im->pcap_main.n_packets_captured)
1441  {
1444  error = pcap_write (&im->pcap_main);
1445  if (error)
1446  clib_error_report (error);
1447  else
1448  vlib_cli_output (vm, "saved to %s...", im->pcap_filename);
1449  }
1450  }
1451  else
1452  {
1453  vlib_cli_output (vm, "pcap drop capture already off...");
1454  }
1455 
1456  im->drop_pcap_enable = 0;
1457  }
1458  else if (unformat (input, "max %d", &max))
1459  {
1460  im->pcap_pkts_to_capture = max;
1461  matched = 1;
1462  }
1463 
1464  else if (unformat (input, "intfc %U",
1466  &im->pcap_sw_if_index))
1467  matched = 1;
1468  else if (unformat (input, "intfc any"))
1469  {
1470  im->pcap_sw_if_index = 0;
1471  matched = 1;
1472  }
1473  else if (unformat (input, "file %s", &filename))
1474  {
1475  u8 *chroot_filename;
1476  /* Brain-police user path input */
1477  if (strstr ((char *) filename, "..")
1478  || index ((char *) filename, '/'))
1479  {
1480  vlib_cli_output (vm, "illegal characters in filename '%s'",
1481  filename);
1482  continue;
1483  }
1484 
1485  chroot_filename = format (0, "/tmp/%s%c", filename, 0);
1486  vec_free (filename);
1487 
1488  if (im->pcap_filename)
1489  vec_free (im->pcap_filename);
1490  im->pcap_filename = chroot_filename;
1491  im->pcap_main.file_name = (char *) im->pcap_filename;
1492  matched = 1;
1493  }
1494  else if (unformat (input, "status"))
1495  {
1496  if (im->drop_pcap_enable == 0)
1497  {
1498  vlib_cli_output (vm, "pcap drop capture is off...");
1499  continue;
1500  }
1501 
1502  vlib_cli_output (vm, "pcap drop capture: %d of %d pkts...",
1505  matched = 1;
1506  }
1507 
1508  else
1509  break;
1510  }
1511 
1512  if (matched == 0)
1513  return clib_error_return (0, "unknown input `%U'",
1514  format_unformat_error, input);
1515 
1516  return 0;
1517 }
1518 
/* *INDENT-OFF* */
/* CLI registration for "pcap drop trace". */
VLIB_CLI_COMMAND (pcap_trace_command, static) = {
  .path = "pcap drop trace",
  .short_help =
  "pcap drop trace on off max <nn> intfc <intfc> file <name> status",
  .function = pcap_drop_trace_command_fn,
};
/* *INDENT-ON* */
1527 
1528 /*
1529  * fd.io coding-style-patch-verification: ON
1530  *
1531  * Local Variables:
1532  * eval: (c-set-style "gnu")
1533  * End:
1534  */
u8 * format_vnet_interface_output_trace(u8 *s, va_list *va)
vnet_config_main_t config_main
Definition: feature.h:82
static_always_inline uword vnet_interface_output_node_inline_gso(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vnet_main_t *vnm, vnet_hw_interface_t *hi, int do_tx_offloads, int do_segmentation)
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:439
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
vmrglw vmrglh hi
pcap_main_t pcap_main
Definition: main.h:65
vlib_main_t vlib_global_main
Definition: main.c:1937
VNET_FEATURE_ARC_INIT(interface_output, static)
#define hash_set(h, key, value)
Definition: hash.h:255
char * file_name
File name of pcap output.
Definition: pcap.h:162
u32 flags
Definition: vhost_user.h:141
pcap_main_t pcap_main
Definition: interface.h:848
#define clib_min(x, y)
Definition: clib.h:295
#define CLIB_UNUSED(x)
Definition: clib.h:82
void vnet_set_interface_output_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Set interface output node - for interface registered without its output/tx nodes created because its ...
static_always_inline uword vnet_interface_output_node_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vnet_main_t *vnm, vnet_hw_interface_t *hi, int do_tx_offloads)
#define hash_unset(h, key)
Definition: hash.h:261
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:865
u8 runtime_data[0]
Function dependent node-runtime data.
Definition: node.h:521
u32 n_packets_to_capture
Number of packets to capture.
Definition: pcap.h:165
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
static vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
#define vnet_buffer2(b)
Definition: buffer.h:420
vnet_interface_main_t interface_main
Definition: vnet.h:56
#define PREDICT_TRUE(x)
Definition: clib.h:112
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
static_always_inline void drop_one_buffer_and_count(vlib_main_t *vm, vnet_main_t *vnm, vlib_node_runtime_t *node, u32 *pbi0, u32 drop_error_code)
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u32 thread_index
Definition: main.h:197
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
u8 data[0]
Packet data.
Definition: buffer.h:181
static_always_inline void tso_init_buf_from_template(vlib_main_t *vm, vlib_buffer_t *nb0, vlib_buffer_t *b0, u16 template_data_sz, u16 gso_size, u8 **p_dst_ptr, u16 *p_dst_left, u32 next_tcp_seq, u32 flags)
clib_error_t * vnet_per_buffer_interface_output_hw_interface_add_del(vnet_main_t *vnm, u32 hw_if_index, u32 is_create)
static void vnet_interface_output_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, uword n_buffers)
static u32 format_get_indent(u8 *s)
Definition: format.h:72
clib_memset(h->entries, 0, sizeof(h->entries[0])*entries)
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
Definition: counter.h:78
unformat_function_t unformat_vnet_sw_interface
u8 data[128]
Definition: ipsec.api:249
static_always_inline int vnet_have_features(u8 arc, u32 sw_if_index)
Definition: feature.h:241
void vnet_pcap_drop_trace_filter_add_del(u32 error_index, int is_add)
#define VLIB_NODE_FN(node)
Definition: node.h:201
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:366
struct _tcp_header tcp_header_t
format_function_t format_vnet_sw_if_index_name
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1092
unsigned char u8
Definition: types.h:56
#define static_always_inline
Definition: clib.h:99
uword * pcap_drop_filter_hash
Definition: interface.h:852
i64 word
Definition: types.h:111
static_always_inline u32 tso_segment_buffer(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, int do_tx_offloads, u32 sbi0, vlib_buffer_t *sb0, u32 n_bytes_b0)
Allocate the necessary number of ptd->split_buffers, and segment the possibly chained buffer(s) from ...
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
Definition: buffer_funcs.h:440
vl_api_interface_index_t sw_if_index
Definition: gre.api:50
static_always_inline void calc_checksums(vlib_main_t *vm, vlib_buffer_t *b)
struct vnet_error_trace_t_ vnet_error_trace_t
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
vnet_hw_interface_flags_t flags
Definition: interface.h:505
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:203
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u8 * format_hex_bytes(u8 *s, va_list *va)
Definition: std-formats.c:84
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
Definition: main.c:187
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
Definition: node_funcs.h:343
#define clib_error_return(e, args...)
Definition: error.h:99
unsigned int u32
Definition: types.h:88
A collection of simple counters.
Definition: counter.h:57
static void pcap_drop_trace(vlib_main_t *vm, vnet_interface_main_t *im, vlib_frame_t *f)
#define VLIB_FRAME_SIZE
Definition: node.h:376
static uword interface_tx_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
int pcap_enable
Definition: main.h:63
vnet_crypto_main_t * cm
Definition: quic_crypto.c:41
#define hash_get(h, key)
Definition: hash.h:249
format_function_t format_vnet_sw_interface_name
uword size
vlib_main_t * vlib_main
Definition: vnet.h:80
static_always_inline void tso_init_buf_from_template_base(vlib_buffer_t *nb0, vlib_buffer_t *b0, u32 flags, u16 length)
static u8 * format_vnet_error_trace(u8 *s, va_list *va)
VNET_FEATURE_INIT(span_tx, static)
uword vnet_interface_output_node(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
uword vlib_error_drop_buffers(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u32 next_buffer_stride, u32 n_buffers, u32 next_index, u32 drop_error_node, u32 drop_error_code)
Definition: error.c:45
struct _unformat_input_t unformat_input_t
unsigned short u16
Definition: types.h:57
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
Definition: main.c:196
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
vnet_error_disposition_t
static void * vnet_get_config_data(vnet_config_main_t *cm, u32 *config_index, u32 *next_index, u32 n_data_bytes)
Definition: config.h:122
#define PREDICT_FALSE(x)
Definition: clib.h:111
vnet_main_t vnet_main
Definition: misc.c:43
#define TCP_FLAG_FIN
Definition: fa_node.h:12
vlib_simple_counter_main_t * sw_if_counters
Definition: interface.h:841
static_always_inline u16 tso_alloc_tx_bufs(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, vlib_buffer_t *b0, u32 n_bytes_b0, u16 l234_sz, u16 gso_size)
format_function_t * format_buffer
Definition: node.h:356
u32 node_index
Node index.
Definition: node.h:494
u32 pcap_sw_if_index
Definition: main.h:64
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:338
static void interface_trace_buffers(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static_always_inline void tso_fixup_segmented_buf(vlib_buffer_t *b0, u8 tcp_flags, int is_ip6)
#define PCAP_DEF_PKT_TO_CAPTURE
Definition: pcap.h:196
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
Definition: buffer_funcs.h:96
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
#define UNFORMAT_END_OF_INPUT
Definition: format.h:145
u16 n_vectors
Definition: node.h:395
vlib_main_t * vm
Definition: buffer.c:312
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:332
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:341
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
Definition: pool.h:283
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:147
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:458
clib_error_t * pcap_write(pcap_main_t *pm)
Write PCAP file.
Definition: pcap.c:89
u8 data[128-3 *sizeof(u32)]
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:155
u16 ip6_tcp_udp_icmp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip6_header_t *ip0, int *bogus_lengthp)
Definition: ip6_forward.c:906
#define hash_create(elts, value_bytes)
Definition: hash.h:696
#define ASSERT(truth)
u16 ip4_tcp_udp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip4_header_t *ip0)
Definition: ip4_forward.c:1144
#define clib_error_report(e)
Definition: error.h:113
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
size_t count
Definition: vapi.c:47
static_always_inline void vnet_interface_pcap_tx_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int sw_if_index_from_buffer)
#define VNET_FEATURES(...)
Definition: feature.h:435
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
VNET_HW_INTERFACE_ADD_DEL_FUNCTION(vnet_per_buffer_interface_output_hw_interface_add_del)
pcap_packet_type_t packet_type
Packet type.
Definition: pcap.h:168
Definition: defs.h:47
u16 payload_length
Definition: ip6_packet.h:374
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static_always_inline u32 vnet_get_feature_config_index(u8 arc, u32 sw_if_index)
Definition: feature.h:248
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
vnet_sw_interface_t * sw_interfaces
Definition: interface.h:833
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:244
static clib_error_t * pcap_drop_trace_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
vnet_pcap_t pcap[VLIB_N_RX_TX]
Definition: main.h:155
#define vnet_buffer(b)
Definition: buffer.h:361
u8 * format_unformat_error(u8 *s, va_list *va)
Definition: unformat.c:91
static_always_inline uword clib_count_equal_u32(u32 *data, uword max_count)
Definition: string.h:520
u16 flags
Copy of main node flags.
Definition: node.h:507
static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index)
Free one buffer Shorthand to free a single buffer chain.
Definition: buffer_funcs.h:898
static_always_inline uword interface_drop_punt(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vnet_error_disposition_t disposition)
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:244
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:301
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:167
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:612
static_always_inline vnet_feature_config_main_t * vnet_feature_get_config_main(u16 arc)
Definition: feature.h:234
static void pcap_add_buffer(pcap_main_t *pm, struct vlib_main_t *vm, u32 buffer_index, u32 n_bytes_in_trace)
Add buffer (vlib_buffer_t) to the trace.
Definition: pcap_funcs.h:66
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
Definition: cli.c:768
u32 n_packets_captured
Number of packets currently captured.
Definition: pcap.h:171
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
static u16 ip4_header_checksum(ip4_header_t *i)
Definition: ip4_packet.h:247
#define clib_panic(format, args...)
Definition: error.h:72
u32 opaque[10]
Opaque data used by sub-graphs for their own purposes.
Definition: buffer.h:153
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:978
Definition: defs.h:46
static uword unformat_check_input(unformat_input_t *i)
Definition: format.h:171
signed short i16
Definition: types.h:46