FD.io VPP  v18.10-32-g1161dda
Vector Packet Processing
node.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/mpls/packet.h>
#include <vnet/handoff.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <dpdk/device/dpdk_priv.h>

static char *dpdk_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_error
#undef _
};

STATIC_ASSERT (VNET_DEVICE_INPUT_NEXT_IP4_INPUT - 1 ==
	       VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT,
	       "IP4_INPUT must follow IP4_NCS_INPUT");

enum
{
  DPDK_RX_F_CKSUM_GOOD = 7,
  DPDK_RX_F_CKSUM_BAD = 4,
  DPDK_RX_F_FDIR = 2,
};

/* currently we are just copying bit positions from DPDK, but that
   might change in the future, in case we start to be interested in
   something stored in the upper bytes. Currently we store only the
   lower byte for perf reasons */
STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_GOOD == PKT_RX_IP_CKSUM_GOOD, "");
STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_BAD == PKT_RX_IP_CKSUM_BAD, "");
STATIC_ASSERT (1 << DPDK_RX_F_FDIR == PKT_RX_FDIR, "");
STATIC_ASSERT ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
	       256, "dpdk flags not in lower byte, fix needed");

always_inline u32
dpdk_rx_next (vlib_node_runtime_t * node, u16 etype, u8 flags)
{
  if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
    {
      /* keep it branchless */
      u32 is_good = (flags >> DPDK_RX_F_CKSUM_GOOD) & 1;
      return VNET_DEVICE_INPUT_NEXT_IP4_INPUT - is_good;
    }
  else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
  else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS)))
    return VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;
  else
    return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
}
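
/* The STATIC_ASSERT above pins IP4_NCS_INPUT to IP4_INPUT - 1, so the
   subtraction is a branchless dispatch: a flags byte with bit
   DPDK_RX_F_CKSUM_GOOD set yields is_good = 1 and selects
   ip4-input-no-checksum, skipping a software checksum the NIC has
   already verified. */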

static_always_inline uword
dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
			  struct rte_mbuf * mb, vlib_buffer_free_list_t * fl)
{
  u8 nb_seg = 1;
  struct rte_mbuf *mb_seg = 0;
  vlib_buffer_t *b_seg, *b_chain = 0;
  mb_seg = mb->next;
  b_chain = b;

  if (mb->nb_segs < 2)
    return 0;

  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  while (nb_seg < mb->nb_segs)
    {
      ASSERT (mb_seg != 0);

      b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
      vlib_buffer_init_for_free_list (b_seg, fl);

      ASSERT ((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
      ASSERT (b_seg->current_data == 0);

      /*
       * The driver (e.g. virtio) may not put the packet data at the start
       * of the segment, so don't assume b_seg->current_data == 0 is correct.
       */
      b_seg->current_data =
	(mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;

      b_seg->current_length = mb_seg->data_len;
      b->total_length_not_including_first_buffer += mb_seg->data_len;

      b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

      b_chain = b_seg;
      mb_seg = mb_seg->next;
      nb_seg++;
    }

  return b->total_length_not_including_first_buffer;
}
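
/* The walk above mirrors the mbuf segment list into a vlib buffer chain:
   each segment's vlib_buffer_t is linked in via next_buffer plus
   VLIB_BUFFER_NEXT_PRESENT, and the byte total of the trailing segments
   is returned so the caller can fold it into its rx byte count. */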

static_always_inline void
dpdk_prefetch_mbuf_x4 (struct rte_mbuf *mb[])
{
  CLIB_PREFETCH (mb[0], CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (mb[1], CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, LOAD);
}

static_always_inline void
dpdk_prefetch_buffer_x4 (struct rte_mbuf *mb[])
{
  vlib_buffer_t *b;
  b = vlib_buffer_from_rte_mbuf (mb[0]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[1]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[2]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[3]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}

static_always_inline void
dpdk_prefetch_buffer_data_x4 (struct rte_mbuf *mb[])
{
  vlib_buffer_t *b;
  b = vlib_buffer_from_rte_mbuf (mb[0]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[1]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[2]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[3]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
}
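
/* Three prefetch helpers, one per cache line touched later: the rte_mbuf
   header, the vlib_buffer_t metadata (which sits right behind the rte_mbuf
   in the same buffer), and the first bytes of packet data. The loops below
   issue them several packets ahead to hide memory latency. */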

/** \brief Main DPDK input node
    @node dpdk-input

    This is the main DPDK input node: across each assigned interface,
    call rte_eth_rx_burst(...) or similar to obtain a vector of
    packets to process. Derive @c vlib_buffer_t metadata from
    <code>struct rte_mbuf</code> metadata.
    Depending on the resulting metadata, adjust <code>b->current_data,
    b->current_length</code> and dispatch directly to
    ip4-input-no-checksum or ip6-input. Trace the packet if required.

    @param vm   vlib_main_t corresponding to the current thread
    @param node vlib_node_runtime_t
    @param f    vlib_frame_t input-node, not used.

    @par Graph mechanics: buffer metadata, next index usage

    @em Uses:
    - <code>struct rte_mbuf mb->ol_flags</code>
        - PKT_RX_IP_CKSUM_BAD

    @em Sets:
    - <code>b->error</code> if the packet is to be dropped immediately
    - <code>b->current_data, b->current_length</code>
        - adjusted as needed to skip the L2 header in direct-dispatch cases
    - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
        - rx interface sw_if_index
    - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
        - required by ipX-lookup
    - <code>b->flags</code>
        - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.

    <em>Next Nodes:</em>
    - Static arcs to: error-drop, ethernet-input,
      ip4-input-no-checksum, ip6-input, mpls-input
    - per-interface redirection, controlled by
      <code>xd->per_interface_next_index</code>
*/

static_always_inline u8
dpdk_ol_flags_extract (struct rte_mbuf **mb, u8 * flags, int count)
{
  u8 rv = 0;
  int i;
  for (i = 0; i < count; i++)
    {
      /* all flags we are interested in are in the lower 8 bits, but
         that might change */
      flags[i] = (u8) mb[i]->ol_flags;
      rv |= flags[i];
    }
  return rv;
}
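
/* The return value is the OR of all extracted flag bytes, letting callers
   test once per burst (e.g. for DPDK_RX_F_CKSUM_BAD) instead of once per
   packet in the common case where no flag is set anywhere. */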

static_always_inline uword
dpdk_process_rx_burst (vlib_main_t * vm, dpdk_per_thread_data_t * ptd,
		       uword n_rx_packets, int maybe_multiseg, u8 * or_flagsp)
{
  u32 n_left = n_rx_packets;
  vlib_buffer_t *b[4];
  vlib_buffer_free_list_t *fl;
  struct rte_mbuf **mb = ptd->mbufs;
  uword n_bytes = 0;
  i16 off;
  u8 *flags, or_flags = 0;
  u16 *next;

  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  mb = ptd->mbufs;
  flags = ptd->flags;
  next = ptd->next;

  while (n_left >= 8)
    {
      CLIB_PREFETCH (mb + 8, CLIB_CACHE_LINE_BYTES, LOAD);

      dpdk_prefetch_buffer_x4 (mb + 4);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

      clib_memcpy64_x4 (b[0], b[1], b[2], b[3], &ptd->buffer_template);

      dpdk_prefetch_mbuf_x4 (mb + 4);

      or_flags |= dpdk_ol_flags_extract (mb, flags, 4);
      flags += 4;

      /* we temporarily store the relative offset of the ethertype in
         next[x] so we can prefetch it and fetch it faster later */

      off = mb[0]->data_off;
      next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[0])->l2_hdr_offset = off;
      b[0]->current_data = off;

      off = mb[1]->data_off;
      next[1] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[1])->l2_hdr_offset = off;
      b[1]->current_data = off;

      off = mb[2]->data_off;
      next[2] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[2])->l2_hdr_offset = off;
      b[2]->current_data = off;

      off = mb[3]->data_off;
      next[3] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[3])->l2_hdr_offset = off;
      b[3]->current_data = off;

      b[0]->current_length = mb[0]->data_len;
      b[1]->current_length = mb[1]->data_len;
      b[2]->current_length = mb[2]->data_len;
      b[3]->current_length = mb[3]->data_len;

      n_bytes += mb[0]->data_len;
      n_bytes += mb[1]->data_len;
      n_bytes += mb[2]->data_len;
      n_bytes += mb[3]->data_len;

      if (maybe_multiseg)
	{
	  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
	  n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], fl);
	  n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], fl);
	  n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], fl);
	}

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      mb += 4;
      n_left -= 4;
      next += 4;
    }

  while (n_left)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      clib_memcpy (b[0], &ptd->buffer_template, 64);
      or_flags |= dpdk_ol_flags_extract (mb, flags, 1);
      flags += 1;

      off = mb[0]->data_off;
      next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[0])->l2_hdr_offset = off;
      b[0]->current_data = off;
      b[0]->current_length = mb[0]->data_len;
      n_bytes += mb[0]->data_len;
      if (maybe_multiseg)
	n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      mb += 1;
      n_left -= 1;
      next += 1;
    }

  *or_flagsp = or_flags;
  return n_bytes;
}
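
/* Per-packet metadata initialization is a single 64-byte copy from
   ptd->buffer_template (clib_memcpy64_x4 in the quad loop above):
   overwriting the first cache line of each vlib_buffer_t wholesale is
   cheaper than assigning the fields individually. */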

static_always_inline void
dpdk_set_next_from_etype (vlib_main_t * vm, vlib_node_runtime_t * node,
			  dpdk_per_thread_data_t * ptd, uword n_rx_packets)
{
  vlib_buffer_t *b[4];
  i16 adv[4];
  u16 etype[4];
  struct rte_mbuf **mb = ptd->mbufs;
  u8 *flags = ptd->flags;
  u16 *next = ptd->next;
  u32 n_left = n_rx_packets;

  while (n_left >= 12)
    {
      dpdk_prefetch_buffer_data_x4 (mb + 8);
      dpdk_prefetch_buffer_x4 (mb + 8);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
      etype[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
      etype[1] = *(u16 *) ((u8 *) mb[1] + next[1] + sizeof (vlib_buffer_t));
      etype[2] = *(u16 *) ((u8 *) mb[2] + next[2] + sizeof (vlib_buffer_t));
      etype[3] = *(u16 *) ((u8 *) mb[3] + next[3] + sizeof (vlib_buffer_t));
      next[0] = dpdk_rx_next (node, etype[0], flags[0]);
      next[1] = dpdk_rx_next (node, etype[1], flags[1]);
      next[2] = dpdk_rx_next (node, etype[2], flags[2]);
      next[3] = dpdk_rx_next (node, etype[3], flags[3]);
      adv[0] = device_input_next_node_advance[next[0]];
      adv[1] = device_input_next_node_advance[next[1]];
      adv[2] = device_input_next_node_advance[next[2]];
      adv[3] = device_input_next_node_advance[next[3]];
      b[0]->current_data += adv[0];
      b[1]->current_data += adv[1];
      b[2]->current_data += adv[2];
      b[3]->current_data += adv[3];
      b[0]->current_length -= adv[0];
      b[1]->current_length -= adv[1];
      b[2]->current_length -= adv[2];
      b[3]->current_length -= adv[3];

      /* next */
      next += 4;
      mb += 4;
      n_left -= 4;
      flags += 4;
    }

  while (n_left)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      next[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
      next[0] = dpdk_rx_next (node, next[0], flags[0]);
      adv[0] = device_input_next_node_advance[next[0]];
      b[0]->current_data += adv[0];
      b[0]->current_length -= adv[0];

      /* next */
      next += 1;
      mb += 1;
      n_left -= 1;
      flags += 1;
    }
}
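
/* device_input_next_node_advance[] records, per next node, how many bytes
   of L2 header to strip: packets dispatched to ip4-input start at the IP
   header, while packets handed to ethernet-input keep the whole frame. */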

static_always_inline void
dpdk_process_flow_offload (dpdk_device_t * xd, dpdk_per_thread_data_t * ptd,
			   uword n_rx_packets)
{
  uword n;
  dpdk_flow_lookup_entry_t *fle;
  vlib_buffer_t *b0;

  /* TODO prefetch and quad-loop */
  for (n = 0; n < n_rx_packets; n++)
    {
      if ((ptd->flags[n] & (1 << DPDK_RX_F_FDIR)) == 0)
	continue;

      fle = vec_elt_at_index (xd->flow_lookup_entries,
			      ptd->mbufs[n]->hash.fdir.hi);

      if (fle->next_index != (u16) ~ 0)
	ptd->next[n] = fle->next_index;

      if (fle->flow_id != ~0)
	{
	  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
	  b0->flow_id = fle->flow_id;
	}

      if (fle->buffer_advance != ~0)
	{
	  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
	  vlib_buffer_advance (b0, fle->buffer_advance);
	}
    }
}
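
/* A hardware-matched flow marks the mbuf with an index (hash.fdir.hi) into
   xd->flow_lookup_entries; the entry may override the next node, stamp a
   flow_id into the buffer, and advance past headers the NIC has already
   matched. */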

static_always_inline u32
dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
		   vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
{
  uword n_rx_packets = 0, n_rx_bytes;
  u32 n_left, n_trace;
  u32 *buffers;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  struct rte_mbuf **mb;
  vlib_buffer_t *b0;
  int known_next = 0;
  u16 *next;
  u8 or_flags;
  u32 n;

  dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
						  thread_index);
  vlib_buffer_t *bt = &ptd->buffer_template;

  if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
    return 0;

  /* get up to DPDK_RX_BURST_SZ buffers from PMD */
  while (n_rx_packets < DPDK_RX_BURST_SZ)
    {
      n = rte_eth_rx_burst (xd->port_id, queue_id,
			    ptd->mbufs + n_rx_packets,
			    DPDK_RX_BURST_SZ - n_rx_packets);
      n_rx_packets += n;

      if (n < 32)
	break;
    }
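
  /* A burst returning fewer than 32 mbufs is treated as a (nearly)
     drained rx queue; another rte_eth_rx_burst call would likely return
     nothing, so stop early instead of spinning up to DPDK_RX_BURST_SZ. */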

  if (n_rx_packets == 0)
    return 0;

  /* Update buffer template */
  vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->sw_if_index;
  bt->error = node->errors[DPDK_ERROR_NONE];
  /* as DPDK allocates empty buffers for each queue from a mempool provided
     before interface start, it is safe to store this in the template */
  bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];

  /* per-interface next-node redirect, if configured */
  if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
    {
      known_next = 1;
      next_index = xd->per_interface_next_index;
    }

  /* as all packets belong to the same interface, feature arc lookup
     can be done once and the result stored in the buffer template */
  if (vnet_device_input_have_features (xd->sw_if_index))
    {
      vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
      known_next = 1;
    }

  if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
    n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
  else
    n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);

  if (PREDICT_FALSE (known_next))
    {
      for (n = 0; n < n_rx_packets; n++)
	ptd->next[n] = next_index;

      vnet_buffer (bt)->feature_arc_index = 0;
      bt->current_config_index = 0;
    }
  else
    dpdk_set_next_from_etype (vm, node, ptd, n_rx_packets);

  /* flow offload - process if rx flow offload enabled and at least one
     packet is marked */
  if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
		     (or_flags & (1 << DPDK_RX_F_FDIR))))
    dpdk_process_flow_offload (xd, ptd, n_rx_packets);

  /* is at least one packet marked as ip4 checksum bad? */
  if (PREDICT_FALSE (or_flags & (1 << DPDK_RX_F_CKSUM_BAD)))
    for (n = 0; n < n_rx_packets; n++)
      {
	if ((ptd->flags[n] & (1 << DPDK_RX_F_CKSUM_BAD)) == 0)
	  continue;
	if (ptd->next[n] != VNET_DEVICE_INPUT_NEXT_IP4_INPUT)
	  continue;

	b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
	b0->error = node->errors[DPDK_ERROR_IP_CHECKSUM_ERROR];
	ptd->next[n] = VNET_DEVICE_INPUT_NEXT_DROP;
      }
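
  /* Packets that were headed to ip4-input but carry a bad hardware
     checksum verdict are re-routed: the error is recorded on the buffer
     and the next node rewritten to error-drop. */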

  /* enqueue buffers to the next node */
  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, ptd->buffers,
				       n_rx_packets,
				       sizeof (struct rte_mbuf));

  vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
			       n_rx_packets);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      n_left = n_rx_packets;
      buffers = ptd->buffers;
      mb = ptd->mbufs;
      next = ptd->next;
      while (n_trace && n_left)
	{
	  b0 = vlib_get_buffer (vm, buffers[0]);
	  vlib_trace_buffer (vm, node, next[0], b0, /* follow_chain */ 0);

	  dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
	  t0->queue_index = queue_id;
	  t0->device_index = xd->device_index;
	  t0->buffer_index = vlib_get_buffer_index (vm, b0);

	  clib_memcpy (&t0->mb, mb[0], sizeof t0->mb);
	  clib_memcpy (&t0->buffer, b0, sizeof b0[0] - sizeof b0->pre_data);
	  clib_memcpy (t0->buffer.pre_data, b0->data,
		       sizeof t0->buffer.pre_data);
	  clib_memcpy (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
		       sizeof t0->data);
	  n_trace--;
	  n_left--;
	  buffers++;
	  mb++;
	  next++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }

  /* rx pcap capture if enabled */
  if (PREDICT_FALSE (dm->pcap[VLIB_RX].pcap_enable))
    {
      u32 bi0;
      n_left = n_rx_packets;
      buffers = ptd->buffers;
      while (n_left)
	{
	  bi0 = buffers[0];
	  b0 = vlib_get_buffer (vm, bi0);
	  buffers++;

	  if (dm->pcap[VLIB_RX].pcap_sw_if_index == 0 ||
	      dm->pcap[VLIB_RX].pcap_sw_if_index
	      == vnet_buffer (b0)->sw_if_index[VLIB_RX])
	    {
	      struct rte_mbuf *mb;
	      i16 data_start;
	      i32 temp_advance;

	      /*
	       * Note: current_data will have advanced
	       * when we skip ethernet input.
	       * Temporarily back up to the original DMA
	       * target, so we capture a valid ethernet frame
	       */
	      mb = rte_mbuf_from_vlib_buffer (b0);

	      /* Figure out the original data_start */
	      data_start = (mb->buf_addr + mb->data_off) - (void *) b0->data;
	      /* Back up that far */
	      temp_advance = b0->current_data - data_start;
	      vlib_buffer_advance (b0, -temp_advance);
	      /* Capture the packet */
	      pcap_add_buffer (&dm->pcap[VLIB_RX].pcap_main, vm, bi0, 512);
	      /* and advance again */
	      vlib_buffer_advance (b0, temp_advance);
	    }
	  n_left--;
	}
    }

  vlib_increment_combined_counter
    (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, thread_index, xd->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, n_rx_packets);

  return n_rx_packets;
}

VLIB_NODE_FN (dpdk_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  uword n_rx_packets = 0;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;
  u32 thread_index = node->thread_index;

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  /* *INDENT-OFF* */
  foreach_device_and_queue (dq, rt->devices_and_queues)
    {
      xd = vec_elt_at_index (dm->devices, dq->dev_instance);
      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
	continue;	/* Do not poll slave to a bonded interface */
      n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
					 dq->queue_id);
    }
  /* *INDENT-ON* */
  return n_rx_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "dpdk-input",
  .sibling_of = "device-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_dpdk_rx_trace,

  .n_errors = DPDK_N_ERROR,
  .error_strings = dpdk_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */