FD.io VPP  v19.01.3-6-g70449b9b9
Vector Packet Processing
node.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vnet/vnet.h>
16 #include <vppinfra/vec.h>
17 #include <vppinfra/error.h>
18 #include <vppinfra/format.h>
19 #include <vppinfra/xxhash.h>
20 
21 #include <vnet/ethernet/ethernet.h>
22 #include <dpdk/device/dpdk.h>
24 #include <vnet/mpls/packet.h>
25 #include <vnet/handoff.h>
26 #include <vnet/devices/devices.h>
27 #include <vnet/feature/feature.h>
28 
29 #include <dpdk/device/dpdk_priv.h>
30 
31 static char *dpdk_error_strings[] = {
32 #define _(n,s) s,
34 #undef _
35 };
36 
37 /* make sure all flags we need are stored in lower 8 bits */
38 STATIC_ASSERT ((PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
39  256, "dpdk flags not un lower byte, fix needed");
40 
/*
 * dpdk_process_subseq_segs: walk the rte_mbuf segment chain of a
 * multi-segment packet and link the corresponding vlib_buffer_t's
 * together (VLIB_BUFFER_NEXT_PRESENT / next_buffer), accumulating
 * the chained length into total_length_not_including_first_buffer.
 *
 * NOTE(review): the rendering dropped the signature head and a few
 * interior lines (original lines 41-42, 55, 62 and the final return
 * at 84); the cross-reference gives the full prototype as
 * static_always_inline uword dpdk_process_subseq_segs(vlib_main_t *,
 * vlib_buffer_t *, struct rte_mbuf *, vlib_buffer_free_list_t *).
 * Compare against upstream before relying on this text.
 */
43  struct rte_mbuf *mb, vlib_buffer_free_list_t * fl)
44 {
45  u8 nb_seg = 1;
46  struct rte_mbuf *mb_seg = 0;
47  vlib_buffer_t *b_seg, *b_chain = 0;
48  mb_seg = mb->next;
49  b_chain = b;
50 
   /* single-segment packet: nothing to chain, contributes no extra bytes */
51  if (mb->nb_segs < 2)
52  return 0;
53 
54  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
56 
57  while (nb_seg < mb->nb_segs)
58  {
59  ASSERT (mb_seg != 0);
60 
61  b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
63 
   /* NOTE(review): the second ASSERT below contradicts the comment
      that follows it (which says current_data may legitimately be
      non-zero for drivers like virtio) — confirm against upstream
      whether this ASSERT should still be present. */
64  ASSERT ((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
65  ASSERT (b_seg->current_data == 0);
66 
67  /*
68  * The driver (e.g. virtio) may not put the packet data at the start
69  * of the segment, so don't assume b_seg->current_data == 0 is correct.
70  */
71  b_seg->current_data =
72  (mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
73 
74  b_seg->current_length = mb_seg->data_len;
75  b->total_length_not_including_first_buffer += mb_seg->data_len;
76 
   /* link the previous buffer in the chain to this segment */
77  b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
78  b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
79 
80  b_chain = b_seg;
81  mb_seg = mb_seg->next;
82  nb_seg++;
83  }
85 }
86 
88 dpdk_prefetch_mbuf_x4 (struct rte_mbuf *mb[])
89 {
90  CLIB_PREFETCH (mb[0], CLIB_CACHE_LINE_BYTES, LOAD);
91  CLIB_PREFETCH (mb[1], CLIB_CACHE_LINE_BYTES, LOAD);
92  CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, LOAD);
93  CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, LOAD);
94 }
95 
97 dpdk_prefetch_buffer_x4 (struct rte_mbuf *mb[])
98 {
99  vlib_buffer_t *b;
100  b = vlib_buffer_from_rte_mbuf (mb[0]);
102  b = vlib_buffer_from_rte_mbuf (mb[1]);
104  b = vlib_buffer_from_rte_mbuf (mb[2]);
106  b = vlib_buffer_from_rte_mbuf (mb[3]);
108 }
109 
110 /** \brief Main DPDK input node
111  @node dpdk-input
112 
113  This is the main DPDK input node: across each assigned interface,
114  call rte_eth_rx_burst(...) or similar to obtain a vector of
115  packets to process. Derive @c vlib_buffer_t metadata from
116  <code>struct rte_mbuf</code> metadata,
117  Depending on the resulting metadata: adjust <code>b->current_data,
118  b->current_length </code> and dispatch directly to
119  ip4-input-no-checksum, or ip6-input. Trace the packet if required.
120 
121  @param vm vlib_main_t corresponding to the current thread
122  @param node vlib_node_runtime_t
123  @param f vlib_frame_t input-node, not used.
124 
125  @par Graph mechanics: buffer metadata, next index usage
126 
127  @em Uses:
128  - <code>struct rte_mbuf mb->ol_flags</code>
129  - PKT_RX_IP_CKSUM_BAD
130 
131  @em Sets:
132  - <code>b->error</code> if the packet is to be dropped immediately
133  - <code>b->current_data, b->current_length</code>
134  - adjusted as needed to skip the L2 header in direct-dispatch cases
135  - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
136  - rx interface sw_if_index
137  - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
138  - required by ipX-lookup
139  - <code>b->flags</code>
140  - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.
141 
142  <em>Next Nodes:</em>
143  - Static arcs to: error-drop, ethernet-input,
144  ip4-input-no-checksum, ip6-input, mpls-input
145  - per-interface redirection, controlled by
146  <code>xd->per_interface_next_index</code>
147 */
148 
150 dpdk_ol_flags_extract (struct rte_mbuf **mb, u8 * flags, int count)
151 {
152  u8 rv = 0;
153  int i;
154  for (i = 0; i < count; i++)
155  {
156  /* all flags we are interested in are in lower 8 bits but
157  that might change */
158  flags[i] = (u8) mb[i]->ol_flags;
159  rv |= flags[i];
160  }
161  return rv;
162 }
163 
/*
 * dpdk_process_rx_burst: convert a burst of received mbufs (in
 * ptd->mbufs) into initialized vlib buffers.  Copies the per-thread
 * buffer template into each buffer, records current_data /
 * current_length from the mbuf, extracts ol_flags into ptd->flags,
 * and (when maybe_multiseg) chains subsequent segments.  Returns the
 * total byte count of the burst; ORs all packets' flag bytes into
 * *or_flagsp.
 *
 * NOTE(review): the rendering dropped the signature head (original
 * lines 164-165), the free-list variable setup at lines 170/176-177
 * (the body of the "if (maybe_multiseg)" below), and the
 * VLIB_BUFFER_TRACE_TRAJECTORY_INIT lines (216-220, 239).  Compare
 * with upstream before relying on this text.
 */
166  uword n_rx_packets, int maybe_multiseg, u8 * or_flagsp)
167 {
168  u32 n_left = n_rx_packets;
169  vlib_buffer_t *b[4];
171  struct rte_mbuf **mb = ptd->mbufs;
172  uword n_bytes = 0;
173  u8 *flags, or_flags = 0;
174 
    /* NOTE(review): body of this conditional (free-list lookup used by
       dpdk_process_subseq_segs' fl argument) was dropped by the
       rendering */
175  if (maybe_multiseg)
177 
178  mb = ptd->mbufs;
179  flags = ptd->flags;
180 
    /* quad loop: process 4 packets per iteration while at least 8
       remain, prefetching the next group of 4 */
181  while (n_left >= 8)
182  {
183  dpdk_prefetch_buffer_x4 (mb + 4);
184 
185  b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
186  b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
187  b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
188  b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
189 
    /* stamp 64 bytes of buffer template metadata into all four buffers */
190  clib_memcpy64_x4 (b[0], b[1], b[2], b[3], &ptd->buffer_template);
191 
192  dpdk_prefetch_mbuf_x4 (mb + 4);
193 
194  or_flags |= dpdk_ol_flags_extract (mb, flags, 4);
195  flags += 4;
196 
    /* mbuf data_off is relative to buf_addr; vlib current_data is
       relative to b->data, hence the RTE_PKTMBUF_HEADROOM adjustment */
197  b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
198  n_bytes += b[0]->current_length = mb[0]->data_len;
199 
200  b[1]->current_data = mb[1]->data_off - RTE_PKTMBUF_HEADROOM;
201  n_bytes += b[1]->current_length = mb[1]->data_len;
202 
203  b[2]->current_data = mb[2]->data_off - RTE_PKTMBUF_HEADROOM;
204  n_bytes += b[2]->current_length = mb[2]->data_len;
205 
206  b[3]->current_data = mb[3]->data_off - RTE_PKTMBUF_HEADROOM;
207  n_bytes += b[3]->current_length = mb[3]->data_len;
208 
209  if (maybe_multiseg)
210  {
211  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
212  n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], fl);
213  n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], fl);
214  n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], fl);
215  }
216 
    /* NOTE(review): trace-trajectory init lines (original 217-220)
       were dropped by the rendering */
221 
222  /* next */
223  mb += 4;
224  n_left -= 4;
225  }
226 
    /* single loop: remaining 0..7 packets, one at a time */
227  while (n_left)
228  {
229  b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
230  clib_memcpy_fast (b[0], &ptd->buffer_template, 64);
231  or_flags |= dpdk_ol_flags_extract (mb, flags, 1);
232  flags += 1;
233 
234  b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
235  n_bytes += b[0]->current_length = mb[0]->data_len;
236 
237  if (maybe_multiseg)
238  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
240 
241  /* next */
242  mb += 1;
243  n_left -= 1;
244  }
245 
246  *or_flagsp = or_flags;
247  return n_bytes;
248 }
249 
/*
 * dpdk_process_flow_offload: for every packet in the burst whose
 * extracted flags indicate an FDIR (flow director) match, look up the
 * flow entry and apply it: override the next node, stamp the flow_id
 * into the buffer, and (per the dropped line at original 279,
 * presumably vlib_buffer_advance) adjust the buffer start.
 *
 * NOTE(review): the rendering dropped the signature head (original
 * 250-251), the fle declaration (255), the fle lookup head (264) and
 * the buffer-advance statement (279).
 */
252  uword n_rx_packets)
253 {
254  uword n;
256  vlib_buffer_t *b0;
257 
258  /* TODO prefetch and quad-loop */
259  for (n = 0; n < n_rx_packets; n++)
260  {
    /* NOTE(review): "(1 << PKT_RX_FDIR)" looks suspicious —
       PKT_RX_FDIR is itself a bit mask, and the same flag is tested
       directly as "or_flags & PKT_RX_FDIR" in dpdk_device_input;
       confirm against upstream whether this should be a plain mask
       test. */
261  if ((ptd->flags[n] & (1 << PKT_RX_FDIR)) == 0)
262  continue;
263 
    /* fle = flow lookup entry indexed by the FDIR hash reported by
       the NIC (lookup head dropped by the rendering) */
265  ptd->mbufs[n]->hash.fdir.hi);
266 
    /* ~0 sentinels mean "no override" for each of the three fields */
267  if (fle->next_index != (u16) ~ 0)
268  ptd->next[n] = fle->next_index;
269 
270  if (fle->flow_id != ~0)
271  {
272  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
273  b0->flow_id = fle->flow_id;
274  }
275 
276  if (fle->buffer_advance != ~0)
277  {
278  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
280  }
281  }
282 }
283 
/*
 * dpdk_device_input: poll one device rx queue.  Pulls up to
 * DPDK_RX_BURST_SZ mbufs from the PMD, initializes vlib buffer
 * metadata from the per-thread buffer template, dispatches the burst
 * to the next node (single frame in the common case, per-packet next
 * indices when flow-director marks are present), then handles packet
 * tracing, rx pcap capture, and interface counters.  Returns the
 * number of packets received.
 *
 * NOTE(review): the rendering dropped several original lines
 * (signature head 284-285, ptd initialization head 299, feature-arc
 * guard 336, ethernet-input frame lines 373/377/380/391-392, trace
 * pre_data copy head 427, pcap-enable guard 441, sw_if_index compare
 * head 453, and the combined-counter call head 482).  Compare with
 * upstream before relying on this text.
 */
286  vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
287 {
288  uword n_rx_packets = 0, n_rx_bytes;
289  u32 n_left, n_trace;
290  u32 *buffers;
292  struct rte_mbuf **mb;
293  vlib_buffer_t *b0;
294  u16 *next;
295  u8 or_flags;
296  u32 n;
297  int single_next = 0;
298 
    /* ptd = per-thread data (initialization head dropped by the
       rendering); bt is the template stamped into every rx buffer */
300  thread_index);
301  vlib_buffer_t *bt = &ptd->buffer_template;
302 
303  if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
304  return 0;
305 
306  /* get up to DPDK_RX_BURST_SZ buffers from PMD */
307  while (n_rx_packets < DPDK_RX_BURST_SZ)
308  {
309  n = rte_eth_rx_burst (xd->port_id, queue_id,
310  ptd->mbufs + n_rx_packets,
311  DPDK_RX_BURST_SZ - n_rx_packets);
312  n_rx_packets += n;
313 
    /* short burst: the queue is (nearly) drained, stop polling */
314  if (n < 32)
315  break;
316  }
317 
318  if (n_rx_packets == 0)
319  return 0;
320 
321  /* Update buffer template */
322  vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->sw_if_index;
323  bt->error = node->errors[DPDK_ERROR_NONE];
324  /* as DPDK is allocating empty buffers from mempool provided before interface
325  start for each queue, it is safe to store this in the template */
326  bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];
327  vnet_buffer (bt)->feature_arc_index = 0;
328  bt->current_config_index = 0;
329 
330  /* receive burst of packets from DPDK PMD */
331  if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
332  next_index = xd->per_interface_next_index;
333 
334  /* as all packets belong to the same interface feature arc lookup
335  can be done once and result stored in the buffer template */
    /* NOTE(review): the guard line (original 336, presumably a
       vnet_device_input_have_features check) was dropped by the
       rendering */
337  vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
338 
339  if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
340  n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
341  else
342  n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);
343 
344  if (PREDICT_FALSE (or_flags & PKT_RX_FDIR))
345  {
346  /* some packets will need to go to different next nodes */
347  for (n = 0; n < n_rx_packets; n++)
348  ptd->next[n] = next_index;
349 
350  /* flow offload - process if rx flow offload enabled and at least one
351  packet is marked */
352  if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
353  (or_flags & PKT_RX_FDIR)))
354  dpdk_process_flow_offload (xd, ptd, n_rx_packets);
355 
356  /* enqueue buffers to the next node */
357  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
358  ptd->buffers, n_rx_packets,
359  sizeof (struct rte_mbuf));
360 
361  vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
362  n_rx_packets);
363  }
364  else
365  {
    /* common path: every packet goes to the same next node, so fill
       one frame directly */
366  u32 *to_next, n_left_to_next;
367 
368  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
369  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, to_next,
370  n_rx_packets,
371  sizeof (struct rte_mbuf));
372 
    /* NOTE(review): the condition for this block (original line 373,
       presumably a check that next is ethernet-input) was dropped by
       the rendering; inside, per-frame scalar args are set so
       ethernet-input can take its single-interface fast path */
374  {
375  vlib_next_frame_t *nf;
376  vlib_frame_t *f;
378  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
379  f = vlib_get_frame (vm, nf->frame);
381 
382  ef = vlib_frame_scalar_args (f);
383  ef->sw_if_index = xd->sw_if_index;
384  ef->hw_if_index = xd->hw_if_index;
385 
386  /* if PMD supports ip4 checksum check and there are no packets
387  marked as ip4 checksum bad we can notify ethernet input so it
388  can send packets to ip4-input-no-checksum node */
389  if (xd->flags & DPDK_DEVICE_FLAG_RX_IP4_CKSUM &&
390  (or_flags & PKT_RX_IP_CKSUM_BAD) == 0)
    /* NOTE(review): the flag-setting statements (original 391-392)
       were dropped by the rendering */
393  }
394  n_left_to_next -= n_rx_packets;
395  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
396  single_next = 1;
397  }
398 
399  /* packet trace if enabled */
400  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
401  {
    /* single-next path skipped filling ptd->buffers above; do it now */
402  if (single_next)
403  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
404  ptd->buffers, n_rx_packets,
405  sizeof (struct rte_mbuf));
406 
407  n_left = n_rx_packets;
408  buffers = ptd->buffers;
409  mb = ptd->mbufs;
410  next = ptd->next;
411 
412  while (n_trace && n_left)
413  {
414  b0 = vlib_get_buffer (vm, buffers[0]);
415  if (single_next == 0)
416  next_index = next[0];
417  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 0);
418 
419  dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
420  t0->queue_index = queue_id;
421  t0->device_index = xd->device_index;
422  t0->buffer_index = vlib_get_buffer_index (vm, b0);
423 
    /* snapshot mbuf metadata, buffer metadata (sans pre_data), and
       leading packet bytes into the trace record */
424  clib_memcpy_fast (&t0->mb, mb[0], sizeof t0->mb);
425  clib_memcpy_fast (&t0->buffer, b0,
426  sizeof b0[0] - sizeof b0->pre_data);
428  sizeof t0->buffer.pre_data);
429  clib_memcpy_fast (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
430  sizeof t0->data);
431  n_trace--;
432  n_left--;
433  buffers++;
434  mb++;
435  next++;
436  }
437  vlib_set_trace_count (vm, node, n_trace);
438  }
439 
440  /* rx pcap capture if enabled */
    /* NOTE(review): the enabling condition (original line 441) was
       dropped by the rendering */
442  {
443  u32 bi0;
444  n_left = n_rx_packets;
445  buffers = ptd->buffers;
446  while (n_left)
447  {
448  bi0 = buffers[0];
449  b0 = vlib_get_buffer (vm, bi0);
450  buffers++;
451 
    /* capture all interfaces (pcap_sw_if_index == 0) or just the
       configured one */
452  if (dm->pcap[VLIB_RX].pcap_sw_if_index == 0 ||
454  == vnet_buffer (b0)->sw_if_index[VLIB_RX])
455  {
456  struct rte_mbuf *mb;
457  i16 data_start;
458  i32 temp_advance;
459 
460  /*
461  * Note: current_data will have advanced
462  * when we skip ethernet input.
463  * Temporarily back up to the original DMA
464  * target, so we capture a valid ethernet frame
465  */
466  mb = rte_mbuf_from_vlib_buffer (b0);
467 
468  /* Figure out the original data_start */
469  data_start = (mb->buf_addr + mb->data_off) - (void *) b0->data;
470  /* Back up that far */
471  temp_advance = b0->current_data - data_start;
472  vlib_buffer_advance (b0, -temp_advance);
473  /* Trace the packet */
474  pcap_add_buffer (&dm->pcap[VLIB_RX].pcap_main, vm, bi0, 512);
475  /* and advance again */
476  vlib_buffer_advance (b0, temp_advance);
477  }
478  n_left--;
479  }
480  }
481 
    /* bump combined rx packet/byte counters for this interface
       (call head dropped by the rendering) */
483  (vnet_get_main ()->interface_main.combined_sw_if_counters
484  + VNET_INTERFACE_COUNTER_RX, thread_index, xd->sw_if_index,
485  n_rx_packets, n_rx_bytes);
486 
487  vnet_device_increment_rx_packets (thread_index, n_rx_packets);
488 
489  return n_rx_packets;
490 }
491 
/*
 * dpdk-input node function: iterate every device/queue assigned to
 * this thread and poll it via dpdk_device_input(), skipping bond
 * slaves (they are polled through the bond interface).  Returns the
 * total number of packets received across all queues.
 *
 * NOTE(review): the rendering dropped the VLIB_NODE_FN signature head
 * (original 492), the dq declaration (499) and the
 * foreach_device_and_queue loop header (506).
 */
493  vlib_frame_t * f)
494 {
495  dpdk_main_t *dm = &dpdk_main;
496  dpdk_device_t *xd;
497  uword n_rx_packets = 0;
498  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
500  u32 thread_index = node->thread_index;
501 
502  /*
503  * Poll all devices on this cpu for input/interrupts.
504  */
505  /* *INDENT-OFF* */
507  {
508  xd = vec_elt_at_index(dm->devices, dq->dev_instance);
509  if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
510  continue; /* Do not poll slave to a bonded interface */
511  n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
512  dq->queue_id);
513  }
514  /* *INDENT-ON* */
515  return n_rx_packets;
516 }
517 
518 /* *INDENT-OFF* */
520  .type = VLIB_NODE_TYPE_INPUT,
521  .name = "dpdk-input",
522  .sibling_of = "device-input",
523 
524  /* Will be enabled if/when hardware is detected. */
525  .state = VLIB_NODE_STATE_DISABLED,
526 
527  .format_buffer = format_ethernet_header_with_length,
528  .format_trace = format_dpdk_rx_trace,
529 
530  .n_errors = DPDK_N_ERROR,
531  .error_strings = dpdk_error_strings,
532 };
533 /* *INDENT-ON* */
534 
535 /*
536  * fd.io coding-style-patch-verification: ON
537  *
538  * Local Variables:
539  * eval: (c-set-style "gnu")
540  * End:
541  */
static void vnet_device_increment_rx_packets(u32 thread_index, u64 count)
Definition: devices.h:110
#define vlib_buffer_from_rte_mbuf(x)
Definition: dpdk_priv.h:17
vnet_device_and_queue_t * devices_and_queues
Definition: devices.h:69
u32 flags
Definition: vhost_user.h:115
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:156
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:136
dpdk_main_t dpdk_main
Definition: init.c:43
vnet_main_t * vnet_get_main(void)
Definition: misc.c:47
#define PREDICT_TRUE(x)
Definition: clib.h:112
u32 sw_if_index
Definition: dpdk.h:208
u8 flags[DPDK_RX_BURST_SZ]
Definition: dpdk.h:396
#define foreach_dpdk_error
Definition: dpdk.h:487
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
u16 flags
Definition: dpdk.h:216
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
Definition: node_funcs.h:216
u8 buffer_pool_index
index of buffer pool this buffer belongs.
Definition: buffer.h:144
int i
u32 per_interface_next_index
Definition: dpdk.h:211
#define VLIB_NODE_FN(node)
Definition: node.h:201
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:493
unsigned char u8
Definition: types.h:56
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:114
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
#define static_always_inline
Definition: clib.h:99
dpdk_portid_t port_id
Definition: dpdk.h:205
u32 sw_if_index
Definition: vxlan_gbp.api:37
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
Definition: ethernet.h:51
static_always_inline int vnet_device_input_have_features(u32 sw_if_index)
Definition: feature.h:301
static_always_inline void dpdk_prefetch_buffer_x4(struct rte_mbuf *mb[])
Definition: node.c:97
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
Definition: node_funcs.h:343
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:168
unsigned int u32
Definition: types.h:88
i16 buffer_advance
Definition: dpdk.h:193
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
Definition: node_funcs.h:264
static_always_inline void dpdk_prefetch_mbuf_x4(struct rte_mbuf *mb[])
Definition: node.c:88
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
Definition: buffer_funcs.h:158
#define DPDK_RX_BURST_SZ
Definition: dpdk.h:387
#define fl(x, y)
#define rte_mbuf_from_vlib_buffer(x)
Definition: dpdk_priv.h:16
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:511
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:114
u32 pcap_sw_if_index
Definition: dpdk.h:403
static_always_inline void dpdk_process_flow_offload(dpdk_device_t *xd, dpdk_per_thread_data_t *ptd, uword n_rx_packets)
Definition: node.c:251
dpdk_per_thread_data_t * per_thread_data
Definition: dpdk.h:413
unsigned short u16
Definition: types.h:57
#define ETH_INPUT_FRAME_F_IP4_CKSUM_OK
Definition: ethernet.h:54
vlib_buffer_t buffer_template
Definition: dpdk.h:397
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX
Definition: buffer.h:442
#define PREDICT_FALSE(x)
Definition: clib.h:111
static_always_inline void vlib_get_buffer_indices_with_offset(vlib_main_t *vm, void **b, u32 *bi, uword count, i32 offset)
Translate array of buffer pointers into buffer indices with offset.
Definition: buffer_funcs.h:177
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:139
static char * dpdk_error_strings[]
Definition: node.c:31
u32 hw_if_index
Definition: dpdk.h:207
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
Definition: format.c:97
static_always_inline u8 dpdk_ol_flags_extract(struct rte_mbuf **mb, u8 *flags, int count)
Main DPDK input node.
Definition: node.c:150
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
u32 flow_id
Generic flow identifier.
Definition: buffer.h:127
dpdk_device_t * devices
Definition: dpdk.h:411
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:79
vlib_main_t * vm
Definition: buffer.c:301
dpdk_flow_lookup_entry_t * flow_lookup_entries
Definition: dpdk.h:239
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:332
static_always_inline void clib_memcpy64_x4(void *d0, void *d1, void *d2, void *d3, void *s)
Definition: string.h:224
vlib_node_registration_t dpdk_input_node
(constructor) VLIB_REGISTER_NODE (dpdk_input_node)
Definition: node.c:519
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
Definition: node_funcs.h:258
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:452
u32 buffers[DPDK_RX_BURST_SZ]
Definition: dpdk.h:393
signed int i32
Definition: types.h:77
u16 device_index
Definition: dpdk.h:472
#define ASSERT(truth)
u16 next[DPDK_RX_BURST_SZ]
Definition: dpdk.h:394
vlib_frame_t * frame
Definition: node.h:429
static_always_inline u32 dpdk_device_input(vlib_main_t *vm, dpdk_main_t *dm, dpdk_device_t *xd, vlib_node_runtime_t *node, u32 thread_index, u16 queue_id)
Definition: node.c:285
u16 flags
Definition: node.h:411
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:130
static_always_inline uword dpdk_process_rx_burst(vlib_main_t *vm, dpdk_per_thread_data_t *ptd, uword n_rx_packets, int maybe_multiseg, u8 *or_flagsp)
Definition: node.c:165
vlib_buffer_t buffer
Definition: dpdk.h:475
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:233
STATIC_ASSERT(STRUCT_OFFSET_OF(vnet_buffer_opaque_t, l2_hdr_offset)==STRUCT_OFFSET_OF(vnet_buffer_opaque_t, l3_hdr_offset) - 2, "l3_hdr_offset must follow l2_hdr_offset")
size_t count
Definition: vapi.c:47
dpdk_portid_t device_index
Definition: dpdk.h:202
u32 buffer_index
Definition: dpdk.h:471
format_function_t format_dpdk_rx_trace
Definition: dpdk.h:519
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:57
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:156
Definition: dpdk.h:189
struct rte_mbuf * mbufs[DPDK_RX_BURST_SZ]
Definition: dpdk.h:392
u8 data[256]
Definition: dpdk.h:476
#define foreach_device_and_queue(var, vec)
Definition: devices.h:161
pcap_main_t pcap_main
Definition: dpdk.h:404
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:495
u64 uword
Definition: types.h:112
dpdk_pcap_t pcap[VLIB_N_RX_TX]
Definition: dpdk.h:419
u16 next_index
Definition: dpdk.h:192
static void vlib_buffer_init_for_free_list(vlib_buffer_t *dst, vlib_buffer_free_list_t *fl)
#define vnet_buffer(b)
Definition: buffer.h:368
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:308
u8 data[0]
Packet data.
Definition: buffer.h:176
static void vlib_frame_no_append(vlib_frame_t *f)
Definition: node_funcs.h:224
static vlib_buffer_free_list_t * vlib_buffer_get_free_list(vlib_main_t *vm, vlib_buffer_free_list_index_t free_list_index)
Definition: buffer_funcs.h:657
struct rte_mbuf mb
Definition: dpdk.h:474
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:172
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:117
static void pcap_add_buffer(pcap_main_t *pm, struct vlib_main_t *vm, u32 buffer_index, u32 n_bytes_in_trace)
Add buffer (vlib_buffer_t) to the trace.
Definition: pcap_funcs.h:63
static_always_inline uword dpdk_process_subseq_segs(vlib_main_t *vm, vlib_buffer_t *b, struct rte_mbuf *mb, vlib_buffer_free_list_t *fl)
Definition: node.c:42
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:62
u8 * buffer_pool_for_queue
Definition: dpdk.h:232
u32 flow_id
Definition: dpdk.h:191
Definition: defs.h:46
CLIB vectors are ubiquitous dynamically resized arrays with by user defined "headers".
int pcap_enable
Definition: dpdk.h:402
signed short i16
Definition: types.h:46
u16 queue_index
Definition: dpdk.h:473