FD.io VPP  v19.04.4-rc0-5-ge88582fac
Vector Packet Processing
node.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <vnet/ip/ip.h>
#include <vnet/mpls/packet.h>
#include <vnet/handoff.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <dpdk/device/dpdk_priv.h>
31 
32 static char *dpdk_error_strings[] = {
33 #define _(n,s) s,
35 #undef _
36 };
37 
38 /* make sure all flags we need are stored in lower 8 bits */
39 STATIC_ASSERT ((PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
40  256, "dpdk flags not un lower byte, fix needed");
41 
44  struct rte_mbuf *mb, vlib_buffer_t * bt)
45 {
46  u8 nb_seg = 1;
47  struct rte_mbuf *mb_seg = 0;
48  vlib_buffer_t *b_seg, *b_chain = 0;
49  mb_seg = mb->next;
50  b_chain = b;
51 
52  if (mb->nb_segs < 2)
53  return 0;
54 
55  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
57 
58  while (nb_seg < mb->nb_segs)
59  {
60  ASSERT (mb_seg != 0);
61 
62  b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
63  vlib_buffer_copy_template (b_seg, bt);
64 
65  /*
66  * The driver (e.g. virtio) may not put the packet data at the start
67  * of the segment, so don't assume b_seg->current_data == 0 is correct.
68  */
69  b_seg->current_data =
70  (mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
71 
72  b_seg->current_length = mb_seg->data_len;
73  b->total_length_not_including_first_buffer += mb_seg->data_len;
74 
75  b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
76  b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
77 
78  b_chain = b_seg;
79  mb_seg = mb_seg->next;
80  nb_seg++;
81  }
83 }
84 
86 dpdk_prefetch_mbuf_x4 (struct rte_mbuf *mb[])
87 {
88  CLIB_PREFETCH (mb[0], CLIB_CACHE_LINE_BYTES, LOAD);
89  CLIB_PREFETCH (mb[1], CLIB_CACHE_LINE_BYTES, LOAD);
90  CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, LOAD);
91  CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, LOAD);
92 }
93 
95 dpdk_prefetch_buffer_x4 (struct rte_mbuf *mb[])
96 {
97  vlib_buffer_t *b;
98  b = vlib_buffer_from_rte_mbuf (mb[0]);
100  b = vlib_buffer_from_rte_mbuf (mb[1]);
102  b = vlib_buffer_from_rte_mbuf (mb[2]);
104  b = vlib_buffer_from_rte_mbuf (mb[3]);
106 }
107 
108 /** \brief Main DPDK input node
109  @node dpdk-input
110 
111  This is the main DPDK input node: across each assigned interface,
112  call rte_eth_rx_burst(...) or similar to obtain a vector of
113  packets to process. Derive @c vlib_buffer_t metadata from
 114  <code>struct rte_mbuf</code> metadata.
 115  Depending on the resulting metadata: adjust <code>b->current_data,
116  b->current_length </code> and dispatch directly to
117  ip4-input-no-checksum, or ip6-input. Trace the packet if required.
118 
119  @param vm vlib_main_t corresponding to the current thread
120  @param node vlib_node_runtime_t
121  @param f vlib_frame_t input-node, not used.
122 
123  @par Graph mechanics: buffer metadata, next index usage
124 
125  @em Uses:
126  - <code>struct rte_mbuf mb->ol_flags</code>
127  - PKT_RX_IP_CKSUM_BAD
128 
129  @em Sets:
130  - <code>b->error</code> if the packet is to be dropped immediately
131  - <code>b->current_data, b->current_length</code>
132  - adjusted as needed to skip the L2 header in direct-dispatch cases
133  - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
134  - rx interface sw_if_index
135  - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
136  - required by ipX-lookup
137  - <code>b->flags</code>
138  - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.
139 
140  <em>Next Nodes:</em>
141  - Static arcs to: error-drop, ethernet-input,
142  ip4-input-no-checksum, ip6-input, mpls-input
143  - per-interface redirection, controlled by
144  <code>xd->per_interface_next_index</code>
145 */
146 
148 dpdk_ol_flags_extract (struct rte_mbuf **mb, u8 * flags, int count)
149 {
150  u8 rv = 0;
151  int i;
152  for (i = 0; i < count; i++)
153  {
154  /* all flags we are interested in are in lower 8 bits but
155  that might change */
156  flags[i] = (u8) mb[i]->ol_flags;
157  rv |= flags[i];
158  }
159  return rv;
160 }
161 
164  uword n_rx_packets, int maybe_multiseg, u8 * or_flagsp)
165 {
166  u32 n_left = n_rx_packets;
167  vlib_buffer_t *b[4];
168  struct rte_mbuf **mb = ptd->mbufs;
169  uword n_bytes = 0;
170  u8 *flags, or_flags = 0;
171  vlib_buffer_t bt;
172 
173  mb = ptd->mbufs;
174  flags = ptd->flags;
175 
176  /* copy template into local variable - will save per packet load */
178  while (n_left >= 8)
179  {
180  dpdk_prefetch_buffer_x4 (mb + 4);
181 
182  b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
183  b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
184  b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
185  b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
186 
187  vlib_buffer_copy_template (b[0], &bt);
188  vlib_buffer_copy_template (b[1], &bt);
189  vlib_buffer_copy_template (b[2], &bt);
190  vlib_buffer_copy_template (b[3], &bt);
191 
192  dpdk_prefetch_mbuf_x4 (mb + 4);
193 
194  or_flags |= dpdk_ol_flags_extract (mb, flags, 4);
195  flags += 4;
196 
197  b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
198  n_bytes += b[0]->current_length = mb[0]->data_len;
199 
200  b[1]->current_data = mb[1]->data_off - RTE_PKTMBUF_HEADROOM;
201  n_bytes += b[1]->current_length = mb[1]->data_len;
202 
203  b[2]->current_data = mb[2]->data_off - RTE_PKTMBUF_HEADROOM;
204  n_bytes += b[2]->current_length = mb[2]->data_len;
205 
206  b[3]->current_data = mb[3]->data_off - RTE_PKTMBUF_HEADROOM;
207  n_bytes += b[3]->current_length = mb[3]->data_len;
208 
209  if (maybe_multiseg)
210  {
211  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], &bt);
212  n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], &bt);
213  n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], &bt);
214  n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], &bt);
215  }
216 
221 
222  /* next */
223  mb += 4;
224  n_left -= 4;
225  }
226 
227  while (n_left)
228  {
229  b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
230  vlib_buffer_copy_template (b[0], &bt);
231  or_flags |= dpdk_ol_flags_extract (mb, flags, 1);
232  flags += 1;
233 
234  b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
235  n_bytes += b[0]->current_length = mb[0]->data_len;
236 
237  if (maybe_multiseg)
238  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], &bt);
240 
241  /* next */
242  mb += 1;
243  n_left -= 1;
244  }
245 
246  *or_flagsp = or_flags;
247  return n_bytes;
248 }
249 
252  uword n_rx_packets)
253 {
254  uword n;
256  vlib_buffer_t *b0;
257 
258  /* TODO prefetch and quad-loop */
259  for (n = 0; n < n_rx_packets; n++)
260  {
261  if ((ptd->flags[n] & PKT_RX_FDIR) == 0)
262  continue;
263 
265  ptd->mbufs[n]->hash.fdir.hi);
266 
267  if (fle->next_index != (u16) ~ 0)
268  ptd->next[n] = fle->next_index;
269 
270  if (fle->flow_id != ~0)
271  {
272  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
273  b0->flow_id = fle->flow_id;
274  }
275 
276  if (fle->buffer_advance != ~0)
277  {
278  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
280  }
281  }
282 }
283 
286  vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
287 {
288  uword n_rx_packets = 0, n_rx_bytes;
289  u32 n_left, n_trace;
290  u32 *buffers;
292  struct rte_mbuf **mb;
293  vlib_buffer_t *b0;
294  u16 *next;
295  u8 or_flags;
296  u32 n;
297  int single_next = 0;
298 
300  thread_index);
301  vlib_buffer_t *bt = &ptd->buffer_template;
302 
303  if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
304  return 0;
305 
306  /* get up to DPDK_RX_BURST_SZ buffers from PMD */
307  while (n_rx_packets < DPDK_RX_BURST_SZ)
308  {
309  n = rte_eth_rx_burst (xd->port_id, queue_id,
310  ptd->mbufs + n_rx_packets,
311  DPDK_RX_BURST_SZ - n_rx_packets);
312  n_rx_packets += n;
313 
314  if (n < 32)
315  break;
316  }
317 
318  if (n_rx_packets == 0)
319  return 0;
320 
321  /* Update buffer template */
322  vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->sw_if_index;
323  bt->error = node->errors[DPDK_ERROR_NONE];
324  /* as DPDK is allocating empty buffers from mempool provided before interface
325  start for each queue, it is safe to store this in the template */
326  bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];
327  bt->ref_count = 1;
328  vnet_buffer (bt)->feature_arc_index = 0;
329  bt->current_config_index = 0;
330 
331  /* receive burst of packets from DPDK PMD */
332  if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
333  next_index = xd->per_interface_next_index;
334 
335  /* as all packets belong to the same interface feature arc lookup
336  can be don once and result stored in the buffer template */
338  vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
339 
340  if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
341  n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
342  else
343  n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);
344 
345  if (PREDICT_FALSE (or_flags & PKT_RX_FDIR))
346  {
347  /* some packets will need to go to different next nodes */
348  for (n = 0; n < n_rx_packets; n++)
349  ptd->next[n] = next_index;
350 
351  /* flow offload - process if rx flow offload enabled and at least one
352  packet is marked */
353  if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
354  (or_flags & PKT_RX_FDIR)))
355  dpdk_process_flow_offload (xd, ptd, n_rx_packets);
356 
357  /* enqueue buffers to the next node */
358  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
359  ptd->buffers, n_rx_packets,
360  sizeof (struct rte_mbuf));
361 
362  vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
363  n_rx_packets);
364  }
365  else
366  {
367  u32 *to_next, n_left_to_next;
368 
369  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
370  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, to_next,
371  n_rx_packets,
372  sizeof (struct rte_mbuf));
373 
375  {
376  vlib_next_frame_t *nf;
377  vlib_frame_t *f;
379  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
380  f = vlib_get_frame (vm, nf->frame);
382 
383  ef = vlib_frame_scalar_args (f);
384  ef->sw_if_index = xd->sw_if_index;
385  ef->hw_if_index = xd->hw_if_index;
386 
387  /* if PMD supports ip4 checksum check and there are no packets
388  marked as ip4 checksum bad we can notify ethernet input so it
389  can send pacets to ip4-input-no-checksum node */
390  if (xd->flags & DPDK_DEVICE_FLAG_RX_IP4_CKSUM &&
391  (or_flags & PKT_RX_IP_CKSUM_BAD) == 0)
394  }
395  n_left_to_next -= n_rx_packets;
396  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
397  single_next = 1;
398  }
399 
400  /* packet trace if enabled */
401  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
402  {
403  if (single_next)
404  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
405  ptd->buffers, n_rx_packets,
406  sizeof (struct rte_mbuf));
407 
408  n_left = n_rx_packets;
409  buffers = ptd->buffers;
410  mb = ptd->mbufs;
411  next = ptd->next;
412 
413  while (n_trace && n_left)
414  {
415  b0 = vlib_get_buffer (vm, buffers[0]);
416  if (single_next == 0)
417  next_index = next[0];
418  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 0);
419 
420  dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
421  t0->queue_index = queue_id;
422  t0->device_index = xd->device_index;
423  t0->buffer_index = vlib_get_buffer_index (vm, b0);
424 
425  clib_memcpy_fast (&t0->mb, mb[0], sizeof t0->mb);
426  clib_memcpy_fast (&t0->buffer, b0,
427  sizeof b0[0] - sizeof b0->pre_data);
429  sizeof t0->buffer.pre_data);
430  clib_memcpy_fast (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
431  sizeof t0->data);
432  n_trace--;
433  n_left--;
434  buffers++;
435  mb++;
436  next++;
437  }
438  vlib_set_trace_count (vm, node, n_trace);
439  }
440 
442  (vnet_get_main ()->interface_main.combined_sw_if_counters
443  + VNET_INTERFACE_COUNTER_RX, thread_index, xd->sw_if_index,
444  n_rx_packets, n_rx_bytes);
445 
446  vnet_device_increment_rx_packets (thread_index, n_rx_packets);
447 
448  return n_rx_packets;
449 }
450 
452  vlib_frame_t * f)
453 {
454  dpdk_main_t *dm = &dpdk_main;
455  dpdk_device_t *xd;
456  uword n_rx_packets = 0;
457  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
459  u32 thread_index = node->thread_index;
460 
461  /*
462  * Poll all devices on this cpu for input/interrupts.
463  */
464  /* *INDENT-OFF* */
466  {
467  xd = vec_elt_at_index(dm->devices, dq->dev_instance);
468  if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
469  continue; /* Do not poll slave to a bonded interface */
470  n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
471  dq->queue_id);
472  }
473  /* *INDENT-ON* */
474  return n_rx_packets;
475 }
476 
477 /* *INDENT-OFF* */
479  .type = VLIB_NODE_TYPE_INPUT,
480  .name = "dpdk-input",
481  .sibling_of = "device-input",
482 
483  /* Will be enabled if/when hardware is detected. */
484  .state = VLIB_NODE_STATE_DISABLED,
485 
486  .format_buffer = format_ethernet_header_with_length,
487  .format_trace = format_dpdk_rx_trace,
488 
489  .n_errors = DPDK_N_ERROR,
490  .error_strings = dpdk_error_strings,
491 };
492 /* *INDENT-ON* */
493 
494 /*
495  * fd.io coding-style-patch-verification: ON
496  *
497  * Local Variables:
498  * eval: (c-set-style "gnu")
499  * End:
500  */
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
static void vnet_device_increment_rx_packets(u32 thread_index, u64 count)
Definition: devices.h:110
vnet_device_and_queue_t * devices_and_queues
Definition: devices.h:69
#define vlib_buffer_from_rte_mbuf(x)
Definition: buffer.h:20
u32 flags
Definition: vhost_user.h:115
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:156
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
dpdk_main_t dpdk_main
Definition: init.c:44
vnet_main_t * vnet_get_main(void)
Definition: misc.c:47
#define PREDICT_TRUE(x)
Definition: clib.h:112
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
u32 sw_if_index
Definition: dpdk.h:206
u8 flags[DPDK_RX_BURST_SZ]
Definition: dpdk.h:396
#define foreach_dpdk_error
Definition: dpdk.h:468
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
u16 flags
Definition: dpdk.h:214
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
Definition: node_funcs.h:216
u8 data[0]
Packet data.
Definition: buffer.h:181
int i
u32 per_interface_next_index
Definition: dpdk.h:209
#define VLIB_NODE_FN(node)
Definition: node.h:201
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:468
unsigned char u8
Definition: types.h:56
u8 buffer_pool_index
index of buffer pool this buffer belongs.
Definition: buffer.h:133
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:114
#define static_always_inline
Definition: clib.h:99
dpdk_portid_t port_id
Definition: dpdk.h:203
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
Definition: ethernet.h:52
static_always_inline int vnet_device_input_have_features(u32 sw_if_index)
Definition: feature.h:301
static_always_inline void dpdk_prefetch_buffer_x4(struct rte_mbuf *mb[])
Definition: node.c:95
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
Definition: node_funcs.h:343
unsigned int u32
Definition: types.h:88
i16 buffer_advance
Definition: dpdk.h:191
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
Definition: node_funcs.h:264
static_always_inline void dpdk_prefetch_mbuf_x4(struct rte_mbuf *mb[])
Definition: node.c:86
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
Definition: buffer_funcs.h:257
#define DPDK_RX_BURST_SZ
Definition: dpdk.h:387
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:514
static_always_inline uword dpdk_process_subseq_segs(vlib_main_t *vm, vlib_buffer_t *b, struct rte_mbuf *mb, vlib_buffer_t *bt)
Definition: node.c:43
static_always_inline void dpdk_process_flow_offload(dpdk_device_t *xd, dpdk_per_thread_data_t *ptd, uword n_rx_packets)
Definition: node.c:251
dpdk_per_thread_data_t * per_thread_data
Definition: dpdk.h:406
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
Definition: buffer_funcs.h:145
unsigned short u16
Definition: types.h:57
#define ETH_INPUT_FRAME_F_IP4_CKSUM_OK
Definition: ethernet.h:55
vlib_buffer_t buffer_template
Definition: dpdk.h:397
#define PREDICT_FALSE(x)
Definition: clib.h:111
static_always_inline void vlib_get_buffer_indices_with_offset(vlib_main_t *vm, void **b, u32 *bi, uword count, i32 offset)
Translate array of buffer pointers into buffer indices with offset.
Definition: buffer_funcs.h:276
static char * dpdk_error_strings[]
Definition: node.c:32
u32 hw_if_index
Definition: dpdk.h:205
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
Definition: format.c:97
static_always_inline u8 dpdk_ol_flags_extract(struct rte_mbuf **mb, u8 *flags, int count)
Main DPDK input node.
Definition: node.c:148
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
dpdk_device_t * devices
Definition: dpdk.h:404
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
vlib_main_t * vm
Definition: buffer.c:312
dpdk_flow_lookup_entry_t * flow_lookup_entries
Definition: dpdk.h:237
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:332
u32 flow_id
Generic flow identifier.
Definition: buffer.h:127
vlib_node_registration_t dpdk_input_node
(constructor) VLIB_REGISTER_NODE (dpdk_input_node)
Definition: node.c:478
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
Definition: node_funcs.h:258
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:147
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:458
u32 buffers[DPDK_RX_BURST_SZ]
Definition: dpdk.h:393
u16 device_index
Definition: dpdk.h:453
#define ASSERT(truth)
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:178
u16 next[DPDK_RX_BURST_SZ]
Definition: dpdk.h:394
vlib_frame_t * frame
Definition: node.h:404
static_always_inline u32 dpdk_device_input(vlib_main_t *vm, dpdk_main_t *dm, dpdk_device_t *xd, vlib_node_runtime_t *node, u32 thread_index, u16 queue_id)
Definition: node.c:285
u16 flags
Definition: node.h:386
static_always_inline uword dpdk_process_rx_burst(vlib_main_t *vm, dpdk_per_thread_data_t *ptd, uword n_rx_packets, int maybe_multiseg, u8 *or_flagsp)
Definition: node.c:163
vlib_buffer_t buffer
Definition: dpdk.h:456
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
STATIC_ASSERT(STRUCT_OFFSET_OF(vnet_buffer_opaque_t, l2_hdr_offset)==STRUCT_OFFSET_OF(vnet_buffer_opaque_t, l3_hdr_offset) - 2, "l3_hdr_offset must follow l2_hdr_offset")
size_t count
Definition: vapi.c:47
dpdk_portid_t device_index
Definition: dpdk.h:200
u32 buffer_index
Definition: dpdk.h:452
format_function_t format_dpdk_rx_trace
Definition: dpdk.h:500
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:57
Definition: dpdk.h:187
struct rte_mbuf * mbufs[DPDK_RX_BURST_SZ]
Definition: dpdk.h:392
u8 data[256]
Definition: dpdk.h:457
#define foreach_device_and_queue(var, vec)
Definition: devices.h:161
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:451
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
u16 next_index
Definition: dpdk.h:190
#define vnet_buffer(b)
Definition: buffer.h:369
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:308
static void vlib_frame_no_append(vlib_frame_t *f)
Definition: node_funcs.h:224
struct rte_mbuf mb
Definition: dpdk.h:455
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:172
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:167
volatile u8 ref_count
Reference count for this buffer.
Definition: buffer.h:130
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
u8 * buffer_pool_for_queue
Definition: dpdk.h:230
u32 flow_id
Definition: dpdk.h:189
Definition: defs.h:46
CLIB vectors are ubiquitous dynamically resized arrays with by user defined "headers".
u16 queue_index
Definition: dpdk.h:454