FD.io VPP v19.01.1-17-ge106252 (Vector Packet Processing)
input.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <avf/avf.h>

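/*
 * avf-input: RX input node for the AVF (Intel Adaptive Virtual Function)
 * plugin. It drains completed descriptors from the RX ring, stitches
 * together descriptor chains for multi-segment packets, refills the ring
 * with fresh buffers and enqueues the received packets to the next node
 * (normally ethernet-input).
 */
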
#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error")

typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;

static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};

#define AVF_INPUT_REFILL_TRESHOLD 32

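/* Write a buffer address into an RX descriptor and clear its status qword.
   With 256-bit vector support this is done as a single 32-byte store. */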
static_always_inline void
avf_rx_desc_write (avf_rx_desc_t * d, u64 addr)
{
#ifdef CLIB_HAVE_VEC256
  u64x4 v = { addr, 0, 0, 0 };
  u64x4_store_unaligned (v, (void *) d);
#else
  d->qword[0] = addr;
  d->qword[1] = 0;
#endif
}

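/* Refill the RX ring with freshly allocated buffers. Nothing happens until
   the number of empty slots exceeds AVF_INPUT_REFILL_TRESHOLD; the refill is
   then rounded down to a multiple of 8 descriptors. Descriptors are
   programmed with virtual or physical buffer addresses depending on
   use_va_dma, and the tail register is only written after all descriptor
   stores are visible. */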
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
                int use_va_dma)
{
  u16 n_refill, mask, n_alloc, slot, size;
  vlib_buffer_t *b[8];
  avf_rx_desc_t *d, *first_d;
  void *p[8];

  size = rxq->size;
  mask = size - 1;
  n_refill = mask - rxq->n_enqueued;
  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
    return;

  slot = (rxq->next - n_refill - 1) & mask;

  n_refill &= ~7;		/* round to 8 */
  n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, size, n_refill);

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index,
                        AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
      if (n_alloc)
        vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
      return;
    }

  rxq->n_enqueued += n_alloc;
  first_d = rxq->descs;

  ASSERT (slot % 8 == 0);

  while (n_alloc >= 8)
    {
      d = first_d + slot;

      if (use_va_dma)
        {
          vlib_get_buffers_with_offset (vm, rxq->bufs + slot, p, 8,
                                        sizeof (vlib_buffer_t));
          avf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
          avf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
          avf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
          avf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
          avf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
          avf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
          avf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
          avf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
        }
      else
        {
          vlib_get_buffers (vm, rxq->bufs + slot, b, 8);
          avf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
          avf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
          avf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
          avf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
          avf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
          avf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
          avf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
          avf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
        }

      /* next */
      slot = (slot + 8) & mask;
      n_alloc -= 8;
    }

  /* make descriptor writes visible before bumping the tail register */
  CLIB_MEMORY_STORE_BARRIER ();
  *(rxq->qrx_tail) = slot;
}

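/* Attach tail buffers to the head buffer of a multi-segment (chained)
   packet. Returns the total length of the tail buffers, i.e. the packet
   length not including the first buffer, and marks that length as valid
   on the head buffer. */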
static_always_inline uword
avf_rx_attach_tail (vlib_main_t * vm, vlib_buffer_t * bt, vlib_buffer_t * b,
                    u64 qw1, avf_rx_tail_t * t)
{
  vlib_buffer_t *hb = b;
  u32 tlnifb = 0, i = 0;

  if (qw1 & AVF_RXD_STATUS_EOP)
    return 0;

  while ((qw1 & AVF_RXD_STATUS_EOP) == 0)
    {
      ASSERT (i < AVF_RX_MAX_DESC_IN_CHAIN - 1);
      ASSERT (qw1 & AVF_RXD_STATUS_DD);
      qw1 = t->qw1s[i];
      b->next_buffer = t->buffers[i];
      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b = vlib_get_buffer (vm, b->next_buffer);
      clib_memcpy_fast (b, bt, sizeof (vlib_buffer_t));
      tlnifb += b->current_length = qw1 >> AVF_RXD_LEN_SHIFT;
      i++;
    }

  hb->total_length_not_including_first_buffer = tlnifb;
  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return tlnifb;
}

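/* Convert the qword-1 status/length words collected from the descriptors
   into initialized vlib buffers, four at a time: copy the per-thread buffer
   template into each buffer, set current_length from the descriptor and
   accumulate the received byte count. maybe_multiseg selects the path that
   also attaches tail buffers for chained descriptors. */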
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
                      avf_per_thread_data_t * ptd, u32 n_left,
                      int maybe_multiseg)
{
  vlib_buffer_t *bt = &ptd->buffer_template;
  vlib_buffer_t **b = ptd->bufs;
  u64 *qw1 = ptd->qw1s;
  avf_rx_tail_t *tail = ptd->tails;
  uword n_rx_bytes = 0;

  while (n_left >= 4)
    {
      if (n_left >= 12)
        {
          vlib_prefetch_buffer_header (b[8], LOAD);
          vlib_prefetch_buffer_header (b[9], LOAD);
          vlib_prefetch_buffer_header (b[10], LOAD);
          vlib_prefetch_buffer_header (b[11], LOAD);
        }

      clib_memcpy64_x4 (b[0], b[1], b[2], b[3], bt);

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[1]->current_length = qw1[1] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[2]->current_length = qw1[2] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[3]->current_length = qw1[3] >> AVF_RXD_LEN_SHIFT;

      if (maybe_multiseg)
        {
          n_rx_bytes += avf_rx_attach_tail (vm, bt, b[0], qw1[0], tail + 0);
          n_rx_bytes += avf_rx_attach_tail (vm, bt, b[1], qw1[1], tail + 1);
          n_rx_bytes += avf_rx_attach_tail (vm, bt, b[2], qw1[2], tail + 2);
          n_rx_bytes += avf_rx_attach_tail (vm, bt, b[3], qw1[3], tail + 3);
        }

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      qw1 += 4;
      tail += 4;
      b += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      clib_memcpy_fast (b[0], bt, sizeof (vlib_buffer_t));

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;

      if (maybe_multiseg)
        n_rx_bytes += avf_rx_attach_tail (vm, bt, b[0], qw1[0], tail + 0);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      qw1 += 1;
      tail += 1;
      b += 1;
      n_left -= 1;
    }
  return n_rx_bytes;
}

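/* Per-queue device input. Collects up to AVF_RX_VECTOR_SZ completed
   descriptors from the RX ring (four at a time via an AVX2 gather of
   qword[1] when available), records descriptor chains, refills the ring,
   fills in buffer metadata and enqueues the packets to next_index in a
   single frame. Returns the number of packets received. */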
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vlib_frame_t * frame, avf_device_t * ad, u16 qid)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
  u16 n_tail_desc = 0;
  u64 or_qw1 = 0;
  u32 *bi, *to_next, n_left_to_next;
  vlib_buffer_t *bt = &ptd->buffer_template;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u16 next = rxq->next;
  u16 size = rxq->size;
  u16 mask = size - 1;
  avf_rx_desc_t *d, *fd = rxq->descs;
#ifdef CLIB_HAVE_VEC256
  u64x4 q1x4, or_q1x4 = { 0 };
  u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#endif

  /* is there anything on the ring */
  d = fd + next;
  if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
    goto done;

  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
    next_index = ad->per_interface_next_index;
  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
     copy needed data from descriptor to rx vector */
  bi = to_next;

  while (n_rx_packets < AVF_RX_VECTOR_SZ)
    {
      if (next + 11 < size)
        {
          int stride = 8;
          CLIB_PREFETCH ((void *) (fd + (next + stride)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH ((void *) (fd + (next + stride + 1)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH ((void *) (fd + (next + stride + 2)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH ((void *) (fd + (next + stride + 3)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
        }

#ifdef CLIB_HAVE_VEC256
      if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
        goto one_by_one;

      q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
                           (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
        goto one_by_one;

      or_q1x4 |= q1x4;
      u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
      clib_memcpy_fast (bi, rxq->bufs + next, 4 * sizeof (u32));

      /* next */
      next = (next + 4) & mask;
      d = fd + next;
      n_rx_packets += 4;
      bi += 4;
      continue;
    one_by_one:
#endif
      CLIB_PREFETCH ((void *) (fd + ((next + 8) & mask)),
                     CLIB_CACHE_LINE_BYTES, LOAD);

      if (avf_rxd_is_not_dd (d))
        break;

      bi[0] = rxq->bufs[next];

      /* deal with chained buffers */
      if (PREDICT_FALSE (avf_rxd_is_not_eop (d)))
        {
          u16 tail_desc = 0;
          u16 tail_next = next;
          avf_rx_tail_t *tail = ptd->tails + n_rx_packets;
          avf_rx_desc_t *td;
          do
            {
              tail_next = (tail_next + 1) & mask;
              td = fd + tail_next;

              /* bail out in case of incomplete transaction */
              if (avf_rxd_is_not_dd (td))
                goto no_more_desc;

              or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
              tail->buffers[tail_desc] = rxq->bufs[tail_next];
              tail_desc++;
            }
          while (avf_rxd_is_not_eop (td));
          next = tail_next;
          n_tail_desc += tail_desc;
        }

      or_qw1 |= ptd->qw1s[n_rx_packets] = d[0].qword[1];

      /* next */
      next = (next + 1) & mask;
      d = fd + next;
      n_rx_packets++;
      bi++;
    }
no_more_desc:

  if (n_rx_packets == 0)
    goto done;

  rxq->next = next;
  rxq->n_enqueued -= n_rx_packets + n_tail_desc;

#ifdef CLIB_HAVE_VEC256
  or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif

  /* refill rx ring */
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );

  vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);

  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;

  if (n_tail_desc)
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
  else
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 0);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets, i = 0, j;
      bi = to_next;

      while (n_trace && n_left)
        {
          vlib_buffer_t *b;
          avf_input_trace_t *tr;
          b = vlib_get_buffer (vm, bi[0]);
          vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
          tr = vlib_add_trace (vm, node, b, sizeof (*tr));
          tr->next_index = next_index;
          tr->hw_if_index = ad->hw_if_index;
          tr->qw1s[0] = ptd->qw1s[i];
          for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
            tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];

          /* next */
          n_trace--;
          n_left--;
          bi++;
          i++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }

  if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
    {
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
      f = vlib_get_frame (vm, nf->frame_index);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = ad->sw_if_index;
      ef->hw_if_index = ad->hw_if_index;

      if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
        f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
      vlib_frame_no_append (f);
    }

  n_left_to_next -= n_rx_packets;
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thr_idx,
                                   ad->hw_if_index, n_rx_packets, n_rx_bytes);

done:
  return n_rx_packets;
}

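/* Input node function: poll every (device, queue) pair assigned to this
   thread and sum the packets received from each queue. */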
VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                               vlib_frame_t * frame)
{
  u32 n_rx = 0;
  avf_main_t *am = &avf_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    avf_device_t *ad;
    ad = vec_elt_at_index (am->devices, dq->dev_instance);
    if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
      continue;
    n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
  }
  return n_rx;
}

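/* Node registration: avf-input is an input node (VLIB_NODE_TYPE_INPUT),
   registered in the disabled state and sharing its next nodes with
   device-input via .sibling_of. */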
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (avf_input_node) = {
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */