FD.io VPP  v19.04.1-1-ge4a0f9f
Vector Packet Processing
input.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vlib/unix/unix.h>
20 #include <vlib/pci/pci.h>
21 #include <vnet/ethernet/ethernet.h>
22 #include <vnet/devices/devices.h>
23 
24 #include <avf/avf.h>
25 
/* Error-counter table: each _(SYMBOL, "string") entry expands into an
 * AVF_INPUT_ERROR_<SYMBOL> enum value and a matching counter string. */
26 #define foreach_avf_input_error \
27  _(BUFFER_ALLOC, "buffer alloc error")
28 
/* avf_input_error_t: counter indices generated from foreach_avf_input_error.
 * NOTE(review): this listing elides the expansion and the terminating
 * AVF_INPUT_N_ERROR / closing "} avf_input_error_t;" lines. */
29 typedef enum
30 {
31 #define _(f,s) AVF_INPUT_ERROR_##f,
33 #undef _
36 
/* Human-readable strings for the counters above, in enum order
 * (expansion line elided in this listing). */
37 static __clib_unused char *avf_input_error_strings[] = {
38 #define _(n,s) s,
40 #undef _
41 };
42 
/* Skip the RX ring refill unless more than this many slots are free
 * (see avf_rxq_refill).  "TRESHOLD" spelling is historical; the
 * identifier is referenced below and must stay as-is. */
43 #define AVF_INPUT_REFILL_TRESHOLD 32
44 
/* avf_rx_desc_write (d, addr): publish a buffer DMA address into an RX
 * descriptor.  The AVX2 path stores 32 bytes at once, writing addr into
 * qword[0] and zeroing qword[1..3]; the scalar fallback writes only
 * qword[0] and qword[1].  (Function signature line elided in this
 * listing.) */
47 {
48 #ifdef CLIB_HAVE_VEC256
49  u64x4 v = { addr, 0, 0, 0 };
50  u64x4_store_unaligned (v, (void *) d);
51 #else
52  d->qword[0] = addr;
53  d->qword[1] = 0;
54 #endif
55 }
56 
/* avf_rxq_refill: allocate fresh buffers into the RX ring and write their
 * DMA addresses into the descriptors, 8 at a time.  use_va_dma selects
 * virtual-address DMA (buffer pointer + metadata offset) vs. physical
 * addresses.  Signature lines elided in this listing. */
 59  int use_va_dma)
 60 {
 61  u16 n_refill, mask, n_alloc, slot, size;
 62  vlib_buffer_t *b[8];
 63  avf_rx_desc_t *d, *first_d;
 64  void *p[8];
 65 
/* Free slots = (size - 1) - n_enqueued; ring size is a power of two,
 * so mask doubles as both index mask and max occupancy. */
 66  size = rxq->size;
 67  mask = size - 1;
 68  n_refill = mask - rxq->n_enqueued;
/* Batch the refill: do nothing until more than TRESHOLD slots are free. */
 69  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
 70  return;
 71 
/* First empty slot, wrapping around the ring. */
 72  slot = (rxq->next - n_refill - 1) & mask;
 73 
 74  n_refill &= ~7; /* round to 8 */
 75  n_alloc =
 76  vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, size, n_refill,
 77  rxq->buffer_pool_index);
 78 
/* Partial allocation: count the error and return everything we got —
 * descriptors are only written for a full batch. */
 79  if (PREDICT_FALSE (n_alloc != n_refill))
 80  {
 81  vlib_error_count (vm, node->node_index,
 82  AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
 83  if (n_alloc)
 84  vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
 85  return;
 86  }
 87 
 88  rxq->n_enqueued += n_alloc;
 89  first_d = rxq->descs;
 90 
/* slot stays 8-aligned because n_refill was rounded to a multiple of 8
 * and the ring size is a power of two. */
 91  ASSERT (slot % 8 == 0);
 92 
 93  while (n_alloc >= 8)
 94  {
 95  d = first_d + slot;
 96 
 97  if (use_va_dma)
 98  {
/* VA DMA: descriptor gets the buffer data pointer (buffer header is
 * skipped via the sizeof (vlib_buffer_t) offset). */
 99  vlib_get_buffers_with_offset (vm, rxq->bufs + slot, p, 8,
 100  sizeof (vlib_buffer_t));
 101  avf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
 102  avf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
 103  avf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
 104  avf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
 105  avf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
 106  avf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
 107  avf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
 108  avf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
 109  }
 110  else
 111  {
/* PA DMA: translate each buffer to its physical address. */
 112  vlib_get_buffers (vm, rxq->bufs + slot, b, 8);
 113  avf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
 114  avf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
 115  avf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
 116  avf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
 117  avf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
 118  avf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
 119  avf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
 120  avf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
 121  }
 122 
 123  /* next */
 124  slot = (slot + 8) & mask;
 125  n_alloc -= 8;
 126  }
 127 
/* Publish the new tail to the device.  NOTE(review): the listing elides
 * line 128, a CLIB_MEMORY_STORE_BARRIER () that orders the descriptor
 * writes above before this doorbell write. */
 129  *(rxq->qrx_tail) = slot;
 130 }
131 
132 
/* avf_rx_attach_tail: link the tail segments recorded in *t onto head
 * buffer b, forming a vlib buffer chain.  Returns the total length of the
 * tail segments (not including the first buffer), or 0 for a
 * single-segment packet (EOP already set in qw1).  Signature lines and a
 * few statements (incl. the total_length_not_including_first_buffer
 * store) are elided in this listing. */
 135  u64 qw1, avf_rx_tail_t * t)
 136 {
 137  vlib_buffer_t *hb = b;
 138  u32 tlnifb = 0, i = 0;
 139 
/* Fast path: descriptor already marks end-of-packet — no tail to attach. */
 140  if (qw1 & AVF_RXD_STATUS_EOP)
 141  return 0;
 142 
 143  while ((qw1 & AVF_RXD_STATUS_EOP) == 0)
 144  {
/* Every chained descriptor must have been completed by the device. */
 146  ASSERT (qw1 & AVF_RXD_STATUS_DD);
 147  qw1 = t->qw1s[i];
 148  b->next_buffer = t->buffers[i];
 149  b->flags |= VLIB_BUFFER_NEXT_PRESENT;
 150  b = vlib_get_buffer (vm, b->next_buffer);
/* Segment length lives in the upper bits of qword 1. */
 152  tlnifb += b->current_length = qw1 >> AVF_RXD_LEN_SHIFT;
 153  i++;
 154  }
 155 
 157  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
 158  return tlnifb;
 159 }
160 
/* avf_process_rx_burst: initialize the n_left buffers gathered in ptd
 * from their saved descriptor qword-1 values — copy the buffer template,
 * set current_length from the descriptor length field, and (when
 * maybe_multiseg) attach chained tail segments.  Returns total RX bytes.
 * maybe_multiseg is a compile-time specialization flag (two inlined
 * variants).  Signature lines elided in this listing. */
 163  avf_per_thread_data_t * ptd, u32 n_left,
 164  int maybe_multiseg)
 165 {
 166  vlib_buffer_t bt;
 167  vlib_buffer_t **b = ptd->bufs;
 168  u64 *qw1 = ptd->qw1s;
 169  avf_rx_tail_t *tail = ptd->tails;
 170  uword n_rx_bytes = 0;
 171 
 172  /* copy template into local variable - will save per packet load */
/* NOTE(review): the actual template copy statement (line 173) is elided
 * in this listing. */
 174 
/* Main loop: 4 packets per iteration, prefetching 8-11 ahead. */
 175  while (n_left >= 4)
 176  {
 177  if (n_left >= 12)
 178  {
 179  vlib_prefetch_buffer_header (b[8], LOAD);
 180  vlib_prefetch_buffer_header (b[9], LOAD);
 181  vlib_prefetch_buffer_header (b[10], LOAD);
 182  vlib_prefetch_buffer_header (b[11], LOAD);
 183  }
 184 
 185  vlib_buffer_copy_template (b[0], &bt);
 186  vlib_buffer_copy_template (b[1], &bt);
 187  vlib_buffer_copy_template (b[2], &bt);
 188  vlib_buffer_copy_template (b[3], &bt);
 189 
/* current_length comes straight from the descriptor length field. */
 190  n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
 191  n_rx_bytes += b[1]->current_length = qw1[1] >> AVF_RXD_LEN_SHIFT;
 192  n_rx_bytes += b[2]->current_length = qw1[2] >> AVF_RXD_LEN_SHIFT;
 193  n_rx_bytes += b[3]->current_length = qw1[3] >> AVF_RXD_LEN_SHIFT;
 194 
 195  if (maybe_multiseg)
 196  {
 197  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);
 198  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[1], qw1[1], tail + 1);
 199  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[2], qw1[2], tail + 2);
 200  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[3], qw1[3], tail + 3);
 201  }
 202 
/* NOTE(review): lines 203-206 (buffer trace-trajectory init) are elided
 * in this listing. */
 207 
 208  /* next */
 209  qw1 += 4;
 210  tail += 4;
 211  b += 4;
 212  n_left -= 4;
 213  }
/* Scalar tail loop for the remaining 0-3 packets. */
 214  while (n_left)
 215  {
 216  vlib_buffer_copy_template (b[0], &bt);
 217 
 218  n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
 219 
 220  if (maybe_multiseg)
 221  n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);
 222 
 224 
 225  /* next */
 226  qw1 += 1;
 227  tail += 1;
 228  b += 1;
 229  n_left -= 1;
 230  }
 231  return n_rx_bytes;
 232 }
233 
/* avf_device_input_inline: poll RX queue qid of device ad — harvest
 * completed descriptors into a vector of buffer indices, refill the ring,
 * initialize the buffers, trace, and hand the frame to the next node.
 * Returns the number of packets received.  Several listing lines are
 * elided, including the signature and the next_index declaration
 * (line 249). */
 236  vlib_frame_t * frame, avf_device_t * ad, u16 qid)
 237 {
 238  avf_main_t *am = &avf_main;
 239  vnet_main_t *vnm = vnet_get_main ();
 240  u32 thr_idx = vlib_get_thread_index ();
 241  avf_per_thread_data_t *ptd =
 242  vec_elt_at_index (am->per_thread_data, thr_idx);
 243  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
 244  u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
 245  u16 n_tail_desc = 0;
/* OR of all descriptor qword-1 values seen — used later to decide
 * whether the whole burst passed the IP checksum check. */
 246  u64 or_qw1 = 0;
 247  u32 *bi, *to_next, n_left_to_next;
 248  vlib_buffer_t *bt = &ptd->buffer_template;
 250  u16 next = rxq->next;
 251  u16 size = rxq->size;
 252  u16 mask = size - 1;
 253  avf_rx_desc_t *d, *fd = rxq->descs;
 254 #ifdef CLIB_HAVE_VEC256
 255  u64x4 q1x4, or_q1x4 = { 0 };
 256  u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
 257 #endif
 258 
 259  /* is there anything on the ring */
 260  d = fd + next;
 261  if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
 262  goto done;
 263 
/* Per-interface next-node redirect overrides the default next_index. */
 264  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
 265  next_index = ad->per_interface_next_index;
 266 
/* NOTE(review): the feature-arc check guarding this call (line 267) is
 * elided in this listing. */
 268  vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);
 269 
 270  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
 271 
 272  /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
 273  copy needed data from descriptor to rx vector */
 274  bi = to_next;
 275 
 276  while (n_rx_packets < AVF_RX_VECTOR_SZ)
 277  {
/* Prefetch descriptors ~8 ahead (only when no wrap in sight). */
 278  if (next + 11 < size)
 279  {
 280  int stride = 8;
 281  CLIB_PREFETCH ((void *) (fd + (next + stride)),
 282  CLIB_CACHE_LINE_BYTES, LOAD);
 283  CLIB_PREFETCH ((void *) (fd + (next + stride + 1)),
 284  CLIB_CACHE_LINE_BYTES, LOAD);
 285  CLIB_PREFETCH ((void *) (fd + (next + stride + 2)),
 286  CLIB_CACHE_LINE_BYTES, LOAD);
 287  CLIB_PREFETCH ((void *) (fd + (next + stride + 3)),
 288  CLIB_CACHE_LINE_BYTES, LOAD);
 289  }
 290 
 291 #ifdef CLIB_HAVE_VEC256
/* AVX2 fast path: grab 4 descriptors at once, but only if all four are
 * complete (DD) and single-segment (EOP); otherwise fall back. */
 292  if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
 293  goto one_by_one;
 294 
 295  q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
 296  (void *) &d[2].qword[1], (void *) &d[3].qword[1]);
 297 
 298  /* not all packets are ready or at least one of them is chained */
 299  if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
 300  goto one_by_one;
 301 
 302  or_q1x4 |= q1x4;
 303  u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
 304  vlib_buffer_copy_indices (bi, rxq->bufs + next, 4);
 305 
 306  /* next */
 307  next = (next + 4) & mask;
 308  d = fd + next;
 309  n_rx_packets += 4;
 310  bi += 4;
 311  continue;
 312  one_by_one:
 313 #endif
 314  CLIB_PREFETCH ((void *) (fd + ((next + 8) & mask)),
 315  CLIB_CACHE_LINE_BYTES, LOAD);
 316 
/* Device has not completed this descriptor yet — stop harvesting. */
 317  if (avf_rxd_is_not_dd (d))
 318  break;
 319 
 320  bi[0] = rxq->bufs[next];
 321 
 322  /* deal with chained buffers */
/* NOTE(review): the guard condition for this multi-segment branch
 * (line 323) is elided in this listing. */
 324  {
 325  u16 tail_desc = 0;
 326  u16 tail_next = next;
 327  avf_rx_tail_t *tail = ptd->tails + n_rx_packets;
 328  avf_rx_desc_t *td;
/* Walk the chain, recording each tail descriptor's qw1 and buffer
 * index until EOP. */
 329  do
 330  {
 331  tail_next = (tail_next + 1) & mask;
 332  td = fd + tail_next;
 333 
 334  /* bail out in case of incomplete transaction */
 335  if (avf_rxd_is_not_dd (td))
 336  goto no_more_desc;
 337 
 338  or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
 339  tail->buffers[tail_desc] = rxq->bufs[tail_next];
 340  tail_desc++;
 341  }
 342  while (avf_rxd_is_not_eop (td));
 343  next = tail_next;
 344  n_tail_desc += tail_desc;
 345  }
 346 
 347  or_qw1 |= ptd->qw1s[n_rx_packets] = d[0].qword[1];
 348 
 349  /* next */
 350  next = (next + 1) & mask;
 351  d = fd + next;
 352  n_rx_packets++;
 353  bi++;
 354  }
 355 no_more_desc:
 356 
 357  if (n_rx_packets == 0)
 358  goto done;
 359 
/* Consumed head + tail descriptors all leave the ring occupancy. */
 360  rxq->next = next;
 361  rxq->n_enqueued -= n_rx_packets + n_tail_desc;
 362 
 363 #ifdef CLIB_HAVE_VEC256
/* Fold the vector-path OR accumulator into the scalar one. */
 364  or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
 365 #endif
 366 
 367  /* refill rx ring */
 368  if (ad->flags & AVF_DEVICE_F_VA_DMA)
 369  avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
 370  else
 371  avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );
 372 
 373  vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);
 374 
/* Prime the per-thread buffer template; copied into every buffer by
 * avf_process_rx_burst. */
 375  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
 376  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
 378  bt->ref_count = 1;
 379 
/* Specialize on whether any chained packet was seen in this burst. */
 380  if (n_tail_desc)
 381  n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
 382  else
 383  n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 0);
 384 
 385  /* packet trace if enabled */
 386  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
 387  {
 388  u32 n_left = n_rx_packets, i = 0, j;
 389  bi = to_next;
 390 
 391  while (n_trace && n_left)
 392  {
 393  vlib_buffer_t *b;
 394  avf_input_trace_t *tr;
 395  b = vlib_get_buffer (vm, bi[0]);
 396  vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
 397  tr = vlib_add_trace (vm, node, b, sizeof (*tr));
 398  tr->next_index = next_index;
 399  tr->hw_if_index = ad->hw_if_index;
/* Record descriptor qw1 of the head and of every tail slot. */
 400  tr->qw1s[0] = ptd->qw1s[i];
 401  for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
 402  tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];
 403 
 404  /* next */
 405  n_trace--;
 406  n_left--;
 407  bi++;
 408  i++;
 409  }
 410  vlib_set_trace_count (vm, node, n_trace);
 411  }
 412 
/* Annotate the outgoing ethernet-input frame with the single
 * sw_if_index / hw_if_index.  NOTE(review): the guard (line 413), the
 * ethernet_input_frame_t declaration (417), the frame-flag assignment
 * (420) and the IP4_CKSUM_OK branch body (427-428) are elided in this
 * listing. */
 414  {
 415  vlib_next_frame_t *nf;
 416  vlib_frame_t *f;
 418  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
 419  f = vlib_get_frame (vm, nf->frame_index);
 421 
 422  ef = vlib_frame_scalar_args (f);
 423  ef->sw_if_index = ad->sw_if_index;
 424  ef->hw_if_index = ad->hw_if_index;
 425 
/* No IP checksum error seen in the whole burst. */
 426  if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
 429  }
 430 
 431  n_left_to_next -= n_rx_packets;
 432  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 433 
/* Bump the combined RX packet/byte counter for this interface.
 * NOTE(review): the vlib_increment_combined_counter call line (434) is
 * elided in this listing. */
 435  + VNET_INTERFACE_COUNTER_RX, thr_idx,
 436  ad->hw_if_index, n_rx_packets, n_rx_bytes);
 437 
 438 done:
 439  return n_rx_packets;
 440 }
441 
/* avf-input node function: iterate over this thread's assigned
 * device/queue pairs and poll each admin-up AVF device.  Returns the
 * total packet count.  NOTE(review): the VLIB_NODE_FN signature line and
 * the foreach_device_and_queue line are elided in this listing. */
 443  vlib_frame_t * frame)
 444 {
 445  u32 n_rx = 0;
 446  avf_main_t *am = &avf_main;
 447  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
 449 
 451  {
 452  avf_device_t *ad;
 453  ad = vec_elt_at_index (am->devices, dq->dev_instance);
/* Skip devices that are not administratively up. */
 454  if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
 455  continue;
 456  n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
 457  }
 458  return n_rx;
 459 }
460 
 461 /* *INDENT-OFF* */
/* Node registration: sibling of device-input, starts DISABLED and is
 * enabled per device/queue assignment.  NOTE(review): the opening
 * VLIB_REGISTER_NODE (avf_input_node) = { line (462) is elided in this
 * listing. */
 463  .name = "avf-input",
 464  .sibling_of = "device-input",
 465  .format_trace = format_avf_input_trace,
 466  .type = VLIB_NODE_TYPE_INPUT,
 467  .state = VLIB_NODE_STATE_DISABLED,
 468  .n_errors = AVF_INPUT_N_ERROR,
 469  .error_strings = avf_input_error_strings,
 470 };
 471 
 472 /* *INDENT-ON* */
473 
474 
475 /*
476  * fd.io coding-style-patch-verification: ON
477  *
478  * Local Variables:
479  * eval: (c-set-style "gnu")
480  * End:
481  */
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
u32 hw_if_index
Definition: avf.h:129
vnet_device_and_queue_t * devices_and_queues
Definition: devices.h:69
#define foreach_avf_input_error
Definition: input.c:26
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:156
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
vnet_main_t * vnet_get_main(void)
Definition: misc.c:47
vnet_interface_main_t interface_main
Definition: vnet.h:56
static uword vlib_buffer_get_pa(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:421
#define PREDICT_TRUE(x)
Definition: clib.h:112
unsigned long u64
Definition: types.h:89
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:118
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
avf_input_error_t
Definition: input.c:29
static_always_inline uword avf_device_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, avf_device_t *ad, u16 qid)
Definition: input.c:235
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
int i
u32 dev_instance
Definition: avf.h:127
static_always_inline void vlib_get_buffers_with_offset(vlib_main_t *vm, u32 *bi, void **b, int count, i32 offset)
Translate array of buffer indices into buffer pointers with offset.
Definition: buffer_funcs.h:178
u32 next_index
Definition: avf.h:303
#define VLIB_NODE_FN(node)
Definition: node.h:201
#define AVF_RXD_STATUS_DD
Definition: avf.h:26
avf_device_t * devices
Definition: avf.h:197
static u32 vlib_buffer_alloc_to_ring_from_pool(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers into ring from specific buffer pool.
Definition: buffer_funcs.h:657
format_function_t format_avf_input_trace
Definition: avf.h:228
vhost_vring_addr_t addr
Definition: vhost_user.h:121
u8 buffer_pool_index
index of buffer pool this buffer belongs.
Definition: buffer.h:133
u8 buffer_pool_index
Definition: avf.h:105
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:114
#define static_always_inline
Definition: clib.h:99
vlib_node_registration_t avf_input_node
(constructor) VLIB_REGISTER_NODE (avf_input_node)
Definition: input.c:462
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
Definition: ethernet.h:52
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:824
static_always_inline int vnet_device_input_have_features(u32 sw_if_index)
Definition: feature.h:301
volatile u32 * qrx_tail
Definition: avf.h:98
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:203
#define AVF_INPUT_REFILL_TRESHOLD
Definition: input.c:43
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
Definition: node_funcs.h:373
u64 qw1s[AVF_RX_MAX_DESC_IN_CHAIN]
Definition: avf.h:305
avf_rx_tail_t tails[AVF_RX_VECTOR_SZ]
Definition: avf.h:189
unsigned int u32
Definition: types.h:88
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
Definition: node_funcs.h:294
u64 qw1s[AVF_RX_MAX_DESC_IN_CHAIN-1]
Definition: avf.h:180
static void vlib_buffer_free_from_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring.
Definition: buffer_funcs.h:912
static_always_inline void avf_rxq_refill(vlib_main_t *vm, vlib_node_runtime_t *node, avf_rxq_t *rxq, int use_va_dma)
Definition: input.c:58
u32 hw_if_index
Definition: avf.h:304
uword size
u32 frame_index
Definition: node.h:404
u64 qw1s[AVF_RX_VECTOR_SZ]
Definition: avf.h:188
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
Definition: buffer_funcs.h:145
unsigned short u16
Definition: types.h:57
static_always_inline u64x4 u64x4_gather(void *p0, void *p1, void *p2, void *p3)
Definition: vector_avx2.h:195
#define AVF_RXD_LEN_SHIFT
Definition: avf.h:30
#define ETH_INPUT_FRAME_F_IP4_CKSUM_OK
Definition: ethernet.h:55
u64 qword[4]
Definition: avf.h:73
#define PREDICT_FALSE(x)
Definition: clib.h:111
#define AVF_RX_VECTOR_SZ
Definition: avf.h:169
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, uword frame_index)
Definition: node_funcs.h:238
u32 node_index
Node index.
Definition: node.h:495
#define AVF_RXD_STATUS_EOP
Definition: avf.h:27
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:212
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
vlib_main_t * vm
Definition: buffer.c:312
static_always_inline int avf_rxd_is_not_dd(avf_rx_desc_t *d)
Definition: avf.h:296
Definition: avf.h:95
static_always_inline uword avf_process_rx_burst(vlib_main_t *vm, vlib_node_runtime_t *node, avf_per_thread_data_t *ptd, u32 n_left, int maybe_multiseg)
Definition: input.c:162
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
Definition: node_funcs.h:288
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:465
vlib_buffer_t * bufs[AVF_RX_VECTOR_SZ]
Definition: avf.h:187
static_always_inline void avf_rx_desc_write(avf_rx_desc_t *d, u64 addr)
Definition: input.c:46
u32 per_interface_next_index
Definition: avf.h:125
#define ASSERT(truth)
u32 flags
Definition: avf.h:124
u32 * bufs
Definition: avf.h:102
u16 flags
Definition: node.h:386
vlib_buffer_t buffer_template
Definition: avf.h:190
u16 n_enqueued
Definition: avf.h:103
#define AVF_RXD_ERROR_IPE
Definition: avf.h:33
static uword pointer_to_uword(const void *p)
Definition: types.h:131
avf_main_t avf_main
Definition: device.c:37
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:57
#define foreach_device_and_queue(var, vec)
Definition: devices.h:161
Definition: defs.h:47
static_always_inline void vlib_buffer_copy_indices(u32 *dst, u32 *src, u32 n_indices)
Definition: buffer_funcs.h:102
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:451
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
u16 size
Definition: avf.h:100
avf_rxq_t * rxqs
Definition: avf.h:136
u64x4
Definition: vector_avx2.h:121
#define vnet_buffer(b)
Definition: buffer.h:369
avf_per_thread_data_t * per_thread_data
Definition: avf.h:198
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:308
u32 sw_if_index
Definition: avf.h:128
static void vlib_frame_no_append(vlib_frame_t *f)
Definition: node_funcs.h:246
static_always_inline int avf_rxd_is_not_eop(avf_rx_desc_t *d)
Definition: avf.h:290
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:172
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:244
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:167
static __clib_unused char * avf_input_error_strings[]
Definition: input.c:37
#define AVF_RX_MAX_DESC_IN_CHAIN
Definition: avf.h:31
u16 next
Definition: avf.h:99
volatile u8 ref_count
Reference count for this buffer.
Definition: buffer.h:130
static_always_inline uword avf_rx_attach_tail(vlib_main_t *vm, vlib_buffer_t *bt, vlib_buffer_t *b, u64 qw1, avf_rx_tail_t *t)
Definition: input.c:134
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
u32 buffers[AVF_RX_MAX_DESC_IN_CHAIN-1]
Definition: avf.h:181
avf_rx_desc_t * descs
Definition: avf.h:101
Definition: defs.h:46