FD.io VPP  v21.06-1-gbb7418cf9
Vector Packet Processing
input.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>	/* vnet_hw_if_get_rxq_poll_vector() */

#include <avf/avf.h>

#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error")

typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;

static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};

#define AVF_INPUT_REFILL_TRESHOLD 32

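/*
 * Write one RX descriptor: qword[0] carries the buffer DMA address and
 * qword[1] (the length/status word) is cleared, so a later non-zero DD bit
 * reliably indicates a newly completed descriptor.  With AVX2 the whole
 * 32-byte descriptor is written with a single unaligned vector store.
 */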
static_always_inline void
avf_rx_desc_write (avf_rx_desc_t * d, u64 addr)
{
#ifdef CLIB_HAVE_VEC256
  u64x4 v = { addr, 0, 0, 0 };
  u64x4_store_unaligned (v, (void *) d);
#else
  d->qword[0] = addr;
  d->qword[1] = 0;
#endif
}

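/*
 * Refill the RX ring with fresh buffers.  Nothing happens until more than
 * AVF_INPUT_REFILL_TRESHOLD descriptors are free; then buffers are
 * allocated from the queue's buffer pool directly into the ring, their
 * addresses (virtual or physical, depending on use_va_dma) are written
 * into the descriptors in batches of 8, and the tail register is updated
 * to hand the descriptors back to the device.
 */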
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
                int use_va_dma)
{
  u16 n_refill, mask, n_alloc, slot, size;
  vlib_buffer_t *b[8];
  avf_rx_desc_t *d, *first_d;
  void *p[8];

  size = rxq->size;
  mask = size - 1;
  n_refill = mask - rxq->n_enqueued;
  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
    return;

  slot = (rxq->next - n_refill - 1) & mask;

  n_refill &= ~7;		/* round to 8 */
  n_alloc =
    vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, size, n_refill,
                                         rxq->buffer_pool_index);

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index,
                        AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
      if (n_alloc)
        vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
      return;
    }

  rxq->n_enqueued += n_alloc;
  first_d = rxq->descs;

  ASSERT (slot % 8 == 0);

  while (n_alloc >= 8)
    {
      d = first_d + slot;

      if (use_va_dma)
        {
          vlib_get_buffers_with_offset (vm, rxq->bufs + slot, p, 8,
                                        sizeof (vlib_buffer_t));
          avf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
          avf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
          avf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
          avf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
          avf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
          avf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
          avf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
          avf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
        }
      else
        {
          vlib_get_buffers (vm, rxq->bufs + slot, b, 8);
          avf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
          avf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
          avf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
          avf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
          avf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
          avf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
          avf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
          avf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
        }

      /* next */
      slot = (slot + 8) & mask;
      n_alloc -= 8;
    }

  avf_tail_write (rxq->qrx_tail, slot);
}

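/*
 * Attach the tail descriptors of a multi-segment packet to the head buffer:
 * walk the saved per-packet tail state, chain each buffer via next_buffer,
 * copy the buffer template into every tail buffer and accumulate the tail
 * byte count into total_length_not_including_first_buffer on the head.
 * Returns 0 for single-segment packets.
 */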
static_always_inline uword
avf_rx_attach_tail (vlib_main_t * vm, vlib_buffer_t * bt, vlib_buffer_t * b,
                    u64 qw1, avf_rx_tail_t * t)
{
  vlib_buffer_t *hb = b;
  u32 tlnifb = 0, i = 0;

  if (qw1 & AVF_RXD_STATUS_EOP)
    return 0;

  while ((qw1 & AVF_RXD_STATUS_EOP) == 0)
    {
      ASSERT (i < AVF_RX_MAX_DESC_IN_CHAIN - 1);
      ASSERT (qw1 & AVF_RXD_STATUS_DD);
      qw1 = t->qw1s[i];
      b->next_buffer = t->buffers[i];
      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b = vlib_get_buffer (vm, b->next_buffer);
      vlib_buffer_copy_template (b, bt);
      tlnifb += b->current_length = qw1 >> AVF_RXD_LEN_SHIFT;
      i++;
    }

  hb->total_length_not_including_first_buffer = tlnifb;
  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return tlnifb;
}

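/*
 * For packets that matched a hardware flow (FLM status bit set), look up
 * the corresponding flow entry and apply its actions: override the next
 * node, mark the buffer with the flow id and/or advance the buffer data
 * pointer.  A value of ~0 in an entry field means "no action".
 */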
static_always_inline void
avf_process_flow_offload (avf_device_t * ad, avf_per_thread_data_t * ptd,
                          uword n_rx_packets)
{
  uword n;
  avf_flow_lookup_entry_t *fle;

  for (n = 0; n < n_rx_packets; n++)
    {
      if ((ptd->qw1s[n] & AVF_RXD_STATUS_FLM) == 0)
        continue;

      fle = pool_elt_at_index (ad->flow_lookup_entries, ptd->flow_ids[n]);

      if (fle->next_index != (u16) ~0)
        {
          ptd->next[n] = fle->next_index;
        }

      if (fle->flow_id != ~0)
        {
          ptd->bufs[n]->flow_id = fle->flow_id;
        }

      if (fle->buffer_advance != ~0)
        {
          vlib_buffer_advance (ptd->bufs[n], fle->buffer_advance);
        }
    }
}

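/*
 * Second pass over the per-thread RX vector: initialize every buffer from
 * the buffer template, set current_length from the descriptor length/status
 * qword and, when the caller saw chained descriptors (maybe_multiseg),
 * attach the tail buffers.  Returns the total number of bytes received.
 */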
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
                      avf_per_thread_data_t * ptd, u32 n_left,
                      int maybe_multiseg)
{
  vlib_buffer_t bt;
  vlib_buffer_t **b = ptd->bufs;
  u64 *qw1 = ptd->qw1s;
  avf_rx_tail_t *tail = ptd->tails;
  uword n_rx_bytes = 0;

  /* copy template into local variable - will save per packet load */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

  while (n_left >= 4)
    {
      if (n_left >= 12)
        {
          vlib_prefetch_buffer_header (b[8], LOAD);
          vlib_prefetch_buffer_header (b[9], LOAD);
          vlib_prefetch_buffer_header (b[10], LOAD);
          vlib_prefetch_buffer_header (b[11], LOAD);
        }

      vlib_buffer_copy_template (b[0], &bt);
      vlib_buffer_copy_template (b[1], &bt);
      vlib_buffer_copy_template (b[2], &bt);
      vlib_buffer_copy_template (b[3], &bt);

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[1]->current_length = qw1[1] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[2]->current_length = qw1[2] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[3]->current_length = qw1[3] >> AVF_RXD_LEN_SHIFT;

      if (maybe_multiseg)
        {
          n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);
          n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[1], qw1[1], tail + 1);
          n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[2], qw1[2], tail + 2);
          n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[3], qw1[3], tail + 3);
        }

      /* next */
      qw1 += 4;
      tail += 4;
      b += 4;
      n_left -= 4;
    }

  while (n_left)
    {
      vlib_buffer_copy_template (b[0], &bt);

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;

      if (maybe_multiseg)
        n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);

      /* next */
      qw1 += 1;
      tail += 1;
      b += 1;
      n_left -= 1;
    }
  return n_rx_bytes;
}

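/*
 * Per-queue receive path.  Scans the descriptor ring for completed
 * descriptors (four at a time with AVX2/SSE gathers when possible, falling
 * back to one-by-one for chained or trailing packets), copies the buffer
 * indices into the next frame, processes the burst, optionally applies
 * flow-offload results and packet tracing, enqueues the packets, updates
 * the RX counters and finally refills the ring.
 */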
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vlib_frame_t * frame, avf_device_t * ad, u16 qid,
                         int with_flows)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
  u16 n_tail_desc = 0;
  u64 or_qw1 = 0;
  u32 *bi, *to_next, n_left_to_next;
  vlib_buffer_t *bt = &ptd->buffer_template;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u16 next = rxq->next;
  u16 size = rxq->size;
  u16 mask = size - 1;
  avf_rx_desc_t *d, *fd = rxq->descs;
#ifdef CLIB_HAVE_VEC256
  u64x4 q1x4, or_q1x4 = { 0 };
  u32x4 fdidx4;
  u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#elif defined(CLIB_HAVE_VEC128)
  u32x4 q1x4_lo, q1x4_hi, or_q1x4 = { 0 };
  u32x4 fdidx4;
  u32x4 dd_eop_mask4 = u32x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#endif
  int single_next = 1;

  /* is there anything on the ring */
  d = fd + next;
  if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
    goto done;

  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
    next_index = ad->per_interface_next_index;

  if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
    vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
     copy needed data from descriptor to rx vector */
  bi = to_next;

  while (n_rx_packets < AVF_RX_VECTOR_SZ)
    {
      if (next + 11 < size)
        {
          int stride = 8;
          CLIB_PREFETCH ((void *) (fd + (next + stride)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH ((void *) (fd + (next + stride + 1)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH ((void *) (fd + (next + stride + 2)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH ((void *) (fd + (next + stride + 3)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
        }

#ifdef CLIB_HAVE_VEC256
      if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
        goto one_by_one;

      q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
                           (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
        goto one_by_one;

      or_q1x4 |= q1x4;

      u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
#elif defined(CLIB_HAVE_VEC128)
      if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
        goto one_by_one;

      q1x4_lo =
        u32x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
                      (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u32x4_is_equal (q1x4_lo & dd_eop_mask4, dd_eop_mask4))
        goto one_by_one;

      q1x4_hi = u32x4_gather (
        (void *) &d[0].qword[1] + 4, (void *) &d[1].qword[1] + 4,
        (void *) &d[2].qword[1] + 4, (void *) &d[3].qword[1] + 4);

      or_q1x4 |= q1x4_lo;
      ptd->qw1s[n_rx_packets + 0] = (u64) q1x4_hi[0] << 32 | (u64) q1x4_lo[0];
      ptd->qw1s[n_rx_packets + 1] = (u64) q1x4_hi[1] << 32 | (u64) q1x4_lo[1];
      ptd->qw1s[n_rx_packets + 2] = (u64) q1x4_hi[2] << 32 | (u64) q1x4_lo[2];
      ptd->qw1s[n_rx_packets + 3] = (u64) q1x4_hi[3] << 32 | (u64) q1x4_lo[3];
#endif
#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)

      if (with_flows)
        {
          fdidx4 = u32x4_gather (
            (void *) &d[0].fdid_flex_hi, (void *) &d[1].fdid_flex_hi,
            (void *) &d[2].fdid_flex_hi, (void *) &d[3].fdid_flex_hi);
          u32x4_store_unaligned (fdidx4, ptd->flow_ids + n_rx_packets);
        }

      vlib_buffer_copy_indices (bi, rxq->bufs + next, 4);

      /* next */
      next = (next + 4) & mask;
      d = fd + next;
      n_rx_packets += 4;
      bi += 4;
      continue;
    one_by_one:
#endif
      CLIB_PREFETCH ((void *) (fd + ((next + 8) & mask)),
                     CLIB_CACHE_LINE_BYTES, LOAD);

      if (avf_rxd_is_not_dd (d))
        break;

      bi[0] = rxq->bufs[next];

      /* deal with chained buffers */
      if (PREDICT_FALSE (avf_rxd_is_not_eop (d)))
        {
          u16 tail_desc = 0;
          u16 tail_next = next;
          avf_rx_tail_t *tail = ptd->tails + n_rx_packets;
          avf_rx_desc_t *td;
          do
            {
              tail_next = (tail_next + 1) & mask;
              td = fd + tail_next;

              /* bail out in case of incomplete transaction */
              if (avf_rxd_is_not_dd (td))
                goto no_more_desc;

              or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
              tail->buffers[tail_desc] = rxq->bufs[tail_next];
              tail_desc++;
            }
          while (avf_rxd_is_not_eop (td));
          next = tail_next;
          n_tail_desc += tail_desc;
        }

      or_qw1 |= ptd->qw1s[n_rx_packets] = d[0].qword[1];
      if (PREDICT_FALSE (with_flows))
        {
          ptd->flow_ids[n_rx_packets] = d[0].fdid_flex_hi;
        }

      /* next */
      next = (next + 1) & mask;
      d = fd + next;
      n_rx_packets++;
      bi++;
    }
no_more_desc:

  if (n_rx_packets == 0)
    goto done;

  rxq->next = next;
  rxq->n_enqueued -= n_rx_packets + n_tail_desc;

  /* avoid eating our own tail */
  rxq->descs[(next + rxq->n_enqueued) & mask].qword[1] = 0;

#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)
  or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif

  vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);

  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
  bt->buffer_pool_index = rxq->buffer_pool_index;
  bt->ref_count = 1;

  if (n_tail_desc)
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
  else
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 0);

  /* the MARKed packets may have different next nodes */
  if (PREDICT_FALSE (with_flows && (or_qw1 & AVF_RXD_STATUS_FLM)))
    {
      u32 n;
      single_next = 0;
      for (n = 0; n < n_rx_packets; n++)
        ptd->next[n] = next_index;

      avf_process_flow_offload (ad, ptd, n_rx_packets);
    }

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      u32 i, j;
      u16 *next_indices = ptd->next;

      i = 0;
      while (n_trace && n_left)
        {
          vlib_buffer_t *b = ptd->bufs[i];
          if (PREDICT_FALSE (single_next == 0))
            next_index = next_indices[0];

          if (PREDICT_TRUE
              (vlib_trace_buffer
               (vm, node, next_index, b, /* follow_chain */ 0)))
            {
              avf_input_trace_t *tr =
                vlib_add_trace (vm, node, b, sizeof (*tr));
              tr->next_index = next_index;
              tr->qid = qid;
              tr->hw_if_index = ad->hw_if_index;
              tr->qw1s[0] = ptd->qw1s[i];
              tr->flow_id =
                (tr->qw1s[0] & AVF_RXD_STATUS_FLM) ? ptd->flow_ids[i] : 0;
              for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
                tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];

              n_trace--;
            }

          /* next */
          n_left--;
          i++;
          next_indices++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }

  /* enqueue the packets to the next nodes */
  if (PREDICT_FALSE (with_flows && (or_qw1 & AVF_RXD_STATUS_FLM)))
    {
      /* release next node's frame vector, in this case we use
         vlib_buffer_enqueue_to_next to place the packets
       */
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);

      /* enqueue buffers to the next node */
      vlib_buffer_enqueue_to_next (vm, node, to_next, ptd->next, n_rx_packets);
    }
  else
    {
      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
        {
          vlib_next_frame_t *nf;
          vlib_frame_t *f;
          ethernet_input_frame_t *ef;
          nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
          f = vlib_get_frame (vm, nf->frame);
          f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

          ef = vlib_frame_scalar_args (f);
          ef->sw_if_index = ad->sw_if_index;
          ef->hw_if_index = ad->hw_if_index;

          if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
            f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
          vlib_frame_no_append (f);
        }

      n_left_to_next -= n_rx_packets;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thr_idx,
                                   ad->hw_if_index, n_rx_packets, n_rx_bytes);

done:
  /* refill rx ring */
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );

  return n_rx_packets;
}
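
/*
 * Node function: walk the RX queue poll vector assembled by the interface
 * layer and run the inline receive path for every admin-up AVF device,
 * selecting the flow-offload variant when the device has RX flow offload
 * enabled.
 */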
VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                               vlib_frame_t * frame)
{
  u32 n_rx = 0;
  vnet_hw_if_rxq_poll_vector_t *pv;

  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);

  for (int i = 0; i < vec_len (pv); i++)
    {
      avf_device_t *ad = avf_get_device (pv[i].dev_instance);
      if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
        continue;
      if (PREDICT_FALSE (ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD))
        n_rx +=
          avf_device_input_inline (vm, node, frame, ad, pv[i].queue_id, 1);
      else
        n_rx +=
          avf_device_input_inline (vm, node, frame, ad, pv[i].queue_id, 0);
    }

  return n_rx;
}
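
/*
 * Register the node as an input node and a sibling of "device-input", so it
 * shares the standard device-input next nodes; it is created in the
 * disabled state and activated per RX queue by the interface layer.
 */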
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (avf_input_node) = {
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
};

/* *INDENT-ON* */


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */