FD.io VPP v21.06-1-gbb7418cf9 - Vector Packet Processing - device.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vppinfra/ring.h>
20 #include <vlib/unix/unix.h>
21 #include <vlib/pci/pci.h>
22 #include <vnet/ethernet/ethernet.h>
25 
26 #include <avf/avf.h>
27 
28 #define AVF_MBOX_LEN 64
29 #define AVF_MBOX_BUF_SZ 4096
30 #define AVF_RXQ_SZ 512
31 #define AVF_TXQ_SZ 512
32 #define AVF_ITR_INT 250
33 
34 #define PCI_VENDOR_ID_INTEL 0x8086
35 #define PCI_DEVICE_ID_INTEL_AVF 0x1889
36 #define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
37 #define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
38 
39 VLIB_REGISTER_LOG_CLASS (avf_log) = {
40  .class_name = "avf",
41 };
42 
43 VLIB_REGISTER_LOG_CLASS (avf_stats_log, static) = {
44  .class_name = "avf",
45  .subclass_name = "stats",
46 };
47 
49 void avf_delete_if (vlib_main_t * vm, avf_device_t * ad, int with_barrier);
50 
51 static pci_device_id_t avf_pci_device_ids[] = {
52  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_AVF},
53  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X710_VF},
54  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X722_VF},
55  {0},
56 };
57 
58 const static char *virtchnl_event_names[] = {
59 #define _(v, n) [v] = #n,
60  foreach_virtchnl_event
61 #undef _
62 };
63 
64 typedef enum
65 {
66  AVF_IRQ_STATE_DISABLED,
67  AVF_IRQ_STATE_ENABLED,
68  AVF_IRQ_STATE_WB_ON_ITR,
69 } avf_irq_state_t;
70 
71 static inline void
72 avf_irq_0_set_state (avf_device_t * ad, avf_irq_state_t state)
73 {
74  u32 dyn_ctl0 = 0, icr0_ena = 0;
75 
76  dyn_ctl0 |= (3 << 3); /* 11b = No ITR update */
77 
78  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
79  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
80  avf_reg_flush (ad);
81 
82  if (state == AVF_IRQ_STATE_DISABLED)
83  return;
84 
85  dyn_ctl0 = 0;
86  icr0_ena = 0;
87 
88  icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */
89 
90  dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
91  dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
92  dyn_ctl0 |= (2 << 3); /* [4:3] ITR Index, 11b = No ITR update */
93  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
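    /* Editorial note (not in the original source): with AVF_ITR_INT = 250 this
     * programs 250 / 2 = 125 steps of 2 us, i.e. a 250 us throttling interval. */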
94 
95  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
96  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
97  avf_reg_flush (ad);
98 }
99 
100 static inline void
101 avf_irq_n_set_state (avf_device_t * ad, u8 line, avf_irq_state_t state)
102 {
103  u32 dyn_ctln = 0;
104 
105  /* disable */
106  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
107  avf_reg_flush (ad);
108 
109  if (state == AVF_IRQ_STATE_DISABLED)
110  return;
111 
112  dyn_ctln |= (1 << 1); /* [1] Clear PBA */
113  if (state == AVF_IRQ_STATE_WB_ON_ITR)
114  {
115  /* minimal ITR interval, use ITR1 */
116  dyn_ctln |= (1 << 3); /* [4:3] ITR Index */
117  dyn_ctln |= ((32 / 2) << 5); /* [16:5] ITR Interval in 2us steps */
118  dyn_ctln |= (1 << 30); /* [30] Writeback on ITR */
119  }
120  else
121  {
122  /* configured ITR interval, use ITR0 */
123  dyn_ctln |= (1 << 0); /* [0] Interrupt Enable */
124  dyn_ctln |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
125  }
126 
127  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
128  avf_reg_flush (ad);
129 }
130 
131 
132 clib_error_t *
133 avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
134  void *data, int len)
135 {
136  clib_error_t *err = 0;
137  avf_aq_desc_t *d, dc;
138  f64 t0, suspend_time = AVF_AQ_ENQ_SUSPEND_TIME;
139 
140  d = &ad->atq[ad->atq_next_slot];
141  clib_memcpy_fast (d, dt, sizeof (avf_aq_desc_t));
142  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
143  if (len)
144  d->datalen = len;
145  if (len)
146  {
147  u64 pa;
148  pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
149  d->addr_hi = (u32) (pa >> 32);
150  d->addr_lo = (u32) pa;
151  clib_memcpy_fast (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ,
152  data, len);
153  d->flags |= AVF_AQ_F_BUF;
154  }
155 
156  if (ad->flags & AVF_DEVICE_F_ELOG)
157  clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));
158 
159  CLIB_MEMORY_BARRIER ();
160  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
161  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
162  avf_reg_flush (ad);
163 
164  t0 = vlib_time_now (vm);
165 retry:
166  vlib_process_suspend (vm, suspend_time);
167 
168  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
169  {
170  f64 t = vlib_time_now (vm) - t0;
171  if (t > AVF_AQ_ENQ_MAX_WAIT_TIME)
172  {
173  avf_log_err (ad, "aq_desc_enq failed (timeout %.3fs)", t);
174  err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
175  d->opcode);
176  goto done;
177  }
178  suspend_time *= 2;
179  goto retry;
180  }
181 
182  clib_memcpy_fast (dt, d, sizeof (avf_aq_desc_t));
183  if (d->flags & AVF_AQ_F_ERR)
184  return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
185  "%d]", d->opcode, d->retval);
186 
187 done:
188  if (ad->flags & AVF_DEVICE_F_ELOG)
189  {
190  ELOG_TYPE_DECLARE (el) =
191  {
192  .format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
193  "datalen %d retval %d",
194  .format_args = "i4i2i2i2i2i2",
195  };
196  struct
197  {
198  u32 dev_instance;
199  u16 s_flags;
200  u16 r_flags;
201  u16 opcode;
202  u16 datalen;
203  u16 retval;
204  } *ed;
205  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
206  ed->dev_instance = ad->dev_instance;
207  ed->s_flags = dc.flags;
208  ed->r_flags = d->flags;
209  ed->opcode = dc.opcode;
210  ed->datalen = dc.datalen;
211  ed->retval = d->retval;
212  }
213 
214  return err;
215 }
216 
217 clib_error_t *
218 avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
219  u32 val)
220 {
221  clib_error_t *err;
222  avf_aq_desc_t d = {.opcode = 0x207,.param1 = reg,.param3 = val };
223  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
224 
225  if (ad->flags & AVF_DEVICE_F_ELOG)
226  {
227  ELOG_TYPE_DECLARE (el) =
228  {
229  .format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
230  .format_args = "i4i4i4",
231  };
232  struct
233  {
234  u32 dev_instance;
235  u32 reg;
236  u32 val;
237  } *ed;
238  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
239  ed->dev_instance = ad->dev_instance;
240  ed->reg = reg;
241  ed->val = val;
242  }
243  return err;
244 }
245 
246 clib_error_t *
247 avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
248 {
249  clib_error_t *err;
250  avf_rxq_t *rxq;
251  u32 n_alloc, i;
252 
253  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
254  rxq = vec_elt_at_index (ad->rxqs, qid);
255  rxq->size = rxq_size;
256  rxq->next = 0;
257  rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
258  sizeof (avf_rx_desc_t),
259  2 * CLIB_CACHE_LINE_BYTES,
260  ad->numa_node);
261 
262  rxq->buffer_pool_index =
263  vlib_buffer_pool_get_default_for_numa (vm, ad->numa_node);
264 
265  if (rxq->descs == 0)
266  return vlib_physmem_last_error (vm);
267 
268  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) rxq->descs)))
269  return err;
270 
271  clib_memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
272  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
273  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
274 
275  n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
276  rxq->buffer_pool_index);
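    /* Editorial note (not in the original source): only rxq->size - 8 buffers are
     * posted, presumably so the descriptor ring is never completely full and the
     * tail written to QRX_TAIL never catches up with the head. */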
277 
278  if (n_alloc == 0)
279  return clib_error_return (0, "buffer allocation error");
280 
281  rxq->n_enqueued = n_alloc;
282  avf_rx_desc_t *d = rxq->descs;
283  for (i = 0; i < n_alloc; i++)
284  {
285  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
286  if (ad->flags & AVF_DEVICE_F_VA_DMA)
287  d->qword[0] = vlib_buffer_get_va (b);
288  else
289  d->qword[0] = vlib_buffer_get_pa (vm, b);
290  d++;
291  }
292 
293  return 0;
294 }
295 
296 clib_error_t *
297 avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
298 {
299  clib_error_t *err;
300  avf_txq_t *txq;
301  u16 n;
302  u8 bpi = vlib_buffer_pool_get_default_for_numa (vm,
303  ad->numa_node);
304 
305  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
306  txq = vec_elt_at_index (ad->txqs, qid);
307  txq->size = txq_size;
308  txq->next = 0;
309  clib_spinlock_init (&txq->lock);
310 
311  /* Prepare a placeholder buffer(s) to maintain a 1-1 relationship between
312  * bufs and descs when a context descriptor is added in descs. Worst case
313  * every second descriptor is context descriptor and due to b->ref_count
314  * being u8 we need one for each block of 510 descriptors */
315 
316  n = (txq->size / 510) + 1;
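    /* Editorial note (not in the original source): integer division, so with the
     * default AVF_TXQ_SZ of 512 this yields 512 / 510 + 1 = 2 placeholder buffers. */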
317  vec_validate_aligned (txq->ph_bufs, n, CLIB_CACHE_LINE_BYTES);
318 
319  if (!vlib_buffer_alloc_from_pool (vm, txq->ph_bufs, n, bpi))
320  return clib_error_return (0, "buffer allocation error");
321 
322  txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
323  sizeof (avf_tx_desc_t),
324  2 * CLIB_CACHE_LINE_BYTES,
325  ad->numa_node);
326  if (txq->descs == 0)
327  return vlib_physmem_last_error (vm);
328 
329  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) txq->descs)))
330  return err;
331 
332  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
333  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
334 
335  /* initialize ring of pending RS slots */
336  clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
337 
338  vec_validate_aligned (txq->tmp_descs, txq->size, CLIB_CACHE_LINE_BYTES);
339  vec_validate_aligned (txq->tmp_bufs, txq->size, CLIB_CACHE_LINE_BYTES);
340 
341  return 0;
342 }
343 
344 typedef struct
345 {
346  u16 vsi_id;
347  u16 flags;
348 } virtchnl_promisc_info_t;
349 
350 void
351 avf_arq_slot_init (avf_device_t * ad, u16 slot)
352 {
353  avf_aq_desc_t *d;
354  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
355  d = &ad->arq[slot];
356  clib_memset (d, 0, sizeof (avf_aq_desc_t));
357  d->flags = AVF_AQ_F_BUF;
358  d->datalen = AVF_MBOX_BUF_SZ;
359  d->addr_hi = (u32) (pa >> 32);
360  d->addr_lo = (u32) pa;
361 }
362 
363 static inline uword
364 avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
365 {
366  return (ad->flags & AVF_DEVICE_F_VA_DMA) ?
367  pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
368 }
369 
370 static void
371 avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
372 {
373  u64 pa;
374  int i;
375 
376  /* VF MailBox Transmit */
377  clib_memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
378  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);
379 
380  pa = avf_dma_addr (vm, ad, ad->atq);
381  avf_reg_write (ad, AVF_ATQT, 0); /* Tail */
382  avf_reg_write (ad, AVF_ATQH, 0); /* Head */
383  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
384  avf_reg_write (ad, AVF_ATQBAL, (u32) pa); /* Base Address Low */
385  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
386 
387  /* VF MailBox Receive */
388  clib_memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
389  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);
390 
391  for (i = 0; i < AVF_MBOX_LEN; i++)
392  avf_arq_slot_init (ad, i);
393 
394  pa = avf_dma_addr (vm, ad, ad->arq);
395 
396  avf_reg_write (ad, AVF_ARQH, 0); /* Head */
397  avf_reg_write (ad, AVF_ARQT, 0); /* Tail */
398  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
399  avf_reg_write (ad, AVF_ARQBAL, (u32) pa); /* Base Address Low */
400  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
401  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1); /* Tail */
402 
403  ad->atq_next_slot = 0;
404  ad->arq_next_slot = 0;
405 }
406 
407 clib_error_t *
408 avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
409  void *in, int in_len, void *out, int out_len)
410 {
411  clib_error_t *err;
412  avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };
413  u32 head;
414  f64 t0, suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;
415 
416  /* adminq operations should be only done from process node after device
417  * is initialized */
418  ASSERT ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0 ||
419  vlib_get_current_process_node_index (vm) == avf_process_node.index);
420 
421  /* suppress interrupt in the next adminq receive slot
422  as we are going to wait for response
423  we only need interrupts when event is received */
424  d = &ad->arq[ad->arq_next_slot];
425  d->flags |= AVF_AQ_F_SI;
426 
427  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
428  return err;
429 
430  t0 = vlib_time_now (vm);
431 retry:
432  head = avf_get_u32 (ad->bar0, AVF_ARQH);
433 
434  if (ad->arq_next_slot == head)
435  {
436  f64 t = vlib_time_now (vm) - t0;
437  if (t > AVF_SEND_TO_PF_MAX_WAIT_TIME)
438  {
439  avf_log_err (ad, "send_to_pf failed (timeout %.3fs)", t);
440  return clib_error_return (0, "timeout");
441  }
442  vlib_process_suspend (vm, suspend_time);
443  suspend_time *= 2;
444  goto retry;
445  }
446 
447  d = &ad->arq[ad->arq_next_slot];
448 
449  if (d->v_opcode == VIRTCHNL_OP_EVENT)
450  {
451  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
452  virtchnl_pf_event_t *e;
453 
454  if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
455  ((d->flags & AVF_AQ_F_BUF) == 0))
456  return clib_error_return (0, "event message error");
457 
458  vec_add2 (ad->events, e, 1);
459  clib_memcpy_fast (e, buf, sizeof (virtchnl_pf_event_t));
460  avf_arq_slot_init (ad, ad->arq_next_slot);
461  ad->arq_next_slot++;
462  /* reset timer */
463  t0 = vlib_time_now (vm);
464  suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;
465  goto retry;
466  }
467 
468  if (d->v_opcode != op)
469  {
470  err = clib_error_return (0,
471  "unexpected message received [v_opcode = %u, "
472  "expected %u, v_retval %d]",
473  d->v_opcode, op, d->v_retval);
474  goto done;
475  }
476 
477  if (d->v_retval)
478  {
479  err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
480  d->v_opcode, d->v_retval);
481  goto done;
482  }
483 
484  if (out_len && d->flags & AVF_AQ_F_BUF)
485  {
486  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
487  clib_memcpy_fast (out, buf, out_len);
488  }
489 
490  avf_arq_slot_init (ad, ad->arq_next_slot);
491  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
492  avf_reg_flush (ad);
493  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;
494 
495 done:
496 
497  if (ad->flags & AVF_DEVICE_F_ELOG)
498  {
499  ELOG_TYPE_DECLARE (el) =
500  {
501  .format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
502  .format_args = "i4t4i4i4",
503  .n_enum_strings = VIRTCHNL_N_OPS,
504  .enum_strings = {
505 #define _(v, n) [v] = #n,
506  foreach_virtchnl_op
507 #undef _
508  },
509  };
510  struct
511  {
512  u32 dev_instance;
513  u32 v_opcode;
514  u32 v_opcode_val;
515  u32 v_retval;
516  } *ed;
517  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
518  ed->dev_instance = ad->dev_instance;
519  ed->v_opcode = op;
520  ed->v_opcode_val = op;
521  ed->v_retval = d->v_retval;
522  }
523  return err;
524 }
525 
526 clib_error_t *
527 avf_op_version (vlib_main_t * vm, avf_device_t * ad,
528  virtchnl_version_info_t * ver)
529 {
530  clib_error_t *err = 0;
531  virtchnl_version_info_t myver = {
532  .major = VIRTCHNL_VERSION_MAJOR,
533  .minor = VIRTCHNL_VERSION_MINOR,
534  };
535 
536  avf_log_debug (ad, "version: major %u minor %u", myver.major, myver.minor);
537 
538  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
539  sizeof (virtchnl_version_info_t), ver,
540  sizeof (virtchnl_version_info_t));
541 
542  if (err)
543  return err;
544 
545  return err;
546 }
547 
548 clib_error_t *
549 avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
550  virtchnl_vf_resource_t * res)
551 {
552  clib_error_t *err = 0;
553  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
554  VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
555  VIRTCHNL_VF_OFFLOAD_RX_POLLING |
556  VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_OFFLOAD_FDIR_PF |
557  VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | VIRTCHNL_VF_OFFLOAD_VLAN_V2);
558 
559  avf_log_debug (ad, "get_vf_resources: bitmap 0x%x (%U)", bitmap,
560  format_avf_vf_cap_flags, bitmap);
561  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
562  sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
563 
564  if (err == 0)
565  {
566  int i;
567  avf_log_debug (ad,
568  "get_vf_resources: num_vsis %u num_queue_pairs %u "
569  "max_vectors %u max_mtu %u vf_cap_flags 0x%x (%U) "
570  "rss_key_size %u rss_lut_size %u",
571  res->num_vsis, res->num_queue_pairs, res->max_vectors,
572  res->max_mtu, res->vf_cap_flags, format_avf_vf_cap_flags,
573  res->vf_cap_flags, res->rss_key_size, res->rss_lut_size);
574  for (i = 0; i < res->num_vsis; i++)
575  avf_log_debug (
576  ad,
577  "get_vf_resources_vsi[%u]: vsi_id %u num_queue_pairs %u vsi_type %u "
578  "qset_handle %u default_mac_addr %U",
579  i, res->vsi_res[i].vsi_id, res->vsi_res[i].num_queue_pairs,
580  res->vsi_res[i].vsi_type, res->vsi_res[i].qset_handle,
581  format_ethernet_address, res->vsi_res[i].default_mac_addr);
582  }
583 
584  return err;
585 }
586 
587 clib_error_t *
588 avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
589 {
590  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
591  int i;
592  u8 msg[msg_len];
593  virtchnl_rss_lut_t *rl;
594 
595  clib_memset (msg, 0, msg_len);
596  rl = (virtchnl_rss_lut_t *) msg;
597  rl->vsi_id = ad->vsi_id;
598  rl->lut_entries = ad->rss_lut_size;
599  for (i = 0; i < ad->rss_lut_size; i++)
600  rl->lut[i] = i % ad->n_rx_queues;
601 
602  avf_log_debug (ad, "config_rss_lut: vsi_id %u rss_lut_size %u lut 0x%U",
603  rl->vsi_id, rl->lut_entries, format_hex_bytes_no_wrap,
604  rl->lut, rl->lut_entries);
605 
606  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
607  0);
608 }
609 
610 clib_error_t *
611 avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
612 {
613  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
614  int i;
615  u8 msg[msg_len];
616  virtchnl_rss_key_t *rk;
617 
618  clib_memset (msg, 0, msg_len);
619  rk = (virtchnl_rss_key_t *) msg;
620  rk->vsi_id = ad->vsi_id;
621  rk->key_len = ad->rss_key_size;
622  u32 seed = random_default_seed ();
623  for (i = 0; i < ad->rss_key_size; i++)
624  rk->key[i] = (u8) random_u32 (&seed);
625 
626  avf_log_debug (ad, "config_rss_key: vsi_id %u rss_key_size %u key 0x%U",
627  rk->vsi_id, rk->key_len, format_hex_bytes_no_wrap, rk->key,
628  rk->key_len);
629 
630  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
631  0);
632 }
633 
634 clib_error_t *
635 avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
636 {
637  avf_log_debug (ad, "disable_vlan_stripping");
638 
639  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
640  0);
641 }
642 
643 clib_error_t *
644 avf_op_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad,
645  int is_enable)
646 {
647  virtchnl_promisc_info_t pi = { 0 };
648 
649  pi.vsi_id = ad->vsi_id;
650 
651  if (is_enable)
652  pi.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
653 
654  avf_log_debug (ad, "config_promisc_mode: unicast %s multicast %s",
655  pi.flags & FLAG_VF_UNICAST_PROMISC ? "on" : "off",
656  pi.flags & FLAG_VF_MULTICAST_PROMISC ? "on" : "off");
657 
658  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
659  sizeof (virtchnl_promisc_info_t), 0, 0);
660 }
661 
662 
663 clib_error_t *
664 avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
665 {
666  int i;
667  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
668  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
669  sizeof (virtchnl_queue_pair_info_t);
670  u8 msg[msg_len];
671  virtchnl_vsi_queue_config_info_t *ci;
672 
673  clib_memset (msg, 0, msg_len);
674  ci = (virtchnl_vsi_queue_config_info_t *) msg;
675  ci->vsi_id = ad->vsi_id;
676  ci->num_queue_pairs = n_qp;
677 
678  avf_log_debug (ad, "config_vsi_queues: vsi_id %u num_queue_pairs %u",
679  ad->vsi_id, ci->num_queue_pairs);
680 
681  for (i = 0; i < n_qp; i++)
682  {
683  virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
684  virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;
685 
686  rxq->vsi_id = ad->vsi_id;
687  rxq->queue_id = i;
688  rxq->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
689  if (i < vec_len (ad->rxqs))
690  {
691  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
692  rxq->ring_len = q->size;
693  rxq->databuffer_size = vlib_buffer_get_default_data_size (vm);
694  rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
695  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
696  }
697  avf_log_debug (ad, "config_vsi_queues_rx[%u]: max_pkt_size %u "
698  "ring_len %u databuffer_size %u dma_ring_addr 0x%llx",
699  i, rxq->max_pkt_size, rxq->ring_len,
700  rxq->databuffer_size, rxq->dma_ring_addr);
701 
702  txq->vsi_id = ad->vsi_id;
703  txq->queue_id = i;
704  if (i < vec_len (ad->txqs))
705  {
706  avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
707  txq->ring_len = q->size;
708  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
709  }
710  avf_log_debug (ad, "config_vsi_queues_tx[%u]: ring_len %u "
711  "dma_ring_addr 0x%llx", i, txq->ring_len,
712  txq->dma_ring_addr);
713  }
714 
715  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
716  0, 0);
717 }
718 
719 clib_error_t *
720 avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
721 {
722  int msg_len = sizeof (virtchnl_irq_map_info_t) +
723  (ad->n_rx_irqs) * sizeof (virtchnl_vector_map_t);
724  u8 msg[msg_len];
725  virtchnl_irq_map_info_t *imi;
726 
727  clib_memset (msg, 0, msg_len);
728  imi = (virtchnl_irq_map_info_t *) msg;
729  imi->num_vectors = ad->n_rx_irqs;
730 
731  for (int i = 0; i < ad->n_rx_irqs; i++)
732  {
733  imi->vecmap[i].vector_id = i + 1;
734  imi->vecmap[i].vsi_id = ad->vsi_id;
735  if (ad->n_rx_irqs == ad->n_rx_queues)
736  imi->vecmap[i].rxq_map = 1 << i;
737  else
738  imi->vecmap[i].rxq_map = pow2_mask (ad->n_rx_queues);
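    /* Editorial note (not in the original source): with one vector per RX queue
     * each vector i serves only queue i; otherwise the device falls back to a
     * single RX vector that is mapped to every RX queue. */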
739 
740  avf_log_debug (ad, "config_irq_map[%u/%u]: vsi_id %u vector_id %u "
741  "rxq_map %u", i, ad->n_rx_irqs - 1, ad->vsi_id,
742  imi->vecmap[i].vector_id, imi->vecmap[i].rxq_map);
743  }
744 
745 
746  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
747  0);
748 }
749 
750 clib_error_t *
751 avf_op_add_del_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count,
752  u8 * macs, int is_add)
753 {
754  int msg_len =
755  sizeof (virtchnl_ether_addr_list_t) +
756  count * sizeof (virtchnl_ether_addr_t);
757  u8 msg[msg_len];
758  virtchnl_ether_addr_list_t *al;
759  int i;
760 
761  clib_memset (msg, 0, msg_len);
762  al = (virtchnl_ether_addr_list_t *) msg;
763  al->vsi_id = ad->vsi_id;
764  al->num_elements = count;
765 
766  avf_log_debug (ad, "add_del_eth_addr: vsi_id %u num_elements %u is_add %u",
767  ad->vsi_id, al->num_elements, is_add);
768 
769  for (i = 0; i < count; i++)
770  {
771  clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
772  avf_log_debug (ad, "add_del_eth_addr[%u]: %U", i,
773  format_ethernet_address, &al->list[i].addr);
774  }
775  return avf_send_to_pf (vm, ad, is_add ? VIRTCHNL_OP_ADD_ETH_ADDR :
776  VIRTCHNL_OP_DEL_ETH_ADDR, msg, msg_len, 0, 0);
777 }
778 
779 clib_error_t *
780 avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
781 {
782  virtchnl_queue_select_t qs = { 0 };
783  int i = 0;
784  qs.vsi_id = ad->vsi_id;
785  qs.rx_queues = rx;
786  qs.tx_queues = tx;
787 
788  avf_log_debug (ad, "enable_queues: vsi_id %u rx_queues %u tx_queues %u",
789  ad->vsi_id, qs.rx_queues, qs.tx_queues);
790 
791  while (rx)
792  {
793  if (rx & (1 << i))
794  {
795  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
796  avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
797  rx &= ~(1 << i);
798  }
799  i++;
800  }
801  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
802  sizeof (virtchnl_queue_select_t), 0, 0);
803 }
804 
805 clib_error_t *
806 avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
807  virtchnl_eth_stats_t * es)
808 {
809  virtchnl_queue_select_t qs = { 0 };
810  clib_error_t *err;
811  qs.vsi_id = ad->vsi_id;
812 
813  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS, &qs,
814  sizeof (virtchnl_queue_select_t), es,
815  sizeof (virtchnl_eth_stats_t));
816 
817  avf_stats_log_debug (ad, "get_stats: vsi_id %u\n %U", ad->vsi_id,
818  format_avf_eth_stats, es);
819 
820  return err;
821 }
822 
823 clib_error_t *
824 avf_op_get_offload_vlan_v2_caps (vlib_main_t *vm, avf_device_t *ad,
825  virtchnl_vlan_caps_t *vc)
826 {
827  clib_error_t *err;
828 
829  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, 0, 0, vc,
830  sizeof (virtchnl_vlan_caps_t));
831 
832  avf_log_debug (ad, "get_offload_vlan_v2_caps:\n%U%U", format_white_space, 16,
833  format_avf_vlan_caps, vc);
834 
835  return err;
836 }
837 
838 clib_error_t *
839 avf_op_disable_vlan_stripping_v2 (vlib_main_t *vm, avf_device_t *ad, u32 outer,
840  u32 inner)
841 {
842  virtchnl_vlan_setting_t vs = {
843  .outer_ethertype_setting = outer,
844  .inner_ethertype_setting = inner,
845  .vport_id = ad->vsi_id,
846  };
847 
848  avf_log_debug (ad, "disable_vlan_stripping_v2: outer: %U, inner %U",
850  inner);
851 
852  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2, &vs,
853  sizeof (virtchnl_vlan_setting_t), 0, 0);
854 }
855 
856 clib_error_t *
857 avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
858 {
859  avf_aq_desc_t d = { 0 };
860  clib_error_t *error;
861  u32 rstat;
862  f64 t0, t = 0, suspend_time = AVF_RESET_SUSPEND_TIME;
863 
864  avf_log_debug (ad, "reset");
865 
866  d.opcode = 0x801;
867  d.v_opcode = VIRTCHNL_OP_RESET_VF;
868  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
869  return error;
870 
871  t0 = vlib_time_now (vm);
872 retry:
873  vlib_process_suspend (vm, suspend_time);
874 
875  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
876 
877  if (rstat == 2 || rstat == 3)
878  {
879  avf_log_debug (ad, "reset completed in %.3fs", t);
880  return 0;
881  }
882 
883  t = vlib_time_now (vm) - t0;
884  if (t > AVF_RESET_MAX_WAIT_TIME)
885  {
886  avf_log_err (ad, "reset failed (timeout %.3fs)", t);
887  return clib_error_return (0, "reset failed (timeout)");
888  }
889 
890  suspend_time *= 2;
891  goto retry;
892 }
893 
894 clib_error_t *
895 avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
896 {
897  virtchnl_vf_res_request_t res_req = { 0 };
898  clib_error_t *error;
899  u32 rstat;
900  f64 t0, t, suspend_time = AVF_RESET_SUSPEND_TIME;
901 
902  res_req.num_queue_pairs = num_queue_pairs;
903 
904  avf_log_debug (ad, "request_queues: num_queue_pairs %u", num_queue_pairs);
905 
906  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
907  sizeof (virtchnl_vf_res_request_t), &res_req,
908  sizeof (virtchnl_vf_res_request_t));
909 
910  /*
911  * if PF responds, the request failed
912  * else PF initializes restart and avf_send_to_pf returns an error
913  */
914  if (!error)
915  {
916  return clib_error_return (0, "requested more than %u queue pairs",
917  res_req.num_queue_pairs);
918  }
919 
920  t0 = vlib_time_now (vm);
921 retry:
922  vlib_process_suspend (vm, suspend_time);
923  t = vlib_time_now (vm) - t0;
924 
925  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
926 
927  if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
928  goto done;
929 
930  if (t > AVF_RESET_MAX_WAIT_TIME)
931  {
932  avf_log_err (ad, "request queues failed (timeout %.3f seconds)", t);
933  return clib_error_return (0, "request queues failed (timeout)");
934  }
935 
936  suspend_time *= 2;
937  goto retry;
938 
939 done:
940  return NULL;
941 }
942 
943 clib_error_t *
944 avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
945  avf_create_if_args_t * args)
946 {
947  virtchnl_version_info_t ver = { 0 };
948  virtchnl_vf_resource_t res = { 0 };
949  clib_error_t *error;
950  int i, wb_on_itr;
951  u16 rxq_num, txq_num;
952 
953  avf_adminq_init (vm, ad);
954 
955  rxq_num = args->rxq_num ? args->rxq_num : 1;
956  txq_num = args->txq_num ? args->txq_num : vlib_get_n_threads ();
957 
958  if ((error = avf_request_queues (vm, ad, clib_max (txq_num, rxq_num))))
959  {
960  /* we failed to get more queues, but still we want to proceed */
961  clib_error_free (error);
962 
963  if ((error = avf_device_reset (vm, ad)))
964  return error;
965  }
966 
967  avf_adminq_init (vm, ad);
968 
969  /*
970  * OP_VERSION
971  */
972  if ((error = avf_op_version (vm, ad, &ver)))
973  return error;
974 
975  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
976  ver.minor != VIRTCHNL_VERSION_MINOR)
977  return clib_error_return (0, "incompatible protocol version "
978  "(remote %d.%d)", ver.major, ver.minor);
979 
980  /*
981  * OP_GET_VF_RESOURCES
982  */
983  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
984  return error;
985 
986  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
987  return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");
988 
989  ad->vsi_id = res.vsi_res[0].vsi_id;
990  ad->cap_flags = res.vf_cap_flags;
991  ad->num_queue_pairs = res.num_queue_pairs;
992  ad->n_rx_queues = clib_min (rxq_num, res.num_queue_pairs);
993  ad->n_tx_queues = clib_min (txq_num, res.num_queue_pairs);
994  ad->max_vectors = res.max_vectors;
995  ad->max_mtu = res.max_mtu;
996  ad->rss_key_size = res.rss_key_size;
997  ad->rss_lut_size = res.rss_lut_size;
998  ad->n_rx_irqs = ad->max_vectors > ad->n_rx_queues ? ad->n_rx_queues : 1;
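    /* Editorial note (not in the original source): MSI-X vector 0 is reserved for
     * the adminq, so per-queue RX interrupts are only possible when the PF granted
     * more vectors than RX queues; otherwise a single shared RX IRQ is used. */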
999 
1000  if (ad->max_vectors > ad->n_rx_queues)
1001  ad->flags |= AVF_DEVICE_F_RX_INT;
1002 
1003  wb_on_itr = (ad->cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) != 0;
1004 
1006 
1007  if (args->rxq_num != 0 && ad->n_rx_queues != args->rxq_num)
1008  return clib_error_return (0,
1009  "Number of requested RX queues (%u) is "
1010  "higher than mumber of available queues (%u)",
1011  args->rxq_num, ad->num_queue_pairs);
1012 
1013  if (args->txq_num != 0 && ad->n_tx_queues != args->txq_num)
1014  return clib_error_return (0,
1015  "Number of requested TX queues (%u) is "
1016  "higher than mumber of available queues (%u)",
1017  args->txq_num, ad->num_queue_pairs);
1018 
1019  /*
1020  * Disable VLAN stripping
1021  */
1022  if (ad->cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
1023  {
1024  virtchnl_vlan_caps_t vc = {};
1025  u32 outer, inner;
1026  u32 mask = VIRTCHNL_VLAN_ETHERTYPE_8100;
1027 
1028  if ((error = avf_op_get_offload_vlan_v2_caps (vm, ad, &vc)))
1029  return error;
1030 
1031  outer = vc.offloads.stripping_support.outer & mask;
1032  inner = vc.offloads.stripping_support.inner & mask;
1033 
1034  if ((outer || inner) &&
1035  (error = avf_op_disable_vlan_stripping_v2 (vm, ad, outer, inner)))
1036  return error;
1037  }
1038  else if ((error = avf_op_disable_vlan_stripping (vm, ad)))
1039  return error;
1040 
1041  /*
1042  * Init Queues
1043  */
1044  for (i = 0; i < ad->n_rx_queues; i++)
1045  if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
1046  return error;
1047 
1048  for (i = 0; i < ad->n_tx_queues; i++)
1049  if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
1050  return error;
1051 
1052  if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
1053  (error = avf_op_config_rss_lut (vm, ad)))
1054  return error;
1055 
1056  if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
1057  (error = avf_op_config_rss_key (vm, ad)))
1058  return error;
1059 
1060  if ((error = avf_op_config_vsi_queues (vm, ad)))
1061  return error;
1062 
1063  if ((error = avf_op_config_irq_map (vm, ad)))
1064  return error;
1065 
1066  avf_irq_0_set_state (ad, AVF_IRQ_STATE_ENABLED);
1067 
1068  for (i = 0; i < ad->n_rx_irqs; i++)
1069  avf_irq_n_set_state (ad, i, wb_on_itr ? AVF_IRQ_STATE_WB_ON_ITR :
1070  AVF_IRQ_STATE_ENABLED);
1071 
1072  if ((error = avf_op_add_del_eth_addr (vm, ad, 1, ad->hwaddr, 1 /* add */ )))
1073  return error;
1074 
1075  if ((error = avf_op_enable_queues (vm, ad, pow2_mask (ad->n_rx_queues),
1076  pow2_mask (ad->n_tx_queues))))
1077  return error;
1078 
1079  ad->flags |= AVF_DEVICE_F_INITIALIZED;
1080  return error;
1081 }
1082 
1083 void
1084 avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
1085 {
1086  vnet_main_t *vnm = vnet_get_main ();
1087  virtchnl_pf_event_t *e;
1088  u32 r;
1089 
1090  if (ad->flags & AVF_DEVICE_F_ERROR)
1091  return;
1092 
1093  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
1094  return;
1095 
1096  ASSERT (ad->error == 0);
1097 
1098  /* do not process device in reset state */
1099  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
1100  if (r != VIRTCHNL_VFR_VFACTIVE)
1101  return;
1102 
1103  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
1104  if ((r & 0xf0000000) != (1ULL << 31))
1105  {
1106  ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
1107  avf_log_err (ad, "error: %U", format_clib_error, ad->error);
1108  goto error;
1109  }
1110 
1111  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
1112  if ((r & 0xf0000000) != (1ULL << 31))
1113  {
1114  ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
1115  avf_log_err (ad, "error: %U", format_clib_error, ad->error);
1116  goto error;
1117  }
1118 
1119  if (is_irq == 0)
1120  avf_op_get_stats (vm, ad, &ad->eth_stats);
1121 
1122  /* *INDENT-OFF* */
1123  vec_foreach (e, ad->events)
1124  {
1125  avf_log_debug (ad, "event: %s (%u) sev %d",
1126  virtchnl_event_names[e->event], e->event, e->severity);
1127  if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
1128  {
1129  int link_up;
1130  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
1131  u32 flags = 0;
1132  u32 mbps = 0;
1133 
1134  if (ad->cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1135  link_up = e->event_data.link_event_adv.link_status;
1136  else
1137  link_up = e->event_data.link_event.link_status;
1138 
1139  if (ad->cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1140  mbps = e->event_data.link_event_adv.link_speed;
1141  if (speed == VIRTCHNL_LINK_SPEED_40GB)
1142  mbps = 40000;
1143  else if (speed == VIRTCHNL_LINK_SPEED_25GB)
1144  mbps = 25000;
1145  else if (speed == VIRTCHNL_LINK_SPEED_10GB)
1146  mbps = 10000;
1147  else if (speed == VIRTCHNL_LINK_SPEED_5GB)
1148  mbps = 5000;
1149  else if (speed == VIRTCHNL_LINK_SPEED_2_5GB)
1150  mbps = 2500;
1151  else if (speed == VIRTCHNL_LINK_SPEED_1GB)
1152  mbps = 1000;
1153  else if (speed == VIRTCHNL_LINK_SPEED_100MB)
1154  mbps = 100;
1155 
1156  avf_log_debug (ad, "event_link_change: status %d speed %u mbps",
1157  link_up, mbps);
1158 
1159  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
1160  {
1161  ad->flags |= AVF_DEVICE_F_LINK_UP;
1162  flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
1163  VNET_HW_INTERFACE_FLAG_LINK_UP);
1164  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
1165  vnet_hw_interface_set_link_speed (vnm, ad->hw_if_index,
1166  mbps * 1000);
1167  ad->link_speed = mbps;
1168  }
1169  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
1170  {
1171  ad->flags &= ~AVF_DEVICE_F_LINK_UP;
1172  ad->link_speed = 0;
1173  }
1174 
1175  if (ad->flags & AVF_DEVICE_F_ELOG)
1176  {
1177  ELOG_TYPE_DECLARE (el) =
1178  {
1179  .format = "avf[%d] link change: link_status %d "
1180  "link_speed %d mbps",
1181  .format_args = "i4i1i4",
1182  };
1183  struct
1184  {
1185  u32 dev_instance;
1186  u8 link_status;
1187  u32 link_speed;
1188  } *ed;
1189  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
1190  ed->dev_instance = ad->dev_instance;
1191  ed->link_status = link_up;
1192  ed->link_speed = mbps;
1193  }
1194  }
1195  else
1196  {
1197  if (ad->flags & AVF_DEVICE_F_ELOG)
1198  {
1199  ELOG_TYPE_DECLARE (el) =
1200  {
1201  .format = "avf[%d] unknown event: event %d severity %d",
1202  .format_args = "i4i4i1i1",
1203  };
1204  struct
1205  {
1206  u32 dev_instance;
1207  u32 event;
1208  u32 severity;
1209  } *ed;
1210  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
1211  ed->dev_instance = ad->dev_instance;
1212  ed->event = e->event;
1213  ed->severity = e->severity;
1214  }
1215  }
1216  }
1217  /* *INDENT-ON* */
1218  vec_reset_length (ad->events);
1219 
1220  return;
1221 
1222 error:
1223  ad->flags |= AVF_DEVICE_F_ERROR;
1224  ASSERT (ad->error != 0);
1226 }
1227 
1228 clib_error_t *
1229 avf_op_program_flow (vlib_main_t *vm, avf_device_t *ad, int is_create,
1230  u8 *rule, u32 rule_len, u8 *program_status,
1231  u32 status_len)
1232 {
1233  avf_log_debug (ad, "avf_op_program_flow: vsi_id %u is_create %u", ad->vsi_id,
1234  is_create);
1235 
1236  return avf_send_to_pf (vm, ad,
1237  is_create ? VIRTCHNL_OP_ADD_FDIR_FILTER :
1238  VIRTCHNL_OP_DEL_FDIR_FILTER,
1239  rule, rule_len, program_status, status_len);
1240 }
1241 
1242 static void
1243 avf_process_handle_request (vlib_main_t * vm, avf_process_req_t * req)
1244 {
1245  avf_device_t *ad = avf_get_device (req->dev_instance);
1246 
1247  if (req->type == AVF_PROCESS_REQ_ADD_DEL_ETH_ADDR)
1248  req->error = avf_op_add_del_eth_addr (vm, ad, 1, req->eth_addr,
1249  req->is_add);
1250  else if (req->type == AVF_PROCESS_REQ_CONFIG_PROMISC_MDDE)
1251  req->error = avf_op_config_promisc_mode (vm, ad, req->is_enable);
1252  else if (req->type == AVF_PROCESS_REQ_PROGRAM_FLOW)
1253  req->error =
1254  avf_op_program_flow (vm, ad, req->is_add, req->rule, req->rule_len,
1255  req->program_status, req->status_len);
1256  else
1257  clib_panic ("BUG: unknown avf process request type");
1258 
1259  if (req->calling_process_index != avf_process_node.index)
1261 }
1262 
1263 static clib_error_t *
1264 avf_process_request (vlib_main_t * vm, avf_process_req_t * req)
1265 {
1266  uword *event_data = 0;
1267  req->calling_process_index = vlib_get_current_process_node_index (vm);
1268 
1269  if (req->calling_process_index != avf_process_node.index)
1270  {
1271  vlib_process_signal_event_pointer (vm, avf_process_node.index,
1272  AVF_PROCESS_EVENT_REQ, req);
1273 
1274  vlib_process_wait_for_event_or_clock (vm, 5.0);
1275 
1276  if (vlib_process_get_events (vm, &event_data) != 0)
1277  clib_panic ("avf process node failed to reply in 5 seconds");
1278  vec_free (event_data);
1279  }
1280  else
1281  avf_process_handle_request (vm, req);
1282 
1283  return req->error;
1284 }
1285 
1286 static u32
1287 avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
1288 {
1289  avf_process_req_t req;
1290  vlib_main_t *vm = vlib_get_main ();
1291  avf_device_t *ad = avf_get_device (hw->dev_instance);
1292  clib_error_t *err;
1293 
1294  switch (flags)
1295  {
1296  case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
1297  ad->flags &= ~AVF_DEVICE_F_PROMISC;
1298  break;
1299  case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
1300  ad->flags |= AVF_DEVICE_F_PROMISC;
1301  break;
1302  default:
1303  return ~0;
1304  }
1305 
1306  req.is_enable = ((ad->flags & AVF_DEVICE_F_PROMISC) != 0);
1307  req.type = AVF_PROCESS_REQ_CONFIG_PROMISC_MDDE;
1308  req.dev_instance = hw->dev_instance;
1309 
1310  if ((err = avf_process_request (vm, &req)))
1311  {
1312  avf_log_err (ad, "error: %U", format_clib_error, err);
1313  clib_error_free (err);
1314  return ~0;
1315  }
1316  return 0;
1317 }
1318 
1319 static uword
1320 avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
1321 {
1322  avf_main_t *am = &avf_main;
1323  uword *event_data = 0, event_type;
1324  int enabled = 0, irq;
1325  f64 last_run_duration = 0;
1326  f64 last_periodic_time = 0;
1327  avf_device_t **dev_pointers = 0;
1328  u32 i;
1329 
1330  while (1)
1331  {
1332  if (enabled)
1333  vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
1334  else
1335  vlib_process_wait_for_event (vm);
1336 
1337  event_type = vlib_process_get_events (vm, &event_data);
1338  irq = 0;
1339 
1340  switch (event_type)
1341  {
1342  case ~0:
1343  last_periodic_time = vlib_time_now (vm);
1344  break;
1345  case AVF_PROCESS_EVENT_START:
1346  enabled = 1;
1347  break;
1348  case AVF_PROCESS_EVENT_DELETE_IF:
1349  for (int i = 0; i < vec_len (event_data); i++)
1350  {
1351  avf_device_t *ad = avf_get_device (event_data[i]);
1352  avf_delete_if (vm, ad, /* with_barrier */ 1);
1353  }
1354  if (pool_elts (am->devices) < 1)
1355  enabled = 0;
1356  break;
1357  case AVF_PROCESS_EVENT_AQ_INT:
1358  irq = 1;
1359  break;
1360  case AVF_PROCESS_EVENT_REQ:
1361  for (int i = 0; i < vec_len (event_data); i++)
1362  avf_process_handle_request (vm, (void *) event_data[i]);
1363  break;
1364 
1365  default:
1366  ASSERT (0);
1367  }
1368 
1369  vec_reset_length (event_data);
1370 
1371  if (enabled == 0)
1372  continue;
1373 
1374  /* create local list of device pointers as device pool may grow
1375  * during suspend */
1376  vec_reset_length (dev_pointers);
1377  /* *INDENT-OFF* */
1378  pool_foreach_index (i, am->devices)
1379  {
1380  vec_add1 (dev_pointers, avf_get_device (i));
1381  }
1382 
1383  vec_foreach_index (i, dev_pointers)
1384  {
1385  avf_process_one_device (vm, dev_pointers[i], irq);
1386  };
1387  /* *INDENT-ON* */
1388  last_run_duration = vlib_time_now (vm) - last_periodic_time;
1389  }
1390  return 0;
1391 }
1392 
1393 /* *INDENT-OFF* */
1394 VLIB_REGISTER_NODE (avf_process_node)  = {
1395  .function = avf_process,
1396  .type = VLIB_NODE_TYPE_PROCESS,
1397  .name = "avf-process",
1398 };
1399 /* *INDENT-ON* */
1400 
1401 static void
1402 avf_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1403 {
1404  uword pd = vlib_pci_get_private_data (vm, h);
1405  avf_device_t *ad = avf_get_device (pd);
1406  u32 icr0;
1407 
1408  icr0 = avf_reg_read (ad, AVFINT_ICR0);
1409 
1410  if (ad->flags & AVF_DEVICE_F_ELOG)
1411  {
1412  /* *INDENT-OFF* */
1413  ELOG_TYPE_DECLARE (el) =
1414  {
1415  .format = "avf[%d] irq 0: icr0 0x%x",
1416  .format_args = "i4i4",
1417  };
1418  /* *INDENT-ON* */
1419  struct
1420  {
1421  u32 dev_instance;
1422  u32 icr0;
1423  } *ed;
1424 
1425  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
1426  ed->dev_instance = ad->dev_instance;
1427  ed->icr0 = icr0;
1428  }
1429 
1430  avf_irq_0_set_state (ad, AVF_IRQ_STATE_ENABLED);
1431 
1432  /* bit 30 - Send/Receive Admin queue interrupt indication */
1433  if (icr0 & (1 << 30))
1434  vlib_process_signal_event (vm, avf_process_node.index,
1435  AVF_PROCESS_EVENT_AQ_INT, 0);
1436 }
1437 
1438 static void
1439 avf_irq_n_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1440 {
1441  vnet_main_t *vnm = vnet_get_main ();
1442  uword pd = vlib_pci_get_private_data (vm, h);
1443  avf_device_t *ad = avf_get_device (pd);
1444  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, line - 1);
1445 
1446  if (ad->flags & AVF_DEVICE_F_ELOG)
1447  {
1448  /* *INDENT-OFF* */
1449  ELOG_TYPE_DECLARE (el) =
1450  {
1451  .format = "avf[%d] irq %d: received",
1452  .format_args = "i4i2",
1453  };
1454  /* *INDENT-ON* */
1455  struct
1456  {
1457  u32 dev_instance;
1458  u16 line;
1459  } *ed;
1460 
1461  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
1462  ed->dev_instance = ad->dev_instance;
1463  ed->line = line;
1464  }
1465 
1466  line--;
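    /* Editorial note (not in the original source): MSI-X line 0 is the adminq
     * interrupt and RX queue i uses line i + 1, hence the decrement above to
     * recover the zero-based queue index. */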
1467 
1468  if (ad->flags & AVF_DEVICE_F_RX_INT && rxq->int_mode)
1469  vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
1470 
1471 }
1472 
1473 void
1474 avf_delete_if (vlib_main_t * vm, avf_device_t * ad, int with_barrier)
1475 {
1476  vnet_main_t *vnm = vnet_get_main ();
1477  avf_main_t *am = &avf_main;
1478  int i;
1479  u32 dev_instance;
1480 
1481  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
1482 
1483  if (ad->hw_if_index)
1484  {
1485  if (with_barrier)
1486  vlib_worker_thread_barrier_sync (vm);
1487  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1488  ethernet_delete_interface (vnm, ad->hw_if_index);
1489  if (with_barrier)
1490  vlib_worker_thread_barrier_release (vm);
1491  }
1492 
1493  vlib_pci_device_close (vm, ad->pci_dev_handle);
1494 
1495  vlib_physmem_free (vm, ad->atq);
1496  vlib_physmem_free (vm, ad->arq);
1497  vlib_physmem_free (vm, ad->atq_bufs);
1498  vlib_physmem_free (vm, ad->arq_bufs);
1499 
1500  /* *INDENT-OFF* */
1501  vec_foreach_index (i, ad->rxqs)
1502  {
1503  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
1504  vlib_physmem_free (vm, (void *) rxq->descs);
1505  if (rxq->n_enqueued)
1506  vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
1507  rxq->n_enqueued);
1508  vec_free (rxq->bufs);
1509  }
1510  /* *INDENT-ON* */
1511  vec_free (ad->rxqs);
1512 
1513  /* *INDENT-OFF* */
1514  vec_foreach_index (i, ad->txqs)
1515  {
1516  avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
1517  vlib_physmem_free (vm, (void *) txq->descs);
1518  if (txq->n_enqueued)
1519  {
1520  u16 first = (txq->next - txq->n_enqueued) & (txq->size -1);
1521  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
1522  txq->n_enqueued);
1523  }
1524  /* Free the placeholder buffer */
1525  vlib_buffer_free (vm, txq->ph_bufs, vec_len (txq->ph_bufs));
1526  vec_free (txq->ph_bufs);
1527  vec_free (txq->bufs);
1528  clib_ring_free (txq->rs_slots);
1529  vec_free (txq->tmp_bufs);
1530  vec_free (txq->tmp_descs);
1531  clib_spinlock_free (&txq->lock);
1532  }
1533  /* *INDENT-ON* */
1534  vec_free (ad->txqs);
1535  vec_free (ad->name);
1536 
1537  clib_error_free (ad->error);
1538  dev_instance = ad->dev_instance;
1539  clib_memset (ad, 0, sizeof (*ad));
1540  pool_put_index (am->devices, dev_instance);
1541  clib_mem_free (ad);
1542 }
1543 
1544 static u8
1545 avf_validate_queue_size (avf_create_if_args_t * args)
1546 {
1547  clib_error_t *error = 0;
1548 
1549  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
1550  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;
1551 
1552  if ((args->rxq_size > AVF_QUEUE_SZ_MAX)
1553  || (args->txq_size > AVF_QUEUE_SZ_MAX))
1554  {
1555  args->rv = VNET_API_ERROR_INVALID_VALUE;
1556  args->error =
1557  clib_error_return (error, "queue size must not be greater than %u",
1558  AVF_QUEUE_SZ_MAX);
1559  return 1;
1560  }
1561  if ((args->rxq_size < AVF_QUEUE_SZ_MIN)
1562  || (args->txq_size < AVF_QUEUE_SZ_MIN))
1563  {
1564  args->rv = VNET_API_ERROR_INVALID_VALUE;
1565  args->error =
1566  clib_error_return (error, "queue size must not be smaller than %u",
1567  AVF_QUEUE_SZ_MIN);
1568  return 1;
1569  }
1570  if ((args->rxq_size & (args->rxq_size - 1)) ||
1571  (args->txq_size & (args->txq_size - 1)))
1572  {
1573  args->rv = VNET_API_ERROR_INVALID_VALUE;
1574  args->error =
1575  clib_error_return (error, "queue size must be a power of two");
1576  return 1;
1577  }
1578  return 0;
1579 }
1580 
1581 void
1582 avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
1583 {
1584  vnet_main_t *vnm = vnet_get_main ();
1585  avf_main_t *am = &avf_main;
1586  avf_device_t *ad, **adp;
1587  vlib_pci_dev_handle_t h;
1588  clib_error_t *error = 0;
1589  int i;
1590 
1591  /* check input args */
1592  if (avf_validate_queue_size (args) != 0)
1593  return;
1594 
1595  /* *INDENT-OFF* */
1596  pool_foreach (adp, am->devices) {
1597  if ((*adp)->pci_addr.as_u32 == args->addr.as_u32)
1598  {
1599  args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
1600  args->error =
1601  clib_error_return (error, "%U: %s", format_vlib_pci_addr,
1602  &args->addr, "pci address in use");
1603  return;
1604  }
1605  }
1606  /* *INDENT-ON* */
1607 
1608  pool_get (am->devices, adp);
1609  adp[0] = ad = clib_mem_alloc_aligned (sizeof (avf_device_t),
1610  CLIB_CACHE_LINE_BYTES);
1611  clib_memset (ad, 0, sizeof (avf_device_t));
1612  ad->dev_instance = adp - am->devices;
1613  ad->per_interface_next_index = ~0;
1614  ad->name = vec_dup (args->name);
1615 
1616  if (args->enable_elog)
1617  {
1618  ad->flags |= AVF_DEVICE_F_ELOG;
1619  avf_elog_init ();
1620  }
1621 
1622  if ((error = vlib_pci_device_open (vm, &args->addr, avf_pci_device_ids,
1623  &h)))
1624  {
1625  pool_put (am->devices, adp);
1626  clib_mem_free (ad);
1627  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1628  args->error =
1629  clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
1630  &args->addr);
1631  return;
1632  }
1633  ad->pci_dev_handle = h;
1634  ad->pci_addr = args->addr;
1635  ad->numa_node = vlib_pci_get_numa_node (vm, h);
1636 
1637  vlib_pci_set_private_data (vm, h, ad->dev_instance);
1638 
1639  if ((error = vlib_pci_bus_master_enable (vm, h)))
1640  goto error;
1641 
1642  if ((error = vlib_pci_map_region (vm, h, 0, &ad->bar0)))
1643  goto error;
1644 
1645  ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1646  AVF_MBOX_LEN,
1647  CLIB_CACHE_LINE_BYTES,
1648  ad->numa_node);
1649  if (ad->atq == 0)
1650  {
1651  error = vlib_physmem_last_error (vm);
1652  goto error;
1653  }
1654 
1655  if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
1656  goto error;
1657 
1658  ad->arq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1659  AVF_MBOX_LEN,
1660  CLIB_CACHE_LINE_BYTES,
1661  ad->numa_node);
1662  if (ad->arq == 0)
1663  {
1664  error = vlib_physmem_last_error (vm);
1665  goto error;
1666  }
1667 
1668  if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
1669  goto error;
1670 
1671  ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1672  AVF_MBOX_LEN,
1673  CLIB_CACHE_LINE_BYTES,
1674  ad->numa_node);
1675  if (ad->atq_bufs == 0)
1676  {
1677  error = vlib_physmem_last_error (vm);
1678  goto error;
1679  }
1680 
1681  if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
1682  goto error;
1683 
1684  ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1685  AVF_MBOX_LEN,
1686  CLIB_CACHE_LINE_BYTES,
1687  ad->numa_node);
1688  if (ad->arq_bufs == 0)
1689  {
1690  error = vlib_physmem_last_error (vm);
1691  goto error;
1692  }
1693 
1694  if ((error = vlib_pci_map_dma (vm, h, ad->arq_bufs)))
1695  goto error;
1696 
1697  if (vlib_pci_supports_virtual_addr_dma (vm, h))
1698  ad->flags |= AVF_DEVICE_F_VA_DMA;
1699 
1700  if ((error = avf_device_init (vm, am, ad, args)))
1701  goto error;
1702 
1703  if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
1704  &avf_irq_0_handler)))
1705  goto error;
1706 
1707  if ((error = vlib_pci_register_msix_handler (vm, h, 1, ad->n_rx_irqs,
1708  &avf_irq_n_handler)))
1709  goto error;
1710 
1711  if ((error = vlib_pci_enable_msix_irq (vm, h, 0, ad->n_rx_irqs + 1)))
1712  goto error;
1713 
1714  if ((error = vlib_pci_intr_enable (vm, h)))
1715  goto error;
1716 
1717  /* create interface */
1718  error = ethernet_register_interface (vnm, avf_device_class.index,
1719  ad->dev_instance, ad->hwaddr,
1720  &ad->hw_if_index, avf_flag_change);
1721 
1722  if (error)
1723  goto error;
1724 
1725  /* Indicate ability to support L3 DMAC filtering and
1726  * initialize interface to L3 non-promisc mode */
1731  ethernet_set_flags (vnm, ad->hw_if_index,
1732  ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
1733 
1734  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
1735  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
1736 
1740 
1741  for (i = 0; i < ad->n_rx_queues; i++)
1742  {
1743  u32 qi, fi;
1744  qi = vnet_hw_if_register_rx_queue (vnm, ad->hw_if_index, i,
1745  VNET_HW_IF_RXQ_THREAD_ANY);
1746 
1747  if (ad->flags & AVF_DEVICE_F_RX_INT)
1748  {
1749  fi = vlib_pci_get_msix_file_index (vm, ad->pci_dev_handle, i + 1);
1750  vnet_hw_if_set_rx_queue_file_index (vnm, qi, fi);
1751  }
1752  ad->rxqs[i].queue_index = qi;
1753  }
1754 
1755  for (i = 0; i < ad->n_tx_queues; i++)
1756  {
1757  u32 qi = vnet_hw_if_register_tx_queue (vnm, ad->hw_if_index, i);
1758  ad->txqs[i].queue_index = qi;
1759  }
1760 
1761  for (i = 0; i < vlib_get_n_threads (); i++)
1762  {
1763  u32 qi = ad->txqs[i % ad->n_tx_queues].queue_index;
1764  vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
1765  }
1766 
1767  vnet_hw_if_update_runtime_data (vnm, ad->hw_if_index);
1768 
1769  if (pool_elts (am->devices) == 1)
1770  vlib_process_signal_event (vm, avf_process_node.index,
1771  AVF_PROCESS_EVENT_START, 0);
1772 
1773  return;
1774 
1775 error:
1776  avf_delete_if (vm, ad, /* with_barrier */ 0);
1777  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1778  args->error = clib_error_return (error, "pci-addr %U",
1779  format_vlib_pci_addr, &args->addr);
1780  avf_log_err (ad, "error: %U", format_clib_error, args->error);
1781 }
1782 
1783 static clib_error_t *
1784 avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
1785 {
1786  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1787  avf_device_t *ad = avf_get_device (hi->dev_instance);
1788  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
1789 
1790  if (ad->flags & AVF_DEVICE_F_ERROR)
1791  return clib_error_return (0, "device is in error state");
1792 
1793  if (is_up)
1794  {
1795  vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
1796  VNET_HW_INTERFACE_FLAG_LINK_UP);
1797  ad->flags |= AVF_DEVICE_F_ADMIN_UP;
1798  }
1799  else
1800  {
1801  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1802  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
1803  }
1804  return 0;
1805 }
1806 
1807 static clib_error_t *
1808 avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
1809  vnet_hw_if_rx_mode mode)
1810 {
1811  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1812  avf_device_t *ad = avf_get_device (hw->dev_instance);
1813  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
1814 
1815  if (mode == VNET_HW_IF_RX_MODE_POLLING)
1816  {
1817  if (rxq->int_mode == 0)
1818  return 0;
1819  if (ad->cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1820  avf_irq_n_set_state (ad, qid, AVF_IRQ_STATE_WB_ON_ITR);
1821  else
1822  avf_irq_n_set_state (ad, qid, AVF_IRQ_STATE_DISABLED);
1823  rxq->int_mode = 0;
1824  }
1825  else
1826  {
1827  if (rxq->int_mode == 1)
1828  return 0;
1829  if (ad->n_rx_irqs != ad->n_rx_queues)
1830  return clib_error_return (0, "not enough interrupt lines");
1831  rxq->int_mode = 1;
1832  avf_irq_n_set_state (ad, qid, AVF_IRQ_STATE_ENABLED);
1833  }
1834 
1835  return 0;
1836 }
1837 
1838 static void
1839 avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
1840  u32 node_index)
1841 {
1842  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1843  avf_device_t *ad = avf_get_device (hw->dev_instance);
1844 
1845  /* Shut off redirection */
1846  if (node_index == ~0)
1847  {
1848  ad->per_interface_next_index = node_index;
1849  return;
1850  }
1851 
1852  ad->per_interface_next_index =
1853  vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);
1854 }
1855 
1856 static clib_error_t *
1857 avf_add_del_mac_address (vnet_hw_interface_t * hw,
1858  const u8 * address, u8 is_add)
1859 {
1860  vlib_main_t *vm = vlib_get_main ();
1861  avf_process_req_t req;
1862 
1863  req.dev_instance = hw->dev_instance;
1864  req.type = AVF_PROCESS_REQ_ADD_DEL_ETH_ADDR;
1865  req.is_add = is_add;
1866  clib_memcpy (req.eth_addr, address, 6);
1867 
1868  return avf_process_request (vm, &req);
1869 }
1870 
1871 static char *avf_tx_func_error_strings[] = {
1872 #define _(n,s) s,
1873  foreach_avf_tx_func_error
1874 #undef _
1875 };
1876 
1877 static void
1878 avf_clear_hw_interface_counters (u32 instance)
1879 {
1880  avf_device_t *ad = avf_get_device (instance);
1881  clib_memcpy_fast (&ad->last_cleared_eth_stats,
1882  &ad->eth_stats, sizeof (ad->eth_stats));
1883 }
1884 
1885 clib_error_t *
1886 avf_program_flow (u32 dev_instance, int is_add, u8 *rule, u32 rule_len,
1887  u8 *program_status, u32 status_len)
1888 {
1889  vlib_main_t *vm = vlib_get_main ();
1890  avf_process_req_t req;
1891 
1892  req.dev_instance = dev_instance;
1893  req.type = AVF_PROCESS_REQ_PROGRAM_FLOW;
1894  req.is_add = is_add;
1895  req.rule = rule;
1896  req.rule_len = rule_len;
1897  req.program_status = program_status;
1898  req.status_len = status_len;
1899 
1900  return avf_process_request (vm, &req);
1901 }
1902 
1903 /* *INDENT-OFF* */
1904 VNET_DEVICE_CLASS (avf_device_class,) = {
1905  .name = "Adaptive Virtual Function (AVF) interface",
1906  .clear_counters = avf_clear_hw_interface_counters,
1907  .format_device = format_avf_device,
1908  .format_device_name = format_avf_device_name,
1909  .admin_up_down_function = avf_interface_admin_up_down,
1910  .rx_mode_change_function = avf_interface_rx_mode_change,
1911  .rx_redirect_to_node = avf_set_interface_next_node,
1912  .mac_addr_add_del_function = avf_add_del_mac_address,
1913  .tx_function_n_errors = AVF_TX_N_ERROR,
1914  .tx_function_error_strings = avf_tx_func_error_strings,
1915  .flow_ops_function = avf_flow_ops_fn,
1916 };
1917 /* *INDENT-ON* */
1918 
1919 clib_error_t *
1920 avf_init (vlib_main_t * vm)
1921 {
1922  avf_main_t *am = &avf_main;
1924 
1927 
1928  return 0;
1929 }
1930 
1931 /* *INDENT-OFF* */
1932 VLIB_INIT_FUNCTION (avf_init) =
1933 {
1934  .runs_after = VLIB_INITS ("pci_bus_init"),
1935 };
1936 /* *INDENT-ON* */
1937 
1938 /*
1939  * fd.io coding-style-patch-verification: ON
1940  *
1941  * Local Variables:
1942  * eval: (c-set-style "gnu")
1943  * End:
1944  */
#define AVF_TXQ_SZ
Definition: device.c:31
clib_error_t * avf_op_get_vf_resources(vlib_main_t *vm, avf_device_t *ad, virtchnl_vf_resource_t *res)
Definition: device.c:549
vlib_log_class_t class
Definition: log.h:85
vlib_node_registration_t avf_process_node
(constructor) VLIB_REGISTER_NODE (avf_process_node)
Definition: device.c:1394
u8 int_mode
Definition: avf.h:178
#define vec_foreach_index(var, v)
Iterate over vector indices.
#define AVF_ARQLEN
Definition: virtchnl.h:47
virtchnl_queue_pair_info_t qpair[1]
Definition: virtchnl.h:333
u32 hw_if_index
Definition: avf.h:223
static clib_error_t * vlib_pci_intr_enable(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.h:239
#define PCI_DEVICE_ID_INTEL_AVF
Definition: device.c:35
vnet_interface_output_runtime_t * rt
#define AVF_ATQH
Definition: virtchnl.h:40
#define clib_min(x, y)
Definition: clib.h:342
#define pool_foreach_index(i, v)
Definition: pool.h:576
format_function_t format_avf_vlan_caps
Definition: avf.h:362
static void avf_irq_0_set_state(avf_device_t *ad, avf_irq_state_t state)
Definition: device.c:72
static uword random_default_seed(void)
Default random seed (unix/linux user-mode)
Definition: random.h:91
static clib_error_t * vlib_pci_bus_master_enable(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.h:271
static void * vlib_physmem_alloc_aligned_on_numa(vlib_main_t *vm, uword n_bytes, uword alignment, u32 numa_node)
Definition: physmem_funcs.h:63
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread Waits for an event, or for the indicated number of seconds...
Definition: node_funcs.h:755
void avf_arq_slot_init(avf_device_t *ad, u16 slot)
Definition: device.c:351
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
clib_error_t * avf_send_to_pf(vlib_main_t *vm, avf_device_t *ad, virtchnl_ops_t op, void *in, int in_len, void *out, int out_len)
Definition: device.c:408
void avf_create_if(vlib_main_t *vm, avf_create_if_args_t *args)
Definition: device.c:1582
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:982
static uword * vlib_process_wait_for_event(vlib_main_t *vm)
Definition: node_funcs.h:660
void ethernet_delete_interface(vnet_main_t *vnm, u32 hw_if_index)
Definition: interface.c:393
VLIB_REGISTER_LOG_CLASS(avf_log)
#define AVF_ATQBAH
Definition: virtchnl.h:45
vnet_hw_if_output_node_runtime_t * r
clib_error_t * avf_device_init(vlib_main_t *vm, avf_main_t *am, avf_device_t *ad, avf_create_if_args_t *args)
Definition: device.c:944
u32 status_len
Definition: avf.h:299
u8 * program_status
Definition: avf.h:298
clib_error_t * error
Definition: avf.h:267
#define pool_foreach(VAR, POOL)
Iterate through pool.
Definition: pool.h:534
u32 * tmp_bufs
Definition: avf.h:197
#define AVF_AQ_ENQ_SUSPEND_TIME
Definition: avf.h:38
clib_error_t * avf_op_config_vsi_queues(vlib_main_t *vm, avf_device_t *ad)
Definition: device.c:664
clib_error_t * avf_op_version(vlib_main_t *vm, avf_device_t *ad, virtchnl_version_info_t *ver)
Definition: device.c:527
u64 atq_bufs_pa
Definition: avf.h:240
static uword vlib_buffer_get_pa(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:488
virtchnl_vsi_type_t vsi_type
Definition: virtchnl.h:187
#define AVF_SEND_TO_PF_SUSPEND_TIME
Definition: avf.h:44
unsigned long u64
Definition: types.h:89
void vlib_pci_device_close(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:1311
static clib_error_t * avf_process_request(vlib_main_t *vm, avf_process_req_t *req)
Definition: device.c:1264
virtchnl_vector_map_t vecmap[1]
Definition: virtchnl.h:361
u16 n_rx_irqs
Definition: avf.h:251
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:325
#define clib_ring_new_aligned(ring, size, align)
Definition: ring.h:53
vnet_hw_interface_capabilities_t caps
Definition: interface.h:645
virtchnl_link_speed_t link_speed
Definition: avf.h:255
#define AVF_ARQBAH
Definition: virtchnl.h:39
static uword avf_dma_addr(vlib_main_t *vm, avf_device_t *ad, void *p)
Definition: device.c:364
static clib_error_t * vlib_physmem_last_error(struct vlib_main_t *vm)
clib_error_t * avf_aq_desc_enq(vlib_main_t *vm, avf_device_t *ad, avf_aq_desc_t *dt, void *data, int len)
Definition: device.c:133
avf_process_req_type_t type
Definition: avf.h:289
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
#define AVF_QRX_TAIL(q)
Definition: virtchnl.h:51
#define AVF_ARQT
Definition: virtchnl.h:43
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:607
format_function_t format_avf_device
Definition: avf.h:357
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:645
static void avf_irq_n_handler(vlib_main_t *vm, vlib_pci_dev_handle_t h, u16 line)
Definition: device.c:1439
vlib_pci_addr_t addr
Definition: avf.h:333
clib_error_t * avf_init(vlib_main_t *vm)
Definition: device.c:1920
#define AVF_AQ_F_SI
Definition: virtchnl.h:61
vnet_flow_dev_ops_function_t avf_flow_ops_fn
Definition: avf.h:365
u32 dev_instance
Definition: avf.h:221
clib_error_t * vlib_pci_enable_msix_irq(vlib_main_t *vm, vlib_pci_dev_handle_t h, u16 start, u16 count)
Definition: pci.c:895
#define AVF_QTX_TAIL(q)
Definition: virtchnl.h:50
static_always_inline void vnet_hw_if_rx_queue_set_int_pending(vnet_main_t *vnm, u32 queue_index)
clib_error_t * avf_rxq_init(vlib_main_t *vm, avf_device_t *ad, u16 qid, u16 rxq_size)
Definition: device.c:247
virtchnl_link_speed_t
Definition: virtchnl.h:227
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
Definition: vec.h:535
#define ETHERNET_INTERFACE_FLAG_DEFAULT_L3
Definition: ethernet.h:160
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
Definition: interface.c:513
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:255
volatile u32 * qtx_tail
Definition: avf.h:186
clib_error_t * avf_op_get_offload_vlan_v2_caps(vlib_main_t *vm, avf_device_t *ad, virtchnl_vlan_caps_t *vc)
Definition: device.c:824
#define AVF_ATQLEN
Definition: virtchnl.h:41
clib_error_t * error
Definition: avf.h:301
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1177
void avf_process_one_device(vlib_main_t *vm, avf_device_t *ad, int is_irq)
Definition: device.c:1084
unsigned char u8
Definition: types.h:56
vlib_buffer_t ** b
vnet_device_class_t avf_device_class
#define AVF_ARQH
Definition: virtchnl.h:44
static clib_error_t * avf_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_if_rx_mode mode)
Definition: device.c:1808
u8 data[128]
Definition: ipsec_types.api:92
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
double f64
Definition: types.h:142
static void clib_spinlock_free(clib_spinlock_t *p)
Definition: lock.h:72
#define vlib_worker_thread_barrier_sync(X)
Definition: threads.h:194
void avf_delete_if(vlib_main_t *vm, avf_device_t *ad, int with_barrier)
Definition: device.c:1474
virtchnl_ops_t
Definition: virtchnl.h:117
uword vlib_pci_get_msix_file_index(vlib_main_t *vm, vlib_pci_dev_handle_t h, u16 index)
Definition: pci.c:917
static vnet_sw_interface_t * vnet_get_hw_sw_interface(vnet_main_t *vnm, u32 hw_if_index)
unsigned int u32
Definition: types.h:88
#define clib_memcpy(d, s, n)
Definition: string.h:197
u8 buffer_pool_index
Definition: avf.h:179
u32 cap_flags
Definition: avf.h:247
vlib_frame_t * f
static uword vlib_process_suspend(vlib_main_t *vm, f64 dt)
Suspend a vlib cooperative multi-tasking thread for a period of time.
Definition: node_funcs.h:486
u32 calling_process_index
Definition: avf.h:291
#define AVF_AQ_F_DD
Definition: virtchnl.h:53
#define AVF_RESET_SUSPEND_TIME
Definition: avf.h:41
u16 * rs_slots
Definition: avf.h:194
#define AVFINT_ICR0_ENA1
Definition: virtchnl.h:37
VNET_DEVICE_CLASS(af_xdp_device_class)
u8 * format_ethernet_address(u8 *s, va_list *args)
Definition: format.c:44
avf_tx_desc_t * tmp_descs
Definition: avf.h:196
avf_irq_state_t
Definition: device.c:64
clib_error_t * vlib_pci_map_dma(vlib_main_t *vm, vlib_pci_dev_handle_t h, void *ptr)
Definition: pci.c:1232
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:172
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
Definition: node_funcs.h:583
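Illustrative process main-loop skeleton showing how vlib_process_get_events is normally paired with a wait call; this is a generic pattern, not the avf_process body itself:

  uword event_type, *event_data = 0;
  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, 5.0);
      event_type = vlib_process_get_events (vm, &event_data);
      /* dispatch on event_type; event_data holds per-event payloads */
      vec_reset_length (event_data);
    }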
clib_spinlock_t lock
Definition: avf.h:190
#define AVF_SEND_TO_PF_MAX_WAIT_TIME
Definition: avf.h:45
static uword pow2_mask(uword x)
Definition: clib.h:252
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
static u32 avf_reg_read(avf_device_t *ad, u32 addr)
Definition: avf.h:429
static void avf_irq_0_handler(vlib_main_t *vm, vlib_pci_dev_handle_t h, u16 line)
Definition: device.c:1402
volatile u32 * qrx_tail
Definition: avf.h:172
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
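Sketch of bounds-checked access with vec_elt_at_index, assuming an avf_device_t *ad and a queue id qid are in scope:

  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);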
#define avf_stats_log_debug(dev, f,...)
Definition: avf.h:105
#define clib_error_return(e, args...)
Definition: error.h:99
static void avf_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: device.c:1839
vnet_main_t * vnet_get_main(void)
static const char * virtchnl_event_names[]
Definition: device.c:58
clib_error_t * avf_op_add_del_eth_addr(vlib_main_t *vm, avf_device_t *ad, u8 count, u8 *macs, int is_add)
Definition: device.c:751
vlib_log_class_registration_t avf_log
vlib_pci_dev_handle_t pci_dev_handle
Definition: avf.h:224
int is_enable
Definition: avf.h:293
clib_error_t * avf_op_config_rss_lut(vlib_main_t *vm, avf_device_t *ad)
Definition: device.c:588
clib_error_t * avf_op_disable_vlan_stripping_v2(vlib_main_t *vm, avf_device_t *ad, u32 outer, u32 inner)
Definition: device.c:839
virtchnl_ether_addr_t list[1]
Definition: virtchnl.h:376
void * arq_bufs
Definition: avf.h:239
virtchnl_vlan_supported_caps_t stripping_support
Definition: virtchnl.h:440
avf_aq_desc_t * arq
Definition: avf.h:237
static void clib_spinlock_init(clib_spinlock_t *p)
Definition: lock.h:65
static void vlib_buffer_free_from_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring.
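Hedged sketch of vlib_buffer_free_from_ring, releasing the buffer indices still enqueued in an RX ring (rxq is an assumed avf_rxq_t pointer):

  vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
                              rxq->n_enqueued);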
static u32 avf_flag_change(vnet_main_t *vnm, vnet_hw_interface_t *hw, u32 flags)
Definition: device.c:1287
static heap_elt_t * first(heap_header_t *h)
Definition: heap.c:59
u8 * name
Definition: avf.h:227
u32 vnet_hw_if_register_rx_queue(vnet_main_t *vnm, u32 hw_if_index, u32 queue_id, u32 thread_index)
Definition: rx_queue.c:64
virtchnl_eth_stats_t last_cleared_eth_stats
Definition: avf.h:264
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
Definition: node_funcs.h:1019
void vlib_pci_set_private_data(vlib_main_t *vm, vlib_pci_dev_handle_t h, uword private_data)
Definition: pci.c:155
static u8 avf_validate_queue_size(avf_create_if_args_t *args)
Definition: device.c:1545
static u32 avf_get_u32(void *start, int offset)
Definition: avf.h:381
virtchnl_txq_info_t txq
Definition: virtchnl.h:324
u32 rule_len
Definition: avf.h:297
#define AVF_ATQT
Definition: virtchnl.h:48
unsigned short u16
Definition: types.h:57
#define AVF_RESET_MAX_WAIT_TIME
Definition: avf.h:42
#define AVF_QUEUE_SZ_MAX
Definition: avf.h:35
static void avf_clear_hw_interface_counters(u32 instance)
Definition: device.c:1878
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:305
#define vec_dup(V)
Return copy of vector (no header, no alignment)
Definition: vec.h:444
static void avf_adminq_init(vlib_main_t *vm, avf_device_t *ad)
Definition: device.c:371
clib_error_t * vlib_pci_register_msix_handler(vlib_main_t *vm, vlib_pci_dev_handle_t h, u32 start, u32 count, pci_msix_handler_function_t *msix_handler)
Definition: pci.c:838
u64 qword[4]
Definition: avf.h:147
#define ELOG_DATA(em, f)
Definition: elog.h:484
#define AVF_AQ_F_RD
Definition: virtchnl.h:58
#define VIRTCHNL_VERSION_MAJOR
Definition: virtchnl.h:21
uword vlib_pci_get_private_data(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:148
clib_error_t * avf_op_disable_vlan_stripping(vlib_main_t *vm, avf_device_t *ad)
Definition: device.c:635
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
u32 * ph_bufs
Definition: avf.h:189
void vnet_hw_if_update_runtime_data(vnet_main_t *vnm, u32 hw_if_index)
Definition: runtime.c:58
#define vlib_log_err(...)
Definition: log.h:133
vlib_log_class_registration_t avf_stats_log
static void avf_reg_flush(avf_device_t *ad)
Definition: avf.h:440
u32 vlib_pci_dev_handle_t
Definition: pci.h:97
#define AVFINT_ICR0
Definition: virtchnl.h:36
vl_api_tunnel_mode_t mode
Definition: gre.api:48
clib_error_t * avf_device_reset(vlib_main_t *vm, avf_device_t *ad)
Definition: device.c:857
#define PCI_DEVICE_ID_INTEL_X710_VF
Definition: device.c:36
clib_error_t * avf_op_config_irq_map(vlib_main_t *vm, avf_device_t *ad)
Definition: device.c:720
avf_device_t ** devices
Definition: avf.h:325
#define PCI_VENDOR_ID_INTEL
Definition: device.c:34
u8 len
Definition: ip_types.api:103
clib_error_t * avf_request_queues(vlib_main_t *vm, avf_device_t *ad, u16 num_queue_pairs)
Definition: device.c:895
u16 n_rx_queues
Definition: avf.h:233
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
Definition: buffer_funcs.h:122
u8 slot
Definition: pci_types.api:22
u8 hwaddr[6]
Definition: avf.h:248
clib_error_t * avf_op_program_flow(vlib_main_t *vm, avf_device_t *ad, int is_create, u8 *rule, u32 rule_len, u8 *program_status, u32 status_len)
Definition: device.c:1229
u16 atq_next_slot
Definition: avf.h:242
u32 vlib_pci_get_numa_node(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:170
#define AVF_AQ_F_BUF
Definition: virtchnl.h:60
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
u32 queue_index
Definition: avf.h:180
static void vlib_process_signal_event_pointer(vlib_main_t *vm, uword node_index, uword type_opaque, void *data)
Definition: node_funcs.h:1028
u32 numa_node
Definition: avf.h:225
clib_error_t * avf_op_config_promisc_mode(vlib_main_t *vm, avf_device_t *ad, int is_enable)
Definition: device.c:644
static void vlib_physmem_free(vlib_main_t *vm, void *p)
Definition: physmem_funcs.h:89
vlib_node_registration_t avf_input_node
(constructor) VLIB_REGISTER_NODE (avf_input_node)
Definition: input.c:575
#define avf_log_debug(dev, f,...)
Definition: avf.h:100
u16x4 i
Definition: vector_sse42.h:261
static clib_error_t * avf_add_del_mac_address(vnet_hw_interface_t *hw, const u8 *address, u8 is_add)
Definition: device.c:1857
#define vec_free(V)
Free vector's memory (no header).
Definition: vec.h:395
vl_api_pnat_mask_t mask
Definition: pnat.api:45
Definition: avf.h:169
struct virtchnl_pf_event_t::@36::@37 link_event
avf_tx_desc_t * descs
Definition: avf.h:191
#define ETHERNET_INTERFACE_FLAG_ACCEPT_ALL
Definition: ethernet.h:163
u64 buf
Definition: application.c:493
static void avf_irq_n_set_state(avf_device_t *ad, u8 line, avf_irq_state_t state)
Definition: device.c:101
#define ELOG_TYPE_DECLARE(f)
Definition: elog.h:442
virtchnl_ops_t v_opcode
Definition: virtchnl.h:271
static_always_inline avf_device_t * avf_get_device(u32 dev_instance)
Definition: avf.h:368
#define AVFINT_DYN_CTL0
Definition: virtchnl.h:38
u16 vsi_id
Definition: avf.h:246
u32 per_interface_next_index
Definition: avf.h:219
virtchnl_status_code_t v_retval
Definition: virtchnl.h:276
virtchnl_vlan_offload_caps_t offloads
Definition: virtchnl.h:450
u32 * bufs
Definition: avf.h:192
#define AVF_ITR_INT
Definition: device.c:32
clib_error_t * avf_op_enable_queues(vlib_main_t *vm, avf_device_t *ad, u32 rx, u32 tx)
Definition: device.c:780
u32 queue_index
Definition: avf.h:198
#define pool_put_index(p, i)
Free pool element with given index.
Definition: pool.h:337
#define ASSERT(truth)
#define AVF_RXQ_SZ
Definition: device.c:30
void avf_elog_init()
Definition: elog.c:176
avf_aq_desc_t * atq
Definition: avf.h:236
union virtchnl_pf_event_t::@36 event_data
manual_print typedef address
Definition: ip_types.api:96
format_function_t format_avf_vlan_support
Definition: avf.h:363
u32 flags
Definition: avf.h:218
#define AVFINT_DYN_CTLN(x)
Definition: virtchnl.h:35
Definition: avf.h:183
u32 * bufs
Definition: avf.h:176
void * bar0
Definition: avf.h:226
static void clib_mem_free(void *p)
Definition: mem.h:311
format_function_t format_avf_eth_stats
Definition: avf.h:364
#define AVF_MBOX_BUF_SZ
Definition: device.c:29
u32 vnet_hw_if_register_tx_queue(vnet_main_t *vnm, u32 hw_if_index, u32 queue_id)
Definition: tx_queue.c:35
u16 n_enqueued
Definition: avf.h:193
clib_error_t * avf_txq_init(vlib_main_t *vm, avf_device_t *ad, u16 qid, u16 txq_size)
Definition: device.c:297
u16 n_enqueued
Definition: avf.h:177
u8 * format_hex_bytes_no_wrap(u8 *s, va_list *va)
Definition: std-formats.c:112
virtchnl_pf_event_t * events
Definition: avf.h:244
static u32 vlib_get_n_threads()
Definition: global_funcs.h:23
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
Definition: string.h:92
virtchnl_event_codes_t event
Definition: virtchnl.h:237
#define AVF_AQ_ENQ_MAX_WAIT_TIME
Definition: avf.h:39
static void avf_process_handle_request(vlib_main_t *vm, avf_process_req_t *req)
Definition: device.c:1243
static void avf_reg_write(avf_device_t *ad, u32 addr, u32 val)
Definition: avf.h:421
static uword pointer_to_uword(const void *p)
Definition: types.h:131
static char * avf_tx_func_error_strings[]
Definition: device.c:1871
#define clib_max(x, y)
Definition: clib.h:335
virtchnl_eth_stats_t eth_stats
Definition: avf.h:263
void * atq_bufs
Definition: avf.h:238
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:38
vl_api_ip4_address_t hi
Definition: arp.api:37
#define AVFGEN_RSTAT
Definition: virtchnl.h:49
u16 num_queue_pairs
Definition: avf.h:249
u16 next
Definition: avf.h:187
#define PCI_DEVICE_ID_INTEL_X722_VF
Definition: device.c:37
u8 eth_addr[6]
Definition: avf.h:292
clib_error_t * avf_op_config_rss_key(vlib_main_t *vm, avf_device_t *ad)
Definition: device.c:611
u32 dev_instance
Definition: avf.h:290
virtchnl_rxq_info_t rxq
Definition: virtchnl.h:325
void vnet_hw_if_set_input_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: rx_queue.c:157
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
#define AVF_ARQBAL
Definition: virtchnl.h:42
u32 rss_lut_size
Definition: avf.h:254
u16 n_tx_queues
Definition: avf.h:232
#define AVF_QUEUE_SZ_MIN
Definition: avf.h:36
struct virtchnl_pf_event_t::@36::@38 link_event_adv
static u32 vlib_get_current_process_node_index(vlib_main_t *vm)
Definition: node_funcs.h:463
#define AVF_AQ_F_CMP
Definition: virtchnl.h:54
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
void vnet_hw_if_tx_queue_assign_thread(vnet_main_t *vnm, u32 queue_index, u32 thread_index)
Definition: tx_queue.c:109
clib_error_t * ethernet_register_interface(vnet_main_t *vnm, u32 dev_class_index, u32 dev_instance, const u8 *address, u32 *hw_if_index_return, ethernet_flag_change_function_t flag_change)
Definition: interface.c:348
u32 instance
Definition: gre.api:51
clib_error_t * avf_cmd_rx_ctl_reg_write(vlib_main_t *vm, avf_device_t *ad, u32 reg, u32 val)
Definition: device.c:218
format_function_t format_avf_device_name
Definition: avf.h:358
clib_error_t * avf_program_flow(u32 dev_instance, int is_add, u8 *rule, u32 rule_len, u8 *program_status, u32 status_len)
Definition: device.c:1886
avf_main_t avf_main
Definition: device.c:48
#define foreach_virtchnl_op
Definition: virtchnl.h:65
VLIB buffer representation.
Definition: buffer.h:111
#define foreach_avf_tx_func_error
Definition: avf.h:478
u64 uword
Definition: types.h:112
u16 size
Definition: avf.h:174
u16 arq_next_slot
Definition: avf.h:243
node node_index
#define clib_error_free(e)
Definition: error.h:86
clib_error_t * vlib_pci_map_region(vlib_main_t *vm, vlib_pci_dev_handle_t h, u32 resource, void **result)
Definition: pci.c:1182
clib_error_t * avf_op_get_stats(vlib_main_t *vm, avf_device_t *ad, virtchnl_eth_stats_t *es)
Definition: device.c:806
avf_rxq_t * rxqs
Definition: avf.h:230
virtchnl_vsi_resource_t vsi_res[1]
Definition: virtchnl.h:201
int vlib_pci_supports_virtual_addr_dma(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:1243
static void * clib_mem_alloc_aligned(uword size, uword align)
Definition: mem.h:261
clib_error_t * error
Definition: avf.h:343
static uword avf_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
Definition: device.c:1320
static u32 random_u32(u32 *seed)
32-bit random number generator
Definition: random.h:69
void vlib_worker_thread_barrier_release(vlib_main_t *vm)
Definition: threads.c:1386
avf_per_thread_data_t * per_thread_data
Definition: avf.h:326
static uword vlib_buffer_get_va(vlib_buffer_t *b)
Definition: buffer.h:245
static vlib_thread_main_t * vlib_get_thread_main()
Definition: global_funcs.h:56
u16 size
Definition: avf.h:188
vlib_pci_addr_t pci_addr
Definition: avf.h:256
vl_api_dhcp_client_state_t state
Definition: dhcp.api:201
#define VNET_HW_INTERFACE_CAP_SUPPORTS_L4_TX_CKSUM
Definition: interface.h:555
vnet_hw_if_rx_mode
Definition: interface.h:53
u32 sw_if_index
Definition: avf.h:222
#define ETHERNET_MAX_PACKET_BYTES
Definition: ethernet.h:133
#define vec_foreach(var, vec)
Vector iterator.
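Example of vec_foreach (illustrative): the iterator variable is a pointer that walks every element of the vector in order:

  avf_rxq_t *rxq;
  vec_foreach (rxq, ad->rxqs)
    {
      /* use rxq->size, rxq->n_enqueued, ... */
    }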
u64 arq_bufs_pa
Definition: avf.h:241
u8 count
Definition: dhcp.api:208
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:137
static clib_error_t * avf_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: device.c:1784
#define avf_log_err(dev, f,...)
Definition: avf.h:90
#define AVF_ATQBAL
Definition: virtchnl.h:46
elog_main_t elog_main
Definition: main.h:300
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
#define VIRTCHNL_VERSION_MINOR
Definition: virtchnl.h:22
static u8 vlib_buffer_pool_get_default_for_numa(vlib_main_t *vm, u32 numa_node)
Definition: buffer_funcs.h:189
#define clib_ring_free(f)
Definition: ring.h:59
app_main_t * am
Definition: application.c:489
#define VNET_HW_IF_RXQ_THREAD_ANY
Definition: interface.h:598
format_function_t format_avf_vf_cap_flags
Definition: avf.h:360
#define AVF_MBOX_LEN
Definition: device.c:28
clib_error_t * vlib_pci_device_open(vlib_main_t *vm, vlib_pci_addr_t *addr, pci_device_id_t ids[], vlib_pci_dev_handle_t *handle)
Definition: pci.c:1251
u16 next
Definition: avf.h:173
__clib_export u8 * format_clib_error(u8 *s, va_list *va)
Definition: error.c:191
#define VLIB_INITS(...)
Definition: init.h:352
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
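Sketch of vlib_get_buffer, turning a buffer index bi (assumed in scope) into a pointer so the buffer metadata can be read:

  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  u16 len = b->current_length;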
static void vnet_hw_interface_set_link_speed(vnet_main_t *vnm, u32 hw_if_index, u32 link_speed)
vlib_global_main_t vlib_global_main
Definition: main.c:1786
#define clib_panic(format, args...)
Definition: error.h:72
avf_txq_t * txqs
Definition: avf.h:231
avf_rx_desc_t * descs
Definition: avf.h:175
void vnet_hw_if_set_rx_queue_file_index(vnet_main_t *vnm, u32 queue_index, u32 file_index)
Definition: rx_queue.c:144
u16 vendor_id
Definition: pci.h:127
format_function_t format_vlib_pci_addr
Definition: pci.h:326
#define AVF_AQ_F_ERR
Definition: virtchnl.h:55
static __clib_warn_unused_result u32 vlib_buffer_alloc_from_pool(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers from specific pool into supplied array.
Definition: buffer_funcs.h:597
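Illustrative use of vlib_buffer_alloc_from_pool; the return value is the number of buffers actually obtained and must be checked (rolling back on a partial allocation is one possible policy, not the only one):

  u32 bufs[8];
  u32 n = vlib_buffer_alloc_from_pool (vm, bufs, 8, rxq->buffer_pool_index);
  if (n < 8)
    vlib_buffer_free (vm, bufs, n);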
u32 ethernet_set_flags(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: interface.c:441
u16 max_vectors
Definition: avf.h:250
u32 rss_key_size
Definition: avf.h:253
u16 max_mtu
Definition: avf.h:252
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:127