FD.io VPP  v19.04.2-12-g66b1689
Vector Packet Processing
device.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vppinfra/ring.h>
20 #include <vlib/unix/unix.h>
21 #include <vlib/pci/pci.h>
22 #include <vnet/ethernet/ethernet.h>
23 
24 #include <avf/avf.h>
25 
26 #define AVF_MBOX_LEN 64
27 #define AVF_MBOX_BUF_SZ 512
28 #define AVF_RXQ_SZ 512
29 #define AVF_TXQ_SZ 512
30 #define AVF_ITR_INT 8160
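/* AVF_ITR_INT is the interrupt throttling interval used below; the
   DYN_CTL(N) ITR field takes it in 2 usec units, hence the "/ 2" when it
   is programmed (8160 -> roughly 8 ms between interrupts). */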
31 
32 #define PCI_VENDOR_ID_INTEL 0x8086
33 #define PCI_DEVICE_ID_INTEL_AVF 0x1889
34 #define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
35 #define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
36 
37 avf_main_t avf_main;
38 
39 static pci_device_id_t avf_pci_device_ids[] = {
40  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_AVF},
41  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X710_VF},
42  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X722_VF},
43  {0},
44 };
45 
46 static inline void
47 avf_irq_0_disable (avf_device_t * ad)
48 {
49  u32 dyn_ctl0 = 0, icr0_ena = 0;
50 
51  dyn_ctl0 |= (3 << 3); /* 11b = No ITR update */
52 
53  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
54  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
55  avf_reg_flush (ad);
56 }
57 
58 static inline void
59 avf_irq_0_enable (avf_device_t * ad)
60 {
61  u32 dyn_ctl0 = 0, icr0_ena = 0;
62 
63  icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */
64 
65  dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
66  dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
67  //dyn_ctl0 |= (3 << 3); /* [4:3] ITR Index, 11b = No ITR update */
68  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
69 
70  avf_irq_0_disable (ad);
71  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
72  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
73  avf_reg_flush (ad);
74 }
75 
76 static inline void
77 avf_irq_n_disable (avf_device_t * ad, u8 line)
78 {
79  u32 dyn_ctln = 0;
80 
81  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
82  avf_reg_flush (ad);
83 }
84 
85 static inline void
86 avf_irq_n_enable (avf_device_t * ad, u8 line)
87 {
88  u32 dyn_ctln = 0;
89 
90  dyn_ctln |= (1 << 0); /* [0] Interrupt Enable */
91  dyn_ctln |= (1 << 1); /* [1] Clear PBA */
92  dyn_ctln |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
93 
94  avf_irq_n_disable (ad, line);
95  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
96  avf_reg_flush (ad);
97 }
98 
99 
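/* avf_aq_desc_enq: post one descriptor (plus optional indirect buffer) on
   the admin transmit queue, bump the tail register, then poll with
   exponential back-off until the device sets the DD/CMP completion flags
   or the wait times out. */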
100 clib_error_t *
101 avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
102  void *data, int len)
103 {
104  clib_error_t *err = 0;
105  avf_aq_desc_t *d, dc;
106  f64 t0, wait_time, suspend_time = AVF_AQ_ENQ_SUSPEND_TIME;
107 
108  d = &ad->atq[ad->atq_next_slot];
109  clib_memcpy_fast (d, dt, sizeof (avf_aq_desc_t));
110  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
111  if (len)
112  d->datalen = len;
113  if (len)
114  {
115  u64 pa;
116  pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
117  d->addr_hi = (u32) (pa >> 32);
118  d->addr_lo = (u32) pa;
119  clib_memcpy_fast (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ,
120  data, len);
121  d->flags |= AVF_AQ_F_BUF;
122  }
123 
124  if (ad->flags & AVF_DEVICE_F_ELOG)
125  clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));
126 
127  CLIB_MEMORY_BARRIER ();
128  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
129  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot); /* Tail */
130  avf_reg_flush (ad);
131 
132  t0 = vlib_time_now (vm);
133 retry:
134  vlib_process_suspend (vm, suspend_time);
135  wait_time = vlib_time_now (vm) - t0;
136 
137  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
138  {
139  if (wait_time > AVF_AQ_ENQ_MAX_WAIT_TIME)
140  {
141  err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
142  d->opcode);
143  goto done;
144  }
145  suspend_time *= 2;
146  goto retry;
147  }
148 
149  clib_memcpy_fast (dt, d, sizeof (avf_aq_desc_t));
150  if (d->flags & AVF_AQ_F_ERR)
151  return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
152  "%d]", d->opcode, d->retval);
153 
154 done:
155  if (ad->flags & AVF_DEVICE_F_ELOG)
156  {
157  /* *INDENT-OFF* */
158  ELOG_TYPE_DECLARE (el) =
159  {
160  .format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
161  "datalen %d retval %d",
162  .format_args = "i4i2i2i2i2i2",
163  };
164  struct
165  {
166  u32 dev_instance;
167  u16 s_flags;
168  u16 r_flags;
169  u16 opcode;
170  u16 datalen;
171  u16 retval;
172  } *ed;
173  ed = ELOG_DATA (&vm->elog_main, el);
174  ed->dev_instance = ad->dev_instance;
175  ed->s_flags = dc.flags;
176  ed->r_flags = d->flags;
177  ed->opcode = dc.opcode;
178  ed->datalen = dc.datalen;
179  ed->retval = d->retval;
180  /* *INDENT-ON* */
181  }
182 
183  return err;
184 }
185 
186 clib_error_t *
187 avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
188  u32 val)
189 {
190  clib_error_t *err;
191  avf_aq_desc_t d = {.opcode = 0x207,.param1 = reg,.param3 = val };
192  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
193 
194  if (ad->flags & AVF_DEVICE_F_ELOG)
195  {
196  /* *INDENT-OFF* */
197  ELOG_TYPE_DECLARE (el) =
198  {
199  .format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
200  .format_args = "i4i4i4",
201  };
202  struct
203  {
204  u32 dev_instance;
205  u32 reg;
206  u32 val;
207  } *ed;
208  ed = ELOG_DATA (&vm->elog_main, el);
209  ed->dev_instance = ad->dev_instance;
210  ed->reg = reg;
211  ed->val = val;
212  /* *INDENT-ON* */
213  }
214  return err;
215 }
216 
217 clib_error_t *
218 avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
219 {
220  clib_error_t *err;
221  avf_rxq_t *rxq;
222  u32 n_alloc, i;
223 
224  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
225  rxq = vec_elt_at_index (ad->rxqs, qid);
226  rxq->size = rxq_size;
227  rxq->next = 0;
228  rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
229  sizeof (avf_rx_desc_t),
230  2 * CLIB_CACHE_LINE_BYTES,
231  ad->numa_node);
232 
233  rxq->buffer_pool_index =
234  vlib_buffer_pool_get_default_for_numa (vm, ad->numa_node);
235 
236  if (rxq->descs == 0)
237  return vlib_physmem_last_error (vm);
238 
239  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) rxq->descs)))
240  return err;
241 
242  clib_memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
243  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
244  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
245 
246  n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
247  rxq->buffer_pool_index);
248 
249  if (n_alloc == 0)
250  return clib_error_return (0, "buffer allocation error");
251 
252  rxq->n_enqueued = n_alloc;
253  avf_rx_desc_t *d = rxq->descs;
254  for (i = 0; i < n_alloc; i++)
255  {
256  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
257  if (ad->flags & AVF_DEVICE_F_VA_DMA)
258  d->qword[0] = vlib_buffer_get_va (b);
259  else
260  d->qword[0] = vlib_buffer_get_pa (vm, b);
261  d++;
262  }
263 
264  ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
265  return 0;
266 }
267 
268 clib_error_t *
269 avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
270 {
271  clib_error_t *err;
272  avf_txq_t *txq;
273 
274  if (qid >= ad->num_queue_pairs)
275  {
276  qid = qid % ad->num_queue_pairs;
277  txq = vec_elt_at_index (ad->txqs, qid);
278  if (txq->lock == 0)
279  clib_spinlock_init (&txq->lock);
280  ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
281  return 0;
282  }
283 
284  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
285  txq = vec_elt_at_index (ad->txqs, qid);
286  txq->size = txq_size;
287  txq->next = 0;
288  txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
289  sizeof (avf_tx_desc_t),
290  2 * CLIB_CACHE_LINE_BYTES,
291  ad->numa_node);
292  if (txq->descs == 0)
293  return vlib_physmem_last_error (vm);
294 
295  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) txq->descs)))
296  return err;
297 
298  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
299  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
300 
301  /* initialize ring of pending RS slots */
302  clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
303 
304  ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
305  return 0;
306 }
307 
308 typedef struct
309 {
310  u16 vsi_id;
311  u16 flags;
312 } virtchnl_promisc_info_t;
313 
314 void
315 avf_arq_slot_init (avf_device_t * ad, u16 slot)
316 {
317  avf_aq_desc_t *d;
318  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
319  d = &ad->arq[slot];
320  clib_memset (d, 0, sizeof (avf_aq_desc_t));
321  d->flags = AVF_AQ_F_BUF;
322  d->datalen = AVF_MBOX_BUF_SZ;
323  d->addr_hi = (u32) (pa >> 32);
324  d->addr_lo = (u32) pa;
325 }
326 
327 static inline uword
328 avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
329 {
330  return (ad->flags & AVF_DEVICE_F_VA_DMA) ?
331  pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
332 }
333 
334 static void
335 avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
336 {
337  u64 pa;
338  int i;
339 
340  /* VF MailBox Transmit */
341  clib_memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
342  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);
343 
344  pa = avf_dma_addr (vm, ad, ad->atq);
345  avf_reg_write (ad, AVF_ATQT, 0); /* Tail */
346  avf_reg_write (ad, AVF_ATQH, 0); /* Head */
347  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
348  avf_reg_write (ad, AVF_ATQBAL, (u32) pa); /* Base Address Low */
349  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
350 
351  /* VF MailBox Receive */
352  clib_memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
353  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);
354 
355  for (i = 0; i < AVF_MBOX_LEN; i++)
356  avf_arq_slot_init (ad, i);
357 
358  pa = avf_dma_addr (vm, ad, ad->arq);
359 
360  avf_reg_write (ad, AVF_ARQH, 0); /* Head */
361  avf_reg_write (ad, AVF_ARQT, 0); /* Tail */
362  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
363  avf_reg_write (ad, AVF_ARQBAL, (u32) pa); /* Base Address Low */
364  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
365  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1); /* Tail */
366 
367  ad->atq_next_slot = 0;
368  ad->arq_next_slot = 0;
369 }
370 
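/* avf_send_to_pf: send a virtchnl request (opcode 0x801) to the PF over the
   admin queue and wait for the matching reply on the receive queue; PF
   events (e.g. link change) that arrive in the meantime are queued on
   ad->events for the avf-process node to handle. */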
371 clib_error_t *
372 avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
373  void *in, int in_len, void *out, int out_len)
374 {
375  clib_error_t *err;
376  avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };
377  u32 head;
378  int n_retry = 5;
379 
380 
381  /* suppress interrupt in the next adminq receive slot
382  as we are going to wait for response
383  we only need interrupts when event is received */
384  d = &ad->arq[ad->arq_next_slot];
385  d->flags |= AVF_AQ_F_SI;
386 
387  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
388  return err;
389 
390 retry:
391  head = avf_get_u32 (ad->bar0, AVF_ARQH);
392 
393  if (ad->arq_next_slot == head)
394  {
395  if (--n_retry == 0)
396  return clib_error_return (0, "timeout");
397  vlib_process_suspend (vm, 10e-3);
398  goto retry;
399  }
400 
401  d = &ad->arq[ad->arq_next_slot];
402 
403  if (d->v_opcode == VIRTCHNL_OP_EVENT)
404  {
405  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
406  virtchnl_pf_event_t *e;
407 
408  if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
409  ((d->flags & AVF_AQ_F_BUF) == 0))
410  return clib_error_return (0, "event message error");
411 
412  vec_add2 (ad->events, e, 1);
413  clib_memcpy_fast (e, buf, sizeof (virtchnl_pf_event_t));
414  avf_arq_slot_init (ad, ad->arq_next_slot);
415  ad->arq_next_slot++;
416  n_retry = 5;
417  goto retry;
418  }
419 
420  if (d->v_opcode != op)
421  {
422  err =
423  clib_error_return (0,
424  "unexpected message received [v_opcode = %u, "
425  "expected %u, v_retval %d]", d->v_opcode, op,
426  d->v_retval);
427  goto done;
428  }
429 
430  if (d->v_retval)
431  {
432  err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
433  d->v_opcode, d->v_retval);
434  goto done;
435  }
436 
437  if (d->flags & AVF_AQ_F_BUF)
438  {
439  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
440  clib_memcpy_fast (out, buf, out_len);
441  }
442 
443  avf_arq_slot_init (ad, ad->arq_next_slot);
444  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
445  avf_reg_flush (ad);
446  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;
447 
448 done:
449 
450  if (ad->flags & AVF_DEVICE_F_ELOG)
451  {
452  /* *INDENT-OFF* */
453  ELOG_TYPE_DECLARE (el) =
454  {
455  .format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
456  .format_args = "i4t4i4i4",
457  .n_enum_strings = VIRTCHNL_N_OPS,
458  .enum_strings = {
459 #define _(v, n) [v] = #n,
460  foreach_virtchnl_op
461 #undef _
462  },
463  };
464  struct
465  {
466  u32 dev_instance;
467  u32 v_opcode;
468  u32 v_opcode_val;
469  u32 v_retval;
470  } *ed;
471  ed = ELOG_DATA (&vm->elog_main, el);
472  ed->dev_instance = ad->dev_instance;
473  ed->v_opcode = op;
474  ed->v_opcode_val = op;
475  ed->v_retval = d->v_retval;
476  /* *INDENT-ON* */
477  }
478  return err;
479 }
480 
481 clib_error_t *
482 avf_op_version (vlib_main_t * vm, avf_device_t * ad,
483  virtchnl_version_info_t * ver)
484 {
485  clib_error_t *err = 0;
486  virtchnl_version_info_t myver = {
487  .major = VIRTCHNL_VERSION_MAJOR,
488  .minor = VIRTCHNL_VERSION_MINOR,
489  };
490 
491  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
492  sizeof (virtchnl_version_info_t), ver,
493  sizeof (virtchnl_version_info_t));
494 
495  if (err)
496  return err;
497 
498  return err;
499 }
500 
501 clib_error_t *
502 avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
503  virtchnl_vf_resource_t * res)
504 {
505  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
506  VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
507  VIRTCHNL_VF_OFFLOAD_RX_POLLING);
508 
509  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
510  sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
511 }
512 
513 clib_error_t *
514 avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
515 {
516  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
517  int i;
518  u8 msg[msg_len];
519  virtchnl_rss_lut_t *rl;
520 
521  clib_memset (msg, 0, msg_len);
522  rl = (virtchnl_rss_lut_t *) msg;
523  rl->vsi_id = ad->vsi_id;
524  rl->lut_entries = ad->rss_lut_size;
525  for (i = 0; i < ad->rss_lut_size; i++)
526  rl->lut[i] = i % ad->n_rx_queues;
527 
528  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
529  0);
530 }
531 
532 clib_error_t *
533 avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
534 {
535  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
536  int i;
537  u8 msg[msg_len];
538  virtchnl_rss_key_t *rk;
539 
540  clib_memset (msg, 0, msg_len);
541  rk = (virtchnl_rss_key_t *) msg;
542  rk->vsi_id = ad->vsi_id;
543  rk->key_len = ad->rss_key_size;
544  u32 seed = random_default_seed ();
545  for (i = 0; i < ad->rss_key_size; i++)
546  rk->key[i] = (u8) random_u32 (&seed);
547 
548  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
549  0);
550 }
551 
552 clib_error_t *
553 avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
554 {
555  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
556  0);
557 }
558 
559 clib_error_t *
560 avf_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad)
561 {
562  virtchnl_promisc_info_t pi = { 0 };
563 
564  pi.vsi_id = ad->vsi_id;
565  pi.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
566  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
567  sizeof (virtchnl_promisc_info_t), 0, 0);
568 }
569 
570 
571 clib_error_t *
572 avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
573 {
574  int i;
575  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
576  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
577  sizeof (virtchnl_queue_pair_info_t);
578  u8 msg[msg_len];
579  virtchnl_vsi_queue_config_info_t *ci;
580 
581  clib_memset (msg, 0, msg_len);
582  ci = (virtchnl_vsi_queue_config_info_t *) msg;
583  ci->vsi_id = ad->vsi_id;
584  ci->num_queue_pairs = n_qp;
585 
586  for (i = 0; i < n_qp; i++)
587  {
588  virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
589  virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;
590 
591  rxq->vsi_id = ad->vsi_id;
592  rxq->queue_id = i;
593  rxq->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
594  if (i < vec_len (ad->rxqs))
595  {
596  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
597  rxq->ring_len = q->size;
598  rxq->databuffer_size = vlib_buffer_get_default_data_size (vm);
599  rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
600  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
601  }
602 
603  avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
604  txq->vsi_id = ad->vsi_id;
605  if (i < vec_len (ad->txqs))
606  {
607  txq->queue_id = i;
608  txq->ring_len = q->size;
609  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
610  }
611  }
612 
613  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
614  0, 0);
615 }
616 
617 clib_error_t *
618 avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
619 {
620  int count = 1;
621  int msg_len = sizeof (virtchnl_irq_map_info_t) +
622  count * sizeof (virtchnl_vector_map_t);
623  u8 msg[msg_len];
624  virtchnl_irq_map_info_t *imi;
625 
626  clib_memset (msg, 0, msg_len);
627  imi = (virtchnl_irq_map_info_t *) msg;
628  imi->num_vectors = count;
629 
630  imi->vecmap[0].vector_id = 1;
631  imi->vecmap[0].vsi_id = ad->vsi_id;
632  imi->vecmap[0].rxq_map = (1 << ad->n_rx_queues) - 1;
633  imi->vecmap[0].txq_map = (1 << ad->n_tx_queues) - 1;
634  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
635  0);
636 }
637 
638 clib_error_t *
639 avf_op_add_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count, u8 * macs)
640 {
641  int msg_len =
642  sizeof (virtchnl_ether_addr_list_t) +
643  count * sizeof (virtchnl_ether_addr_t);
644  u8 msg[msg_len];
645  virtchnl_ether_addr_list_t *al;
646  int i;
647 
648  clib_memset (msg, 0, msg_len);
649  al = (virtchnl_ether_addr_list_t *) msg;
650  al->vsi_id = ad->vsi_id;
651  al->num_elements = count;
652  for (i = 0; i < count; i++)
653  clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
654  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,
655  0);
656 }
657 
658 clib_error_t *
659 avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
660 {
661  virtchnl_queue_select_t qs = { 0 };
662  int i = 0;
663  qs.vsi_id = ad->vsi_id;
664  qs.rx_queues = rx;
665  qs.tx_queues = tx;
666  while (rx)
667  {
668  if (rx & (1 << i))
669  {
670  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
671  avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
672  rx &= ~(1 << i);
673  }
674  i++;
675  }
676  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
677  sizeof (virtchnl_queue_select_t), 0, 0);
678 }
679 
680 clib_error_t *
681 avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
682  virtchnl_eth_stats_t * es)
683 {
684  virtchnl_queue_select_t qs = { 0 };
685  qs.vsi_id = ad->vsi_id;
686  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
687  &qs, sizeof (virtchnl_queue_select_t),
688  es, sizeof (virtchnl_eth_stats_t));
689 }
690 
691 clib_error_t *
692 avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
693 {
694  avf_aq_desc_t d = { 0 };
695  clib_error_t *error;
696  u32 rstat;
697  int n_retry = 20;
698 
699  d.opcode = 0x801;
700  d.v_opcode = VIRTCHNL_OP_RESET_VF;
701  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
702  return error;
703 
704 retry:
705  vlib_process_suspend (vm, 10e-3);
706  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
707 
708  if (rstat == 2 || rstat == 3)
709  return 0;
710 
711  if (--n_retry == 0)
712  return clib_error_return (0, "reset failed (timeout)");
713 
714  goto retry;
715 }
716 
717 clib_error_t *
718 avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
719 {
720  virtchnl_vf_res_request_t res_req = { 0 };
721  clib_error_t *error;
722  u32 rstat;
723  int n_retry = 20;
724 
725  res_req.num_queue_pairs = num_queue_pairs;
726 
727  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
728  sizeof (virtchnl_vf_res_request_t), &res_req,
729  sizeof (virtchnl_vf_res_request_t));
730 
731  /*
732  * if PF responds, the request failed
733  * else PF initializes restart and avf_send_to_pf returns an error
734  */
735  if (!error)
736  {
737  return clib_error_return (0, "requested more than %u queue pairs",
738  res_req.num_queue_pairs);
739  }
740 
741 retry:
742  vlib_process_suspend (vm, 10e-3);
743  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
744 
745  if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
746  goto done;
747 
748  if (--n_retry == 0)
749  return clib_error_return (0, "reset failed (timeout)");
750 
751  goto retry;
752 
753 done:
754  return NULL;
755 }
756 
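/* avf_device_init: bring the VF up over virtchnl - (re)initialize the
   mailbox, negotiate the API version, fetch VF resources, disable VLAN
   stripping, enable promiscuous mode, configure RX/TX queues, RSS and the
   IRQ map, program the MAC address and finally enable the queues. */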
757 clib_error_t *
758 avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
759  avf_create_if_args_t * args)
760 {
761  virtchnl_version_info_t ver = { 0 };
762  virtchnl_vf_resource_t res = { 0 };
763  clib_error_t *error;
764  vlib_thread_main_t *tm = vlib_get_thread_main ();
765  int i;
766 
767  avf_adminq_init (vm, ad);
768 
769  /* request more queues only if we need them */
770  if ((error = avf_request_queues (vm, ad, tm->n_vlib_mains)))
771  {
772  /* we failed to get more queues, but still we want to proceed */
773  clib_error_free (error);
774 
775  if ((error = avf_device_reset (vm, ad)))
776  return error;
777  }
778 
779  avf_adminq_init (vm, ad);
780 
781  /*
782  * OP_VERSION
783  */
784  if ((error = avf_op_version (vm, ad, &ver)))
785  return error;
786 
787  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
788  ver.minor != VIRTCHNL_VERSION_MINOR)
789  return clib_error_return (0, "incompatible protocol version "
790  "(remote %d.%d)", ver.major, ver.minor);
791 
792  /*
793  * OP_GET_VF_RESOURCES
794  */
795  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
796  return error;
797 
798  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
799  return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");
800 
801  ad->vsi_id = res.vsi_res[0].vsi_id;
802  ad->feature_bitmap = res.vf_offload_flags;
803  ad->num_queue_pairs = res.num_queue_pairs;
804  ad->max_vectors = res.max_vectors;
805  ad->max_mtu = res.max_mtu;
806  ad->rss_key_size = res.rss_key_size;
807  ad->rss_lut_size = res.rss_lut_size;
808 
808 
809  clib_memcpy_fast (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);
810 
811  /*
812  * Disable VLAN stripping
813  */
814  if ((error = avf_op_disable_vlan_stripping (vm, ad)))
815  return error;
816 
817  if ((error = avf_config_promisc_mode (vm, ad)))
818  return error;
819 
820  /*
821  * Init Queues
822  */
823  if (args->rxq_num == 0)
824  {
825  args->rxq_num = 1;
826  }
827  else if (args->rxq_num > ad->num_queue_pairs)
828  {
829  args->rxq_num = ad->num_queue_pairs;
830  vlib_log_warn (am->log_class, "Requested more rx queues than "
831  "queue pairs available. Using %u rx queues.",
832  args->rxq_num);
833  }
834 
835  for (i = 0; i < args->rxq_num; i++)
836  if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
837  return error;
838 
839  for (i = 0; i < tm->n_vlib_mains; i++)
840  if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
841  return error;
842 
843  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
844  (error = avf_op_config_rss_lut (vm, ad)))
845  return error;
846 
847  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
848  (error = avf_op_config_rss_key (vm, ad)))
849  return error;
850 
851  if ((error = avf_op_config_vsi_queues (vm, ad)))
852  return error;
853 
854  if ((error = avf_op_config_irq_map (vm, ad)))
855  return error;
856 
857  avf_irq_0_enable (ad);
858  for (i = 0; i < ad->n_rx_queues; i++)
859  avf_irq_n_enable (ad, i);
860 
861  if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))
862  return error;
863 
864  if ((error = avf_op_enable_queues (vm, ad, pow2_mask (ad->n_rx_queues),
865  pow2_mask (ad->n_tx_queues))))
866  return error;
867 
868  ad->flags |= AVF_DEVICE_F_INITIALIZED;
869  return error;
870 }
871 
872 void
873 avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
874 {
875  avf_main_t *am = &avf_main;
876  vnet_main_t *vnm = vnet_get_main ();
877  virtchnl_pf_event_t *e;
878  u32 r;
879 
880  if (ad->flags & AVF_DEVICE_F_ERROR)
881  return;
882 
883  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
884  return;
885 
886  ASSERT (ad->error == 0);
887 
888  /* do not process device in reset state */
889  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
890  if (r != VIRTCHNL_VFR_VFACTIVE)
891  return;
892 
893  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
894  if ((r & 0xf0000000) != (1ULL << 31))
895  {
896  ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
897  goto error;
898  }
899 
900  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
901  if ((r & 0xf0000000) != (1ULL << 31))
902  {
903  ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
904  goto error;
905  }
906 
907  if (is_irq == 0)
908  avf_op_get_stats (vm, ad, &ad->eth_stats);
909 
910  /* *INDENT-OFF* */
911  vec_foreach (e, ad->events)
912  {
913  if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
914  {
915  int link_up = e->event_data.link_event.link_status;
916  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
917  u32 flags = 0;
918  u32 kbps = 0;
919 
920  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
921  {
922  ad->flags |= AVF_DEVICE_F_LINK_UP;
923  flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
924  VNET_HW_INTERFACE_FLAG_LINK_UP);
925  if (speed == VIRTCHNL_LINK_SPEED_40GB)
926  kbps = 40000000;
927  else if (speed == VIRTCHNL_LINK_SPEED_25GB)
928  kbps = 25000000;
929  else if (speed == VIRTCHNL_LINK_SPEED_10GB)
930  kbps = 10000000;
931  else if (speed == VIRTCHNL_LINK_SPEED_1GB)
932  kbps = 1000000;
933  else if (speed == VIRTCHNL_LINK_SPEED_100MB)
934  kbps = 100000;
935  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
936  vnet_hw_interface_set_link_speed (vnm, ad->hw_if_index, kbps);
937  ad->link_speed = speed;
938  }
939  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
940  {
941  ad->flags &= ~AVF_DEVICE_F_LINK_UP;
942  ad->link_speed = 0;
943  }
944 
945  if (ad->flags & AVF_DEVICE_F_ELOG)
946  {
947  ELOG_TYPE_DECLARE (el) =
948  {
949  .format = "avf[%d] link change: link_status %d "
950  "link_speed %d",
951  .format_args = "i4i1i1",
952  };
953  struct
954  {
955  u32 dev_instance;
956  u8 link_status;
957  u8 link_speed;
958  } *ed;
959  ed = ELOG_DATA (&vm->elog_main, el);
960  ed->dev_instance = ad->dev_instance;
961  ed->link_status = link_up;
962  ed->link_speed = speed;
963  }
964  }
965  else
966  {
967  if (ad->flags & AVF_DEVICE_F_ELOG)
968  {
969  ELOG_TYPE_DECLARE (el) =
970  {
971  .format = "avf[%d] unknown event: event %d severity %d",
972  .format_args = "i4i4i1i1",
973  };
974  struct
975  {
976  u32 dev_instance;
977  u32 event;
978  u32 severity;
979  } *ed;
980  ed = ELOG_DATA (&vm->elog_main, el);
981  ed->dev_instance = ad->dev_instance;
982  ed->event = e->event;
983  ed->severity = e->severity;
984  }
985  }
986  }
987  /* *INDENT-ON* */
988  vec_reset_length (ad->events);
989 
990  return;
991 
992 error:
993  ad->flags |= AVF_DEVICE_F_ERROR;
994  ASSERT (ad->error != 0);
995  vlib_log_err (am->log_class, "%U", format_clib_error, ad->error);
996 }
997 
998 static u32
999 avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
1000 {
1001  avf_main_t *am = &avf_main;
1002  vlib_log_warn (am->log_class, "TODO");
1003  return 0;
1004 }
1005 
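/* avf-process: housekeeping process node - wakes up periodically (or
   immediately on an admin-queue interrupt event) and calls
   avf_process_one_device () for every attached device to collect stats and
   drain pending PF events. */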
1006 static uword
1007 avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
1008 {
1009  avf_main_t *am = &avf_main;
1010  avf_device_t *ad;
1011  uword *event_data = 0, event_type;
1012  int enabled = 0, irq;
1013  f64 last_run_duration = 0;
1014  f64 last_periodic_time = 0;
1015 
1016  while (1)
1017  {
1018  if (enabled)
1019  vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
1020  else
1021  vlib_process_wait_for_event (vm);
1022 
1023  event_type = vlib_process_get_events (vm, &event_data);
1024  vec_reset_length (event_data);
1025  irq = 0;
1026 
1027  switch (event_type)
1028  {
1029  case ~0:
1030  last_periodic_time = vlib_time_now (vm);
1031  break;
1032  case AVF_PROCESS_EVENT_START:
1033  enabled = 1;
1034  break;
1035  case AVF_PROCESS_EVENT_STOP:
1036  enabled = 0;
1037  continue;
1038  case AVF_PROCESS_EVENT_AQ_INT:
1039  irq = 1;
1040  break;
1041  default:
1042  ASSERT (0);
1043  }
1044 
1045  /* *INDENT-OFF* */
1046  pool_foreach (ad, am->devices,
1047  {
1048  avf_process_one_device (vm, ad, irq);
1049  });
1050  /* *INDENT-ON* */
1051  last_run_duration = vlib_time_now (vm) - last_periodic_time;
1052  }
1053  return 0;
1054 }
1055 
1056 /* *INDENT-OFF* */
1057 VLIB_REGISTER_NODE (avf_process_node, static)  = {
1058  .function = avf_process,
1059  .type = VLIB_NODE_TYPE_PROCESS,
1060  .name = "avf-process",
1061 };
1062 /* *INDENT-ON* */
1063 
1064 static void
1065 avf_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1066 {
1067  avf_main_t *am = &avf_main;
1068  uword pd = vlib_pci_get_private_data (vm, h);
1069  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1070  u32 icr0;
1071 
1072  icr0 = avf_reg_read (ad, AVFINT_ICR0);
1073 
1074  if (ad->flags & AVF_DEVICE_F_ELOG)
1075  {
1076  /* *INDENT-OFF* */
1077  ELOG_TYPE_DECLARE (el) =
1078  {
1079  .format = "avf[%d] irq 0: icr0 0x%x",
1080  .format_args = "i4i4",
1081  };
1082  /* *INDENT-ON* */
1083  struct
1084  {
1085  u32 dev_instance;
1086  u32 icr0;
1087  } *ed;
1088 
1089  ed = ELOG_DATA (&vm->elog_main, el);
1090  ed->dev_instance = ad->dev_instance;
1091  ed->icr0 = icr0;
1092  }
1093 
1094  avf_irq_0_enable (ad);
1095 
1096  /* bit 30 - Send/Receive Admin queue interrupt indication */
1097  if (icr0 & (1 << 30))
1098  vlib_process_signal_event (vm, avf_process_node.index,
1099  AVF_PROCESS_EVENT_AQ_INT, 0);
1100 }
1101 
1102 static void
1103 avf_irq_n_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1104 {
1105  vnet_main_t *vnm = vnet_get_main ();
1106  avf_main_t *am = &avf_main;
1107  uword pd = vlib_pci_get_private_data (vm, h);
1108  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1109  u16 qid;
1110  int i;
1111 
1112  if (ad->flags & AVF_DEVICE_F_ELOG)
1113  {
1114  /* *INDENT-OFF* */
1115  ELOG_TYPE_DECLARE (el) =
1116  {
1117  .format = "avf[%d] irq %d: received",
1118  .format_args = "i4i2",
1119  };
1120  /* *INDENT-ON* */
1121  struct
1122  {
1123  u32 dev_instance;
1124  u16 line;
1125  } *ed;
1126 
1127  ed = ELOG_DATA (&vm->elog_main, el);
1128  ed->dev_instance = ad->dev_instance;
1129  ed->line = line;
1130  }
1131 
1132  qid = line - 1;
1133  if (vec_len (ad->rxqs) > qid && ad->rxqs[qid].int_mode != 0)
1134  vnet_device_input_set_interrupt_pending (vnm, ad->hw_if_index, qid);
1135  for (i = 0; i < vec_len (ad->rxqs); i++)
1136  avf_irq_n_enable (ad, i);
1137 }
1138 
1139 void
1140 avf_delete_if (vlib_main_t * vm, avf_device_t * ad)
1141 {
1142  vnet_main_t *vnm = vnet_get_main ();
1143  avf_main_t *am = &avf_main;
1144  int i;
1145 
1146  if (ad->hw_if_index)
1147  {
1148  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1149  vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
1150  ethernet_delete_interface (vnm, ad->hw_if_index);
1151  }
1152 
1153  vlib_pci_device_close (vm, ad->pci_dev_handle);
1154 
1155  vlib_physmem_free (vm, ad->atq);
1156  vlib_physmem_free (vm, ad->arq);
1157  vlib_physmem_free (vm, ad->atq_bufs);
1158  vlib_physmem_free (vm, ad->arq_bufs);
1159 
1160  /* *INDENT-OFF* */
1161  vec_foreach_index (i, ad->rxqs)
1162  {
1163  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
1164  vlib_physmem_free (vm, (void *) rxq->descs);
1165  if (rxq->n_enqueued)
1166  vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
1167  rxq->n_enqueued);
1168  vec_free (rxq->bufs);
1169  }
1170  /* *INDENT-ON* */
1171  vec_free (ad->rxqs);
1172 
1173  /* *INDENT-OFF* */
1174  vec_foreach_index (i, ad->txqs)
1175  {
1176  avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
1177  vlib_physmem_free (vm, (void *) txq->descs);
1178  if (txq->n_enqueued)
1179  {
1180  u16 first = (txq->next - txq->n_enqueued) & (txq->size -1);
1181  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
1182  txq->n_enqueued);
1183  }
1184  vec_free (txq->bufs);
1185  clib_ring_free (txq->rs_slots);
1186  }
1187  /* *INDENT-ON* */
1188  vec_free (ad->txqs);
1189  vec_free (ad->name);
1190 
1191  clib_error_free (ad->error);
1192  clib_memset (ad, 0, sizeof (*ad));
1193  pool_put (am->devices, ad);
1194 }
1195 
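/* avf_create_if: create an AVF interface - validate the requested queue
   sizes, open the PCI VF, map BAR0, register MSI-X handlers, allocate
   DMA-able admin queue rings and mailbox buffers, run avf_device_init ()
   and register the resulting ethernet interface with VNET. */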
1196 void
1197 avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
1198 {
1199  vnet_main_t *vnm = vnet_get_main ();
1200  avf_main_t *am = &avf_main;
1201  avf_device_t *ad;
1202  vlib_pci_dev_handle_t h;
1203  clib_error_t *error = 0;
1204  int i;
1205 
1206  /* check input args */
1207  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
1208  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;
1209 
1210  if ((args->rxq_size & (args->rxq_size - 1))
1211  || (args->txq_size & (args->txq_size - 1)))
1212  {
1213  args->rv = VNET_API_ERROR_INVALID_VALUE;
1214  args->error =
1215  clib_error_return (error, "queue size must be a power of two");
1216  return;
1217  }
1218 
1219  pool_get (am->devices, ad);
1220  ad->dev_instance = ad - am->devices;
1221  ad->per_interface_next_index = ~0;
1222  ad->name = vec_dup (args->name);
1223 
1224  if (args->enable_elog)
1225  ad->flags |= AVF_DEVICE_F_ELOG;
1226 
1227  if ((error = vlib_pci_device_open (vm, &args->addr, avf_pci_device_ids,
1228  &h)))
1229  {
1230  pool_put (am->devices, ad);
1231  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1232  args->error =
1233  clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
1234  &args->addr);
1235  return;
1236  }
1237  ad->pci_dev_handle = h;
1238  ad->numa_node = vlib_pci_get_numa_node (vm, h);
1239 
1240  vlib_pci_set_private_data (vm, h, ad->dev_instance);
1241 
1242  if ((error = vlib_pci_bus_master_enable (vm, h)))
1243  goto error;
1244 
1245  if ((error = vlib_pci_map_region (vm, h, 0, &ad->bar0)))
1246  goto error;
1247 
1248  if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
1249  &avf_irq_0_handler)))
1250  goto error;
1251 
1252  if ((error = vlib_pci_register_msix_handler (vm, h, 1, 1,
1253  &avf_irq_n_handler)))
1254  goto error;
1255 
1256  if ((error = vlib_pci_enable_msix_irq (vm, h, 0, 2)))
1257  goto error;
1258 
1259  ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1260  AVF_MBOX_LEN,
1261  CLIB_CACHE_LINE_BYTES,
1262  ad->numa_node);
1263  if (ad->atq == 0)
1264  {
1265  error = vlib_physmem_last_error (vm);
1266  goto error;
1267  }
1268 
1269  if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
1270  goto error;
1271 
1272  ad->arq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1273  AVF_MBOX_LEN,
1274  CLIB_CACHE_LINE_BYTES,
1275  ad->numa_node);
1276  if (ad->arq == 0)
1277  {
1278  error = vlib_physmem_last_error (vm);
1279  goto error;
1280  }
1281 
1282  if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
1283  goto error;
1284 
1285  ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1286  AVF_MBOX_LEN,
1287  CLIB_CACHE_LINE_BYTES,
1288  ad->numa_node);
1289  if (ad->atq_bufs == 0)
1290  {
1291  error = vlib_physmem_last_error (vm);
1292  goto error;
1293  }
1294 
1295  if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
1296  goto error;
1297 
1298  ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1299  AVF_MBOX_LEN,
1300  CLIB_CACHE_LINE_BYTES,
1301  ad->numa_node);
1302  if (ad->arq_bufs == 0)
1303  {
1304  error = vlib_physmem_last_error (vm);
1305  goto error;
1306  }
1307 
1308  if ((error = vlib_pci_map_dma (vm, h, ad->arq_bufs)))
1309  goto error;
1310 
1311  if ((error = vlib_pci_intr_enable (vm, h)))
1312  goto error;
1313 
1314  if (vlib_pci_supports_virtual_addr_dma (vm, h))
1315  ad->flags |= AVF_DEVICE_F_VA_DMA;
1316 
1317  if ((error = avf_device_init (vm, am, ad, args)))
1318  goto error;
1319 
1320  /* create interface */
1321  error = ethernet_register_interface (vnm, avf_device_class.index,
1322  ad->dev_instance, ad->hwaddr,
1323  &ad->hw_if_index, avf_flag_change);
1324 
1325  if (error)
1326  goto error;
1327 
1328  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
1329  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
1330 
1331  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
1332  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
1333  vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
1334  avf_input_node.index);
1335 
1336  for (i = 0; i < ad->n_rx_queues; i++)
1337  vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, i, ~0);
1338 
1339  if (pool_elts (am->devices) == 1)
1340  vlib_process_signal_event (vm, avf_process_node.index,
1341  AVF_PROCESS_EVENT_START, 0);
1342 
1343  return;
1344 
1345 error:
1346  avf_delete_if (vm, ad);
1347  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1348  args->error = clib_error_return (error, "pci-addr %U",
1349  format_vlib_pci_addr, &args->addr);
1350  vlib_log_err (am->log_class, "%U", format_clib_error, args->error);
1351 }
1352 
1353 static clib_error_t *
1354 avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
1355 {
1356  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1357  avf_main_t *am = &avf_main;
1358  avf_device_t *ad = vec_elt_at_index (am->devices, hi->dev_instance);
1359  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
1360 
1361  if (ad->flags & AVF_DEVICE_F_ERROR)
1362  return clib_error_return (0, "device is in error state");
1363 
1364  if (is_up)
1365  {
1366  vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
1367  VNET_HW_INTERFACE_FLAG_LINK_UP);
1368  ad->flags |= AVF_DEVICE_F_ADMIN_UP;
1369  }
1370  else
1371  {
1372  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1373  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
1374  }
1375  return 0;
1376 }
1377 
1378 static clib_error_t *
1379 avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
1380  vnet_hw_interface_rx_mode mode)
1381 {
1382  avf_main_t *am = &avf_main;
1383  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1384  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
1385  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
1386 
1387  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
1388  rxq->int_mode = 0;
1389  else
1390  rxq->int_mode = 1;
1391 
1392  return 0;
1393 }
1394 
1395 static void
1396 avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
1397  u32 node_index)
1398 {
1399  avf_main_t *am = &avf_main;
1400  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1401  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
1402 
1403  /* Shut off redirection */
1404  if (node_index == ~0)
1405  {
1406  ad->per_interface_next_index = node_index;
1407  return;
1408  }
1409 
1410  ad->per_interface_next_index =
1411  vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);
1412 }
1413 
1414 static char *avf_tx_func_error_strings[] = {
1415 #define _(n,s) s,
1416  foreach_avf_tx_func_error
1417 #undef _
1418 };
1419 
1420 static void
1421 avf_clear_hw_interface_counters (u32 instance)
1422 {
1423  avf_main_t *am = &avf_main;
1424  avf_device_t *ad = vec_elt_at_index (am->devices, instance);
1425  clib_memcpy_fast (&ad->last_cleared_eth_stats,
1426  &ad->eth_stats, sizeof (ad->eth_stats));
1427 }
1428 
1429 /* *INDENT-OFF* */
1430 VNET_DEVICE_CLASS (avf_device_class,) =
1431 {
1432  .name = "Adaptive Virtual Function (AVF) interface",
1433  .clear_counters = avf_clear_hw_interface_counters,
1434  .format_device = format_avf_device,
1435  .format_device_name = format_avf_device_name,
1436  .admin_up_down_function = avf_interface_admin_up_down,
1437  .rx_mode_change_function = avf_interface_rx_mode_change,
1438  .rx_redirect_to_node = avf_set_interface_next_node,
1439  .tx_function_n_errors = AVF_TX_N_ERROR,
1440  .tx_function_error_strings = avf_tx_func_error_strings,
1441 };
1442 /* *INDENT-ON* */
1443 
1444 clib_error_t *
1445 avf_init (vlib_main_t * vm)
1446 {
1447  avf_main_t *am = &avf_main;
1448  clib_error_t *error;
1449  vlib_thread_main_t *tm = vlib_get_thread_main ();
1450 
1451  if ((error = vlib_call_init_function (vm, pci_bus_init)))
1452  return error;
1453 
1454  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
1455  CLIB_CACHE_LINE_BYTES);
1456 
1457  am->log_class = vlib_log_register_class ("avf_plugin", 0);
1458  vlib_log_debug (am->log_class, "initialized");
1459 
1460  return 0;
1461 }
1462 
1463 VLIB_INIT_FUNCTION (avf_init);
1464 
1465 /*
1466  * fd.io coding-style-patch-verification: ON
1467  *
1468  * Local Variables:
1469  * eval: (c-set-style "gnu")
1470  * End:
1471  */