FD.io VPP  v19.04.1-1-ge4a0f9f
device.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vppinfra/ring.h>
20 #include <vlib/unix/unix.h>
21 #include <vlib/pci/pci.h>
22 #include <vnet/ethernet/ethernet.h>
23 
24 #include <avf/avf.h>
25 
26 #define AVF_MBOX_LEN 64
27 #define AVF_MBOX_BUF_SZ 512
28 #define AVF_RXQ_SZ 512
29 #define AVF_TXQ_SZ 512
30 #define AVF_ITR_INT 8160
31 
32 #define PCI_VENDOR_ID_INTEL 0x8086
33 #define PCI_DEVICE_ID_INTEL_AVF 0x1889
34 #define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
35 #define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
36 
37 avf_main_t avf_main;
38 
39 static pci_device_id_t avf_pci_device_ids[] = {
40  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_AVF},
41  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X710_VF},
42  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X722_VF},
43  {0},
44 };
45 
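/* Interrupt helpers: vector 0 carries admin-queue (mailbox) events, vectors
   1..N carry per-rx-queue interrupts. Enabling programs AVFINT_DYN_CTL* with
   the ITR interval derived from AVF_ITR_INT. */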
46 static inline void
47 avf_irq_0_disable (avf_device_t * ad)
48 {
49  u32 dyn_ctl0 = 0, icr0_ena = 0;
50 
51  dyn_ctl0 |= (3 << 3); /* 11b = No ITR update */
52 
53  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
54  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
55  avf_reg_flush (ad);
56 }
57 
58 static inline void
59 avf_irq_0_enable (avf_device_t * ad)
60 {
61  u32 dyn_ctl0 = 0, icr0_ena = 0;
62 
63  icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */
64 
65  dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
66  dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
67  //dyn_ctl0 |= (3 << 3); /* [4:3] ITR Index, 11b = No ITR update */
68  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
69 
70  avf_irq_0_disable (ad);
71  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
72  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
73  avf_reg_flush (ad);
74 }
75 
76 static inline void
77 avf_irq_n_disable (avf_device_t * ad, u8 line)
78 {
79  u32 dyn_ctln = 0;
80 
81  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
82  avf_reg_flush (ad);
83 }
84 
85 static inline void
86 avf_irq_n_enable (avf_device_t * ad, u8 line)
87 {
88  u32 dyn_ctln = 0;
89 
90  dyn_ctln |= (1 << 0); /* [0] Interrupt Enable */
91  dyn_ctln |= (1 << 1); /* [1] Clear PBA */
92  dyn_ctln |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
93 
94  avf_irq_n_disable (ad, line);
95  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
96  avf_reg_flush (ad);
97 }
98 
99 
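/* Enqueue one descriptor on the admin transmit queue (ATQ), optionally
   attaching a mailbox buffer, then poll the DD/CMP flags for completion
   with a bounded number of retries. */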
100 clib_error_t *
101 avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
102  void *data, int len)
103 {
104  avf_main_t *am = &avf_main;
105  clib_error_t *err = 0;
106  avf_aq_desc_t *d, dc;
107  int n_retry = 5;
108 
109  d = &ad->atq[ad->atq_next_slot];
110  clib_memcpy_fast (d, dt, sizeof (avf_aq_desc_t));
111  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
112  if (len)
113  d->datalen = len;
114  if (len)
115  {
116  u64 pa;
117  pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
118  d->addr_hi = (u32) (pa >> 32);
119  d->addr_lo = (u32) pa;
120  clib_memcpy_fast (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ,
121  data, len);
122  d->flags |= AVF_AQ_F_BUF;
123  }
124 
125  if (ad->flags & AVF_DEVICE_F_ELOG)
126  clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));
127 
128  CLIB_MEMORY_BARRIER ();
129  vlib_log_debug (am->log_class, "%U", format_hexdump, data, len);
130  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
131  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
132  avf_reg_flush (ad);
133 
134 retry:
135  vlib_process_suspend (vm, 10e-6);
136 
137  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
138  {
139  if (--n_retry == 0)
140  {
141  err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
142  d->opcode);
143  goto done;
144  }
145  goto retry;
146  }
147 
148  clib_memcpy_fast (dt, d, sizeof (avf_aq_desc_t));
149  if (d->flags & AVF_AQ_F_ERR)
150  return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
151  "%d]", d->opcode, d->retval);
152 
153 done:
154  if (ad->flags & AVF_DEVICE_F_ELOG)
155  {
156  /* *INDENT-OFF* */
157  ELOG_TYPE_DECLARE (el) =
158  {
159  .format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
160  "datalen %d retval %d",
161  .format_args = "i4i2i2i2i2i2",
162  };
163  struct
164  {
165  u32 dev_instance;
166  u16 s_flags;
167  u16 r_flags;
168  u16 opcode;
169  u16 datalen;
170  u16 retval;
171  } *ed;
172  ed = ELOG_DATA (&vm->elog_main, el);
173  ed->dev_instance = ad->dev_instance;
174  ed->s_flags = dc.flags;
175  ed->r_flags = d->flags;
176  ed->opcode = dc.opcode;
177  ed->datalen = dc.datalen;
178  ed->retval = d->retval;
179  /* *INDENT-ON* */
180  }
181 
182  return err;
183 }
184 
185 clib_error_t *
186 avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
187  u32 val)
188 {
189  clib_error_t *err;
190  avf_aq_desc_t d = {.opcode = 0x207,.param1 = reg,.param3 = val };
191  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
192 
193  if (ad->flags & AVF_DEVICE_F_ELOG)
194  {
195  /* *INDENT-OFF* */
196  ELOG_TYPE_DECLARE (el) =
197  {
198  .format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
199  .format_args = "i4i4i4",
200  };
201  struct
202  {
203  u32 dev_instance;
204  u32 reg;
205  u32 val;
206  } *ed;
207  ed = ELOG_DATA (&vm->elog_main, el);
208  ed->dev_instance = ad->dev_instance;
209  ed->reg = reg;
210  ed->val = val;
211  /* *INDENT-ON* */
212  }
213  return err;
214 }
215 
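/* Allocate and DMA-map the rx descriptor ring for queue 'qid', pre-fill it
   with buffers from the queue's buffer pool and record the QRX_TAIL
   doorbell address. */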
216 clib_error_t *
217 avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
218 {
219  clib_error_t *err;
220  avf_rxq_t *rxq;
221  u32 n_alloc, i;
222 
223  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
224  rxq = vec_elt_at_index (ad->rxqs, qid);
225  rxq->size = rxq_size;
226  rxq->next = 0;
227  rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
228  sizeof (avf_rx_desc_t),
229  2 * CLIB_CACHE_LINE_BYTES,
230  ad->numa_node);
231 
232  rxq->buffer_pool_index =
233  vlib_buffer_pool_get_default_for_numa (vm, ad->numa_node);
234 
235  if (rxq->descs == 0)
236  return vlib_physmem_last_error (vm);
237 
238  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) rxq->descs)))
239  return err;
240 
241  clib_memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
242  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
243  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
244 
245  n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
246  rxq->buffer_pool_index);
247 
248  if (n_alloc == 0)
249  return clib_error_return (0, "buffer allocation error");
250 
251  rxq->n_enqueued = n_alloc;
252  avf_rx_desc_t *d = rxq->descs;
253  for (i = 0; i < n_alloc; i++)
254  {
255  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
256  if (ad->flags & AVF_DEVICE_F_VA_DMA)
257  d->qword[0] = vlib_buffer_get_va (b);
258  else
259  d->qword[0] = vlib_buffer_get_pa (vm, b);
260  d++;
261  }
262 
263  ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
264  return 0;
265 }
266 
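/* Allocate and DMA-map the tx descriptor ring for queue 'qid'. If more tx
   queues are requested than queue pairs exist, queues are shared between
   threads and protected by a spinlock. */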
267 clib_error_t *
268 avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
269 {
270  clib_error_t *err;
271  avf_txq_t *txq;
272 
273  if (qid >= ad->num_queue_pairs)
274  {
275  qid = qid % ad->num_queue_pairs;
276  txq = vec_elt_at_index (ad->txqs, qid);
277  if (txq->lock == 0)
278  clib_spinlock_init (&txq->lock);
279  ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
280  return 0;
281  }
282 
283  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
284  txq = vec_elt_at_index (ad->txqs, qid);
285  txq->size = txq_size;
286  txq->next = 0;
287  txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
288  sizeof (avf_tx_desc_t),
289  2 * CLIB_CACHE_LINE_BYTES,
290  ad->numa_node);
291  if (txq->descs == 0)
292  return vlib_physmem_last_error (vm);
293 
294  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) txq->descs)))
295  return err;
296 
297  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
298  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
299 
300  /* initialize ring of pending RS slots */
301  clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
302 
303  ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
304  return 0;
305 }
306 
307 typedef struct
308 {
309  u16 vsi_id;
310  u16 flags;
311 } virtchnl_promisc_info_t;
312 
313 void
314 avf_arq_slot_init (avf_device_t * ad, u16 slot)
315 {
316  avf_aq_desc_t *d;
317  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
318  d = &ad->arq[slot];
319  clib_memset (d, 0, sizeof (avf_aq_desc_t));
320  d->flags = AVF_AQ_F_BUF;
321  d->datalen = AVF_MBOX_BUF_SZ;
322  d->addr_hi = (u32) (pa >> 32);
323  d->addr_lo = (u32) pa;
324 }
325 
326 static inline uword
327 avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
328 {
329  return (ad->flags & AVF_DEVICE_F_VA_DMA) ?
330  pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
331 }
332 
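/* Program the admin transmit (ATQ) and receive (ARQ) mailbox rings: base
   addresses, ring length with the enable bit, and head/tail pointers. */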
333 static void
334 avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
335 {
336  u64 pa;
337  int i;
338 
339  /* VF MailBox Transmit */
340  clib_memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
341  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);
342 
343  pa = avf_dma_addr (vm, ad, ad->atq);
344  avf_reg_write (ad, AVF_ATQT, 0); /* Tail */
345  avf_reg_write (ad, AVF_ATQH, 0); /* Head */
346  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
347  avf_reg_write (ad, AVF_ATQBAL, (u32) pa); /* Base Address Low */
348  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
349 
350  /* VF MailBox Receive */
351  clib_memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
352  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);
353 
354  for (i = 0; i < AVF_MBOX_LEN; i++)
355  avf_arq_slot_init (ad, i);
356 
357  pa = avf_dma_addr (vm, ad, ad->arq);
358 
359  avf_reg_write (ad, AVF_ARQH, 0); /* Head */
360  avf_reg_write (ad, AVF_ARQT, 0); /* Tail */
361  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
362  avf_reg_write (ad, AVF_ARQBAL, (u32) pa); /* Base Address Low */
363  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
364  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1); /* Tail */
365 
366  ad->atq_next_slot = 0;
367  ad->arq_next_slot = 0;
368 }
369 
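/* Send a virtchnl request to the PF over the admin queue and poll the ARQ
   for the matching reply; asynchronous VIRTCHNL_OP_EVENT messages that
   arrive while waiting are queued on ad->events for the process node. */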
370 clib_error_t *
371 avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
372  void *in, int in_len, void *out, int out_len)
373 {
374  clib_error_t *err;
375  avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };
376  u32 head;
377  int n_retry = 5;
378 
379 
380  /* suppress interrupt in the next adminq receive slot
381  as we are going to wait for response
382  we only need interrupts when event is received */
383  d = &ad->arq[ad->arq_next_slot];
384  d->flags |= AVF_AQ_F_SI;
385 
386  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
387  return err;
388 
389 retry:
390  head = avf_get_u32 (ad->bar0, AVF_ARQH);
391 
392  if (ad->arq_next_slot == head)
393  {
394  if (--n_retry == 0)
395  return clib_error_return (0, "timeout");
396  vlib_process_suspend (vm, 10e-3);
397  goto retry;
398  }
399 
400  d = &ad->arq[ad->arq_next_slot];
401 
402  if (d->v_opcode == VIRTCHNL_OP_EVENT)
403  {
404  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
405  virtchnl_pf_event_t *e;
406 
407  if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
408  ((d->flags & AVF_AQ_F_BUF) == 0))
409  return clib_error_return (0, "event message error");
410 
411  vec_add2 (ad->events, e, 1);
412  clib_memcpy_fast (e, buf, sizeof (virtchnl_pf_event_t));
413  avf_arq_slot_init (ad, ad->arq_next_slot);
414  ad->arq_next_slot++;
415  n_retry = 5;
416  goto retry;
417  }
418 
419  if (d->v_opcode != op)
420  {
421  err =
423  "unexpected message receiver [v_opcode = %u, "
424  "expected %u, v_retval %d]", d->v_opcode, op,
425  d->v_retval);
426  goto done;
427  }
428 
429  if (d->v_retval)
430  {
431  err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
432  d->v_opcode, d->v_retval);
433  goto done;
434  }
435 
436  if (d->flags & AVF_AQ_F_BUF)
437  {
438  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
439  clib_memcpy_fast (out, buf, out_len);
440  }
441 
442  avf_arq_slot_init (ad, ad->arq_next_slot);
443  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
444  avf_reg_flush (ad);
445  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;
446 
447 done:
448 
449  if (ad->flags & AVF_DEVICE_F_ELOG)
450  {
451  /* *INDENT-OFF* */
452  ELOG_TYPE_DECLARE (el) =
453  {
454  .format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
455  .format_args = "i4t4i4i4",
456  .n_enum_strings = VIRTCHNL_N_OPS,
457  .enum_strings = {
458 #define _(v, n) [v] = #n,
459  foreach_virtchnl_op
460 #undef _
461  },
462  };
463  struct
464  {
465  u32 dev_instance;
466  u32 v_opcode;
467  u32 v_opcode_val;
468  u32 v_retval;
469  } *ed;
470  ed = ELOG_DATA (&vm->elog_main, el);
471  ed->dev_instance = ad->dev_instance;
472  ed->v_opcode = op;
473  ed->v_opcode_val = op;
474  ed->v_retval = d->v_retval;
475  /* *INDENT-ON* */
476  }
477  return err;
478 }
479 
480 clib_error_t *
481 avf_op_version (vlib_main_t * vm, avf_device_t * ad,
482  virtchnl_version_info_t * ver)
483 {
484  clib_error_t *err = 0;
485  virtchnl_version_info_t myver = {
486  .major = VIRTCHNL_VERSION_MAJOR,
487  .minor = VIRTCHNL_VERSION_MINOR,
488  };
489 
490  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
491  sizeof (virtchnl_version_info_t), ver,
492  sizeof (virtchnl_version_info_t));
493 
494  if (err)
495  return err;
496 
497  return err;
498 }
499 
500 clib_error_t *
501 avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
502  virtchnl_vf_resource_t * res)
503 {
504  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
505  VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
506  VIRTCHNL_VF_OFFLOAD_RX_POLLING);
507 
508  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
509  sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
510 }
511 
512 clib_error_t *
513 avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
514 {
515  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
516  int i;
517  u8 msg[msg_len];
518  virtchnl_rss_lut_t *rl;
519 
520  clib_memset (msg, 0, msg_len);
521  rl = (virtchnl_rss_lut_t *) msg;
522  rl->vsi_id = ad->vsi_id;
523  rl->lut_entries = ad->rss_lut_size;
524  for (i = 0; i < ad->rss_lut_size; i++)
525  rl->lut[i] = i % ad->n_rx_queues;
526 
527  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
528  0);
529 }
530 
531 clib_error_t *
532 avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
533 {
534  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
535  int i;
536  u8 msg[msg_len];
537  virtchnl_rss_key_t *rk;
538 
539  clib_memset (msg, 0, msg_len);
540  rk = (virtchnl_rss_key_t *) msg;
541  rk->vsi_id = ad->vsi_id;
542  rk->key_len = ad->rss_key_size;
543  u32 seed = random_default_seed ();
544  for (i = 0; i < ad->rss_key_size; i++)
545  rk->key[i] = (u8) random_u32 (&seed);
546 
547  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
548  0);
549 }
550 
551 clib_error_t *
552 avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
553 {
554  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
555  0);
556 }
557 
558 clib_error_t *
559 avf_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad)
560 {
561  virtchnl_promisc_info_t pi = { 0 };
562 
563  pi.vsi_id = ad->vsi_id;
564  pi.flags = 1;
565  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
566  sizeof (virtchnl_promisc_info_t), 0, 0);
567 }
568 
569 
570 clib_error_t *
571 avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
572 {
573  int i;
574  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
575  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
576  sizeof (virtchnl_queue_pair_info_t);
577  u8 msg[msg_len];
578  virtchnl_vsi_queue_config_info_t *ci;
579 
580  clib_memset (msg, 0, msg_len);
581  ci = (virtchnl_vsi_queue_config_info_t *) msg;
582  ci->vsi_id = ad->vsi_id;
583  ci->num_queue_pairs = n_qp;
584 
585  for (i = 0; i < n_qp; i++)
586  {
587  virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
588  virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;
589 
590  rxq->vsi_id = ad->vsi_id;
591  rxq->queue_id = i;
592  rxq->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
593  if (i < vec_len (ad->rxqs))
594  {
595  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
596  rxq->ring_len = q->size;
597  rxq->databuffer_size = vlib_buffer_get_default_data_size (vm);
598  rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
599  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
600  }
601 
602  avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
603  txq->vsi_id = ad->vsi_id;
604  if (i < vec_len (ad->txqs))
605  {
606  txq->queue_id = i;
607  txq->ring_len = q->size;
608  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
609  }
610  }
611 
612  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
613  0, 0);
614 }
615 
616 clib_error_t *
617 avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
618 {
619  int count = 1;
620  int msg_len = sizeof (virtchnl_irq_map_info_t) +
621  count * sizeof (virtchnl_vector_map_t);
622  u8 msg[msg_len];
623  virtchnl_irq_map_info_t *imi;
624 
625  clib_memset (msg, 0, msg_len);
626  imi = (virtchnl_irq_map_info_t *) msg;
627  imi->num_vectors = count;
628 
629  imi->vecmap[0].vector_id = 1;
630  imi->vecmap[0].vsi_id = ad->vsi_id;
631  imi->vecmap[0].rxq_map = 1;
632  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
633  0);
634 }
635 
636 clib_error_t *
637 avf_op_add_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count, u8 * macs)
638 {
639  int msg_len =
640  sizeof (virtchnl_ether_addr_list_t) +
641  count * sizeof (virtchnl_ether_addr_t);
642  u8 msg[msg_len];
643  virtchnl_ether_addr_list_t *al;
644  int i;
645 
646  clib_memset (msg, 0, msg_len);
647  al = (virtchnl_ether_addr_list_t *) msg;
648  al->vsi_id = ad->vsi_id;
649  al->num_elements = count;
650  for (i = 0; i < count; i++)
651  clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
652  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,
653  0);
654 }
655 
656 clib_error_t *
657 avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
658 {
659  virtchnl_queue_select_t qs = { 0 };
660  int i = 0;
661  qs.vsi_id = ad->vsi_id;
662  qs.rx_queues = rx;
663  qs.tx_queues = tx;
664  while (rx)
665  {
666  if (rx & (1 << i))
667  {
668  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
669  avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
670  rx &= ~(1 << i);
671  }
672  i++;
673  }
674  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
675  sizeof (virtchnl_queue_select_t), 0, 0);
676 }
677 
678 clib_error_t *
679 avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
680  virtchnl_eth_stats_t * es)
681 {
682  virtchnl_queue_select_t qs = { 0 };
683  qs.vsi_id = ad->vsi_id;
684  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
685  &qs, sizeof (virtchnl_queue_select_t),
686  es, sizeof (virtchnl_eth_stats_t));
687 }
688 
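/* Issue VIRTCHNL_OP_RESET_VF and wait until AVFGEN_RSTAT reports the VF as
   reset-completed or active again (values 2 and 3 below). */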
689 clib_error_t *
690 avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
691 {
692  avf_aq_desc_t d = { 0 };
693  clib_error_t *error;
694  u32 rstat;
695  int n_retry = 20;
696 
697  d.opcode = 0x801;
698  d.v_opcode = VIRTCHNL_OP_RESET_VF;
699  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
700  return error;
701 
702 retry:
703  vlib_process_suspend (vm, 10e-3);
704  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
705 
706  if (rstat == 2 || rstat == 3)
707  return 0;
708 
709  if (--n_retry == 0)
710  return clib_error_return (0, "reset failed (timeout)");
711 
712  goto retry;
713 }
714 
715 clib_error_t *
716 avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
717 {
718  virtchnl_vf_res_request_t res_req = { 0 };
719  clib_error_t *error;
720  u32 rstat;
721  int n_retry = 20;
722 
723  res_req.num_queue_pairs = num_queue_pairs;
724 
725  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
726  sizeof (virtchnl_vf_res_request_t), &res_req,
727  sizeof (virtchnl_vf_res_request_t));
728 
729  /*
730  * if PF responds, the request failed
731  * else PF initializes restart and avf_send_to_pf returns an error
732  */
733  if (!error)
734  {
735  return clib_error_return (0, "requested more than %u queue pairs",
736  res_req.num_queue_pairs);
737  }
738 
739 retry:
740  vlib_process_suspend (vm, 10e-3);
741  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
742 
743  if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
744  goto done;
745 
746  if (--n_retry == 0)
747  return clib_error_return (0, "reset failed (timeout)");
748 
749  goto retry;
750 
751 done:
752  return NULL;
753 }
754 
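/* Device bring-up: reset the VF, negotiate the virtchnl version, fetch VF
   resources, configure RSS, queues and the IRQ map, add the MAC address and
   finally enable the queues. */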
755 clib_error_t *
756 avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
757  avf_create_if_args_t * args)
758 {
759  virtchnl_version_info_t ver = { 0 };
760  virtchnl_vf_resource_t res = { 0 };
761  clib_error_t *error;
762  vlib_thread_main_t *tm = vlib_get_thread_main ();
763  int i;
764 
765  avf_adminq_init (vm, ad);
766 
767  /* request more queues only if we need them */
768  if ((error = avf_request_queues (vm, ad, tm->n_vlib_mains)))
769  {
770  /* we failed to get more queues, but still we want to proceed */
771  clib_error_free (error);
772 
773  if ((error = avf_device_reset (vm, ad)))
774  return error;
775  }
776 
777  avf_adminq_init (vm, ad);
778 
779  /*
780  * OP_VERSION
781  */
782  if ((error = avf_op_version (vm, ad, &ver)))
783  return error;
784 
785  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
787  return clib_error_return (0, "incompatible protocol version "
788  "(remote %d.%d)", ver.major, ver.minor);
789 
790  /*
791  * OP_GET_VF_RESOURCES
792  */
793  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
794  return error;
795 
796  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
797  return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");
798 
799  ad->vsi_id = res.vsi_res[0].vsi_id;
800  ad->feature_bitmap = res.vf_offload_flags;
801  ad->num_queue_pairs = res.num_queue_pairs;
802  ad->max_vectors = res.max_vectors;
803  ad->max_mtu = res.max_mtu;
804  ad->rss_key_size = res.rss_key_size;
805  ad->rss_lut_size = res.rss_lut_size;
806 
807  clib_memcpy_fast (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);
808 
809  /*
810  * Disable VLAN stripping
811  */
812  if ((error = avf_op_disable_vlan_stripping (vm, ad)))
813  return error;
814 
815  if ((error = avf_config_promisc_mode (vm, ad)))
816  return error;
817 
818  /*
819  * Init Queues
820  */
821  if (args->rxq_num == 0)
822  {
823  args->rxq_num = 1;
824  }
825  else if (args->rxq_num > ad->num_queue_pairs)
826  {
827  args->rxq_num = ad->num_queue_pairs;
828  vlib_log_warn (am->log_class, "Requested more rx queues than "
829  "queue pairs available. Using %u rx queues.",
830  args->rxq_num);
831  }
832 
833  for (i = 0; i < args->rxq_num; i++)
834  if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
835  return error;
836 
837  for (i = 0; i < tm->n_vlib_mains; i++)
838  if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
839  return error;
840 
841  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
842  (error = avf_op_config_rss_lut (vm, ad)))
843  return error;
844 
845  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
846  (error = avf_op_config_rss_key (vm, ad)))
847  return error;
848 
849  if ((error = avf_op_config_vsi_queues (vm, ad)))
850  return error;
851 
852  if ((error = avf_op_config_irq_map (vm, ad)))
853  return error;
854 
855  avf_irq_0_enable (ad);
856  for (i = 0; i < ad->n_rx_queues; i++)
857  avf_irq_n_enable (ad, i);
858 
859  if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))
860  return error;
861 
862  if ((error = avf_op_enable_queues (vm, ad, pow2_mask (ad->n_rx_queues),
863  pow2_mask (ad->n_tx_queues))))
864  return error;
865 
866  ad->flags |= AVF_DEVICE_F_INITIALIZED;
867  return error;
868 }
869 
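/* Periodic / interrupt-driven housekeeping for one device: verify the admin
   queues are still enabled, refresh stats, and translate link-change events
   into vnet interface flags and link speed. */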
870 void
871 avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
872 {
873  avf_main_t *am = &avf_main;
874  vnet_main_t *vnm = vnet_get_main ();
875  virtchnl_pf_event_t *e;
876  u32 r;
877 
878  if (ad->flags & AVF_DEVICE_F_ERROR)
879  return;
880 
881  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
882  return;
883 
884  ASSERT (ad->error == 0);
885 
886  /* do not process device in reset state */
887  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
888  if (r != VIRTCHNL_VFR_VFACTIVE)
889  return;
890 
891  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
892  if ((r & 0xf0000000) != (1ULL << 31))
893  {
894  ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
895  goto error;
896  }
897 
898  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
899  if ((r & 0xf0000000) != (1ULL << 31))
900  {
901  ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
902  goto error;
903  }
904 
905  if (is_irq == 0)
906  avf_op_get_stats (vm, ad, &ad->eth_stats);
907 
908  /* *INDENT-OFF* */
909  vec_foreach (e, ad->events)
910  {
911  if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
912  {
913  int link_up = e->event_data.link_event.link_status;
914  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
915  u32 flags = 0;
916  u32 kbps = 0;
917 
918  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
919  {
920  ad->flags |= AVF_DEVICE_F_LINK_UP;
921  flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
922  VNET_HW_INTERFACE_FLAG_LINK_UP);
923  if (speed == VIRTCHNL_LINK_SPEED_40GB)
924  kbps = 40000000;
925  else if (speed == VIRTCHNL_LINK_SPEED_25GB)
926  kbps = 25000000;
927  else if (speed == VIRTCHNL_LINK_SPEED_10GB)
928  kbps = 10000000;
929  else if (speed == VIRTCHNL_LINK_SPEED_1GB)
930  kbps = 1000000;
931  else if (speed == VIRTCHNL_LINK_SPEED_100MB)
932  kbps = 100000;
933  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
934  vnet_hw_interface_set_link_speed (vnm, ad->hw_if_index, kbps);
935  ad->link_speed = speed;
936  }
937  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
938  {
939  ad->flags &= ~AVF_DEVICE_F_LINK_UP;
940  ad->link_speed = 0;
941  }
942 
943  if (ad->flags & AVF_DEVICE_F_ELOG)
944  {
945  ELOG_TYPE_DECLARE (el) =
946  {
947  .format = "avf[%d] link change: link_status %d "
948  "link_speed %d",
949  .format_args = "i4i1i1",
950  };
951  struct
952  {
953  u32 dev_instance;
954  u8 link_status;
955  u8 link_speed;
956  } *ed;
957  ed = ELOG_DATA (&vm->elog_main, el);
958  ed->dev_instance = ad->dev_instance;
959  ed->link_status = link_up;
960  ed->link_speed = speed;
961  }
962  }
963  else
964  {
965  if (ad->flags & AVF_DEVICE_F_ELOG)
966  {
967  ELOG_TYPE_DECLARE (el) =
968  {
969  .format = "avf[%d] unknown event: event %d severity %d",
970  .format_args = "i4i4i1i1",
971  };
972  struct
973  {
974  u32 dev_instance;
975  u32 event;
976  u32 severity;
977  } *ed;
978  ed = ELOG_DATA (&vm->elog_main, el);
979  ed->dev_instance = ad->dev_instance;
980  ed->event = e->event;
981  ed->severity = e->severity;
982  }
983  }
984  }
985  /* *INDENT-ON* */
986  vec_reset_length (ad->events);
987 
988  return;
989 
990 error:
991  ad->flags |= AVF_DEVICE_F_ERROR;
992  ASSERT (ad->error != 0);
993  vlib_log_err (am->log_class, "%U", format_clib_error, ad->error);
994 }
995 
996 static u32
997 avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
998 {
999  avf_main_t *am = &avf_main;
1000  vlib_log_warn (am->log_class, "TODO");
1001  return 0;
1002 }
1003 
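/* Process node: wakes up every 5 seconds or on an admin-queue interrupt
   event and runs avf_process_one_device() for every attached device. */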
1004 static uword
1005 avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
1006 {
1007  avf_main_t *am = &avf_main;
1008  avf_device_t *ad;
1009  uword *event_data = 0, event_type;
1010  int enabled = 0, irq;
1011  f64 last_run_duration = 0;
1012  f64 last_periodic_time = 0;
1013 
1014  while (1)
1015  {
1016  if (enabled)
1017  vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
1018  else
1019  vlib_process_wait_for_event (vm);
1020 
1021  event_type = vlib_process_get_events (vm, &event_data);
1022  vec_reset_length (event_data);
1023  irq = 0;
1024 
1025  switch (event_type)
1026  {
1027  case ~0:
1028  last_periodic_time = vlib_time_now (vm);
1029  break;
1030  case AVF_PROCESS_EVENT_START:
1031  enabled = 1;
1032  break;
1033  case AVF_PROCESS_EVENT_STOP:
1034  enabled = 0;
1035  continue;
1036  case AVF_PROCESS_EVENT_AQ_INT:
1037  irq = 1;
1038  break;
1039  default:
1040  ASSERT (0);
1041  }
1042 
1043  /* *INDENT-OFF* */
1044  pool_foreach (ad, am->devices,
1045  {
1046  avf_process_one_device (vm, ad, irq);
1047  });
1048  /* *INDENT-ON* */
1049  last_run_duration = vlib_time_now (vm) - last_periodic_time;
1050  }
1051  return 0;
1052 }
1053 
1054 /* *INDENT-OFF* */
1055 VLIB_REGISTER_NODE (avf_process_node, static)  = {
1056  .function = avf_process,
1057  .type = VLIB_NODE_TYPE_PROCESS,
1058  .name = "avf-process",
1059 };
1060 /* *INDENT-ON* */
1061 
1062 static void
1063 avf_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1064 {
1065  avf_main_t *am = &avf_main;
1066  uword pd = vlib_pci_get_private_data (vm, h);
1067  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1068  u32 icr0;
1069 
1070  icr0 = avf_reg_read (ad, AVFINT_ICR0);
1071 
1072  if (ad->flags & AVF_DEVICE_F_ELOG)
1073  {
1074  /* *INDENT-OFF* */
1075  ELOG_TYPE_DECLARE (el) =
1076  {
1077  .format = "avf[%d] irq 0: icr0 0x%x",
1078  .format_args = "i4i4",
1079  };
1080  /* *INDENT-ON* */
1081  struct
1082  {
1083  u32 dev_instance;
1084  u32 icr0;
1085  } *ed;
1086 
1087  ed = ELOG_DATA (&vm->elog_main, el);
1088  ed->dev_instance = ad->dev_instance;
1089  ed->icr0 = icr0;
1090  }
1091 
1092  avf_irq_0_enable (ad);
1093 
1094  /* bit 30 - Send/Receive Admin queue interrupt indication */
1095  if (icr0 & (1 << 30))
1096  vlib_process_signal_event (vm, avf_process_node.index,
1097  AVF_PROCESS_EVENT_AQ_INT, 0);
1098 }
1099 
1100 static void
1101 avf_irq_n_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1102 {
1103  vnet_main_t *vnm = vnet_get_main ();
1104  avf_main_t *am = &avf_main;
1105  uword pd = vlib_pci_get_private_data (vm, h);
1106  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1107  u16 qid;
1108  int i;
1109 
1110  if (ad->flags & AVF_DEVICE_F_ELOG)
1111  {
1112  /* *INDENT-OFF* */
1113  ELOG_TYPE_DECLARE (el) =
1114  {
1115  .format = "avf[%d] irq %d: received",
1116  .format_args = "i4i2",
1117  };
1118  /* *INDENT-ON* */
1119  struct
1120  {
1121  u32 dev_instance;
1122  u16 line;
1123  } *ed;
1124 
1125  ed = ELOG_DATA (&vm->elog_main, el);
1126  ed->dev_instance = ad->dev_instance;
1127  ed->line = line;
1128  }
1129 
1130  qid = line - 1;
1131  if (vec_len (ad->rxqs) > qid && ad->rxqs[qid].int_mode != 0)
1132  vnet_device_input_set_interrupt_pending (vnm, ad->hw_if_index, qid);
1133  for (i = 0; i < vec_len (ad->rxqs); i++)
1134  avf_irq_n_enable (ad, i);
1135 }
1136 
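/* Tear down an interface: remove it from vnet, close the PCI device and free
   the descriptor rings, buffers and mailbox memory. */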
1137 void
1138 avf_delete_if (vlib_main_t * vm, avf_device_t * ad)
1139 {
1140  vnet_main_t *vnm = vnet_get_main ();
1141  avf_main_t *am = &avf_main;
1142  int i;
1143 
1144  if (ad->hw_if_index)
1145  {
1146  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1147  vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
1148  ethernet_delete_interface (vnm, ad->hw_if_index);
1149  }
1150 
1151  vlib_pci_device_close (vm, ad->pci_dev_handle);
1152 
1153  vlib_physmem_free (vm, ad->atq);
1154  vlib_physmem_free (vm, ad->arq);
1155  vlib_physmem_free (vm, ad->atq_bufs);
1156  vlib_physmem_free (vm, ad->arq_bufs);
1157 
1158  /* *INDENT-OFF* */
1159  vec_foreach_index (i, ad->rxqs)
1160  {
1161  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
1162  vlib_physmem_free (vm, (void *) rxq->descs);
1163  if (rxq->n_enqueued)
1164  vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
1165  rxq->n_enqueued);
1166  vec_free (rxq->bufs);
1167  }
1168  /* *INDENT-ON* */
1169  vec_free (ad->rxqs);
1170 
1171  /* *INDENT-OFF* */
1172  vec_foreach_index (i, ad->txqs)
1173  {
1174  avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
1175  vlib_physmem_free (vm, (void *) txq->descs);
1176  if (txq->n_enqueued)
1177  {
1178  u16 first = (txq->next - txq->n_enqueued) & (txq->size -1);
1179  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
1180  txq->n_enqueued);
1181  }
1182  vec_free (txq->bufs);
1183  clib_ring_free (txq->rs_slots);
1184  }
1185  /* *INDENT-ON* */
1186  vec_free (ad->txqs);
1187  vec_free (ad->name);
1188 
1189  clib_error_free (ad->error);
1190  clib_memset (ad, 0, sizeof (*ad));
1191  pool_put (am->devices, ad);
1192 }
1193 
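/* Create an interface: open the PCI device, map BAR0, register MSI-X
   handlers, allocate mailbox memory, run the virtchnl bring-up in
   avf_device_init() and register the ethernet interface with vnet. */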
1194 void
1195 avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
1196 {
1197  vnet_main_t *vnm = vnet_get_main ();
1198  avf_main_t *am = &avf_main;
1199  avf_device_t *ad;
1200  vlib_pci_dev_handle_t h;
1201  clib_error_t *error = 0;
1202  int i;
1203 
1204  /* check input args */
1205  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
1206  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;
1207 
1208  if ((args->rxq_size & (args->rxq_size - 1))
1209  || (args->txq_size & (args->txq_size - 1)))
1210  {
1211  args->rv = VNET_API_ERROR_INVALID_VALUE;
1212  args->error =
1213  clib_error_return (error, "queue size must be a power of two");
1214  return;
1215  }
1216 
1217  pool_get (am->devices, ad);
1218  ad->dev_instance = ad - am->devices;
1219  ad->per_interface_next_index = ~0;
1220  ad->name = vec_dup (args->name);
1221 
1222  if (args->enable_elog)
1223  ad->flags |= AVF_DEVICE_F_ELOG;
1224 
1225  if ((error = vlib_pci_device_open (vm, &args->addr, avf_pci_device_ids,
1226  &h)))
1227  {
1228  pool_put (am->devices, ad);
1229  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1230  args->error =
1231  clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
1232  &args->addr);
1233  return;
1234  }
1235  ad->pci_dev_handle = h;
1236  ad->numa_node = vlib_pci_get_numa_node (vm, h);
1237 
1238  vlib_pci_set_private_data (vm, h, ad->dev_instance);
1239 
1240  if ((error = vlib_pci_bus_master_enable (vm, h)))
1241  goto error;
1242 
1243  if ((error = vlib_pci_map_region (vm, h, 0, &ad->bar0)))
1244  goto error;
1245 
1246  if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
1247  &avf_irq_0_handler)))
1248  goto error;
1249 
1250  if ((error = vlib_pci_register_msix_handler (vm, h, 1, 1,
1251  &avf_irq_n_handler)))
1252  goto error;
1253 
1254  if ((error = vlib_pci_enable_msix_irq (vm, h, 0, 2)))
1255  goto error;
1256 
1257  ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1258  AVF_MBOX_LEN,
1259  CLIB_CACHE_LINE_BYTES,
1260  ad->numa_node);
1261  if (ad->atq == 0)
1262  {
1263  error = vlib_physmem_last_error (vm);
1264  goto error;
1265  }
1266 
1267  if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
1268  goto error;
1269 
1270  ad->arq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1271  AVF_MBOX_LEN,
1272  CLIB_CACHE_LINE_BYTES,
1273  ad->numa_node);
1274  if (ad->arq == 0)
1275  {
1276  error = vlib_physmem_last_error (vm);
1277  goto error;
1278  }
1279 
1280  if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
1281  goto error;
1282 
1283  ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1284  AVF_MBOX_LEN,
1285  CLIB_CACHE_LINE_BYTES,
1286  ad->numa_node);
1287  if (ad->atq_bufs == 0)
1288  {
1289  error = vlib_physmem_last_error (vm);
1290  goto error;
1291  }
1292 
1293  if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
1294  goto error;
1295 
1296  ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1297  AVF_MBOX_LEN,
1298  CLIB_CACHE_LINE_BYTES,
1299  ad->numa_node);
1300  if (ad->arq_bufs == 0)
1301  {
1302  error = vlib_physmem_last_error (vm);
1303  goto error;
1304  }
1305 
1306  if ((error = vlib_pci_map_dma (vm, h, ad->arq_bufs)))
1307  goto error;
1308 
1309  if ((error = vlib_pci_intr_enable (vm, h)))
1310  goto error;
1311 
1312  if (vlib_pci_supports_virtual_addr_dma (vm, h))
1313  ad->flags |= AVF_DEVICE_F_VA_DMA;
1314 
1315  if ((error = avf_device_init (vm, am, ad, args)))
1316  goto error;
1317 
1318  /* create interface */
1319  error = ethernet_register_interface (vnm, avf_device_class.index,
1320  ad->dev_instance, ad->hwaddr,
1321  &ad->hw_if_index, avf_flag_change);
1322 
1323  if (error)
1324  goto error;
1325 
1326  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
1327  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
1328 
1329  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
1330  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
1331  vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
1332  avf_input_node.index);
1333 
1334  for (i = 0; i < ad->n_rx_queues; i++)
1335  vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, i, ~0);
1336 
1337  if (pool_elts (am->devices) == 1)
1338  vlib_process_signal_event (vm, avf_process_node.index,
1339  AVF_PROCESS_EVENT_START, 0);
1340 
1341  return;
1342 
1343 error:
1344  avf_delete_if (vm, ad);
1345  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1346  args->error = clib_error_return (error, "pci-addr %U",
1347  format_vlib_pci_addr, &args->addr);
1348  vlib_log_err (am->log_class, "%U", format_clib_error, args->error);
1349 }
1350 
1351 static clib_error_t *
1352 avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
1353 {
1354  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1355  avf_main_t *am = &avf_main;
1356  avf_device_t *ad = vec_elt_at_index (am->devices, hi->dev_instance);
1357  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
1358 
1359  if (ad->flags & AVF_DEVICE_F_ERROR)
1360  return clib_error_return (0, "device is in error state");
1361 
1362  if (is_up)
1363  {
1364  vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
1365  VNET_HW_INTERFACE_FLAG_LINK_UP);
1366  ad->flags |= AVF_DEVICE_F_ADMIN_UP;
1367  }
1368  else
1369  {
1370  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1371  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
1372  }
1373  return 0;
1374 }
1375 
1376 static clib_error_t *
1377 avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
1378  vnet_hw_interface_rx_mode mode)
1379 {
1380  avf_main_t *am = &avf_main;
1381  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1382  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
1383  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
1384 
1385  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
1386  rxq->int_mode = 0;
1387  else
1388  rxq->int_mode = 1;
1389 
1390  return 0;
1391 }
1392 
1393 static void
1394 avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
1395  u32 node_index)
1396 {
1397  avf_main_t *am = &avf_main;
1398  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1399  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
1400 
1401  /* Shut off redirection */
1402  if (node_index == ~0)
1403  {
1404  ad->per_interface_next_index = node_index;
1405  return;
1406  }
1407 
1408  ad->per_interface_next_index =
1409  vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);
1410 }
1411 
1412 static char *avf_tx_func_error_strings[] = {
1413 #define _(n,s) s,
1414  foreach_avf_tx_func_error
1415 #undef _
1416 };
1417 
1418 /* *INDENT-OFF* */
1419 VNET_DEVICE_CLASS (avf_device_class,) =
1420 {
1421  .name = "Adaptive Virtual Function (AVF) interface",
1422  .format_device = format_avf_device,
1423  .format_device_name = format_avf_device_name,
1424  .admin_up_down_function = avf_interface_admin_up_down,
1425  .rx_mode_change_function = avf_interface_rx_mode_change,
1426  .rx_redirect_to_node = avf_set_interface_next_node,
1427  .tx_function_n_errors = AVF_TX_N_ERROR,
1428  .tx_function_error_strings = avf_tx_func_error_strings,
1429 };
1430 /* *INDENT-ON* */
1431 
1432 clib_error_t *
1433 avf_init (vlib_main_t * vm)
1434 {
1435  avf_main_t *am = &avf_main;
1436  clib_error_t *error;
1437  vlib_thread_main_t *tm = vlib_get_thread_main ();
1438 
1439  if ((error = vlib_call_init_function (vm, pci_bus_init)))
1440  return error;
1441 
1442  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
1443  CLIB_CACHE_LINE_BYTES);
1444 
1445  am->log_class = vlib_log_register_class ("avf_plugin", 0);
1446  vlib_log_debug (am->log_class, "initialized");
1447 
1448  return 0;
1449 }
1450 
1451 VLIB_INIT_FUNCTION (avf_init);
1452 
1453 /*
1454  * fd.io coding-style-patch-verification: ON
1455  *
1456  * Local Variables:
1457  * eval: (c-set-style "gnu")
1458  * End:
1459  */