FD.io VPP  v19.01.3-6-g70449b9b9
Vector Packet Processing
device.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vppinfra/ring.h>
20 #include <vlib/unix/unix.h>
21 #include <vlib/pci/pci.h>
22 #include <vnet/ethernet/ethernet.h>
23 
24 #include <avf/avf.h>
25 
26 #define AVF_MBOX_LEN 64
27 #define AVF_MBOX_BUF_SZ 512
28 #define AVF_RXQ_SZ 512
29 #define AVF_TXQ_SZ 512
30 #define AVF_ITR_INT 8160
31 
32 #define PCI_VENDOR_ID_INTEL 0x8086
33 #define PCI_DEVICE_ID_INTEL_AVF 0x1889
34 #define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
35 #define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
36 
37 avf_main_t avf_main;
38 
39 static pci_device_id_t avf_pci_device_ids[] = {
40  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_AVF},
41  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X710_VF},
42  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X722_VF},
43  {0},
44 };
45 
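/* Interrupt control helpers (annotation): AVFINT_DYN_CTL0 drives the
   misc/mailbox vector (vector 0), AVFINT_DYN_CTLN(n) the per-RX-queue
   vectors.  The disable variants write INTENA=0 with ITR index 11b ("no ITR
   update"); the enable variants set INTENA, clear the PBA bit and program the
   ITR interval field, which counts in 2 us steps - so AVF_ITR_INT / 2 = 4080
   is written for an interval of AVF_ITR_INT (8160) microseconds. */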
46 static inline void
47 avf_irq_0_disable (avf_device_t * ad)
48 {
49  u32 dyn_ctl0 = 0, icr0_ena = 0;
50 
51  dyn_ctl0 |= (3 << 3); /* 11b = No ITR update */
52 
53  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
54  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
55  avf_reg_flush (ad);
56 }
57 
58 static inline void
59 avf_irq_0_enable (avf_device_t * ad)
60 {
61  u32 dyn_ctl0 = 0, icr0_ena = 0;
62 
63  icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */
64 
65  dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
66  dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
67  //dyn_ctl0 |= (3 << 3); /* [4:3] ITR Index, 11b = No ITR update */
68  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
69 
70  avf_irq_0_disable (ad);
71  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
72  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
73  avf_reg_flush (ad);
74 }
75 
76 static inline void
77 avf_irq_n_disable (avf_device_t * ad, u8 line)
78 {
79  u32 dyn_ctln = 0;
80 
81  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
82  avf_reg_flush (ad);
83 }
84 
85 static inline void
86 avf_irq_n_enable (avf_device_t * ad, u8 line)
87 {
88  u32 dyn_ctln = 0;
89 
90  dyn_ctln |= (1 << 0); /* [0] Interrupt Enable */
91  dyn_ctln |= (1 << 1); /* [1] Clear PBA */
92  dyn_ctln |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
93 
94  avf_irq_n_disable (ad, line);
95  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
96  avf_reg_flush (ad);
97 }
98 
99 
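/* avf_aq_desc_enq (annotation): copy a command descriptor into the next ATQ
   slot, attach an indirect data buffer when len != 0, bump the ATQ tail
   register and poll (suspending the process 10 us per retry) until the device
   sets the DD and CMP completion flags or the retry budget runs out. */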
100 clib_error_t *
101 avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
102  void *data, int len)
103 {
104  avf_main_t *am = &avf_main;
105  clib_error_t *err = 0;
106  avf_aq_desc_t *d, dc;
107  int n_retry = 5;
108 
109  d = &ad->atq[ad->atq_next_slot];
110  clib_memcpy_fast (d, dt, sizeof (avf_aq_desc_t));
111  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
112  if (len)
113  d->datalen = len;
114  if (len)
115  {
116  u64 pa;
117  pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
118  d->addr_hi = (u32) (pa >> 32);
119  d->addr_lo = (u32) pa;
120  clib_memcpy_fast (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ,
121  data, len);
122  d->flags |= AVF_AQ_F_BUF;
123  }
124 
125  if (ad->flags & AVF_DEVICE_F_ELOG)
126  clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));
127 
128  CLIB_MEMORY_BARRIER ();
129  vlib_log_debug (am->log_class, "%U", format_hexdump, data, len);
130  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
131  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
132  avf_reg_flush (ad);
133 
134 retry:
135  vlib_process_suspend (vm, 10e-6);
136 
137  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
138  {
139  if (--n_retry == 0)
140  {
141  err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
142  d->opcode);
143  goto done;
144  }
145  goto retry;
146  }
147 
148  clib_memcpy_fast (dt, d, sizeof (avf_aq_desc_t));
149  if (d->flags & AVF_AQ_F_ERR)
150  return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
151  "%d]", d->opcode, d->retval);
152 
153 done:
154  if (ad->flags & AVF_DEVICE_F_ELOG)
155  {
156  /* *INDENT-OFF* */
157  ELOG_TYPE_DECLARE (el) =
158  {
159  .format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
160  "datalen %d retval %d",
161  .format_args = "i4i2i2i2i2i2",
162  };
163  struct
164  {
165  u32 dev_instance;
166  u16 s_flags;
167  u16 r_flags;
168  u16 opcode;
169  u16 datalen;
170  u16 retval;
171  } *ed;
172  ed = ELOG_DATA (&vm->elog_main, el);
173  ed->dev_instance = ad->dev_instance;
174  ed->s_flags = dc.flags;
175  ed->r_flags = d->flags;
176  ed->opcode = dc.opcode;
177  ed->datalen = dc.datalen;
178  ed->retval = d->retval;
179  /* *INDENT-ON* */
180  }
181 
182  return err;
183 }
184 
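/* avf_cmd_rx_ctl_reg_write (annotation): admin-queue command 0x207 asks the
   PF to perform an RX control register write on the VF's behalf
   (param1 = register offset, param3 = value). */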
185 clib_error_t *
186 avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
187  u32 val)
188 {
189  clib_error_t *err;
190  avf_aq_desc_t d = {.opcode = 0x207,.param1 = reg,.param3 = val };
191  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
192 
193  if (ad->flags & AVF_DEVICE_F_ELOG)
194  {
195  /* *INDENT-OFF* */
196  ELOG_TYPE_DECLARE (el) =
197  {
198  .format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
199  .format_args = "i4i4i4",
200  };
201  struct
202  {
203  u32 dev_instance;
204  u32 reg;
205  u32 val;
206  } *ed;
207  ed = ELOG_DATA (&vm->elog_main, el);
208  ed->dev_instance = ad->dev_instance;
209  ed->reg = reg;
210  ed->val = val;
211  /* *INDENT-ON* */
212  }
213  return err;
214 }
215 
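/* avf_rxq_init (annotation): allocate the RX descriptor ring in DMA-able
   physmem, map it for the device, pre-fill all but 8 descriptors with buffers
   (virtual or physical addresses depending on AVF_DEVICE_F_VA_DMA) and record
   the doorbell address of the queue's tail register. */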
216 clib_error_t *
217 avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
218 {
219  clib_error_t *err;
220  avf_rxq_t *rxq;
221  u32 n_alloc, i;
222 
223  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
224  rxq = vec_elt_at_index (ad->rxqs, qid);
225  rxq->size = rxq_size;
226  rxq->next = 0;
227  rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
228  sizeof (avf_rx_desc_t),
229  2 * CLIB_CACHE_LINE_BYTES,
230  ad->numa_node);
231 
232  if (rxq->descs == 0)
233  return vlib_physmem_last_error (vm);
234 
235  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) rxq->descs)))
236  return err;
237 
238  clib_memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
239  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
240  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
241 
242  n_alloc = vlib_buffer_alloc (vm, rxq->bufs, rxq->size - 8);
243 
244  if (n_alloc == 0)
245  return clib_error_return (0, "buffer allocation error");
246 
247  rxq->n_enqueued = n_alloc;
248  avf_rx_desc_t *d = rxq->descs;
249  for (i = 0; i < n_alloc; i++)
250  {
251  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
252  if (ad->flags & AVF_DEVICE_F_VA_DMA)
253  d->qword[0] = vlib_buffer_get_va (b);
254  else
255  d->qword[0] = vlib_buffer_get_pa (vm, b);
256  d++;
257  }
258 
259  ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
260  return 0;
261 }
262 
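/* avf_txq_init (annotation): queue ids beyond the number of queue pairs
   granted by the PF are folded back onto existing queues and protected with a
   spinlock (AVF_DEVICE_F_SHARED_TXQ_LOCK); otherwise a fresh descriptor ring,
   buffer vector and RS-slot ring are allocated for the queue. */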
263 clib_error_t *
264 avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
265 {
266  clib_error_t *err;
267  avf_txq_t *txq;
268 
269  if (qid >= ad->num_queue_pairs)
270  {
271  qid = qid % ad->num_queue_pairs;
272  txq = vec_elt_at_index (ad->txqs, qid);
273  if (txq->lock == 0)
274  clib_spinlock_init (&txq->lock);
275  ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
276  return 0;
277  }
278 
279  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
280  txq = vec_elt_at_index (ad->txqs, qid);
281  txq->size = txq_size;
282  txq->next = 0;
283  txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
284  sizeof (avf_tx_desc_t),
285  2 * CLIB_CACHE_LINE_BYTES,
286  ad->numa_node);
287  if (txq->descs == 0)
288  return vlib_physmem_last_error (vm);
289 
290  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) txq->descs)))
291  return err;
292 
293  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
294  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
295 
296  /* initialize ring of pending RS slots */
297  clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
298 
299  ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
300  return 0;
301 }
302 
303 typedef struct
304 {
305  u16 vsi_id;
306  u16 flags;
307 } virtchnl_promisc_info_t;
308 
309 void
310 avf_arq_slot_init (avf_device_t * ad, u16 slot)
311 {
312  avf_aq_desc_t *d;
313  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
314  d = &ad->arq[slot];
315  clib_memset (d, 0, sizeof (avf_aq_desc_t));
316  d->flags = AVF_AQ_F_BUF;
317  d->datalen = AVF_MBOX_BUF_SZ;
318  d->addr_hi = (u32) (pa >> 32);
319  d->addr_lo = (u32) pa;
320 }
321 
322 static inline uword
323 avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
324 {
325  return (ad->flags & AVF_DEVICE_F_VA_DMA) ?
326  pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
327 }
328 
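/* avf_adminq_init (annotation): reset both mailbox rings - zero the
   descriptors, program head/tail/base/length registers for the transmit (ATQ)
   and receive (ARQ) queues, pre-post every ARQ slot with a 512-byte buffer
   and expose the ring to the PF by writing the ARQ tail. */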
329 static void
330 avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
331 {
332  u64 pa;
333  int i;
334 
335  /* VF MailBox Transmit */
336  clib_memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
337  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);
338 
339  pa = avf_dma_addr (vm, ad, ad->atq);
340  avf_reg_write (ad, AVF_ATQT, 0); /* Tail */
341  avf_reg_write (ad, AVF_ATQH, 0); /* Head */
342  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
343  avf_reg_write (ad, AVF_ATQBAL, (u32) pa); /* Base Address Low */
344  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
345 
346  /* VF MailBox Receive */
347  clib_memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
348  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);
349 
350  for (i = 0; i < AVF_MBOX_LEN; i++)
351  avf_arq_slot_init (ad, i);
352 
353  pa = avf_dma_addr (vm, ad, ad->arq);
354 
355  avf_reg_write (ad, AVF_ARQH, 0); /* Head */
356  avf_reg_write (ad, AVF_ARQT, 0); /* Tail */
357  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
358  avf_reg_write (ad, AVF_ARQBAL, (u32) pa); /* Base Address Low */
359  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
360  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1); /* Tail */
361 
362  ad->atq_next_slot = 0;
363  ad->arq_next_slot = 0;
364 }
365 
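/* avf_send_to_pf (annotation): synchronous virtchnl request/response over the
   mailbox.  The request goes out as admin-queue command 0x801 carrying the
   virtchnl opcode; the function then polls the ARQ for the matching reply,
   queueing any VIRTCHNL_OP_EVENT messages that arrive in the meantime onto
   ad->events.  All avf_op_* wrappers below are thin front-ends to this call. */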
366 clib_error_t *
367 avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
368  void *in, int in_len, void *out, int out_len)
369 {
370  clib_error_t *err;
371  avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };
372  u32 head;
373  int n_retry = 5;
374 
375 
376  /* suppress interrupt in the next adminq receive slot
377  as we are going to wait for response
378  we only need interrupts when event is received */
379  d = &ad->arq[ad->arq_next_slot];
380  d->flags |= AVF_AQ_F_SI;
381 
382  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
383  return err;
384 
385 retry:
386  head = avf_get_u32 (ad->bar0, AVF_ARQH);
387 
388  if (ad->arq_next_slot == head)
389  {
390  if (--n_retry == 0)
391  return clib_error_return (0, "timeout");
392  vlib_process_suspend (vm, 10e-3);
393  goto retry;
394  }
395 
396  d = &ad->arq[ad->arq_next_slot];
397 
398  if (d->v_opcode == VIRTCHNL_OP_EVENT)
399  {
400  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
401  virtchnl_pf_event_t *e;
402 
403  if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
404  ((d->flags & AVF_AQ_F_BUF) == 0))
405  return clib_error_return (0, "event message error");
406 
407  vec_add2 (ad->events, e, 1);
408  clib_memcpy_fast (e, buf, sizeof (virtchnl_pf_event_t));
409  avf_arq_slot_init (ad, ad->arq_next_slot);
410  ad->arq_next_slot++;
411  n_retry = 5;
412  goto retry;
413  }
414 
415  if (d->v_opcode != op)
416  {
417  err =
418  clib_error_return (0,
419  "unexpected message receiver [v_opcode = %u, "
420  "expected %u, v_retval %d]", d->v_opcode, op,
421  d->v_retval);
422  goto done;
423  }
424 
425  if (d->v_retval)
426  {
427  err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
428  d->v_opcode, d->v_retval);
429  goto done;
430  }
431 
432  if (d->flags & AVF_AQ_F_BUF)
433  {
434  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
435  clib_memcpy_fast (out, buf, out_len);
436  }
437 
438  avf_arq_slot_init (ad, ad->arq_next_slot);
439  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
440  avf_reg_flush (ad);
441  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;
442 
443 done:
444 
445  if (ad->flags & AVF_DEVICE_F_ELOG)
446  {
447  /* *INDENT-OFF* */
448  ELOG_TYPE_DECLARE (el) =
449  {
450  .format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
451  .format_args = "i4t4i4i4",
452  .n_enum_strings = VIRTCHNL_N_OPS,
453  .enum_strings = {
454 #define _(v, n) [v] = #n,
455  foreach_virtchnl_op
456 #undef _
457  },
458  };
459  struct
460  {
461  u32 dev_instance;
462  u32 v_opcode;
463  u32 v_opcode_val;
464  u32 v_retval;
465  } *ed;
466  ed = ELOG_DATA (&vm->elog_main, el);
467  ed->dev_instance = ad->dev_instance;
468  ed->v_opcode = op;
469  ed->v_opcode_val = op;
470  ed->v_retval = d->v_retval;
471  /* *INDENT-ON* */
472  }
473  return err;
474 }
475 
476 clib_error_t *
477 avf_op_version (vlib_main_t * vm, avf_device_t * ad,
478  virtchnl_version_info_t * ver)
479 {
480  clib_error_t *err = 0;
481  virtchnl_version_info_t myver = {
482  .major = VIRTCHNL_VERSION_MAJOR,
483  .minor = VIRTCHNL_VERSION_MINOR,
484  };
485 
486  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
487  sizeof (virtchnl_version_info_t), ver,
488  sizeof (virtchnl_version_info_t));
489 
490  if (err)
491  return err;
492 
493  return err;
494 }
495 
496 clib_error_t *
497 avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
498  virtchnl_vf_resource_t * res)
499 {
500  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
501  VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
502  VIRTCHNL_VF_OFFLOAD_RX_POLLING);
503 
504  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
505  sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
506 }
507 
508 clib_error_t *
509 avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
510 {
511  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
512  int i;
513  u8 msg[msg_len];
514  virtchnl_rss_lut_t *rl;
515 
516  clib_memset (msg, 0, msg_len);
517  rl = (virtchnl_rss_lut_t *) msg;
518  rl->vsi_id = ad->vsi_id;
519  rl->lut_entries = ad->rss_lut_size;
520  for (i = 0; i < ad->rss_lut_size; i++)
521  rl->lut[i] = i % ad->n_rx_queues;
522 
523  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
524  0);
525 }
526 
527 clib_error_t *
528 avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
529 {
530  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
531  int i;
532  u8 msg[msg_len];
533  virtchnl_rss_key_t *rk;
534 
535  clib_memset (msg, 0, msg_len);
536  rk = (virtchnl_rss_key_t *) msg;
537  rk->vsi_id = ad->vsi_id;
538  rk->key_len = ad->rss_key_size;
539  u32 seed = random_default_seed ();
540  for (i = 0; i < ad->rss_key_size; i++)
541  rk->key[i] = (u8) random_u32 (&seed);
542 
543  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
544  0);
545 }
546 
547 clib_error_t *
548 avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
549 {
550  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
551  0);
552 }
553 
554 clib_error_t *
555 avf_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad)
556 {
557  virtchnl_promisc_info_t pi = { 0 };
558 
559  pi.vsi_id = ad->vsi_id;
560  pi.flags = 1;
561  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
562  sizeof (virtchnl_promisc_info_t), 0, 0);
563 }
564 
565 
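/* avf_op_config_vsi_queues (annotation): build a variable-length
   virtchnl_vsi_queue_config_info_t message describing every RX/TX queue pair
   (ring length, DMA base address, buffer and packet sizes) and hand it to the
   PF; the RX tail register is also bumped here so the ring is fully armed. */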
566 clib_error_t *
567 avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
568 {
569  int i;
570  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
571  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
572  sizeof (virtchnl_queue_pair_info_t);
573  u8 msg[msg_len];
574  virtchnl_vsi_queue_config_info_t *ci;
575 
576  clib_memset (msg, 0, msg_len);
577  ci = (virtchnl_vsi_queue_config_info_t *) msg;
578  ci->vsi_id = ad->vsi_id;
579  ci->num_queue_pairs = n_qp;
580 
581  for (i = 0; i < n_qp; i++)
582  {
583  virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
584  virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;
585 
586  rxq->vsi_id = ad->vsi_id;
587  rxq->queue_id = i;
588  rxq->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
589  if (i < vec_len (ad->rxqs))
590  {
591  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
592  rxq->ring_len = q->size;
593  rxq->databuffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
594  rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
595  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
596  }
597 
598  avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
599  txq->vsi_id = ad->vsi_id;
600  if (i < vec_len (ad->txqs))
601  {
602  txq->queue_id = i;
603  txq->ring_len = q->size;
604  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
605  }
606  }
607 
608  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
609  0, 0);
610 }
611 
612 clib_error_t *
613 avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
614 {
615  int count = 1;
616  int msg_len = sizeof (virtchnl_irq_map_info_t) +
617  count * sizeof (virtchnl_vector_map_t);
618  u8 msg[msg_len];
619  virtchnl_irq_map_info_t *imi;
620 
621  clib_memset (msg, 0, msg_len);
622  imi = (virtchnl_irq_map_info_t *) msg;
623  imi->num_vectors = count;
624 
625  imi->vecmap[0].vector_id = 1;
626  imi->vecmap[0].vsi_id = ad->vsi_id;
627  imi->vecmap[0].rxq_map = 1;
628  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
629  0);
630 }
631 
632 clib_error_t *
633 avf_op_add_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count, u8 * macs)
634 {
635  int msg_len =
636  sizeof (virtchnl_ether_addr_list_t) +
637  count * sizeof (virtchnl_ether_addr_t);
638  u8 msg[msg_len];
639  virtchnl_ether_addr_list_t *al;
640  int i;
641 
642  clib_memset (msg, 0, msg_len);
643  al = (virtchnl_ether_addr_list_t *) msg;
644  al->vsi_id = ad->vsi_id;
645  al->num_elements = count;
646  for (i = 0; i < count; i++)
647  clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
648  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,
649  0);
650 }
651 
652 clib_error_t *
653 avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
654 {
655  virtchnl_queue_select_t qs = { 0 };
656  int i = 0;
657  qs.vsi_id = ad->vsi_id;
658  qs.rx_queues = rx;
659  qs.tx_queues = tx;
660  while (rx)
661  {
662  if (rx & (1 << i))
663  {
664  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
665  avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
666  rx &= ~(1 << i);
667  }
668  i++;
669  }
670  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
671  sizeof (virtchnl_queue_select_t), 0, 0);
672 }
673 
674 clib_error_t *
675 avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
676  virtchnl_eth_stats_t * es)
677 {
678  virtchnl_queue_select_t qs = { 0 };
679  qs.vsi_id = ad->vsi_id;
680  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
681  &qs, sizeof (virtchnl_queue_select_t),
682  es, sizeof (virtchnl_eth_stats_t));
683 }
684 
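/* avf_device_reset (annotation): ask the PF for a VF reset
   (VIRTCHNL_OP_RESET_VF) and poll AVFGEN_RSTAT in 10 ms steps until it
   reports a reset-complete state (2 or 3), giving up after 20 tries. */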
685 clib_error_t *
686 avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
687 {
688  avf_aq_desc_t d = { 0 };
689  clib_error_t *error;
690  u32 rstat;
691  int n_retry = 20;
692 
693  d.opcode = 0x801;
694  d.v_opcode = VIRTCHNL_OP_RESET_VF;
695  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
696  return error;
697 
698 retry:
699  vlib_process_suspend (vm, 10e-3);
700  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
701 
702  if (rstat == 2 || rstat == 3)
703  return 0;
704 
705  if (--n_retry == 0)
706  return clib_error_return (0, "reset failed (timeout)");
707 
708  goto retry;
709 }
710 
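/* avf_request_queues (annotation): VIRTCHNL_OP_REQUEST_QUEUES asks the PF for
   a larger number of queue pairs.  A successful reply actually means the
   request was rejected; when the PF grants it, it restarts the VF instead, so
   the code then waits for AVFGEN_RSTAT to signal that the reset finished. */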
711 clib_error_t *
712 avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
713 {
714  virtchnl_vf_res_request_t res_req = { 0 };
715  clib_error_t *error;
716  u32 rstat;
717  int n_retry = 20;
718 
719  res_req.num_queue_pairs = num_queue_pairs;
720 
721  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
722  sizeof (virtchnl_vf_res_request_t), &res_req,
723  sizeof (virtchnl_vf_res_request_t));
724 
725  /*
726  * if the PF responds, the request failed
727  * else the PF initiates a restart and avf_send_to_pf returns an error
728  */
729  if (!error)
730  {
731  return clib_error_return (0, "requested more than %u queue pairs",
732  res_req.num_queue_pairs);
733  }
734 
735 retry:
736  vlib_process_suspend (vm, 10e-3);
737  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
738 
739  if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
740  goto done;
741 
742  if (--n_retry == 0)
743  return clib_error_return (0, "reset failed (timeout)");
744 
745  goto retry;
746 
747 done:
748  return NULL;
749 }
750 
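/* avf_device_init (annotation): the virtchnl bring-up sequence - negotiate
   the API version, fetch VF resources (VSI id, queue pairs, RSS sizes, MAC
   address), disable VLAN stripping, enable promiscuous mode, create the RX/TX
   queues, program RSS and the IRQ map, add the MAC address and finally enable
   the queues and interrupts. */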
751 clib_error_t *
752 avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
753  avf_create_if_args_t * args)
754 {
755  virtchnl_version_info_t ver = { 0 };
756  virtchnl_vf_resource_t res = { 0 };
757  clib_error_t *error;
758  vlib_thread_main_t *tm = vlib_get_thread_main ();
759  int i;
760 
761  avf_adminq_init (vm, ad);
762 
763  /* request more queues only if we need them */
764  if ((error = avf_request_queues (vm, ad, tm->n_vlib_mains)))
765  {
766  /* we failed to get more queues, but still we want to proceed */
767  clib_error_free (error);
768 
769  if ((error = avf_device_reset (vm, ad)))
770  return error;
771  }
772 
773  avf_adminq_init (vm, ad);
774 
775  /*
776  * OP_VERSION
777  */
778  if ((error = avf_op_version (vm, ad, &ver)))
779  return error;
780 
781  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
782  ver.minor != VIRTCHNL_VERSION_MINOR)
783  return clib_error_return (0, "incompatible protocol version "
784  "(remote %d.%d)", ver.major, ver.minor);
785 
786  /*
787  * OP_GET_VF_RESOUCES
788  */
789  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
790  return error;
791 
792  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
793  return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");
794 
795  ad->vsi_id = res.vsi_res[0].vsi_id;
796  ad->feature_bitmap = res.vf_offload_flags;
797  ad->num_queue_pairs = res.num_queue_pairs;
798  ad->max_vectors = res.max_vectors;
799  ad->max_mtu = res.max_mtu;
800  ad->rss_key_size = res.rss_key_size;
801  ad->rss_lut_size = res.rss_lut_size;
802 
803  clib_memcpy_fast (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);
804 
805  /*
806  * Disable VLAN stripping
807  */
808  if ((error = avf_op_disable_vlan_stripping (vm, ad)))
809  return error;
810 
811  if ((error = avf_config_promisc_mode (vm, ad)))
812  return error;
813 
814  /*
815  * Init Queues
816  */
817  if (args->rxq_num == 0)
818  {
819  args->rxq_num = 1;
820  }
821  else if (args->rxq_num > ad->num_queue_pairs)
822  {
823  args->rxq_num = ad->num_queue_pairs;
824  vlib_log_warn (am->log_class, "Requested more rx queues than "
825  "queue pairs available. Using %u rx queues.",
826  args->rxq_num);
827  }
828 
829  for (i = 0; i < args->rxq_num; i++)
830  if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
831  return error;
832 
833  for (i = 0; i < tm->n_vlib_mains; i++)
834  if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
835  return error;
836 
837  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
838  (error = avf_op_config_rss_lut (vm, ad)))
839  return error;
840 
841  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
842  (error = avf_op_config_rss_key (vm, ad)))
843  return error;
844 
845  if ((error = avf_op_config_vsi_queues (vm, ad)))
846  return error;
847 
848  if ((error = avf_op_config_irq_map (vm, ad)))
849  return error;
850 
851  avf_irq_0_enable (ad);
852  for (i = 0; i < ad->n_rx_queues; i++)
853  avf_irq_n_enable (ad, i);
854 
855  if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))
856  return error;
857 
858  if ((error = avf_op_enable_queues (vm, ad, pow2_mask (ad->n_rx_queues),
859  pow2_mask (ad->n_tx_queues))))
860  return error;
861 
862  ad->flags |= AVF_DEVICE_F_INITIALIZED;
863  return error;
864 }
865 
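/* avf_process_one_device (annotation): periodic/IRQ-driven housekeeping.  It
   checks that the device is initialized and not mid-reset, verifies both
   mailbox rings are still enabled, pulls ethernet stats on the periodic path
   and replays any queued PF events, translating link-change events into VPP
   interface flags and link speed. */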
866 void
867 avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
868 {
869  avf_main_t *am = &avf_main;
870  vnet_main_t *vnm = vnet_get_main ();
871  virtchnl_pf_event_t *e;
872  u32 r;
873 
874  if (ad->flags & AVF_DEVICE_F_ERROR)
875  return;
876 
877  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
878  return;
879 
880  ASSERT (ad->error == 0);
881 
882  /* do not process device in reset state */
883  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
884  if (r != VIRTCHNL_VFR_VFACTIVE)
885  return;
886 
887  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
888  if ((r & 0xf0000000) != (1ULL << 31))
889  {
890  ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
891  goto error;
892  }
893 
894  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
895  if ((r & 0xf0000000) != (1ULL << 31))
896  {
897  ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
898  goto error;
899  }
900 
901  if (is_irq == 0)
902  avf_op_get_stats (vm, ad, &ad->eth_stats);
903 
904  /* *INDENT-OFF* */
905  vec_foreach (e, ad->events)
906  {
907  if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
908  {
909  int link_up = e->event_data.link_event.link_status;
910  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
911  u32 flags = 0;
912  u32 kbps = 0;
913 
914  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
915  {
916  ad->flags |= AVF_DEVICE_F_LINK_UP;
917  flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
918  VNET_HW_INTERFACE_FLAG_LINK_UP);
919  if (speed == VIRTCHNL_LINK_SPEED_40GB)
920  kbps = 40000000;
921  else if (speed == VIRTCHNL_LINK_SPEED_25GB)
922  kbps = 25000000;
923  else if (speed == VIRTCHNL_LINK_SPEED_10GB)
924  kbps = 10000000;
925  else if (speed == VIRTCHNL_LINK_SPEED_1GB)
926  kbps = 1000000;
927  else if (speed == VIRTCHNL_LINK_SPEED_100MB)
928  kbps = 100000;
929  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
930  vnet_hw_interface_set_link_speed (vnm, ad->hw_if_index, kbps);
931  ad->link_speed = speed;
932  }
933  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
934  {
935  ad->flags &= ~AVF_DEVICE_F_LINK_UP;
936  ad->link_speed = 0;
937  }
938 
939  if (ad->flags & AVF_DEVICE_F_ELOG)
940  {
941  ELOG_TYPE_DECLARE (el) =
942  {
943  .format = "avf[%d] link change: link_status %d "
944  "link_speed %d",
945  .format_args = "i4i1i1",
946  };
947  struct
948  {
949  u32 dev_instance;
950  u8 link_status;
951  u8 link_speed;
952  } *ed;
953  ed = ELOG_DATA (&vm->elog_main, el);
954  ed->dev_instance = ad->dev_instance;
955  ed->link_status = link_up;
956  ed->link_speed = speed;
957  }
958  }
959  else
960  {
961  if (ad->flags & AVF_DEVICE_F_ELOG)
962  {
963  ELOG_TYPE_DECLARE (el) =
964  {
965  .format = "avf[%d] unknown event: event %d severity %d",
966  .format_args = "i4i4i1i1",
967  };
968  struct
969  {
970  u32 dev_instance;
971  u32 event;
972  u32 severity;
973  } *ed;
974  ed = ELOG_DATA (&vm->elog_main, el);
975  ed->dev_instance = ad->dev_instance;
976  ed->event = e->event;
977  ed->severity = e->severity;
978  }
979  }
980  }
981  /* *INDENT-ON* */
982  vec_reset_length (ad->events);
983 
984  return;
985 
986 error:
987  ad->flags |= AVF_DEVICE_F_ERROR;
988  ASSERT (ad->error != 0);
989  vlib_log_err (am->log_class, "%U", format_clib_error, ad->error);
990 }
991 
992 static u32
993 avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
994 {
995  avf_main_t *am = &avf_main;
996  vlib_log_warn (am->log_class, "TODO");
997  return 0;
998 }
999 
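/* avf-process (annotation): a VLIB process node that sleeps until an event
   (start/stop or an admin-queue interrupt) or a 5 second timer fires, then
   walks all AVF devices and runs avf_process_one_device() on each. */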
1000 static uword
1001 avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
1002 {
1003  avf_main_t *am = &avf_main;
1004  avf_device_t *ad;
1005  uword *event_data = 0, event_type;
1006  int enabled = 0, irq;
1007  f64 last_run_duration = 0;
1008  f64 last_periodic_time = 0;
1009 
1010  while (1)
1011  {
1012  if (enabled)
1013  vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
1014  else
1015  vlib_process_wait_for_event (vm);
1016 
1017  event_type = vlib_process_get_events (vm, &event_data);
1018  vec_reset_length (event_data);
1019  irq = 0;
1020 
1021  switch (event_type)
1022  {
1023  case ~0:
1024  last_periodic_time = vlib_time_now (vm);
1025  break;
1026  case AVF_PROCESS_EVENT_START:
1027  enabled = 1;
1028  break;
1029  case AVF_PROCESS_EVENT_STOP:
1030  enabled = 0;
1031  continue;
1032  case AVF_PROCESS_EVENT_AQ_INT:
1033  irq = 1;
1034  break;
1035  default:
1036  ASSERT (0);
1037  }
1038 
1039  /* *INDENT-OFF* */
1040  pool_foreach (ad, am->devices,
1041  {
1042  avf_process_one_device (vm, ad, irq);
1043  });
1044  /* *INDENT-ON* */
1045  last_run_duration = vlib_time_now (vm) - last_periodic_time;
1046  }
1047  return 0;
1048 }
1049 
1050 /* *INDENT-OFF* */
1051 VLIB_REGISTER_NODE (avf_process_node, static)  = {
1052  .function = avf_process,
1053  .type = VLIB_NODE_TYPE_PROCESS,
1054  .name = "avf-process",
1055 };
1056 /* *INDENT-ON* */
1057 
1058 static void
1059 avf_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1060 {
1061  avf_main_t *am = &avf_main;
1062  uword pd = vlib_pci_get_private_data (vm, h);
1063  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1064  u32 icr0;
1065 
1066  icr0 = avf_reg_read (ad, AVFINT_ICR0);
1067 
1068  if (ad->flags & AVF_DEVICE_F_ELOG)
1069  {
1070  /* *INDENT-OFF* */
1071  ELOG_TYPE_DECLARE (el) =
1072  {
1073  .format = "avf[%d] irq 0: icr0 0x%x",
1074  .format_args = "i4i4",
1075  };
1076  /* *INDENT-ON* */
1077  struct
1078  {
1079  u32 dev_instance;
1080  u32 icr0;
1081  } *ed;
1082 
1083  ed = ELOG_DATA (&vm->elog_main, el);
1084  ed->dev_instance = ad->dev_instance;
1085  ed->icr0 = icr0;
1086  }
1087 
1088  avf_irq_0_enable (ad);
1089 
1090  /* bit 30 - Send/Receive Admin queue interrupt indication */
1091  if (icr0 & (1 << 30))
1092  vlib_process_signal_event (vm, avf_process_node.index,
1093  AVF_PROCESS_EVENT_AQ_INT, 0);
1094 }
1095 
1096 static void
1097 avf_irq_n_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1098 {
1099  vnet_main_t *vnm = vnet_get_main ();
1100  avf_main_t *am = &avf_main;
1101  uword pd = vlib_pci_get_private_data (vm, h);
1102  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1103  u16 qid;
1104  int i;
1105 
1106  if (ad->flags & AVF_DEVICE_F_ELOG)
1107  {
1108  /* *INDENT-OFF* */
1109  ELOG_TYPE_DECLARE (el) =
1110  {
1111  .format = "avf[%d] irq %d: received",
1112  .format_args = "i4i2",
1113  };
1114  /* *INDENT-ON* */
1115  struct
1116  {
1117  u32 dev_instance;
1118  u16 line;
1119  } *ed;
1120 
1121  ed = ELOG_DATA (&vm->elog_main, el);
1122  ed->dev_instance = ad->dev_instance;
1123  ed->line = line;
1124  }
1125 
1126  qid = line - 1;
1127  if (vec_len (ad->rxqs) > qid && ad->rxqs[qid].int_mode != 0)
1128  vnet_device_input_set_interrupt_pending (vnm, ad->hw_if_index, qid);
1129  for (i = 0; i < vec_len (ad->rxqs); i++)
1130  avf_irq_n_enable (ad, i);
1131 }
1132 
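/* avf_delete_if (annotation): tear-down path shared by the error label in
   avf_create_if and the explicit delete - removes the ethernet interface,
   closes the PCI device and releases the mailbox, descriptor-ring and buffer
   memory before returning the device structure to the pool. */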
1133 void
1134 avf_delete_if (vlib_main_t * vm, avf_device_t * ad)
1135 {
1136  vnet_main_t *vnm = vnet_get_main ();
1137  avf_main_t *am = &avf_main;
1138  int i;
1139 
1140  if (ad->hw_if_index)
1141  {
1142  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1143  vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
1144  ethernet_delete_interface (vnm, ad->hw_if_index);
1145  }
1146 
1147  vlib_pci_device_close (vm, ad->pci_dev_handle);
1148 
1149  vlib_physmem_free (vm, ad->atq);
1150  vlib_physmem_free (vm, ad->arq);
1151  vlib_physmem_free (vm, ad->atq_bufs);
1152  vlib_physmem_free (vm, ad->arq_bufs);
1153 
1154  /* *INDENT-OFF* */
1155  vec_foreach_index (i, ad->rxqs)
1156  {
1157  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
1158  vlib_physmem_free (vm, (void *) rxq->descs);
1159  if (rxq->n_enqueued)
1160  vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
1161  rxq->n_enqueued);
1162  vec_free (rxq->bufs);
1163  }
1164  /* *INDENT-ON* */
1165  vec_free (ad->rxqs);
1166 
1167  /* *INDENT-OFF* */
1168  vec_foreach_index (i, ad->txqs)
1169  {
1170  avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
1171  vlib_physmem_free (vm, (void *) txq->descs);
1172  if (txq->n_enqueued)
1173  {
1174  u16 first = (txq->next - txq->n_enqueued) & (txq->size -1);
1175  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
1176  txq->n_enqueued);
1177  }
1178  vec_free (txq->bufs);
1179  clib_ring_free (txq->rs_slots);
1180  }
1181  /* *INDENT-ON* */
1182  vec_free (ad->txqs);
1183  vec_free (ad->name);
1184 
1185  clib_error_free (ad->error);
1186  clib_memset (ad, 0, sizeof (*ad));
1187  pool_put (am->devices, ad);
1188 }
1189 
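/* avf_create_if (annotation): open the PCI device, map BAR0, hook up the
   MSI-X vectors, allocate and DMA-map the mailbox rings and buffers, run
   avf_device_init() and finally register the ethernet interface and its RX
   queues with VNET.  The first device created also kicks the avf-process
   node. */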
1190 void
1191 avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
1192 {
1193  vnet_main_t *vnm = vnet_get_main ();
1194  avf_main_t *am = &avf_main;
1195  avf_device_t *ad;
1196  vlib_pci_dev_handle_t h;
1197  clib_error_t *error = 0;
1198  int i;
1199 
1200  /* check input args */
1201  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
1202  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;
1203 
1204  if ((args->rxq_size & (args->rxq_size - 1))
1205  || (args->txq_size & (args->txq_size - 1)))
1206  {
1207  args->rv = VNET_API_ERROR_INVALID_VALUE;
1208  args->error =
1209  clib_error_return (error, "queue size must be a power of two");
1210  return;
1211  }
1212 
1213  pool_get (am->devices, ad);
1214  ad->dev_instance = ad - am->devices;
1215  ad->per_interface_next_index = ~0;
1216  ad->name = vec_dup (args->name);
1217 
1218  if (args->enable_elog)
1219  ad->flags |= AVF_DEVICE_F_ELOG;
1220 
1221  if ((error = vlib_pci_device_open (vm, &args->addr, avf_pci_device_ids,
1222  &h)))
1223  {
1224  pool_put (am->devices, ad);
1225  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1226  args->error =
1227  clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
1228  &args->addr);
1229  return;
1230  }
1231  ad->pci_dev_handle = h;
1232  ad->numa_node = vlib_pci_get_numa_node (vm, h);
1233 
1234  vlib_pci_set_private_data (vm, h, ad->dev_instance);
1235 
1236  if ((error = vlib_pci_bus_master_enable (vm, h)))
1237  goto error;
1238 
1239  if ((error = vlib_pci_map_region (vm, h, 0, &ad->bar0)))
1240  goto error;
1241 
1242  if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
1243  &avf_irq_0_handler)))
1244  goto error;
1245 
1246  if ((error = vlib_pci_register_msix_handler (vm, h, 1, 1,
1247  &avf_irq_n_handler)))
1248  goto error;
1249 
1250  if ((error = vlib_pci_enable_msix_irq (vm, h, 0, 2)))
1251  goto error;
1252 
1253  ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1254  AVF_MBOX_LEN,
1255  CLIB_CACHE_LINE_BYTES,
1256  ad->numa_node);
1257  if (ad->atq == 0)
1258  {
1259  error = vlib_physmem_last_error (vm);
1260  goto error;
1261  }
1262 
1263  if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
1264  goto error;
1265 
1267  AVF_MBOX_LEN,
1269  ad->numa_node);
1270  if (ad->arq == 0)
1271  {
1272  error = vlib_physmem_last_error (vm);
1273  goto error;
1274  }
1275 
1276  if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
1277  goto error;
1278 
1279  ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1280  AVF_MBOX_LEN,
1281  CLIB_CACHE_LINE_BYTES,
1282  ad->numa_node);
1283  if (ad->atq_bufs == 0)
1284  {
1285  error = vlib_physmem_last_error (vm);
1286  goto error;
1287  }
1288 
1289  if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
1290  goto error;
1291 
1292  ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1293  AVF_MBOX_LEN,
1294  CLIB_CACHE_LINE_BYTES,
1295  ad->numa_node);
1296  if (ad->arq_bufs == 0)
1297  {
1298  error = vlib_physmem_last_error (vm);
1299  goto error;
1300  }
1301 
1302  if ((error = vlib_pci_map_dma (vm, h, ad->arq_bufs)))
1303  goto error;
1304 
1305  if ((error = vlib_pci_intr_enable (vm, h)))
1306  goto error;
1307 
1308  if (vlib_pci_supports_virtual_addr_dma (vm, h))
1309  ad->flags |= AVF_DEVICE_F_VA_DMA;
1310 
1311  if ((error = avf_device_init (vm, am, ad, args)))
1312  goto error;
1313 
1314  /* create interface */
1315  error = ethernet_register_interface (vnm, avf_device_class.index,
1316  ad->dev_instance, ad->hwaddr,
1317  &ad->hw_if_index, avf_flag_change);
1318 
1319  if (error)
1320  goto error;
1321 
1322  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
1323  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
1324 
1325  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
1326  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
1327  vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
1328  avf_input_node.index);
1329 
1330  for (i = 0; i < ad->n_rx_queues; i++)
1331  vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, i, ~0);
1332 
1333  if (pool_elts (am->devices) == 1)
1334  vlib_process_signal_event (vm, avf_process_node.index,
1335  AVF_PROCESS_EVENT_START, 0);
1336 
1337  return;
1338 
1339 error:
1340  avf_delete_if (vm, ad);
1341  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1342  args->error = clib_error_return (error, "pci-addr %U",
1343  format_vlib_pci_addr, &args->addr);
1344  vlib_log_err (am->log_class, "%U", format_clib_error, args->error);
1345 }
1346 
1347 static clib_error_t *
1348 avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
1349 {
1350  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1351  avf_main_t *am = &avf_main;
1352  avf_device_t *ad = vec_elt_at_index (am->devices, hi->dev_instance);
1353  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
1354 
1355  if (ad->flags & AVF_DEVICE_F_ERROR)
1356  return clib_error_return (0, "device is in error state");
1357 
1358  if (is_up)
1359  {
1360  vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
1361  VNET_HW_INTERFACE_FLAG_LINK_UP);
1362  ad->flags |= AVF_DEVICE_F_ADMIN_UP;
1363  }
1364  else
1365  {
1366  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1367  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
1368  }
1369  return 0;
1370 }
1371 
1372 static clib_error_t *
1373 avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
1374  vnet_hw_interface_rx_mode mode)
1375 {
1376  avf_main_t *am = &avf_main;
1377  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1378  avf_device_t *ad = vec_elt_at_index (am->devices, hw->dev_instance);
1379  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
1380 
1381  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
1382  rxq->int_mode = 0;
1383  else
1384  rxq->int_mode = 1;
1385 
1386  return 0;
1387 }
1388 
1389 static void
1390 avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
1391  u32 node_index)
1392 {
1393  avf_main_t *am = &avf_main;
1394  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1395  avf_device_t *ad = vec_elt_at_index (am->devices, hw->dev_instance);
1396 
1397  /* Shut off redirection */
1398  if (node_index == ~0)
1399  {
1400  ad->per_interface_next_index = node_index;
1401  return;
1402  }
1403 
1404  ad->per_interface_next_index =
1405  vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);
1406 }
1407 
1408 static char *avf_tx_func_error_strings[] = {
1409 #define _(n,s) s,
1410  foreach_avf_tx_func_error
1411 #undef _
1412 };
1413 
1414 /* *INDENT-OFF* */
1415 VNET_DEVICE_CLASS (avf_device_class,) =
1416 {
1417  .name = "Adaptive Virtual Function (AVF) interface",
1418  .format_device = format_avf_device,
1419  .format_device_name = format_avf_device_name,
1420  .admin_up_down_function = avf_interface_admin_up_down,
1421  .rx_mode_change_function = avf_interface_rx_mode_change,
1422  .rx_redirect_to_node = avf_set_interface_next_node,
1423  .tx_function_n_errors = AVF_TX_N_ERROR,
1424  .tx_function_error_strings = avf_tx_func_error_strings,
1425 };
1426 /* *INDENT-ON* */
1427 
1428 clib_error_t *
1429 avf_init (vlib_main_t * vm)
1430 {
1431  avf_main_t *am = &avf_main;
1432  clib_error_t *error;
1433  vlib_thread_main_t *tm = vlib_get_thread_main ();
1434 
1435  if ((error = vlib_call_init_function (vm, pci_bus_init)))
1436  return error;
1437 
1438  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
1439  CLIB_CACHE_LINE_BYTES);
1440 
1441  am->log_class = vlib_log_register_class ("avf_plugin", 0);
1442  vlib_log_debug (am->log_class, "initialized");
1443 
1444  return 0;
1445 }
1446 
1447 VLIB_INIT_FUNCTION (avf_init);
1448 
1449 /*
1450  * fd.io coding-style-patch-verification: ON
1451  *
1452  * Local Variables:
1453  * eval: (c-set-style "gnu")
1454  * End:
1455  */