FD.io VPP  v20.01-48-g3e0dafb74
Vector Packet Processing
device.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vppinfra/ring.h>
20 #include <vlib/unix/unix.h>
21 #include <vlib/pci/pci.h>
22 #include <vnet/ethernet/ethernet.h>
23 
24 #include <avf/avf.h>
25 
26 #define AVF_MBOX_LEN 64
27 #define AVF_MBOX_BUF_SZ 512
28 #define AVF_RXQ_SZ 512
29 #define AVF_TXQ_SZ 512
30 #define AVF_ITR_INT 32
31 
32 #define PCI_VENDOR_ID_INTEL 0x8086
33 #define PCI_DEVICE_ID_INTEL_AVF 0x1889
34 #define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
35 #define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
36 
37 avf_main_t avf_main;
38 
39 static pci_device_id_t avf_pci_device_ids[] = {
40  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_AVF},
41  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X710_VF},
42  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X722_VF},
43  {0},
44 };
45 
46 const static char *virtchnl_event_names[] = {
47 #define _(v, n) [v] = #n,
48   foreach_virtchnl_event
49 #undef _
50 };
51 
52 const static char *virtchnl_link_speed_str[] = {
53 #define _(v, n, s) [v] = s,
54   foreach_virtchnl_link_speed
55 #undef _
56 };
57 
58 static inline void
59 avf_irq_0_disable (avf_device_t * ad)
60 {
61  u32 dyn_ctl0 = 0, icr0_ena = 0;
62 
63  dyn_ctl0 |= (3 << 3); /* 11b = No ITR update */
64 
65  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
66  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
67  avf_reg_flush (ad);
68 }
69 
70 static inline void
71 avf_irq_0_enable (avf_device_t * ad)
72 {
73  u32 dyn_ctl0 = 0, icr0_ena = 0;
74 
75  icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */
76 
77  dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
78  dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
79  //dyn_ctl0 |= (3 << 3); /* [4:3] ITR Index, 11b = No ITR update */
80  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
81 
82  avf_irq_0_disable (ad);
83  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
84  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
85  avf_reg_flush (ad);
86 }
87 
88 static inline void
89 avf_irq_n_disable (avf_device_t * ad, u8 line)
90 {
91  u32 dyn_ctln = 0;
92 
93  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
94  avf_reg_flush (ad);
95 }
96 
97 static inline void
98 avf_irq_n_enable (avf_device_t * ad, u8 line)
99 {
100  u32 dyn_ctln = 0;
101 
102  dyn_ctln |= (1 << 0); /* [0] Interrupt Enable */
103  dyn_ctln |= (1 << 1); /* [1] Clear PBA */
104  dyn_ctln |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
105 
106  avf_irq_n_disable (ad, line);
107  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
108  avf_reg_flush (ad);
109 }
110 
111 
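/* Copy a descriptor (and optional data buffer) into the next admin transmit
   queue slot, bump the tail register and suspend the calling process until
   the PF marks the descriptor done or the wait times out. */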
112 clib_error_t *
113 avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
114 		 void *data, int len)
115 {
116  clib_error_t *err = 0;
117  avf_aq_desc_t *d, dc;
118  f64 t0, suspend_time = AVF_AQ_ENQ_SUSPEND_TIME;
119 
120  d = &ad->atq[ad->atq_next_slot];
121  clib_memcpy_fast (d, dt, sizeof (avf_aq_desc_t));
122  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
123  if (len)
124  d->datalen = len;
125  if (len)
126  {
127  u64 pa;
128  pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
129  d->addr_hi = (u32) (pa >> 32);
130  d->addr_lo = (u32) pa;
131  clib_memcpy_fast (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ,
132 		    data, len);
133  d->flags |= AVF_AQ_F_BUF;
134  }
135 
136  if (ad->flags & AVF_DEVICE_F_ELOG)
137  clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));
138 
139  CLIB_MEMORY_BARRIER ();
140  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
141  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
142  avf_reg_flush (ad);
143 
144  t0 = vlib_time_now (vm);
145 retry:
146  vlib_process_suspend (vm, suspend_time);
147 
148  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
149  {
150  f64 t = vlib_time_now (vm) - t0;
151  if (t > AVF_AQ_ENQ_MAX_WAIT_TIME)
152  {
153  avf_log_err (ad, "aq_desc_enq failed (timeout %.3fs)", t);
154  err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
155  d->opcode);
156  goto done;
157  }
158  suspend_time *= 2;
159  goto retry;
160  }
161 
162  clib_memcpy_fast (dt, d, sizeof (avf_aq_desc_t));
163  if (d->flags & AVF_AQ_F_ERR)
164  return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
165  "%d]", d->opcode, d->retval);
166 
167 done:
168  if (ad->flags & AVF_DEVICE_F_ELOG)
169  {
170  /* *INDENT-OFF* */
171  ELOG_TYPE_DECLARE (el) =
172  {
173  .format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
174  "datalen %d retval %d",
175  .format_args = "i4i2i2i2i2i2",
176  };
177  struct
178  {
179  u32 dev_instance;
180  u16 s_flags;
181  u16 r_flags;
182  u16 opcode;
183  u16 datalen;
184  u16 retval;
185  } *ed;
186  ed = ELOG_DATA (&vm->elog_main, el);
187  ed->dev_instance = ad->dev_instance;
188  ed->s_flags = dc.flags;
189  ed->r_flags = d->flags;
190  ed->opcode = dc.opcode;
191  ed->datalen = dc.datalen;
192  ed->retval = d->retval;
193  /* *INDENT-ON* */
194  }
195 
196  return err;
197 }
198 
199 clib_error_t *
200 avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
201 			  u32 val)
202 {
203  clib_error_t *err;
204  avf_aq_desc_t d = {.opcode = 0x207,.param1 = reg,.param3 = val };
205  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
206 
207  if (ad->flags & AVF_DEVICE_F_ELOG)
208  {
209  /* *INDENT-OFF* */
210  ELOG_TYPE_DECLARE (el) =
211  {
212  .format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
213  .format_args = "i4i4i4",
214  };
215  struct
216  {
217  u32 dev_instance;
218  u32 reg;
219  u32 val;
220  } *ed;
221  ed = ELOG_DATA (&vm->elog_main, el);
222  ed->dev_instance = ad->dev_instance;
223  ed->reg = reg;
224  ed->val = val;
225  /* *INDENT-ON* */
226  }
227  return err;
228 }
229 
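/* Allocate one RX descriptor ring in physmem, map it for DMA and pre-fill
   all but 8 slots with buffers from the queue's buffer pool. */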
230 clib_error_t *
231 avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
232 {
233  clib_error_t *err;
234  avf_rxq_t *rxq;
235  u32 n_alloc, i;
236 
237  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
238  rxq = vec_elt_at_index (ad->rxqs, qid);
239  rxq->size = rxq_size;
240  rxq->next = 0;
241  rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
242 						   sizeof (avf_rx_desc_t),
243 						   2 * CLIB_CACHE_LINE_BYTES,
244 						   ad->numa_node);
245 
246  rxq->buffer_pool_index =
247     vlib_buffer_pool_get_default_for_numa (vm, ad->numa_node);
248 
249  if (rxq->descs == 0)
250  return vlib_physmem_last_error (vm);
251 
252  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) rxq->descs)))
253  return err;
254 
255  clib_memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
256  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
257  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
258 
259  n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
260  rxq->buffer_pool_index);
261 
262  if (n_alloc == 0)
263  return clib_error_return (0, "buffer allocation error");
264 
265  rxq->n_enqueued = n_alloc;
266  avf_rx_desc_t *d = rxq->descs;
267  for (i = 0; i < n_alloc; i++)
268  {
269  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
270  if (ad->flags & AVF_DEVICE_F_VA_DMA)
271  d->qword[0] = vlib_buffer_get_va (b);
272  else
273  d->qword[0] = vlib_buffer_get_pa (vm, b);
274  d++;
275  }
276 
277  ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
278  return 0;
279 }
280 
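/* Allocate one TX descriptor ring; when there are more threads than queue
   pairs, queues are shared and protected by a per-queue spinlock. */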
281 clib_error_t *
282 avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
283 {
284  clib_error_t *err;
285  avf_txq_t *txq;
286 
287  if (qid >= ad->num_queue_pairs)
288  {
289  qid = qid % ad->num_queue_pairs;
290  txq = vec_elt_at_index (ad->txqs, qid);
291  if (txq->lock == 0)
292  clib_spinlock_init (&txq->lock);
293  ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
294  return 0;
295  }
296 
297  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
298  txq = vec_elt_at_index (ad->txqs, qid);
299  txq->size = txq_size;
300  txq->next = 0;
301  txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
302 						   sizeof (avf_tx_desc_t),
303 						   2 * CLIB_CACHE_LINE_BYTES,
304 						   ad->numa_node);
305  if (txq->descs == 0)
306  return vlib_physmem_last_error (vm);
307 
308  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) txq->descs)))
309  return err;
310 
311  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
312  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
313 
314  /* initialize ring of pending RS slots */
315  clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
316 
317  ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
318  return 0;
319 }
320 
321 typedef struct
322 {
323  u16 vsi_id;
324  u16 flags;
325 } virtchnl_promisc_info_t;
326 
327 void
328 avf_arq_slot_init (avf_device_t * ad, u16 slot)
329 {
330  avf_aq_desc_t *d;
331  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
332  d = &ad->arq[slot];
333  clib_memset (d, 0, sizeof (avf_aq_desc_t));
334  d->flags = AVF_AQ_F_BUF;
335  d->datalen = AVF_MBOX_BUF_SZ;
336  d->addr_hi = (u32) (pa >> 32);
337  d->addr_lo = (u32) pa;
338 }
339 
340 static inline uword
341 avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
342 {
343  return (ad->flags & AVF_DEVICE_F_VA_DMA) ?
344     pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
345 }
346 
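/* Reset the VF mailbox: zero the ATQ/ARQ rings, program their base/length
   registers and hand all ARQ slots back to the PF. */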
347 static void
348 avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
349 {
350  u64 pa;
351  int i;
352 
353  /* VF MailBox Transmit */
354  clib_memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
355  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);
356 
357  pa = avf_dma_addr (vm, ad, ad->atq);
358  avf_reg_write (ad, AVF_ATQT, 0); /* Tail */
359  avf_reg_write (ad, AVF_ATQH, 0); /* Head */
360  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
361  avf_reg_write (ad, AVF_ATQBAL, (u32) pa); /* Base Address Low */
362  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
363 
364  /* VF MailBox Receive */
365  clib_memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
366  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);
367 
368  for (i = 0; i < AVF_MBOX_LEN; i++)
369  avf_arq_slot_init (ad, i);
370 
371  pa = avf_dma_addr (vm, ad, ad->arq);
372 
373  avf_reg_write (ad, AVF_ARQH, 0); /* Head */
374  avf_reg_write (ad, AVF_ARQT, 0); /* Tail */
375  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
376  avf_reg_write (ad, AVF_ARQBAL, (u32) pa); /* Base Address Low */
377  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
378  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1); /* Tail */
379 
380  ad->atq_next_slot = 0;
381  ad->arq_next_slot = 0;
382 }
383 
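/* Send a virtchnl request to the PF through the admin queue and poll the
   receive queue for the matching reply; PF events that arrive in the
   meantime are queued on ad->events for the process node. */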
384 clib_error_t *
385 avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
386 		void *in, int in_len, void *out, int out_len)
387 {
388  clib_error_t *err;
389  avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };
390  u32 head;
391  f64 t0, suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;
392 
393  /* suppress interrupt in the next adminq receive slot
394  as we are going to wait for response
395  we only need interrupts when event is received */
396  d = &ad->arq[ad->arq_next_slot];
397  d->flags |= AVF_AQ_F_SI;
398 
399  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
400  return err;
401 
402  t0 = vlib_time_now (vm);
403 retry:
404  head = avf_get_u32 (ad->bar0, AVF_ARQH);
405 
406  if (ad->arq_next_slot == head)
407  {
408  f64 t = vlib_time_now (vm) - t0;
409  if (t > AVF_SEND_TO_PF_MAX_WAIT_TIME)
410  {
411  avf_log_err (ad, "send_to_pf failed (timeout %.3fs)", t);
412  return clib_error_return (0, "timeout");
413  }
414  vlib_process_suspend (vm, suspend_time);
415  suspend_time *= 2;
416  goto retry;
417  }
418 
419  d = &ad->arq[ad->arq_next_slot];
420 
421  if (d->v_opcode == VIRTCHNL_OP_EVENT)
422  {
423  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
424  virtchnl_pf_event_t *e;
425 
426  if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
427  ((d->flags & AVF_AQ_F_BUF) == 0))
428  return clib_error_return (0, "event message error");
429 
430  vec_add2 (ad->events, e, 1);
431  clib_memcpy_fast (e, buf, sizeof (virtchnl_pf_event_t));
432  avf_arq_slot_init (ad, ad->arq_next_slot);
433  ad->arq_next_slot++;
434  /* reset timer */
435  t0 = vlib_time_now (vm);
436  suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;
437  goto retry;
438  }
439 
440  if (d->v_opcode != op)
441  {
442  err =
444  "unexpected message receiver [v_opcode = %u, "
445  "expected %u, v_retval %d]", d->v_opcode, op,
446  d->v_retval);
447  goto done;
448  }
449 
450  if (d->v_retval)
451  {
452  err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
453  d->v_opcode, d->v_retval);
454  goto done;
455  }
456 
457  if (d->flags & AVF_AQ_F_BUF)
458  {
459  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
460  clib_memcpy_fast (out, buf, out_len);
461  }
462 
463  avf_arq_slot_init (ad, ad->arq_next_slot);
464  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
465  avf_reg_flush (ad);
466  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;
467 
468 done:
469 
470  if (ad->flags & AVF_DEVICE_F_ELOG)
471  {
472  /* *INDENT-OFF* */
473  ELOG_TYPE_DECLARE (el) =
474  {
475  .format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
476  .format_args = "i4t4i4i4",
477  .n_enum_strings = VIRTCHNL_N_OPS,
478  .enum_strings = {
479 #define _(v, n) [v] = #n,
480 	  foreach_virtchnl_op
481 #undef _
482  },
483  };
484  struct
485  {
486  u32 dev_instance;
487  u32 v_opcode;
488  u32 v_opcode_val;
489  u32 v_retval;
490  } *ed;
491  ed = ELOG_DATA (&vm->elog_main, el);
492  ed->dev_instance = ad->dev_instance;
493  ed->v_opcode = op;
494  ed->v_opcode_val = op;
495  ed->v_retval = d->v_retval;
496  /* *INDENT-ON* */
497  }
498  return err;
499 }
500 
501 clib_error_t *
502 avf_op_version (vlib_main_t * vm, avf_device_t * ad,
503 		virtchnl_version_info_t * ver)
504 {
505  clib_error_t *err = 0;
506  virtchnl_version_info_t myver = {
507  .major = VIRTCHNL_VERSION_MAJOR,
508  .minor = VIRTCHNL_VERSION_MINOR,
509  };
510 
511  avf_log_debug (ad, "version: major %u minor %u", myver.major, myver.minor);
512 
513  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
514  sizeof (virtchnl_version_info_t), ver,
515  sizeof (virtchnl_version_info_t));
516 
517  if (err)
518  return err;
519 
520  return err;
521 }
522 
523 clib_error_t *
524 avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
525 			 virtchnl_vf_resource_t * res)
526 {
527  clib_error_t *err = 0;
528  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
529  VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
530  VIRTCHNL_VF_OFFLOAD_RX_POLLING);
531 
532  avf_log_debug (ad, "get_vf_resources: bitmap 0x%x", bitmap);
533  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
534  sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
535 
536  if (err == 0)
537  {
538  int i;
539  avf_log_debug (ad, "get_vf_resources: num_vsis %u num_queue_pairs %u "
540  "max_vectors %u max_mtu %u vf_offload_flags 0x%04x "
541  "rss_key_size %u rss_lut_size %u",
542  res->num_vsis, res->num_queue_pairs, res->max_vectors,
543  res->max_mtu, res->vf_offload_flags, res->rss_key_size,
544  res->rss_lut_size);
545  for (i = 0; i < res->num_vsis; i++)
546  avf_log_debug (ad, "get_vf_resources_vsi[%u]: vsi_id %u "
547  "num_queue_pairs %u vsi_type %u qset_handle %u "
548  "default_mac_addr %U", i,
549  res->vsi_res[i].vsi_id,
550  res->vsi_res[i].num_queue_pairs,
551  res->vsi_res[i].vsi_type,
552  res->vsi_res[i].qset_handle,
553  format_ethernet_address,
554  res->vsi_res[i].default_mac_addr);
555  }
556 
557  return err;
558 }
559 
560 clib_error_t *
561 avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
562 {
563  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
564  int i;
565  u8 msg[msg_len];
566  virtchnl_rss_lut_t *rl;
567 
568  clib_memset (msg, 0, msg_len);
569  rl = (virtchnl_rss_lut_t *) msg;
570  rl->vsi_id = ad->vsi_id;
571  rl->lut_entries = ad->rss_lut_size;
572  for (i = 0; i < ad->rss_lut_size; i++)
573  rl->lut[i] = i % ad->n_rx_queues;
574 
575  avf_log_debug (ad, "config_rss_lut: vsi_id %u rss_lut_size %u lut 0x%U",
576  rl->vsi_id, rl->lut_entries, format_hex_bytes_no_wrap,
577  rl->lut, rl->lut_entries);
578 
579  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
580  0);
581 }
582 
583 clib_error_t *
584 avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
585 {
586  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
587  int i;
588  u8 msg[msg_len];
589  virtchnl_rss_key_t *rk;
590 
591  clib_memset (msg, 0, msg_len);
592  rk = (virtchnl_rss_key_t *) msg;
593  rk->vsi_id = ad->vsi_id;
594  rk->key_len = ad->rss_key_size;
595  u32 seed = random_default_seed ();
596  for (i = 0; i < ad->rss_key_size; i++)
597  rk->key[i] = (u8) random_u32 (&seed);
598 
599  avf_log_debug (ad, "config_rss_key: vsi_id %u rss_key_size %u key 0x%U",
600  rk->vsi_id, rk->key_len, format_hex_bytes_no_wrap, rk->key,
601  rk->key_len);
602 
603  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
604  0);
605 }
606 
607 clib_error_t *
608 avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
609 {
610  avf_log_debug (ad, "disable_vlan_stripping");
611 
612  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
613  0);
614 }
615 
616 clib_error_t *
617 avf_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad)
618 {
619  virtchnl_promisc_info_t pi = { 0 };
620 
621  pi.vsi_id = ad->vsi_id;
622  pi.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
623 
624  avf_log_debug (ad, "config_promisc_mode: unicast %s multicast %s",
625  pi.flags & FLAG_VF_UNICAST_PROMISC ? "on" : "off",
626  pi.flags & FLAG_VF_MULTICAST_PROMISC ? "on" : "off");
627 
628  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
629  sizeof (virtchnl_promisc_info_t), 0, 0);
630 }
631 
632 
633 clib_error_t *
634 avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
635 {
636  int i;
637  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
638  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
639     sizeof (virtchnl_queue_pair_info_t);
640  u8 msg[msg_len];
641  virtchnl_vsi_queue_config_info_t *ci;
642 
643  clib_memset (msg, 0, msg_len);
644  ci = (virtchnl_vsi_queue_config_info_t *) msg;
645  ci->vsi_id = ad->vsi_id;
646  ci->num_queue_pairs = n_qp;
647 
648  avf_log_debug (ad, "config_vsi_queues: vsi_id %u num_queue_pairs %u",
649  ad->vsi_id, ci->num_queue_pairs);
650 
651  for (i = 0; i < n_qp; i++)
652  {
653  virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
654  virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;
655 
656  rxq->vsi_id = ad->vsi_id;
657  rxq->queue_id = i;
658  rxq->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
659  if (i < vec_len (ad->rxqs))
660  {
661  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
662  rxq->ring_len = q->size;
663  rxq->databuffer_size = vlib_buffer_get_default_data_size (vm);
664  rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
665  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
666  }
667  avf_log_debug (ad, "config_vsi_queues_rx[%u]: max_pkt_size %u "
668  "ring_len %u databuffer_size %u dma_ring_addr 0x%llx",
669  i, rxq->max_pkt_size, rxq->ring_len,
670  rxq->databuffer_size, rxq->dma_ring_addr);
671 
672  txq->vsi_id = ad->vsi_id;
673  txq->queue_id = i;
674  if (i < vec_len (ad->txqs))
675  {
676  avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
677  txq->ring_len = q->size;
678  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
679  }
680  avf_log_debug (ad, "config_vsi_queues_tx[%u]: ring_len %u "
681  "dma_ring_addr 0x%llx", i, txq->ring_len,
682  txq->dma_ring_addr);
683  }
684 
685  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
686  0, 0);
687 }
688 
689 clib_error_t *
690 avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
691 {
692  int count = 1;
693  int msg_len = sizeof (virtchnl_irq_map_info_t) +
694  count * sizeof (virtchnl_vector_map_t);
695  u8 msg[msg_len];
696  virtchnl_irq_map_info_t *imi;
697 
698  clib_memset (msg, 0, msg_len);
699  imi = (virtchnl_irq_map_info_t *) msg;
700  imi->num_vectors = count;
701 
702  imi->vecmap[0].vector_id = 1;
703  imi->vecmap[0].vsi_id = ad->vsi_id;
704  imi->vecmap[0].rxq_map = (1 << ad->n_rx_queues) - 1;
705  imi->vecmap[0].txq_map = (1 << ad->n_tx_queues) - 1;
706 
707  avf_log_debug (ad, "config_irq_map: vsi_id %u vector_id %u rxq_map %u",
708  ad->vsi_id, imi->vecmap[0].vector_id,
709  imi->vecmap[0].rxq_map);
710 
711  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
712  0);
713 }
714 
715 clib_error_t *
716 avf_op_add_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count, u8 * macs)
717 {
718  int msg_len =
719  sizeof (virtchnl_ether_addr_list_t) +
720  count * sizeof (virtchnl_ether_addr_t);
721  u8 msg[msg_len];
722  virtchnl_ether_addr_list_t *al;
723  int i;
724 
725  clib_memset (msg, 0, msg_len);
726  al = (virtchnl_ether_addr_list_t *) msg;
727  al->vsi_id = ad->vsi_id;
728  al->num_elements = count;
729 
730  avf_log_debug (ad, "add_eth_addr: vsi_id %u num_elements %u",
731  ad->vsi_id, al->num_elements);
732 
733  for (i = 0; i < count; i++)
734  {
735  clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
736  avf_log_debug (ad, "add_eth_addr[%u]: %U", i,
737 		 format_ethernet_address, &al->list[i].addr);
738  }
739  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,
740  0);
741 }
742 
743 clib_error_t *
744 avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
745 {
746  virtchnl_queue_select_t qs = { 0 };
747  int i = 0;
748  qs.vsi_id = ad->vsi_id;
749  qs.rx_queues = rx;
750  qs.tx_queues = tx;
751 
752  avf_log_debug (ad, "enable_queues: vsi_id %u rx_queues %u tx_queues %u",
753  ad->vsi_id, qs.rx_queues, qs.tx_queues);
754 
755  while (rx)
756  {
757  if (rx & (1 << i))
758  {
759  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
760  avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
761  rx &= ~(1 << i);
762  }
763  i++;
764  }
765  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
766  sizeof (virtchnl_queue_select_t), 0, 0);
767 }
768 
769 clib_error_t *
770 avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
771 		  virtchnl_eth_stats_t * es)
772 {
773  virtchnl_queue_select_t qs = { 0 };
774  qs.vsi_id = ad->vsi_id;
775 
776  avf_log_debug (ad, "get_stats: vsi_id %u", ad->vsi_id);
777 
778  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
779  &qs, sizeof (virtchnl_queue_select_t),
780  es, sizeof (virtchnl_eth_stats_t));
781 }
782 
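/* Request a VF reset through the admin queue and poll AVFGEN_RSTAT until
   the PF reports the reset as completed. */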
783 clib_error_t *
784 avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
785 {
786  avf_aq_desc_t d = { 0 };
787  clib_error_t *error;
788  u32 rstat;
789  f64 t0, t = 0, suspend_time = AVF_RESET_SUSPEND_TIME;
790 
791  avf_log_debug (ad, "reset");
792 
793  d.opcode = 0x801;
794  d.v_opcode = VIRTCHNL_OP_RESET_VF;
795  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
796  return error;
797 
798  t0 = vlib_time_now (vm);
799 retry:
800  vlib_process_suspend (vm, suspend_time);
801 
802  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
803 
804  if (rstat == 2 || rstat == 3)
805  {
806  avf_log_debug (ad, "reset completed in %.3fs", t);
807  return 0;
808  }
809 
810  t = vlib_time_now (vm) - t0;
811  if (t > AVF_RESET_MAX_WAIT_TIME)
812  {
813  avf_log_err (ad, "reset failed (timeout %.3fs)", t);
814  return clib_error_return (0, "reset failed (timeout)");
815  }
816 
817  suspend_time *= 2;
818  goto retry;
819 }
820 
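/* Ask the PF for a different number of queue pairs. On success the PF
   restarts the VF, so avf_send_to_pf is expected to fail and the VF reset
   is then awaited via AVFGEN_RSTAT. */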
821 clib_error_t *
822 avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
823 {
824  virtchnl_vf_res_request_t res_req = { 0 };
825  clib_error_t *error;
826  u32 rstat;
827  f64 t0, t, suspend_time = AVF_RESET_SUSPEND_TIME;
828 
829  res_req.num_queue_pairs = num_queue_pairs;
830 
831  avf_log_debug (ad, "request_queues: num_queue_pairs %u", num_queue_pairs);
832 
833  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
834  sizeof (virtchnl_vf_res_request_t), &res_req,
835  sizeof (virtchnl_vf_res_request_t));
836 
837  /*
838  * if PF responds, the request failed
839  * else PF initializes restart and avf_send_to_pf returns an error
840  */
841  if (!error)
842  {
843  return clib_error_return (0, "requested more than %u queue pairs",
844  res_req.num_queue_pairs);
845  }
846 
847  t0 = vlib_time_now (vm);
848 retry:
849  vlib_process_suspend (vm, suspend_time);
850  t = vlib_time_now (vm) - t0;
851 
852  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
853 
854  if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
855  goto done;
856 
857  if (t > AVF_RESET_MAX_WAIT_TIME)
858  {
859  avf_log_err (ad, "request queues failed (timeout %.3f seconds)", t);
860  return clib_error_return (0, "request queues failed (timeout)");
861  }
862 
863  suspend_time *= 2;
864  goto retry;
865 
866 done:
867  return NULL;
868 }
869 
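/* Full device bring-up: negotiate the virtchnl version, fetch VF resources,
   configure RSS, queues and interrupts, program the MAC address and finally
   enable the queues. */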
870 clib_error_t *
871 avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
872 		 avf_create_if_args_t * args)
873 {
874  virtchnl_version_info_t ver = { 0 };
875  virtchnl_vf_resource_t res = { 0 };
876  clib_error_t *error;
877  vlib_thread_main_t *tm = vlib_get_thread_main ();
878  int i;
879 
880  avf_adminq_init (vm, ad);
881 
882  if ((error = avf_request_queues (vm, ad, clib_max (tm->n_vlib_mains,
883  args->rxq_num))))
884  {
885  /* we failed to get more queues, but still we want to proceed */
886  clib_error_free (error);
887 
888  if ((error = avf_device_reset (vm, ad)))
889  return error;
890  }
891 
892  avf_adminq_init (vm, ad);
893 
894  /*
895  * OP_VERSION
896  */
897  if ((error = avf_op_version (vm, ad, &ver)))
898  return error;
899 
900  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
901  ver.minor != VIRTCHNL_VERSION_MINOR)
902  return clib_error_return (0, "incompatible protocol version "
903  "(remote %d.%d)", ver.major, ver.minor);
904 
905  /*
906  * OP_GET_VF_RESOURCES
907  */
908  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
909  return error;
910 
911  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
912  return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");
913 
914  ad->vsi_id = res.vsi_res[0].vsi_id;
915  ad->feature_bitmap = res.vf_offload_flags;
916  ad->num_queue_pairs = res.num_queue_pairs;
917  ad->max_vectors = res.max_vectors;
918  ad->max_mtu = res.max_mtu;
919  ad->rss_key_size = res.rss_key_size;
920  ad->rss_lut_size = res.rss_lut_size;
921 
922  clib_memcpy_fast (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);
923 
924  /*
925  * Disable VLAN stripping
926  */
927  if ((error = avf_op_disable_vlan_stripping (vm, ad)))
928  return error;
929 
930  if ((error = avf_config_promisc_mode (vm, ad)))
931  return error;
932 
933  /*
934  * Init Queues
935  */
936  if (args->rxq_num == 0)
937  {
938  args->rxq_num = 1;
939  }
940  else if (args->rxq_num > ad->num_queue_pairs)
941  {
942  args->rxq_num = ad->num_queue_pairs;
943  avf_log_warn (ad, "Requested more rx queues than queue pairs available. "
944  "Using %u rx queues.", args->rxq_num);
945  }
946 
947  for (i = 0; i < args->rxq_num; i++)
948  if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
949  return error;
950 
951  for (i = 0; i < tm->n_vlib_mains; i++)
952  if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
953  return error;
954 
955  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
956  (error = avf_op_config_rss_lut (vm, ad)))
957  return error;
958 
959  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
960  (error = avf_op_config_rss_key (vm, ad)))
961  return error;
962 
963  if ((error = avf_op_config_vsi_queues (vm, ad)))
964  return error;
965 
966  if ((error = avf_op_config_irq_map (vm, ad)))
967  return error;
968 
969  avf_irq_0_enable (ad);
970  for (i = 0; i < ad->n_rx_queues; i++)
971  avf_irq_n_enable (ad, i);
972 
973  if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))
974  return error;
975 
976  if ((error = avf_op_enable_queues (vm, ad, pow2_mask (ad->n_rx_queues),
977  pow2_mask (ad->n_tx_queues))))
978  return error;
979 
980  ad->flags |= AVF_DEVICE_F_INITIALIZED;
981  return error;
982 }
983 
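/* Periodic/interrupt-driven housekeeping for one device: sanity-check the
   admin queues, fetch stats and handle queued PF events such as link
   changes. */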
984 void
985 avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
986 {
987  avf_main_t *am = &avf_main;
988  vnet_main_t *vnm = vnet_get_main ();
989  virtchnl_pf_event_t *e;
990  u32 r;
991 
992  if (ad->flags & AVF_DEVICE_F_ERROR)
993  return;
994 
995  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
996  return;
997 
998  ASSERT (ad->error == 0);
999 
1000  /* do not process device in reset state */
1001  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
1002  if (r != VIRTCHNL_VFR_VFACTIVE)
1003  return;
1004 
1005  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
1006  if ((r & 0xf0000000) != (1ULL << 31))
1007  {
1008  ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
1009  avf_log_err (ad, "error: %U", format_clib_error, ad->error);
1010  goto error;
1011  }
1012 
1013  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
1014  if ((r & 0xf0000000) != (1ULL << 31))
1015  {
1016  ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
1017  avf_log_err (ad, "error: %U", format_clib_error, ad->error);
1018  goto error;
1019  }
1020 
1021  if (is_irq == 0)
1022  avf_op_get_stats (vm, ad, &ad->eth_stats);
1023 
1024  /* *INDENT-OFF* */
1025  vec_foreach (e, ad->events)
1026  {
1027  avf_log_debug (ad, "event: %s (%u) sev %d",
1028 		 virtchnl_event_names[e->event], e->event, e->severity);
1029  if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
1030  {
1031  int link_up = e->event_data.link_event.link_status;
1032  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
1033  u32 flags = 0;
1034  u32 kbps = 0;
1035 
1036  avf_log_debug (ad, "event_link_change: status %d speed '%s' (%d)",
1037  link_up,
1038 		 speed < ARRAY_LEN (virtchnl_link_speed_str) ?
1039 		 virtchnl_link_speed_str[speed] : "unknown", speed);
1040 
1041  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
1042  {
1043  ad->flags |= AVF_DEVICE_F_LINK_UP;
1044 	      flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
1045 			VNET_HW_INTERFACE_FLAG_LINK_UP);
1046 	      if (speed == VIRTCHNL_LINK_SPEED_40GB)
1047  kbps = 40000000;
1048  else if (speed == VIRTCHNL_LINK_SPEED_25GB)
1049  kbps = 25000000;
1050  else if (speed == VIRTCHNL_LINK_SPEED_10GB)
1051  kbps = 10000000;
1052  else if (speed == VIRTCHNL_LINK_SPEED_5GB)
1053  kbps = 5000000;
1054  else if (speed == VIRTCHNL_LINK_SPEED_2_5GB)
1055  kbps = 2500000;
1056  else if (speed == VIRTCHNL_LINK_SPEED_1GB)
1057  kbps = 1000000;
1058  else if (speed == VIRTCHNL_LINK_SPEED_100MB)
1059  kbps = 100000;
1060  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
1061 	      vnet_hw_interface_set_link_speed (vnm, ad->hw_if_index, kbps);
1062 	      ad->link_speed = speed;
1063  }
1064  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
1065  {
1066  ad->flags &= ~AVF_DEVICE_F_LINK_UP;
1067  ad->link_speed = 0;
1068  }
1069 
1070  if (ad->flags & AVF_DEVICE_F_ELOG)
1071  {
1072  ELOG_TYPE_DECLARE (el) =
1073  {
1074  .format = "avf[%d] link change: link_status %d "
1075  "link_speed %d",
1076  .format_args = "i4i1i1",
1077  };
1078  struct
1079  {
1080  u32 dev_instance;
1081  u8 link_status;
1082  u8 link_speed;
1083  } *ed;
1084  ed = ELOG_DATA (&vm->elog_main, el);
1085  ed->dev_instance = ad->dev_instance;
1086  ed->link_status = link_up;
1087  ed->link_speed = speed;
1088  }
1089  }
1090  else
1091  {
1092  if (ad->flags & AVF_DEVICE_F_ELOG)
1093  {
1094  ELOG_TYPE_DECLARE (el) =
1095  {
1096  .format = "avf[%d] unknown event: event %d severity %d",
1097  .format_args = "i4i4i1i1",
1098  };
1099  struct
1100  {
1101  u32 dev_instance;
1102  u32 event;
1103  u32 severity;
1104  } *ed;
1105  ed = ELOG_DATA (&vm->elog_main, el);
1106  ed->dev_instance = ad->dev_instance;
1107  ed->event = e->event;
1108  ed->severity = e->severity;
1109  }
1110  }
1111  }
1112  /* *INDENT-ON* */
1113  vec_reset_length (ad->events);
1114 
1115  return;
1116 
1117 error:
1118  ad->flags |= AVF_DEVICE_F_ERROR;
1119  ASSERT (ad->error != 0);
1120  vlib_log_err (am->log_class, "%U", format_clib_error, ad->error);
1121 }
1122 
1123 static u32
1124 avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
1125 {
1126  avf_main_t *am = &avf_main;
1127  vlib_log_warn (am->log_class, "TODO");
1128  return 0;
1129 }
1130 
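/* Process node: wakes up every 5 seconds, or earlier on admin-queue
   interrupt events, and runs avf_process_one_device for every attached
   device. */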
1131 static uword
1132 avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
1133 {
1134  avf_main_t *am = &avf_main;
1135  avf_device_t *ad;
1136  uword *event_data = 0, event_type;
1137  int enabled = 0, irq;
1138  f64 last_run_duration = 0;
1139  f64 last_periodic_time = 0;
1140 
1141  while (1)
1142  {
1143  if (enabled)
1144  vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
1145  else
1146 	vlib_process_wait_for_event (vm);
1147 
1148  event_type = vlib_process_get_events (vm, &event_data);
1149  vec_reset_length (event_data);
1150  irq = 0;
1151 
1152  switch (event_type)
1153  {
1154  case ~0:
1155  last_periodic_time = vlib_time_now (vm);
1156  break;
1157 	case AVF_PROCESS_EVENT_START:
1158 	  enabled = 1;
1159 	  break;
1160 	case AVF_PROCESS_EVENT_STOP:
1161 	  enabled = 0;
1162 	  continue;
1163 	case AVF_PROCESS_EVENT_AQ_INT:
1164 	  irq = 1;
1165 	  break;
1166  default:
1167  ASSERT (0);
1168  }
1169 
1170  /* *INDENT-OFF* */
1171  pool_foreach (ad, am->devices,
1172  {
1173  avf_process_one_device (vm, ad, irq);
1174  });
1175  /* *INDENT-ON* */
1176  last_run_duration = vlib_time_now (vm) - last_periodic_time;
1177  }
1178  return 0;
1179 }
1180 
1181 /* *INDENT-OFF* */
1182 VLIB_REGISTER_NODE (avf_process_node, static)  = {
1183  .function = avf_process,
1184  .type = VLIB_NODE_TYPE_PROCESS,
1185  .name = "avf-process",
1186 };
1187 /* *INDENT-ON* */
1188 
1189 static void
1190 avf_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1191 {
1192  avf_main_t *am = &avf_main;
1193  uword pd = vlib_pci_get_private_data (vm, h);
1194  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1195  u32 icr0;
1196 
1197  icr0 = avf_reg_read (ad, AVFINT_ICR0);
1198 
1199  if (ad->flags & AVF_DEVICE_F_ELOG)
1200  {
1201  /* *INDENT-OFF* */
1202  ELOG_TYPE_DECLARE (el) =
1203  {
1204  .format = "avf[%d] irq 0: icr0 0x%x",
1205  .format_args = "i4i4",
1206  };
1207  /* *INDENT-ON* */
1208  struct
1209  {
1210  u32 dev_instance;
1211  u32 icr0;
1212  } *ed;
1213 
1214  ed = ELOG_DATA (&vm->elog_main, el);
1215  ed->dev_instance = ad->dev_instance;
1216  ed->icr0 = icr0;
1217  }
1218 
1219  avf_irq_0_enable (ad);
1220 
1221  /* bit 30 - Send/Receive Admin queue interrupt indication */
1222  if (icr0 & (1 << 30))
1223     vlib_process_signal_event (vm, avf_process_node.index,
1224 			       AVF_PROCESS_EVENT_AQ_INT, 0);
1225 }
1226 
1227 static void
1228 avf_irq_n_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1229 {
1230  vnet_main_t *vnm = vnet_get_main ();
1231  avf_main_t *am = &avf_main;
1232  uword pd = vlib_pci_get_private_data (vm, h);
1233  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1234  u16 qid;
1235  int i;
1236 
1237  if (ad->flags & AVF_DEVICE_F_ELOG)
1238  {
1239  /* *INDENT-OFF* */
1240  ELOG_TYPE_DECLARE (el) =
1241  {
1242  .format = "avf[%d] irq %d: received",
1243  .format_args = "i4i2",
1244  };
1245  /* *INDENT-ON* */
1246  struct
1247  {
1248  u32 dev_instance;
1249  u16 line;
1250  } *ed;
1251 
1252  ed = ELOG_DATA (&vm->elog_main, el);
1253  ed->dev_instance = ad->dev_instance;
1254  ed->line = line;
1255  }
1256 
1257  qid = line - 1;
1258  if (vec_len (ad->rxqs) > qid && ad->rxqs[qid].int_mode != 0)
1259     vnet_device_input_set_interrupt_pending (vnm, ad->hw_if_index, qid);
1260  for (i = 0; i < vec_len (ad->rxqs); i++)
1261  avf_irq_n_enable (ad, i);
1262 }
1263 
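/* Tear down an interface: remove it from vnet, close the PCI device and
   free all admin queue and queue-pair resources. */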
1264 void
1265 avf_delete_if (vlib_main_t * vm, avf_device_t * ad)
1266 {
1267  vnet_main_t *vnm = vnet_get_main ();
1268  avf_main_t *am = &avf_main;
1269  int i;
1270 
1271  if (ad->hw_if_index)
1272  {
1273  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1274  vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
1275  ethernet_delete_interface (vnm, ad->hw_if_index);
1276  }
1277 
1278  vlib_pci_device_close (vm, ad->pci_dev_handle);
1279 
1280  vlib_physmem_free (vm, ad->atq);
1281  vlib_physmem_free (vm, ad->arq);
1282  vlib_physmem_free (vm, ad->atq_bufs);
1283  vlib_physmem_free (vm, ad->arq_bufs);
1284 
1285  /* *INDENT-OFF* */
1286  vec_foreach_index (i, ad->rxqs)
1287  {
1288  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
1289  vlib_physmem_free (vm, (void *) rxq->descs);
1290  if (rxq->n_enqueued)
1291  vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
1292  rxq->n_enqueued);
1293  vec_free (rxq->bufs);
1294  }
1295  /* *INDENT-ON* */
1296  vec_free (ad->rxqs);
1297 
1298  /* *INDENT-OFF* */
1299  vec_foreach_index (i, ad->txqs)
1300  {
1301  avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
1302  vlib_physmem_free (vm, (void *) txq->descs);
1303  if (txq->n_enqueued)
1304  {
1305  u16 first = (txq->next - txq->n_enqueued) & (txq->size -1);
1306  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
1307  txq->n_enqueued);
1308  }
1309  vec_free (txq->bufs);
1310  clib_ring_free (txq->rs_slots);
1311  }
1312  /* *INDENT-ON* */
1313  vec_free (ad->txqs);
1314  vec_free (ad->name);
1315 
1316  clib_error_free (ad->error);
1317  clib_memset (ad, 0, sizeof (*ad));
1318  pool_put (am->devices, ad);
1319 }
1320 
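/* Create an AVF interface: open the PCI device, set up BAR/MSI-X/DMA
   resources, initialize the device and register it as an ethernet
   interface. */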
1321 void
1322 avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
1323 {
1324  vnet_main_t *vnm = vnet_get_main ();
1325  avf_main_t *am = &avf_main;
1326  avf_device_t *ad;
1327  vlib_pci_dev_handle_t h;
1328  clib_error_t *error = 0;
1329  int i;
1330 
1331  /* check input args */
1332  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
1333  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;
1334 
1335  if ((args->rxq_size & (args->rxq_size - 1))
1336  || (args->txq_size & (args->txq_size - 1)))
1337  {
1338  args->rv = VNET_API_ERROR_INVALID_VALUE;
1339  args->error =
1340  clib_error_return (error, "queue size must be a power of two");
1341  return;
1342  }
1343 
1344  pool_get (am->devices, ad);
1345  ad->dev_instance = ad - am->devices;
1346  ad->per_interface_next_index = ~0;
1347  ad->name = vec_dup (args->name);
1348 
1349  if (args->enable_elog)
1350  ad->flags |= AVF_DEVICE_F_ELOG;
1351 
1352  if ((error = vlib_pci_device_open (vm, &args->addr, avf_pci_device_ids,
1353  &h)))
1354  {
1355  pool_put (am->devices, ad);
1356  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1357  args->error =
1358  clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
1359  &args->addr);
1360  return;
1361  }
1362  ad->pci_dev_handle = h;
1363  ad->pci_addr = args->addr;
1364  ad->numa_node = vlib_pci_get_numa_node (vm, h);
1365 
1366  vlib_pci_set_private_data (vm, h, ad->dev_instance);
1367 
1368  if ((error = vlib_pci_bus_master_enable (vm, h)))
1369  goto error;
1370 
1371  if ((error = vlib_pci_map_region (vm, h, 0, &ad->bar0)))
1372  goto error;
1373 
1374  if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
1375  &avf_irq_0_handler)))
1376  goto error;
1377 
1378  if ((error = vlib_pci_register_msix_handler (vm, h, 1, 1,
1379  &avf_irq_n_handler)))
1380  goto error;
1381 
1382  if ((error = vlib_pci_enable_msix_irq (vm, h, 0, 2)))
1383  goto error;
1384 
1385  ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1386 						AVF_MBOX_LEN,
1387 						CLIB_CACHE_LINE_BYTES,
1388 						ad->numa_node);
1389  if (ad->atq == 0)
1390  {
1391  error = vlib_physmem_last_error (vm);
1392  goto error;
1393  }
1394 
1395  if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
1396  goto error;
1397 
1398  ad->arq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1399 						AVF_MBOX_LEN,
1400 						CLIB_CACHE_LINE_BYTES,
1401 						ad->numa_node);
1402  if (ad->arq == 0)
1403  {
1404  error = vlib_physmem_last_error (vm);
1405  goto error;
1406  }
1407 
1408  if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
1409  goto error;
1410 
1411  ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1412 						     AVF_MBOX_LEN,
1413 						     CLIB_CACHE_LINE_BYTES,
1414 						     ad->numa_node);
1415  if (ad->atq_bufs == 0)
1416  {
1417  error = vlib_physmem_last_error (vm);
1418  goto error;
1419  }
1420 
1421  if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
1422  goto error;
1423 
1424  ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1425 						     AVF_MBOX_LEN,
1426 						     CLIB_CACHE_LINE_BYTES,
1427 						     ad->numa_node);
1428  if (ad->arq_bufs == 0)
1429  {
1430  error = vlib_physmem_last_error (vm);
1431  goto error;
1432  }
1433 
1434  if ((error = vlib_pci_map_dma (vm, h, ad->arq_bufs)))
1435  goto error;
1436 
1437  if ((error = vlib_pci_intr_enable (vm, h)))
1438  goto error;
1439 
1440  if (vlib_pci_supports_virtual_addr_dma (vm, h))
1441  ad->flags |= AVF_DEVICE_F_VA_DMA;
1442 
1443  if ((error = avf_device_init (vm, am, ad, args)))
1444  goto error;
1445 
1446  /* create interface */
1447  error = ethernet_register_interface (vnm, avf_device_class.index,
1448  ad->dev_instance, ad->hwaddr,
1449  &ad->hw_if_index, avf_flag_change);
1450 
1451  if (error)
1452  goto error;
1453 
1454  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
1455  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
1456 
1457  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
1458  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
1459  vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
1460 				    avf_input_node.index);
1461 
1462  for (i = 0; i < ad->n_rx_queues; i++)
1463  vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, i, ~0);
1464 
1465  if (pool_elts (am->devices) == 1)
1466  vlib_process_signal_event (vm, avf_process_node.index,
1467 			     AVF_PROCESS_EVENT_START, 0);
1468 
1469  return;
1470 
1471 error:
1472  avf_delete_if (vm, ad);
1473  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1474  args->error = clib_error_return (error, "pci-addr %U",
1475  format_vlib_pci_addr, &args->addr);
1476  avf_log_err (ad, "error: %U", format_clib_error, args->error);
1477 }
1478 
1479 static clib_error_t *
1480 avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
1481 {
1482  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1483  avf_main_t *am = &avf_main;
1484  avf_device_t *ad = vec_elt_at_index (am->devices, hi->dev_instance);
1485  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
1486 
1487  if (ad->flags & AVF_DEVICE_F_ERROR)
1488  return clib_error_return (0, "device is in error state");
1489 
1490  if (is_up)
1491  {
1492  vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
1493 			       VNET_HW_INTERFACE_FLAG_LINK_UP);
1494  ad->flags |= AVF_DEVICE_F_ADMIN_UP;
1495  }
1496  else
1497  {
1498  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1499  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
1500  }
1501  return 0;
1502 }
1503 
1504 static clib_error_t *
1505 avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
1506 			      vnet_hw_interface_rx_mode mode)
1507 {
1508  avf_main_t *am = &avf_main;
1509  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1510  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
1511  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
1512 
1513  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
1514  rxq->int_mode = 0;
1515  else
1516  rxq->int_mode = 1;
1517 
1518  return 0;
1519 }
1520 
1521 static void
1522 avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
1523 			     u32 node_index)
1524 {
1525  avf_main_t *am = &avf_main;
1526  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1527  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
1528 
1529  /* Shut off redirection */
1530  if (node_index == ~0)
1531  {
1532  ad->per_interface_next_index = node_index;
1533  return;
1534  }
1535 
1536  ad->per_interface_next_index =
1537  vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);
1538 }
1539 
1540 static char *avf_tx_func_error_strings[] = {
1541 #define _(n,s) s,
1542  foreach_avf_tx_func_error
1543 #undef _
1544 };
1545 
1546 static void
1547 avf_clear_hw_interface_counters (u32 instance)
1548 {
1549  avf_main_t *am = &avf_main;
1550  avf_device_t *ad = vec_elt_at_index (am->devices, instance);
1551  clib_memcpy_fast (&ad->last_cleared_eth_stats,
1552 		    &ad->eth_stats, sizeof (ad->eth_stats));
1553 }
1554 
1555 /* *INDENT-OFF* */
1556 VNET_DEVICE_CLASS (avf_device_class,) =
1557 {
1558  .name = "Adaptive Virtual Function (AVF) interface",
1559  .clear_counters = avf_clear_hw_interface_counters,
1560  .format_device = format_avf_device,
1561  .format_device_name = format_avf_device_name,
1562  .admin_up_down_function = avf_interface_admin_up_down,
1563  .rx_mode_change_function = avf_interface_rx_mode_change,
1564  .rx_redirect_to_node = avf_set_interface_next_node,
1565  .tx_function_n_errors = AVF_TX_N_ERROR,
1566  .tx_function_error_strings = avf_tx_func_error_strings,
1567 };
1568 /* *INDENT-ON* */
1569 
1570 clib_error_t *
1571 avf_init (vlib_main_t * vm)
1572 {
1573  avf_main_t *am = &avf_main;
1574  vlib_thread_main_t *tm = vlib_get_thread_main ();
1575 
1576  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
1577 			CLIB_CACHE_LINE_BYTES);
1578 
1579  am->log_class = vlib_log_register_class ("avf", 0);
1580  vlib_log_debug (am->log_class, "initialized");
1581 
1582  return 0;
1583 }
1584 
1585 /* *INDENT-OFF* */
1586 VLIB_INIT_FUNCTION (avf_init) =
1587 {
1588  .runs_after = VLIB_INITS ("pci_bus_init"),
1589 };
1590 /* *INDENT-ON* */
1591 
1592 /*
1593  * fd.io coding-style-patch-verification: ON
1594  *
1595  * Local Variables:
1596  * eval: (c-set-style "gnu")
1597  * End:
1598  */