FD.io VPP v19.08-27-gf4dcae4
Vector Packet Processing
vhost_user_output.c
/*
 *------------------------------------------------------------------
 * vhost-user-output
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stddef.h>
#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>
#include <sys/vfs.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy order to be executed later. However, the static
 * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
 * entries. In order not to corrupt memory, we have to do the copy when the
 * static array reaches the copy threshold. We subtract 40 in case the code
 * goes into the inner loop for a maximum of 64k frames which may require
 * more array entries.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)
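
/*
 * Worked example: assuming VHOST_USER_COPY_ARRAY_N is 4096 (its value in
 * vhost_user.h at this version), pending copies are flushed once the array
 * holds 4096 - 40 = 4056 entries; the 40-entry headroom covers copy orders
 * the inner descriptor-chain loop may still append after the threshold
 * check and before the flush runs.
 */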

extern vnet_device_class_t vhost_user_device_class;

#define foreach_vhost_user_tx_func_error      \
  _(NONE, "no error")  \
  _(NOT_READY, "vhost vring not ready")  \
  _(DOWN, "vhost interface is down")  \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")  \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")  \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
    VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}
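
/*
 * Example: device instance 2 formats as "VirtualEthernet0/0/2"; once
 * vhost_user_name_renumber (below) installs a mapping for the real
 * instance, the renumbered value is printed instead.
 */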

static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
                                              hi->dev_instance);

  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
                           hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
                hi->dev_instance, new_dev_instance);

  return 0;
}

/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return clib_atomic_test_and_set (vui->vring_locks[qid]);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  clib_atomic_release (vui->vring_locks[qid]);
}
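
/*
 * Note: clib_atomic_test_and_set returns the previous lock value, so a
 * zero return from vhost_user_vring_try_lock means the lock was free and
 * is now held; vhost_user_vring_lock simply spins on try_lock until that
 * happens.
 */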

static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
                     vhost_user_intf_t * vui, u16 qid,
                     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
                    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
        return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
        return 1;
      while (PREDICT_TRUE (copy_len >= 4))
        {
          dst0 = dst2;
          dst1 = dst3;

          if (PREDICT_FALSE
              (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
            return 1;
          if (PREDICT_FALSE
              (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
            return 1;

          CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
          CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

          clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
          clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);

          vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
          vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
          copy_len -= 2;
          cpy += 2;
        }
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
        return 1;
      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}
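
/*
 * The unrolled loop above is software-pipelined: the destinations of
 * copies 2 and 3 are mapped and their sources prefetched while copies 0
 * and 1 execute, hiding guest-memory translation and cache-miss latency
 * two entries ahead.
 */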

static_always_inline void
vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
                              virtio_net_hdr_t * hdr)
{
  /* checksum offload */
  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
      hdr->csum_offset = offsetof (udp_header_t, checksum);
    }
  else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
      hdr->csum_offset = offsetof (tcp_header_t, checksum);
    }

  /* GSO offload */
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        {
          if ((b->flags & VNET_BUFFER_F_IS_IP4) &&
              (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4)))
            {
              hdr->gso_size = vnet_buffer2 (b)->gso_size;
              hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
            }
          else if ((b->flags & VNET_BUFFER_F_IS_IP6) &&
                   (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6)))
            {
              hdr->gso_size = vnet_buffer2 (b)->gso_size;
              hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
            }
        }
      else if ((vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO)) &&
               (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
        {
          hdr->gso_size = vnet_buffer2 (b)->gso_size;
          hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
        }
    }
}
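
/*
 * Per the virtio spec, VIRTIO_NET_HDR_F_NEEDS_CSUM tells the guest the
 * checksum is not yet computed: it must checksum the bytes starting at
 * csum_start and store the result csum_offset bytes into that region.
 * Here csum_start is the L4 header offset recorded in the vlib buffer and
 * csum_offset is the checksum field's position within the TCP/UDP header.
 */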

VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
                                                   vlib_node_runtime_t *
                                                   node, vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_ready))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
                                               thread_index));
  rxvq = &vui->vrings[qid];
  if (PREDICT_FALSE (rxvq->avail == 0))
    {
      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
      goto done3;
    }

  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
        vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace = vlib_add_trace (vm, node, b0,
                                               sizeof (*cpu->current_trace));
          vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
        }

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
        {
          error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
          goto done;
        }

      desc_table = rxvq->desc;
      desc_head = desc_index =
        rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of indirect descriptor.
       * I don't know of any driver providing indirect for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
        {
          if (PREDICT_FALSE
              (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
            {
              error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
              goto done;
            }
          if (PREDICT_FALSE
              (!(desc_table =
                 map_guest_mem (vui, rxvq->desc[desc_index].addr,
                                &map_hint))))
            {
              error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
              goto done;
            }
          desc_index = 0;
        }
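
      /*
       * With VIRTQ_DESC_F_INDIRECT, the descriptor's addr/len do not
       * reference packet data; they describe a separate table of
       * descriptors in guest memory, so the walk restarts at index 0 of
       * the freshly mapped table.
       */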

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
        // Get a header from the header array
        virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
        tx_headers_len++;
        hdr->hdr.flags = 0;
        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
        hdr->num_buffers = 1;   //This is local, no need to check

        /* Guest supports csum offload? */
        if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM))
          vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

        // Prepare a copy order executed later for the header
        vhost_copy_t *cpy = &cpu->copy[copy_len];
        copy_len++;
        cpy->len = vui->virtio_net_hdr_sz;
        cpy->dst = buffer_map_addr;
        cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
        {
          if (buffer_len == 0)
            {                   //Get new output
              if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
                {
                  //Next one is chained
                  desc_index = desc_table[desc_index].next;
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else if (vui->virtio_net_hdr_sz == 12)    //MRG is available
                {
                  virtio_net_hdr_mrg_rxbuf_t *hdr =
                    &cpu->tx_headers[tx_headers_len - 1];

                  //Move from available to used buffer
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
                    desc_head;
                  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
                    desc_len;
                  vhost_user_log_dirty_ring (vui, rxvq,
                                             ring[rxvq->last_used_idx &
                                                  rxvq->qsz_mask]);

                  rxvq->last_avail_idx++;
                  rxvq->last_used_idx++;
                  hdr->num_buffers++;
                  desc_len = 0;

                  if (PREDICT_FALSE
                      (rxvq->last_avail_idx == rxvq->avail->idx))
                    {
                      //Dequeue queued descriptors for this packet
                      rxvq->last_used_idx -= hdr->num_buffers - 1;
                      rxvq->last_avail_idx -= hdr->num_buffers - 1;
                      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
                      goto done;
                    }

                  desc_table = rxvq->desc;
                  desc_head = desc_index =
                    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
                  if (PREDICT_FALSE
                      (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
                    {
                      //It is seriously unlikely that a driver will put an
                      //indirect descriptor after a non-indirect one.
                      if (PREDICT_FALSE
                          (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
                          goto done;
                        }
                      if (PREDICT_FALSE
                          (!(desc_table =
                             map_guest_mem (vui,
                                            rxvq->desc[desc_index].addr,
                                            &map_hint))))
                        {
                          error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
                          goto done;
                        }
                      desc_index = 0;
                    }
                  buffer_map_addr = desc_table[desc_index].addr;
                  buffer_len = desc_table[desc_index].len;
                }
              else
                {
                  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
                  goto done;
                }
            }

          {
            vhost_copy_t *cpy = &cpu->copy[copy_len];
            copy_len++;
            cpy->len = bytes_left;
            cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
            cpy->dst = buffer_map_addr;
            cpy->src = (uword) vlib_buffer_get_current (current_b0) +
              current_b0->current_length - bytes_left;

            bytes_left -= cpy->len;
            buffer_len -= cpy->len;
            buffer_map_addr += cpy->len;
            desc_len += cpy->len;

            CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
          }

          // Check if vlib buffer has more data. If not, get more or break.
          if (PREDICT_TRUE (!bytes_left))
            {
              if (PREDICT_FALSE
                  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
                {
                  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
                  bytes_left = current_b0->current_length;
                }
              else
                {
                  //End of packet
                  break;
                }
            }
        }

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
                                 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
        }

      n_left--;                 //At the end for error counting when 'goto done' is invoked

      /*
       * Do the copy periodically to prevent overflowing the cpu->copy
       * array and corrupting memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
        {
          if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                                 &map_hint)))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
            }
          copy_len = 0;

          /* give buffers back to driver */
          CLIB_MEMORY_BARRIER ();
          rxvq->used->idx = rxvq->last_used_idx;
          vhost_user_log_dirty_ring (vui, rxvq, idx);
        }
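
      /*
       * The memory barrier makes all copies into guest memory visible
       * before used->idx is published; the guest driver may consume a
       * used entry as soon as it observes the index update.
       */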
      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
                                         &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to a lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch fresh ones that
   * would likely be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case traffic flows from physical to virtual interfaces, it ends up
   * leveraging the physical NIC buffers to absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
        vhost_user_send_call (vm, rxvq);
    }
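
  /*
   * Interrupt coalescing: rather than signaling the guest's callfd for
   * every frame, packets enqueued since the last interrupt are counted and
   * a call is sent only once the count exceeds coalesce_frames; the
   * vhost_user_send_interrupt_node timer process (see the rx-mode handler
   * below) covers queues that stay under the threshold.
   */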

  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
        (vnet_main.interface_main.sw_if_counters +
         VNET_INTERFACE_COUNTER_DROP,
         thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
                                     u32 qid, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
        {
          // We cannot support interrupt mode if the driver opts out
          return clib_error_return (0, "Driver does not support interrupt");
        }
      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
        {
          vum->ifq_count++;
          // Start the timer if this is the first encounter on interrupt
          // interface/queue
          if ((vum->ifq_count == 1) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_START_TIMER, 0);
        }
    }
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
           (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
          vum->ifq_count)
        {
          vum->ifq_count--;
          // Stop the timer if there is no more interrupt interface/queue
          if ((vum->ifq_count == 0) &&
              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
            vlib_process_signal_event (vm,
                                       vhost_user_send_interrupt_node.index,
                                       VHOST_USER_EVENT_STOP_TIMER, 0);
        }
    }

  txvq->mode = mode;
  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
           (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
                  hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  return 0;
}

static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
                                    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u8 link_old, link_new;

  link_old = vui_is_link_up (vui);

  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  link_new = vui_is_link_up (vui);

  if (link_old != link_new)
    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
                                 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);

  return /* no error */ 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */