21 #include <sys/ioctl.h> 22 #include <sys/socket.h> 25 #include <sys/types.h> 27 #include <netinet/in.h> 30 #include <linux/if_arp.h> 31 #include <linux/if_tun.h> 52 #define VHOST_DEBUG_VQ 0 54 #define DBG_SOCK(args...) \ 56 vhost_user_main_t *_vum = &vhost_user_main; \ 61 #if VHOST_DEBUG_VQ == 1 62 #define DBG_VQ(args...) clib_warning(args); 64 #define DBG_VQ(args...) 72 #define VHOST_USER_DOWN_DISCARD_COUNT 256 78 #define VHOST_USER_RX_BUFFER_STARVATION 32 88 #define VHOST_USER_RX_COPY_THRESHOLD 64 90 #define UNIX_GET_FD(unixfd_idx) \ 91 (unixfd_idx != ~0) ? \ 92 pool_elt_at_index (unix_main.file_pool, \ 93 unixfd_idx)->file_descriptor : -1; 95 #define foreach_virtio_trace_flags \ 96 _ (SIMPLE_CHAINED, 0, "Simple descriptor chaining") \ 97 _ (SINGLE_DESC, 1, "Single descriptor packet") \ 98 _ (INDIRECT, 2, "Indirect descriptor") \ 99 _ (MAP_ERROR, 4, "Memory mapping error") 103 #define _(n,i,s) VIRTIO_TRACE_F_##n, 110 #define foreach_vhost_user_tx_func_error \ 111 _(NONE, "no error") \ 112 _(NOT_READY, "vhost vring not ready") \ 113 _(DOWN, "vhost interface is down") \ 114 _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)") \ 115 _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)") \ 116 _(MMAP_FAIL, "mmap failure") \ 117 _(INDIRECT_OVERFLOW, "indirect descriptor table overflow") 121 #define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f, 133 #define foreach_vhost_user_input_func_error \ 134 _(NO_ERROR, "no error") \ 135 _(NO_BUFFER, "no available buffer") \ 136 _(MMAP_FAIL, "mmap failure") \ 137 _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \ 138 _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \ 139 _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)") 143 #define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f, 161 .name =
"vhost-user",
169 u32 show_dev_instance = ~0;
175 if (show_dev_instance != ~0)
176 i = show_dev_instance;
178 s =
format (s,
"VirtualEthernet0/0/%d", i);
193 DBG_SOCK (
"renumbered vhost-user interface dev_instance %d to %d",
204 ((vui->
regions[i].guest_phys_addr +
205 vui->
regions[i].memory_size) > addr)))
211 __m128i rl, rh, al, ah, r;
212 al = _mm_set1_epi64x (addr + 1);
213 ah = _mm_set1_epi64x (addr);
216 rl = _mm_cmpgt_epi64 (al, rl);
218 rh = _mm_cmpgt_epi64 (rh, ah);
219 r = _mm_and_si128 (rl, rh);
222 rl = _mm_cmpgt_epi64 (al, rl);
224 rh = _mm_cmpgt_epi64 (rh, ah);
225 r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);
228 rl = _mm_cmpgt_epi64 (al, rl);
230 rh = _mm_cmpgt_epi64 (rh, ah);
231 r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);
234 rl = _mm_cmpgt_epi64 (al, rl);
236 rh = _mm_cmpgt_epi64 (rh, ah);
237 r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);
239 r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
240 i = __builtin_ctzll (_mm_movemask_epi8 (r) |
253 if ((vui->
regions[i].guest_phys_addr <= addr) &&
263 DBG_VQ (
"failed to map guest mem addr %llx", addr);
274 if ((vui->
regions[i].userspace_addr <= addr) &&
304 ssize_t map_sz = (vui->
regions[
i].memory_size +
306 page_sz - 1) & ~(page_sz - 1);
313 (
"unmap memory region %d addr 0x%lx len 0x%lx page_sz 0x%x", i,
320 clib_warning (
"failed to unmap memory region (errno %d)",
334 u32 thread_index = 0;
351 if (thread_index == 0)
354 for (thread_index = 0;
383 vec_foreach (queue, vui->rx_queues)
385 rv = vnet_hw_interface_unassign_rx_thread (vnm, vui->hw_if_index,
388 clib_warning (
"Warning: unable to unassign interface %d, " 389 "queue %d: rc=%d", vui->hw_if_index, *queue, rv);
398 for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid++)
400 txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
403 if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_UNKNOWN)
405 txvq->mode = VNET_HW_INTERFACE_RX_MODE_POLLING;
406 vec_add1 (vui->rx_queues, qid);
415 vnet_hw_interface_set_input_node (vnm, vui->hw_if_index,
416 vhost_user_input_node.index);
417 vec_foreach (queue, vui->rx_queues)
419 vnet_hw_interface_assign_rx_thread (vnm, vui->hw_if_index, *queue,
421 txvq = &vui->vrings[VHOST_VRING_IDX_TX (*queue)];
422 rv = vnet_hw_interface_set_rx_mode (vnm, vui->hw_if_index, *queue,
425 clib_warning (
"Warning: unable to set rx mode for interface %d, " 426 "queue %d: rc=%d", vui->hw_if_index, *queue, rv);
436 int i, found[2] = { };
442 return found[0] && found[1];
450 if (is_up != vui->
is_up)
453 is_up ?
"ready" :
"down");
482 __attribute__ ((unused))
int n;
493 __attribute__ ((unused))
int n;
522 return __sync_lock_test_and_set (vui->
vring_locks[qid], 1);
548 memset (vring, 0,
sizeof (*vring));
562 if (qid == 0 || qid == 1)
584 if (vring->
errfd != -1)
586 close (vring->
errfd);
615 #define VHOST_LOG_PAGE 0x1000 621 || !(vui->
features & (1 << FEAT_VHOST_F_LOG_ALL))))
631 DBG_SOCK (
"vhost_user_log_dirty_pages(): out of range\n");
650 #define vhost_user_log_dirty_ring(vui, vq, member) \ 651 if (PREDICT_FALSE(vq->log_used)) { \ 652 vhost_user_log_dirty_pages(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \ 653 sizeof(vq->used->member)); \ 660 int fd, number_of_fds = 0;
662 vhost_user_msg_t msg;
667 struct cmsghdr *cmsg;
676 memset (&mh, 0,
sizeof (mh));
677 memset (control, 0,
sizeof (control));
683 iov[0].iov_base = (
void *) &msg;
688 mh.msg_control = control;
689 mh.msg_controllen =
sizeof (control);
700 DBG_SOCK (
"recvmsg returned error %d %s", errno, strerror (errno));
704 DBG_SOCK (
"n (%d) != VHOST_USER_MSG_HDR_SZ (%d)",
710 if (mh.msg_flags & MSG_CTRUNC)
716 cmsg = CMSG_FIRSTHDR (&mh);
718 if (cmsg && (cmsg->cmsg_len > 0) && (cmsg->cmsg_level == SOL_SOCKET) &&
719 (cmsg->cmsg_type == SCM_RIGHTS) &&
720 (cmsg->cmsg_len - CMSG_LEN (0) <=
721 VHOST_MEMORY_MAX_NREGIONS *
sizeof (
int)))
723 number_of_fds = (cmsg->cmsg_len - CMSG_LEN (0)) /
sizeof (
int);
724 clib_memcpy (fds, CMSG_DATA (cmsg), number_of_fds *
sizeof (
int));
728 if ((msg.flags & 7) != 1)
730 DBG_SOCK (
"malformed message received. closing socket");
741 DBG_SOCK (
"read failed %s", strerror (errno));
744 else if (rv != msg.size)
746 DBG_SOCK (
"message too short (read %dB should be %dB)", rv, msg.size);
755 msg.u64 = (1ULL << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
756 (1ULL << FEAT_VIRTIO_NET_F_CTRL_VQ) |
757 (1ULL << FEAT_VIRTIO_F_ANY_LAYOUT) |
758 (1ULL << FEAT_VIRTIO_F_INDIRECT_DESC) |
759 (1ULL << FEAT_VHOST_F_LOG_ALL) |
760 (1ULL << FEAT_VIRTIO_NET_F_GUEST_ANNOUNCE) |
761 (1ULL << FEAT_VIRTIO_NET_F_MQ) |
762 (1ULL << FEAT_VHOST_USER_F_PROTOCOL_FEATURES) |
763 (1ULL << FEAT_VIRTIO_F_VERSION_1);
765 msg.size =
sizeof (msg.u64);
766 DBG_SOCK (
"if %d msg VHOST_USER_GET_FEATURES - reply 0x%016llx",
771 DBG_SOCK (
"if %d msg VHOST_USER_SET_FEATURES features 0x%016llx",
777 ((1 << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
778 (1ULL << FEAT_VIRTIO_F_VERSION_1)))
784 (vui->
features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
796 DBG_SOCK (
"if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",
799 if ((msg.memory.nregions < 1) ||
800 (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS))
803 DBG_SOCK (
"number of mem regions must be between 1 and %i",
804 VHOST_MEMORY_MAX_NREGIONS);
809 if (msg.memory.nregions != number_of_fds)
811 DBG_SOCK (
"each memory region must have FD");
815 for (i = 0; i < msg.memory.nregions; i++)
818 sizeof (vhost_user_memory_region_t));
823 ssize_t map_sz = (vui->
regions[
i].memory_size +
825 page_sz - 1) & ~(page_sz - 1);
828 MAP_SHARED, fds[i], 0);
834 (
"map memory region %d addr 0 len 0x%lx fd %d mapped 0x%lx " 840 clib_warning (
"failed to map memory. errno is %d", errno);
846 vui->
nregions = msg.memory.nregions;
850 DBG_SOCK (
"if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",
853 if ((msg.state.num > 32768) ||
854 (msg.state.num == 0) ||
855 ((msg.state.num - 1) & msg.state.num))
857 vui->
vrings[msg.state.index].
qsz = msg.state.num;
861 DBG_SOCK (
"if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
866 DBG_SOCK (
"invalid vring index VHOST_USER_SET_VRING_ADDR:" 871 if (msg.size < sizeof (msg.addr))
873 DBG_SOCK (
"vhost message is too short (%d < %d)",
874 msg.size, sizeof (msg.addr));
878 vui->
vrings[msg.state.index].
desc = (vring_desc_t *)
880 vui->
vrings[msg.state.index].
used = (vring_used_t *)
882 vui->
vrings[msg.state.index].
avail = (vring_avail_t *)
889 DBG_SOCK (
"failed to map user memory for hw_if_index %d",
900 if (!(vui->
features & (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES)))
922 DBG_SOCK (
"if %d msg VHOST_USER_SET_VRING_CALL %d",
925 q = (
u8) (msg.u64 & 0xFF);
936 if (!(msg.u64 & 0x100))
938 if (number_of_fds != 1)
940 DBG_SOCK (
"More than one fd received !");
945 template.file_descriptor = fds[0];
946 template.private_data =
955 DBG_SOCK (
"if %d msg VHOST_USER_SET_VRING_KICK %d",
958 q = (
u8) (msg.u64 & 0xFF);
968 if (!(msg.u64 & 0x100))
970 if (number_of_fds != 1)
972 DBG_SOCK (
"More than one fd received !");
977 template.file_descriptor = fds[0];
978 template.private_data =
993 DBG_SOCK (
"if %d msg VHOST_USER_SET_VRING_ERR %d",
996 q = (
u8) (msg.u64 & 0xFF);
1001 if (!(msg.u64 & 0x100))
1003 if (number_of_fds != 1)
1014 DBG_SOCK (
"if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
1015 vui->
hw_if_index, msg.state.index, msg.state.num);
1023 DBG_SOCK (
"invalid vring index VHOST_USER_GET_VRING_BASE:" 1034 msg.size =
sizeof (msg.state);
1038 DBG_SOCK (
"if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
1039 vui->
hw_if_index, msg.state.index, msg.state.num);
1051 if (msg.size != sizeof (msg.log))
1054 (
"invalid msg size for VHOST_USER_SET_LOG_BASE: %d instead of %d",
1055 msg.size, sizeof (msg.log));
1063 (
"VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");
1071 (msg.log.size + msg.log.offset + page_sz - 1) & ~(page_sz - 1);
1073 vui->
log_base_addr = mmap (0, map_sz, PROT_READ | PROT_WRITE,
1077 (
"map log region addr 0 len 0x%lx off 0x%lx fd %d mapped 0x%lx",
1082 clib_warning (
"failed to map memory. errno is %d", errno);
1090 msg.size =
sizeof (msg.u64);
1104 msg.size =
sizeof (msg.u64);
1106 (
"if %d msg VHOST_USER_GET_PROTOCOL_FEATURES - reply 0x%016llx",
1112 (
"if %d msg VHOST_USER_SET_PROTOCOL_FEATURES features 0x%016llx",
1122 msg.size =
sizeof (msg.u64);
1123 DBG_SOCK (
"if %d msg VHOST_USER_GET_QUEUE_NUM - reply %d",
1128 DBG_SOCK (
"if %d VHOST_USER_SET_VRING_ENABLE: %s queue %d",
1129 vui->
hw_if_index, msg.state.num ?
"enable" :
"disable",
1133 DBG_SOCK (
"invalid vring index VHOST_USER_SET_VRING_ENABLE:" 1142 DBG_SOCK (
"unknown vhost-user message %d received. closing socket",
1154 DBG_SOCK (
"could not send message response");
1189 int client_fd, client_len;
1190 struct sockaddr_un client;
1197 client_len =
sizeof (client);
1199 (
struct sockaddr *) &client,
1200 (socklen_t *) & client_len);
1208 template.file_descriptor = client_fd;
1267 if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \ 1268 s = format (s, "%U %s %s\n", format_white_space, indent, #n, st); 1271 s =
format (s,
"%U virtio_net_hdr first_desc_len %u\n",
1274 s =
format (s,
"%U flags 0x%02x gso_type %u\n",
1276 t->
hdr.hdr.flags, t->
hdr.hdr.gso_type);
1279 s =
format (s,
"%U num_buff %u",
1291 u32 qsz_mask = txvq->
qsz - 1;
1293 u32 desc_current = txvq->
avail->ring[last_avail_idx & qsz_mask];
1294 vring_desc_t *hdr_desc = 0;
1295 virtio_net_hdr_mrg_rxbuf_t *hdr;
1298 memset (t, 0,
sizeof (*t));
1302 hdr_desc = &txvq->
desc[desc_current];
1313 if (!(txvq->
desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
1314 !(txvq->
desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
1321 if (!hdr_desc || !(hdr =
map_guest_mem (vui, hdr_desc->addr, &hint)))
1328 memcpy (&t->
hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
1340 rv = write (fd, &x,
sizeof (x));
1344 (
"Error: Could not write to unix socket for callfd %d", fd);
1354 u16 copy_len,
u32 * map_hint)
1356 void *src0, *src1, *src2, *src3;
1379 clib_memcpy ((
void *) cpy[0].dst, src0, cpy[0].len);
1380 clib_memcpy ((
void *) cpy[1].dst, src1, cpy[1].len);
1410 u32 discarded_packets = 0;
1412 u16 qsz_mask = txvq->
qsz - 1;
1413 while (discarded_packets != discard_max)
1418 u16 desc_chain_head =
1426 discarded_packets++;
1433 return discarded_packets;
1446 b_current->
flags = 0;
1447 while (b_current != b_head)
1453 b_current->
flags = 0;
1466 u16 n_rx_packets = 0;
1469 u32 n_left_to_next, *to_next;
1503 txvq->
used->flags = 0;
1539 VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
1542 qsz_mask = txvq->
qsz - 1;
1576 interface_main.sw_if_counters +
1582 VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
1590 while (n_left > 0 && n_left_to_next > 0)
1595 u32 desc_data_offset;
1596 vring_desc_t *desc_table = txvq->
desc;
1613 to_next[0] = bi_current;
1619 cpus[thread_index].rx_buffers)
1620 [vum->
cpus[thread_index].
1621 rx_buffers_len - 1], LOAD);
1656 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
1670 desc_data_offset = desc_table[desc_current].len;
1676 if (desc_data_offset == desc_table[desc_current].len)
1681 desc_current = desc_table[desc_current].next;
1682 desc_data_offset = 0;
1723 bi_current = bi_next;
1731 desc_table[desc_current].
len - desc_data_offset;
1733 cpy->
len = (cpy->
len > desc_data_l) ? desc_data_l : cpy->
len;
1736 cpy->
src = desc_table[desc_current].addr + desc_data_offset;
1738 desc_data_offset += cpy->
len;
1770 u32 bi = to_next[-1];
1772 to_next, n_left_to_next,
1787 copy_len, &map_hint)))
1790 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
1807 copy_len, &map_hint)))
1810 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
1836 return n_rx_packets;
1844 uword n_rx_packets = 0;
1853 (node->
state == VLIB_NODE_STATE_POLLING))
1862 return n_rx_packets;
1869 .name =
"vhost-user-input",
1870 .sibling_of =
"device-input",
1873 .state = VLIB_NODE_STATE_DISABLED,
1892 u32 qsz_mask = rxvq->
qsz - 1;
1894 u32 desc_current = rxvq->
avail->ring[last_avail_idx & qsz_mask];
1895 vring_desc_t *hdr_desc = 0;
1898 memset (t, 0,
sizeof (*t));
1902 hdr_desc = &rxvq->
desc[desc_current];
1913 if (!(rxvq->
desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
1914 !(rxvq->
desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
1924 u16 copy_len,
u32 * map_hint)
1926 void *dst0, *dst1, *dst2, *dst3;
1948 clib_memcpy (dst0, (
void *) cpy[0].src, cpy[0].len);
1949 clib_memcpy (dst1, (
void *) cpy[1].src, cpy[1].len);
1992 error = VHOST_USER_TX_FUNC_ERROR_DOWN;
1998 error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
2005 rxvq = &vui->vrings[qid];
2009 qsz_mask = rxvq->
qsz - 1;
2012 error = VHOST_USER_TX_FUNC_ERROR_NONE;
2018 u16 desc_head, desc_index, desc_len;
2019 vring_desc_t *desc_table;
2020 uword buffer_map_addr;
2035 vui, qid / 2, b0, rxvq);
2040 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
2044 desc_table = rxvq->
desc;
2045 desc_head = desc_index =
2053 (rxvq->
desc[desc_head].len < sizeof (vring_desc_t)))
2055 error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
2063 error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
2069 desc_len = vui->virtio_net_hdr_sz;
2070 buffer_map_addr = desc_table[desc_index].addr;
2071 buffer_len = desc_table[desc_index].len;
2075 virtio_net_hdr_mrg_rxbuf_t *hdr =
2079 hdr->hdr.gso_type = 0;
2080 hdr->num_buffers = 1;
2085 cpy->
len = vui->virtio_net_hdr_sz;
2086 cpy->
dst = buffer_map_addr;
2090 buffer_map_addr += vui->virtio_net_hdr_sz;
2091 buffer_len -= vui->virtio_net_hdr_sz;
2096 if (buffer_len == 0)
2101 desc_index = desc_table[desc_index].next;
2102 buffer_map_addr = desc_table[desc_index].addr;
2103 buffer_len = desc_table[desc_index].len;
2105 else if (vui->virtio_net_hdr_sz == 12)
2107 virtio_net_hdr_mrg_rxbuf_t *hdr =
2130 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
2134 desc_table = rxvq->
desc;
2135 desc_head = desc_index =
2138 (rxvq->
desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
2143 (rxvq->
desc[desc_head].len < sizeof (vring_desc_t)))
2145 error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
2151 rxvq->
desc[desc_index].addr,
2154 error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
2159 buffer_map_addr = desc_table[desc_index].addr;
2160 buffer_len = desc_table[desc_index].len;
2164 error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
2172 cpy->
len = bytes_left;
2173 cpy->
len = (cpy->
len > buffer_len) ? buffer_len : cpy->
len;
2174 cpy->
dst = buffer_map_addr;
2178 bytes_left -= cpy->
len;
2179 buffer_len -= cpy->
len;
2180 buffer_map_addr += cpy->
len;
2181 desc_len += cpy->
len;
2225 copy_len, &map_hint)))
2228 VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
2247 if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
2266 if (
PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
2284 f64 timeout = 3153600000.0 ;
2285 uword event_type, *event_data = 0;
2288 f64 now, poll_time_remaining;
2294 poll_time_remaining =
2303 timeout = poll_time_remaining;
2324 next_timeout = timeout;
2325 vec_foreach (queue, vui->rx_queues)
2327 vhost_user_vring_t *rxvq =
2328 &vui->vrings[VHOST_VRING_IDX_RX (*queue)];
2329 vhost_user_vring_t *txvq =
2330 &vui->vrings[VHOST_VRING_IDX_TX (*queue)];
2332 if (txvq->n_since_last_int)
2334 if (now >= txvq->int_deadline)
2335 vhost_user_send_call (vm, txvq);
2337 next_timeout = txvq->int_deadline - now;
2340 if (rxvq->n_since_last_int)
2342 if (now >= rxvq->int_deadline)
2343 vhost_user_send_call (vm, rxvq);
2345 next_timeout = rxvq->int_deadline - now;
2348 if ((next_timeout < timeout) && (next_timeout > 0.0))
2349 timeout = next_timeout;
2356 clib_warning (
"BUG: unhandled event type %d", event_type);
2363 timeout = 3153600000.0;
2372 .name =
"vhost-user-send-interrupt-process",
2423 txvq->
used->flags = 0;
2426 clib_warning (
"BUG: unhandled mode %d changed for if %d queue %d", mode,
2455 .name =
"vhost-user",
2476 struct sockaddr_un sun;
2479 f64 timeout = 3153600000.0 ;
2480 uword *event_data = 0;
2483 sun.sun_family = AF_UNIX;
2498 if (vui->unix_server_index == ~0) {
2499 if (vui->unix_file_index == ~0)
2502 ((sockfd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0))
2508 if (!vui->sock_errno || (vui->sock_errno != errno))
2511 (
"Error: Could not open unix socket for %s",
2512 vui->sock_filename);
2513 vui->sock_errno = errno;
2519 strncpy (sun.sun_path, (char *) vui->sock_filename,
2520 sizeof (sun.sun_path) - 1);
2523 if (fcntl(sockfd, F_SETFL, O_NONBLOCK) < 0)
2524 clib_unix_warning (
"fcntl");
2526 if (connect (sockfd, (struct sockaddr *) &sun,
2527 sizeof (struct sockaddr_un)) == 0)
2530 if (fcntl(sockfd, F_SETFL, 0) < 0)
2531 clib_unix_warning (
"fcntl2");
2533 vui->sock_errno = 0;
2534 template.file_descriptor = sockfd;
2535 template.private_data =
2536 vui - vhost_user_main.vhost_user_interfaces;
2537 vui->unix_file_index = unix_file_add (&unix_main, &template);
2544 vui->sock_errno = errno;
2551 socklen_t len = sizeof (error);
2552 int fd = UNIX_GET_FD(vui->unix_file_index);
2554 getsockopt (fd, SOL_SOCKET, SO_ERROR, &error, &len);
2558 DBG_SOCK (
"getsockopt returned %d", retval);
2559 vhost_user_if_disconnect (vui);
2571 .function = vhost_user_process,
2573 .name =
"vhost-user-process",
2621 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
2623 DBG_SOCK (
"Deleting vhost-user interface %s (instance %d)",
2677 vhost_user_delete_if (vnm, vm, vui->sw_if_index);
2693 struct sockaddr_un un = { };
2696 if ((fd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0)
2697 return VNET_API_ERROR_SYSCALL_ERROR_1;
2699 un.sun_family = AF_UNIX;
2700 strncpy ((
char *) un.sun_path, (
char *) sock_filename,
2701 sizeof (un.sun_path) - 1);
2704 unlink ((
char *) sock_filename);
2706 if (bind (fd, (
struct sockaddr *) &un,
sizeof (un)) == -1)
2708 rv = VNET_API_ERROR_SYSCALL_ERROR_2;
2712 if (listen (fd, 1) == -1)
2714 rv = VNET_API_ERROR_SYSCALL_ERROR_3;
2752 vhost_user_dev_class.index,
2771 const char *sock_filename,
2772 u64 feature_mask,
u32 * sw_if_index)
2781 if (server_sock_fd != -1)
2785 template.file_descriptor = server_sock_fd;
2829 const char *sock_filename,
2833 u8 renumber,
u32 custom_dev_instance,
u8 * hwaddr)
2838 int server_sock_fd = -1;
2842 if (sock_filename ==
NULL || !(strlen (sock_filename) > 0))
2844 return VNET_API_ERROR_INVALID_ARGUMENT;
2855 return VNET_API_ERROR_IF_ALREADY_EXISTS;
2871 feature_mask, &sw_if_idx);
2877 *sw_if_index = sw_if_idx;
2887 const char *sock_filename,
2890 u64 feature_mask,
u8 renumber,
u32 custom_dev_instance)
2895 int server_sock_fd = -1;
2902 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
2904 if (sock_filename ==
NULL || !(strlen (sock_filename) > 0))
2905 return VNET_API_ERROR_INVALID_ARGUMENT;
2914 if (if_index && (*if_index != vui->
if_index))
2915 return VNET_API_ERROR_IF_ALREADY_EXISTS;
2920 &server_sock_fd)) != 0)
2925 sock_filename, feature_mask, &sw_if_idx);
2942 u8 *sock_filename =
NULL;
2945 u64 feature_mask = (
u64) ~ (0ULL);
2947 u32 custom_dev_instance = ~0;
2958 if (
unformat (line_input,
"socket %s", &sock_filename))
2960 else if (
unformat (line_input,
"server"))
2962 else if (
unformat (line_input,
"feature-mask 0x%llx", &feature_mask))
2968 else if (
unformat (line_input,
"renumber %d", &custom_dev_instance))
2984 is_server, &sw_if_index, feature_mask,
2985 renumber, custom_dev_instance, hw)))
3007 u32 sw_if_index = ~0;
3017 if (
unformat (line_input,
"sw_if_index %d", &sw_if_index))
3057 u32 *hw_if_indices = 0;
3069 for (i = 0; i <
vec_len (hw_if_indices); i++)
3086 strncpy ((
char *) vuid->
if_name, (
char *) s,
3094 *out_vuids = r_vuids;
3108 u32 hw_if_index, *hw_if_indices = 0;
3119 struct feat_struct *feat_entry;
3121 static struct feat_struct feat_array[] = {
3122 #define _(s,b) { .str = #s, .bit = b, }, 3128 #define foreach_protocol_feature \ 3129 _(VHOST_USER_PROTOCOL_F_MQ) \ 3130 _(VHOST_USER_PROTOCOL_F_LOG_SHMFD) 3132 static struct feat_struct proto_feat_array[] = {
3133 #define _(s) { .str = #s, .bit = s}, 3144 vec_add1 (hw_if_indices, hw_if_index);
3155 if (
vec_len (hw_if_indices) == 0)
3164 vlib_cli_output (vm,
" number of rx virtqueues in interrupt mode: %d",
3167 for (i = 0; i <
vec_len (hw_if_indices); i++)
3172 hi->
name, hw_if_indices[i]);
3175 " features mask (0x%llx): \n" 3176 " features (0x%llx): \n",
3180 feat_entry = (
struct feat_struct *) &feat_array;
3181 while (feat_entry->str)
3183 if (vui->
features & (1ULL << feat_entry->bit))
3191 feat_entry = (
struct feat_struct *) &proto_feat_array;
3192 while (feat_entry->str)
3240 " region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
3242 " ====== ===== ================== ================== ================== ================== ==================\n");
3244 for (j = 0; j < vui->
nregions; j++)
3247 " %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
3249 vui->
regions[j].guest_phys_addr,
3251 vui->
regions[j].userspace_addr,
3261 (q & 1) ?
"RX" :
"TX",
3265 " qsz %d last_avail_idx %d last_used_idx %d\n",
3271 " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
3286 " id addr len flags next user_addr\n");
3288 " ===== ================== ===== ====== ===== ==================\n");
3289 for (j = 0; j < vui->
vrings[q].
qsz; j++)
3293 " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
3368 .path =
"create vhost-user",
3369 .short_help =
"create vhost-user socket <socket-filename> [server] " 3370 "[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] ",
3389 .path =
"delete vhost-user",
3390 .short_help =
"delete vhost-user {<interface> | sw_if_index <sw_idx>}",
3529 .path =
"show vhost-user",
3530 .short_help =
"show vhost-user [<interface> [<interface> [..]]] [descriptors]",
3564 else if (
unformat (line_input,
"off"))
3587 .path =
"debug vhost-user",
3588 .short_help =
"debug vhost-user <on | off>",
3604 else if (
unformat (input,
"dont-dump-memory"))
unformat_function_t unformat_vnet_hw_interface
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
static clib_error_t * vhost_user_init(vlib_main_t *vm)
static void vhost_user_vring_close(vhost_user_intf_t *vui, u32 qid)
static void vnet_device_increment_rx_packets(u32 thread_index, u64 count)
static void vhost_user_if_disconnect(vhost_user_intf_t *vui)
#define vec_foreach_index(var, v)
Iterate over vector indices.
sll srl srl sll sra u16x4 i
#define VRING_AVAIL_F_NO_INTERRUPT
u32 virtio_ring_flags
The device index.
virtio_net_hdr_mrg_rxbuf_t hdr
Length of the first data descriptor.
static uword random_default_seed(void)
Default random seed (unix/linux user-mode)
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread Waits for an event, or for the indicated number of seconds...
#define clib_smp_swap(addr, new)
unix_file_function_t * read_function
vhost_cpu_t * cpus
Per-CPU data for vhost-user.
static void vhost_user_create_ethernet(vnet_main_t *vnm, vlib_main_t *vm, vhost_user_intf_t *vui, u8 *hwaddress)
Create ethernet interface for vhost user interface.
#define VHOST_USER_DOWN_DISCARD_COUNT
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
u8 runtime_data[0]
Function dependent node-runtime data.
void ethernet_delete_interface(vnet_main_t *vnm, u32 hw_if_index)
#define VHOST_VRING_IDX_TX(qid)
vnet_main_t * vnet_get_main(void)
static vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
static clib_error_t * vhost_user_socket_error(unix_file_t *uf)
void vhost_user_rx_trace(vhost_trace_t *t, vhost_user_intf_t *vui, u16 qid, vlib_buffer_t *b, vhost_user_vring_t *txvq)
u64 region_guest_addr_hi[VHOST_MEMORY_MAX_NREGIONS]
vnet_interface_main_t interface_main
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
uword mhash_unset(mhash_t *h, void *key, uword *old_value)
static f64 vlib_time_now(vlib_main_t *vm)
#define foreach_virtio_trace_flags
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vhost_copy_t copy[VHOST_USER_COPY_ARRAY_N]
static void vhost_user_term_if(vhost_user_intf_t *vui)
Disables and reset interface structure.
#define VLIB_BUFFER_PRE_DATA_SIZE
static uword vhost_user_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
#define VHOST_USER_EVENT_START_TIMER
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
int vnet_interface_name_renumber(u32 sw_if_index, u32 new_show_dev_instance)
struct _vlib_node_registration vlib_node_registration_t
static_always_inline u32 vhost_user_input_copy(vhost_user_intf_t *vui, vhost_copy_t *cpy, u16 copy_len, u32 *map_hint)
#define VHOST_USER_MSG_HDR_SZ
static clib_error_t * vhost_user_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
clib_error_t * show_vhost_user_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
unformat_function_t unformat_vnet_sw_interface
#define VNET_HW_INTERFACE_FLAG_LINK_UP
static char * vhost_user_input_func_error_strings[]
static char * vhost_user_tx_func_error_strings[]
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
format_function_t format_vnet_sw_if_index_name
vhost_trace_t * current_trace
static uword vlib_process_suspend_time_is_zero(f64 dt)
Returns TRUE if a process suspend time is less than 1us.
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
static int vhost_user_name_renumber(vnet_hw_interface_t *hi, u32 new_dev_instance)
static void vhost_user_vui_init(vnet_main_t *vnm, vhost_user_intf_t *vui, int server_sock_fd, const char *sock_filename, u64 feature_mask, u32 *sw_if_index)
static vnet_sw_interface_t * vnet_get_hw_sw_interface(vnet_main_t *vnm, u32 hw_if_index)
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
#define VHOST_VRING_F_LOG
vnet_hw_interface_rx_mode
VNET_DEVICE_CLASS(vhost_user_dev_class, static)
#define VLIB_BUFFER_NEXT_PRESENT
static u8 * format_vhost_user_interface_name(u8 *s, va_list *args)
#define static_always_inline
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
#define VLIB_INIT_FUNCTION(x)
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
vlib_combined_counter_main_t * combined_sw_if_counters
static_always_inline void vnet_device_input_set_interrupt_pending(vnet_main_t *vnm, u32 hw_if_index, u16 queue_id)
#define VLIB_BUFFER_TOTAL_LENGTH_VALID
#define foreach_protocol_feature
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
vhost_user_tx_func_error_t
static void unmap_all_mem_regions(vhost_user_intf_t *vui)
static void vhost_user_set_interrupt_pending(vhost_user_intf_t *vui, u32 ifq)
vhost_user_input_func_error_t
#define vlib_call_init_function(vm, x)
static clib_error_t * vhost_user_socket_read(unix_file_t *uf)
static uword pointer_to_uword(const void *p)
#define UNIX_GET_FD(unixfd_idx)
static int vhost_user_init_server_sock(const char *sock_filename, int *sock_fd)
Open server unix socket on specified sock_filename.
static uword vhost_user_send_interrupt_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
VLIB_DEVICE_TX_FUNCTION_MULTIARCH(vhost_user_dev_class, vhost_user_tx)
#define VHOST_USER_EVENT_STOP_TIMER
static uword unix_file_add(unix_main_t *um, unix_file_t *template)
static void vhost_user_vring_unlock(vhost_user_intf_t *vui, u32 qid)
Unlock the vring lock.
format_function_t format_vnet_sw_interface_name
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static_always_inline uword vnet_get_device_input_thread_index(vnet_main_t *vnm, u32 hw_if_index, u16 queue_id)
u16 state
Input node state.
u16 current_length
Nbytes between current data and the end of this buffer.
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
int vhost_user_delete_if(vnet_main_t *vnm, vlib_main_t *vm, u32 sw_if_index)
static void * map_user_mem(vhost_user_intf_t *vui, uword addr)
u32 random
Pseudo random iterator.
static_always_inline void vhost_user_log_dirty_pages(vhost_user_intf_t *vui, u64 addr, u64 len)
uword mhash_set_mem(mhash_t *h, void *key, uword *new_value, uword *old_value)
#define VIRTQ_DESC_F_INDIRECT
#define clib_error_return_unix(e, args...)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
int vnet_hw_interface_get_rx_mode(vnet_main_t *vnm, u32 hw_if_index, u16 queue_id, vnet_hw_interface_rx_mode *mode)
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX
#define pool_put(P, E)
Free an object E in pool P.
format_function_t format_vnet_hw_interface_rx_mode
void vhost_user_tx_trace(vhost_trace_t *t, vhost_user_intf_t *vui, u16 qid, vlib_buffer_t *b, vhost_user_vring_t *rxvq)
#define VLIB_CONFIG_FUNCTION(x, n,...)
#define vhost_user_log_dirty_ring(vui, vq, member)
static vlib_node_registration_t vhost_user_process_node
(constructor) VLIB_REGISTER_NODE (vhost_user_process_node)
void vhost_user_unmap_all(void)
vlib_simple_counter_main_t * sw_if_counters
u32 region_mmap_fd[VHOST_MEMORY_MAX_NREGIONS]
static void vhost_user_send_call(vlib_main_t *vm, vhost_user_vring_t *vq)
#define VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE
u32 node_index
Node index.
vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS]
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
int vhost_user_dump_ifs(vnet_main_t *vnm, vlib_main_t *vm, vhost_user_intf_details_t **out_vuids)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static clib_error_t * vhost_user_exit(vlib_main_t *vm)
static void vhost_user_tx_thread_placement(vhost_user_intf_t *vui)
static void vhost_user_vring_init(vhost_user_intf_t *vui, u32 qid)
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
static vlib_node_registration_t vhost_user_send_interrupt_node
(constructor) VLIB_REGISTER_NODE (vhost_user_send_interrupt_node)
u32 * show_dev_instance_by_real_dev_instance
int vhost_user_create_if(vnet_main_t *vnm, vlib_main_t *vm, const char *sock_filename, u8 is_server, u32 *sw_if_index, u64 feature_mask, u8 renumber, u32 custom_dev_instance, u8 *hwaddr)
u16 device_index
The interface queue index (Not the virtio vring idx)
vhost_user_intf_t * vhost_user_interfaces
static void mhash_init_c_string(mhash_t *h, uword n_value_bytes)
static_always_inline uword vlib_get_thread_index(void)
static clib_error_t * vhost_user_kickfd_read_ready(unix_file_t *uf)
#define CLIB_PREFETCH(addr, size, type)
static_always_inline void vhost_user_log_dirty_pages_2(vhost_user_intf_t *vui, u64 addr, u64 len, u8 is_host_address)
#define vec_free(V)
Free vector's memory (no header).
static int vhost_user_vring_try_lock(vhost_user_intf_t *vui, u32 qid)
Try once to lock the vring.
#define VLIB_MAIN_LOOP_EXIT_FUNCTION(x)
int vhost_user_modify_if(vnet_main_t *vnm, vlib_main_t *vm, const char *sock_filename, u8 is_server, u32 sw_if_index, u64 feature_mask, u8 renumber, u32 custom_dev_instance)
#define clib_warning(format, args...)
#define VLIB_BUFFER_IS_TRACED
#define clib_memcpy(a, b, c)
#define VHOST_MEMORY_MAX_NREGIONS
static_always_inline void * map_guest_mem(vhost_user_intf_t *vui, uword addr, u32 *hint)
clib_error_t * debug_vhost_user_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
static u32 vlib_buffer_alloc_from_free_list(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u32 free_list_index)
Allocate buffers from specific freelist into supplied array.
void vlib_worker_thread_barrier_sync(vlib_main_t *vm)
static uword vhost_user_tx(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
u16 first_desc_len
Length of the first data descriptor. (NOTE(review): the extracted description "Runtime queue flags" belongs to the adjacent virtio_ring_flags field, not first_desc_len — verify against the originating header.)
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD
#define VLIB_BUFFER_DATA_SIZE
static void vhost_user_input_rewind_buffers(vlib_main_t *vm, vhost_cpu_t *cpu, vlib_buffer_t *b_head)
#define VLIB_CLI_COMMAND(x,...)
#define VNET_SW_INTERFACE_FLAG_ADMIN_UP
u32 max_l3_packet_bytes[VLIB_N_RX_TX]
u32 rx_buffers[VHOST_USER_RX_BUFFERS_N]
uword unformat_ethernet_address(unformat_input_t *input, va_list *args)
#define VHOST_USER_RX_BUFFER_STARVATION
static uword * mhash_get(mhash_t *h, const void *key)
static long get_huge_page_size(int fd)
u32 next_buffer
Next buffer for this linked-list of buffers.
clib_error_t * vhost_user_delete_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
static void clib_mem_free(void *p)
#define VIRTQ_DESC_F_NEXT
volatile u32 * vring_locks[VHOST_VRING_MAX_N]
clib_error_t * ethernet_register_interface(vnet_main_t *vnm, u32 dev_class_index, u32 dev_instance, u8 *address, u32 *hw_if_index_return, ethernet_flag_change_function_t flag_change)
mhash_t if_index_by_sock_name
#define clib_error_report(e)
static void * vlib_frame_args(vlib_frame_t *f)
Get pointer to frame scalar data.
static u8 * format_vhost_trace(u8 *s, va_list *va)
#define VHOST_USER_RX_COPY_THRESHOLD
static void vhost_user_vring_lock(vhost_user_intf_t *vui, u32 qid)
Spin until the vring is successfully locked.
static vlib_main_t * vlib_get_main(void)
static void vhost_user_rx_thread_placement()
Unassign existing interface/queue to thread mappings and re-assign new interface/queue to thread mappings.
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
static void vhost_user_update_iface_state(vhost_user_intf_t *vui)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
#define foreach_vhost_user_tx_func_error
void * region_mmap_addr[VHOST_MEMORY_MAX_NREGIONS]
static clib_error_t * vhost_user_socksvr_accept_ready(unix_file_t *uf)
static u32 vhost_user_if_input(vlib_main_t *vm, vhost_user_main_t *vum, vhost_user_intf_t *vui, u16 qid, vlib_node_runtime_t *node, vnet_hw_interface_rx_mode mode)
static clib_error_t * vhost_user_config(vlib_main_t *vm, unformat_input_t *input)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
#define VRING_USED_F_NO_NOTIFY
#define VHOST_USER_RX_BUFFERS_N
int vhost_user_intf_ready(vhost_user_intf_t *vui)
Returns whether at least one TX and one RX vring are enabled.
vhost_user_vring_t vrings[VHOST_VRING_MAX_N]
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
#define VHOST_VRING_MAX_N
#define VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE
#define clib_unix_warning(format, args...)
vlib_node_registration_t vhost_user_input_node
(constructor) VLIB_REGISTER_NODE (vhost_user_input_node)
#define DBG_SOCK(args...)
u32 vhost_user_rx_discard_packet(vlib_main_t *vm, vhost_user_intf_t *vui, vhost_user_vring_t *txvq, u32 discard_max)
Try to discard packets from the tx ring (VPP RX path).
static vhost_user_main_t vhost_user_main
static void * clib_mem_alloc_aligned(uword size, uword align)
#define VLIB_NODE_FUNCTION_MULTIARCH(node, fn)
static u32 random_u32(u32 *seed)
32-bit random number generator
void vlib_worker_thread_barrier_release(vlib_main_t *vm)
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
#define VLIB_REGISTER_NODE(x,...)
static vlib_thread_main_t * vlib_get_thread_main()
#define VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE
u64 region_guest_addr_lo[VHOST_MEMORY_MAX_NREGIONS]
static clib_error_t * vhost_user_callfd_read_ready(unix_file_t *uf)
#define vec_foreach(var, vec)
Vector iterator.
#define foreach_vhost_user_input_func_error
u16 flags
Copy of main node flags.
#define CLIB_MEMORY_BARRIER()
virtio_net_hdr_mrg_rxbuf_t tx_headers[VLIB_FRAME_SIZE]
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
#define vec_validate_init_empty(V, I, INIT)
Make sure vector is long enough for given index and initialize empty space (no header, unspecified alignment)
#define CLIB_CACHE_LINE_BYTES
u32 flags
buffer flags: VLIB_BUFFER_IS_TRACED: trace this buffer.
static_always_inline u32 vhost_user_tx_copy(vhost_user_intf_t *vui, vhost_copy_t *cpy, u16 copy_len, u32 *map_hint)
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
static clib_error_t * vhost_user_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_interface_rx_mode mode)
#define VHOST_USER_PROTOCOL_F_MQ
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
clib_error_t * vhost_user_connect_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
VNET_HW_INTERFACE_CLASS(vhost_interface_class, static)
int dont_dump_vhost_user_memory
#define VHOST_VRING_IDX_RX(qid)
static void unix_file_del(unix_main_t *um, unix_file_t *f)