FD.io VPP  v21.06
Vector Packet Processing
vhost_user_inline.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #ifndef __VIRTIO_VHOST_USER_INLINE_H__
16 #define __VIRTIO_VHOST_USER_INLINE_H__
17 /* vhost-user inline functions */
18 #include <vppinfra/elog.h>
19 
/*
 * map_guest_mem body: translate a guest physical address into the host
 * virtual address inside the matching mmap'd memory region, caching the
 * last-hit region index in *hint so the common case is one range check.
 * Returns 0 (NULL) and resets *hint when no region covers the address.
 * NOTE(review): this extraction dropped the function signature and at
 * least one statement (e.g. original line 62 completing the
 * count_trailing_zeros() expression, and line 149 with the ELOG_DATA()
 * assignment of 'ed') — compare against the upstream vhost_user_inline.h.
 */
22 {
23  int i = *hint;
 /* Fast path: the hinted region from the previous lookup still matches. */
24  if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
25  ((vui->regions[i].guest_phys_addr +
26  vui->regions[i].memory_size) > addr)))
27  {
28  return (void *) (vui->region_mmap_addr[i] + addr -
29  vui->regions[i].guest_phys_addr);
30  }
 /* Vectorized search: test addr against all region [lo, hi) bounds at once
  * using 64-bit SIMD compares over region_guest_addr_lo/hi. */
31 #if __SSE4_2__
32  __m128i rl, rh, al, ah, r;
 /* addr+1 so a signed 'greater-than' compare implements lo <= addr. */
33  al = _mm_set1_epi64x (addr + 1);
34  ah = _mm_set1_epi64x (addr);
35
36  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
37  rl = _mm_cmpgt_epi64 (al, rl);
38  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
39  rh = _mm_cmpgt_epi64 (rh, ah);
40  r = _mm_and_si128 (rl, rh);
41
42  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
43  rl = _mm_cmpgt_epi64 (al, rl);
44  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
45  rh = _mm_cmpgt_epi64 (rh, ah);
46  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);
47
48  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
49  rl = _mm_cmpgt_epi64 (al, rl);
50  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
51  rh = _mm_cmpgt_epi64 (rh, ah);
52  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);
53
54  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
55  rl = _mm_cmpgt_epi64 (al, rl);
56  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
57  rh = _mm_cmpgt_epi64 (rh, ah);
58  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);
59
 /* Pack the per-region match bytes together, then take the index of the
  * first set bit as the matching region. */
60  r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
 /* NOTE(review): the closing argument of count_trailing_zeros() (a
  * sentinel OR'd in so a no-match yields i >= nregions) is missing from
  * this extraction — verify against the original source. */
61  i = count_trailing_zeros (_mm_movemask_epi8 (r) |
63
64  if (i < vui->nregions)
65  {
66  *hint = i;
67  return (void *) (vui->region_mmap_addr[i] + addr -
68  vui->regions[i].guest_phys_addr);
69  }
 /* NEON variant: same range test, two regions per 128-bit compare, with a
  * match bitmask accumulated in u32 and early exit after each pair. */
70 #elif __aarch64__ && __ARM_NEON
71  uint64x2_t al, ah, rl, rh, r;
72  uint32_t u32 = 0;
73
74  al = vdupq_n_u64 (addr + 1);
75  ah = vdupq_n_u64 (addr);
76
77  /*First Iteration */
78  rl = vld1q_u64 (&vui->region_guest_addr_lo[0]);
79  rl = vcgtq_u64 (al, rl);
80  rh = vld1q_u64 (&vui->region_guest_addr_hi[0]);
81  rh = vcgtq_u64 (rh, ah);
82  r = vandq_u64 (rl, rh);
83  u32 |= (vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1);
84  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 1);
85
86  if (u32)
87  {
88  i = count_trailing_zeros (u32);
89  goto vhost_map_guest_mem_done;
90  }
91
92  /*Second Iteration */
93  rl = vld1q_u64 (&vui->region_guest_addr_lo[2]);
94  rl = vcgtq_u64 (al, rl);
95  rh = vld1q_u64 (&vui->region_guest_addr_hi[2]);
96  rh = vcgtq_u64 (rh, ah);
97  r = vandq_u64 (rl, rh);
98  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 2);
99  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 3);
100
101  if (u32)
102  {
103  i = count_trailing_zeros (u32);
104  goto vhost_map_guest_mem_done;
105  }
106
107  /*Third Iteration */
108  rl = vld1q_u64 (&vui->region_guest_addr_lo[4]);
109  rl = vcgtq_u64 (al, rl);
110  rh = vld1q_u64 (&vui->region_guest_addr_hi[4]);
111  rh = vcgtq_u64 (rh, ah);
112  r = vandq_u64 (rl, rh);
113  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 6);
114  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 7);
115
 /* NOTE(review): original line 116 (presumably the i = count_trailing_zeros
  * fall-through for the third iteration) is missing from this extraction. */
117
118 vhost_map_guest_mem_done:
119  if (i < vui->nregions)
120  {
121  *hint = i;
122  return (void *) (vui->region_mmap_addr[i] + addr -
123  vui->regions[i].guest_phys_addr);
124  }
 /* Scalar fallback: linear scan over all regions. */
125 #else
126  for (i = 0; i < vui->nregions; i++)
127  {
128  if ((vui->regions[i].guest_phys_addr <= addr) &&
129  ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
130  addr))
131  {
132  *hint = i;
133  return (void *) (vui->region_mmap_addr[i] + addr -
134  vui->regions[i].guest_phys_addr);
135  }
136  }
137 #endif
 /* No region matched: event-log the failure, reset the hint, return NULL. */
138  /* *INDENT-OFF* */
139  ELOG_TYPE_DECLARE (el) =
140  {
141  .format = "failed to map guest mem addr %lx",
142  .format_args = "i8",
143  };
144  /* *INDENT-ON* */
145  struct
146  {
147  uword addr;
148  } *ed;
 /* NOTE(review): the ELOG_DATA() call that assigns 'ed' (original line 149)
  * is missing here; as shown, 'ed' would be used uninitialized. */
150  ed->addr = addr;
151  *hint = 0;
152  return 0;
153 }
154 
/*
 * map_user_mem body: translate a vhost-user *userspace* address (the
 * address as seen by the guest's driver process) into a host virtual
 * address via a linear scan of the memory regions.  Returns 0 (NULL)
 * when no region covers the address.
 * NOTE(review): the function signature line is missing from this
 * extraction.
 */
157 {
158  int i;
159  for (i = 0; i < vui->nregions; i++)
160  {
161  if ((vui->regions[i].userspace_addr <= addr) &&
162  ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
163  addr))
164  {
165  return (void *) (vui->region_mmap_addr[i] + addr -
166  vui->regions[i].userspace_addr);
167  }
168  }
169  return 0;
170 }
171 
172 #define VHOST_LOG_PAGE 0x1000
173 
/*
 * vhost_user_log_dirty_pages_2: mark the VHOST_LOG_PAGE-sized pages
 * covering [addr, addr+len) as dirty in the shared log bitmap, for
 * live-migration dirty-page tracking.  No-op unless the log base is set
 * and VHOST_F_LOG_ALL was negotiated.  When is_host_address is set,
 * addr is first translated from a userspace address via map_user_mem().
 * NOTE(review): the first signature line and original line 193 (between
 * the range check and the page loop — possibly a memory barrier) are
 * missing from this extraction.
 */
176  u64 addr, u64 len, u8 is_host_address)
177 {
 /* Logging disabled or feature not negotiated: nothing to record. */
178  if (PREDICT_TRUE (vui->log_base_addr == 0
179  || !(vui->features & VIRTIO_FEATURE (VHOST_F_LOG_ALL))))
180  {
181  return;
182  }
183  if (is_host_address)
184  {
185  addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
186  }
 /* Refuse to write past the end of the log bitmap. */
187  if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
188  {
189  vu_log_debug (vui, "vhost_user_log_dirty_pages(): out of range\n");
190  return;
191  }
192
 /* Set one bit per touched VHOST_LOG_PAGE page. */
194  u64 page = addr / VHOST_LOG_PAGE;
195  while (page * VHOST_LOG_PAGE < addr + len)
196  {
197  ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
198  page++;
199  }
200 }
201 
202 
/*
 * Log a write to one member of the used ring as dirty, but only when the
 * vring actually has used-ring logging enabled (vq->log_used).  The page
 * address is computed from the guest address of the used ring plus the
 * member's offset within vring_used_t.
 */
203 #define vhost_user_log_dirty_ring(vui, vq, member) \
204  if (PREDICT_FALSE(vq->log_used)) { \
205  vhost_user_log_dirty_pages_2(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \
206  sizeof(vq->used->member), 0); \
207  }
208 
/*
 * format_vhost_trace: vlib format callback that renders a vhost_trace_t
 * packet-trace record (interface name, queue id, virtio ring flags,
 * first descriptor length, virtio_net_hdr flags/gso_type, and — for the
 * 12-byte mergeable-rx header — num_buffers).
 * NOTE(review): this extraction dropped the return-type line and the
 * local declarations of 'vum', 'vui', 'vnm' and 'sw' (original lines
 * 214-218), as well as the foreach-macro invocation between the _()
 * definition and #undef (line 235); the body below uses those names.
 */
210 format_vhost_trace (u8 * s, va_list * va)
211 {
212  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
213  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
216  vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
219  u32 indent;
220
 /* The interface may have been deleted between capture and display. */
221  if (pool_is_free (vum->vhost_user_interfaces, vui))
222  {
223  s = format (s, "vhost-user interface is deleted");
224  return s;
225  }
226  sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
227  indent = format_get_indent (s);
228  s = format (s, "%U %U queue %d\n", format_white_space, indent,
229  format_vnet_sw_interface_name, vnm, sw, t->qid);
230
231  s = format (s, "%U virtio flags:\n", format_white_space, indent);
232 #define _(n,i,st) \
233  if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
234  s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
236 #undef _
237  s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
238  format_white_space, indent, t->first_desc_len);
239
240  s = format (s, "%U flags 0x%02x gso_type %u\n",
241  format_white_space, indent,
242  t->hdr.hdr.flags, t->hdr.hdr.gso_type);
243
 /* 12-byte header means mergeable rx buffers: num_buffers is valid. */
244  if (vui->virtio_net_hdr_sz == 12)
245  s = format (s, "%U num_buff %u",
246  format_white_space, indent, t->hdr.num_buffers);
247
248  return s;
249 }
250 
/*
 * vhost_user_is_packed_ring_supported body: nonzero iff the negotiated
 * feature bits include VIRTIO_F_RING_PACKED (packed virtqueue layout).
 * NOTE(review): signature line missing from this extraction.
 */
253 {
254  return (vui->features & VIRTIO_FEATURE (VIRTIO_F_RING_PACKED));
255 }
256 
/*
 * vhost_user_is_event_idx_supported body: nonzero iff the negotiated
 * feature bits include VIRTIO_RING_F_EVENT_IDX (used/avail event-index
 * based interrupt suppression).
 * NOTE(review): signature line missing from this extraction.
 */
259 {
260  return (vui->features & VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX));
261 }
262 
/*
 * vhost_user_kick body: signal the guest by writing an 8-byte counter
 * value to the vring's call eventfd, then reset the interrupt-coalescing
 * state (n_since_last_int and the coalesce deadline).
 * NOTE(review): this extraction dropped the signature, the declaration
 * of 'vum' (original line 266) and the warning-macro line preceding the
 * error-message arguments at original line 275.
 */
265 {
 /* eventfd semantics: any nonzero 8-byte write signals the guest. */
267  u64 x = 1;
268  int fd = UNIX_GET_FD (vq->callfd_idx);
269  int rv;
270
271  rv = write (fd, &x, sizeof (x));
272  if (PREDICT_FALSE (rv <= 0))
273  {
275  ("Error: Could not write to unix socket for callfd %d", fd);
276  return;
277  }
278
 /* Kick delivered: restart interrupt coalescing from now. */
279  vq->n_since_last_int = 0;
280  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
281 }
282 
/*
 * vhost_user_used_event_idx body: read the used-event index, stored by
 * convention immediately after the last used-ring entry (index
 * qsz_mask + 1).  Read through a volatile pointer because the guest
 * updates it concurrently.
 * NOTE(review): signature line missing from this extraction.
 */
285 {
286  volatile u16 *event_idx = (u16 *) & (vq->used->ring[vq->qsz_mask + 1]);
287
288  return *event_idx;
289 }
290 
/*
 * vhost_user_avail_event_idx body: read the avail-event index, stored by
 * convention immediately after the last avail-ring entry (index
 * qsz_mask + 1).  Volatile read: the guest updates it concurrently.
 * NOTE(review): signature line missing from this extraction.
 */
293 {
294  volatile u16 *event_idx = (u16 *) & (vq->avail->ring[vq->qsz_mask + 1]);
295
296  return *event_idx;
297 }
298 
/*
 * vhost_user_need_event body: the standard virtio vring_need_event()
 * test — true when event_idx lies in the half-open window
 * (old_idx, new_idx], computed with wrap-safe u16 arithmetic.
 * NOTE(review): signature line missing from this extraction.
 */
301 {
302  return ((u16) (new_idx - event_idx - 1) < (u16) (new_idx - old_idx));
303 }
304 
/*
 * vhost_user_send_call_event_idx body: interrupt the guest only when the
 * event-index test says it asked for one (or unconditionally on the very
 * first kick); otherwise just refresh the coalescing state.
 * NOTE(review): this extraction dropped the signature and the
 * declarations of 'vum' and 'event_idx' (original lines 308-310);
 * 'event_idx' is presumably read via vhost_user_used_event_idx() —
 * verify against the original source.
 */
307 {
309  u8 first_kick = vq->first_kick;
311
312  vq->first_kick = 1;
313  if (vhost_user_need_event (event_idx, vq->last_used_idx, vq->last_kick) ||
314  PREDICT_FALSE (!first_kick))
315  {
316  vhost_user_kick (vm, vq);
317  vq->last_kick = event_idx;
318  }
319  else
320  {
 /* Guest did not request an interrupt: only reset coalescing. */
321  vq->n_since_last_int = 0;
322  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
323  }
324 }
325 
/*
 * vhost_user_send_call_event_idx_packed: packed-ring variant of the
 * event-index kick decision.  Decodes the guest's driver event
 * (off_wrap: low 15 bits index, top bit wrap counter), normalizes the
 * indices into the same wrap epoch, then applies the need-event test;
 * if the guest asked for VRING_EVENT_F_DISABLE/ENABLE instead of
 * F_DESC, falls back to an unconditional kick.
 * NOTE(review): the first signature line, the 'vum' declaration
 * (original line 330) and original line 339 are missing from this
 * extraction.
 */
328  vhost_user_vring_t * vq)
329 {
331  u8 first_kick = vq->first_kick;
332  u16 off_wrap;
333  u16 event_idx;
334  u16 new_idx = vq->last_used_idx;
335  u16 old_idx = vq->last_kick;
336
337  if (PREDICT_TRUE (vq->avail_event->flags == VRING_EVENT_F_DESC))
338  {
 /* off_wrap: bits 0-14 = descriptor index, bit 15 = wrap counter. */
340  off_wrap = vq->avail_event->off_wrap;
341  event_idx = off_wrap & 0x7fff;
 /* Different wrap epoch: shift the event index back one ring. */
342  if (vq->used_wrap_counter != (off_wrap >> 15))
343  event_idx -= (vq->qsz_mask + 1);
344
345  if (new_idx <= old_idx)
346  old_idx -= (vq->qsz_mask + 1);
347
348  vq->first_kick = 1;
349  vq->last_kick = event_idx;
350  if (vhost_user_need_event (event_idx, new_idx, old_idx) ||
351  PREDICT_FALSE (!first_kick))
352  vhost_user_kick (vm, vq);
353  else
354  {
355  vq->n_since_last_int = 0;
356  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
357  }
358  }
359  else
 /* Not using descriptor-based events: kick unconditionally. */
360  vhost_user_kick (vm, vq);
361 }
362 
/*
 * vhost_user_send_call: top-level "interrupt the guest" dispatcher.
 * NOTE(review): most of this body (original lines 367-373) is missing
 * from this extraction; based on what remains, it appears to branch to
 * one of the event-idx variants when supported and otherwise fall back
 * to an unconditional vhost_user_kick() — confirm against the original
 * vhost_user_inline.h before relying on this description.
 */
365  vhost_user_vring_t * vq)
366 {
368  {
371  else
373  }
374  else
375  vhost_user_kick (vm, vq);
376 }
377 
/*
 * vui_is_link_up body: the link is up when the interface is both
 * administratively up and the vhost-user session is ready.
 * NOTE(review): signature line missing from this extraction.
 */
380 {
381  return vui->admin_up && vui->is_ready;
382 }
383 
/*
 * vhost_user_update_gso_interface_count body: maintain the global count
 * of GSO-enabled vhost-user interfaces — increment on add, decrement
 * (asserting it never goes negative) on remove.  No-op for interfaces
 * without GSO enabled.
 * NOTE(review): the signature and the 'vum' declaration (original line
 * 387) are missing from this extraction.
 */
386 {
388
389  if (vui->enable_gso)
390  {
391  if (add)
392  {
393  vum->gso_count++;
394  }
395  else
396  {
397  ASSERT (vum->gso_count > 0);
398  vum->gso_count--;
399  }
400  }
401 }
402 
/*
 * vhost_user_packed_desc_available body: a packed-ring descriptor is
 * available to the device when its AVAIL flag bit matches the ring's
 * current avail wrap counter (per the packed virtqueue spec).
 * NOTE(review): signature line missing from this extraction.
 */
405 {
406  return (((vring->packed_desc[idx].flags & VRING_DESC_F_AVAIL) ==
407  vring->avail_wrap_counter));
408 }
409 
/*
 * vhost_user_advance_last_avail_idx body: advance last_avail_idx by one,
 * wrapping to 0 at the end of the ring.
 * NOTE(review): the signature and original line 416 (inside the wrap
 * branch — presumably toggling avail_wrap_counter, mirroring
 * vhost_user_advance_last_used_idx below) are missing from this
 * extraction.
 */
412 {
413  vring->last_avail_idx++;
414  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
415  {
417  vring->last_avail_idx = 0;
418  }
419 }
420 
/*
 * vhost_user_advance_last_avail_table_idx: advance last_avail_idx past a
 * (possibly chained) packed descriptor.  For a chain, skip every
 * descriptor that still carries the NEXT flag before the final advance.
 * NOTE(review): the first signature line and original lines 432-433 and
 * 436 (the loop body advancing over NEXT-flagged descriptors and the
 * final advance call) are missing from this extraction — the while
 * condition below is visibly truncated.
 */
423  vhost_user_vring_t * vring,
424  u8 chained)
425 {
426  if (chained)
427  {
428  vring_packed_desc_t *desc_table = vring->packed_desc;
429
430  /* pick up the slot of the next avail idx */
431  while (desc_table[vring->last_avail_idx & vring->qsz_mask].flags &
434  }
435
437 }
438 
/*
 * vhost_user_undo_advanced_last_avail_idx body: step last_avail_idx back
 * by one, wrapping from 0 back to qsz_mask (the last slot).
 * NOTE(review): the signature and original line 443 (inside the first
 * PREDICT_FALSE branch — presumably toggling avail_wrap_counter when
 * crossing the ring boundary backwards) are missing from this
 * extraction.
 */
441 {
442  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
444
445  if (PREDICT_FALSE (vring->last_avail_idx == 0))
446  vring->last_avail_idx = vring->qsz_mask;
447  else
448  vring->last_avail_idx--;
449 }
450 
/*
 * vhost_user_dequeue_descs: roll back the descriptors consumed for a
 * mergeable-rx-buffer packet — subtract the extra (num_buffers - 1)
 * buffers from the processed count and undo the index advances.
 * NOTE(review): the first signature line and the loop body at original
 * line 460 (presumably a call to
 * vhost_user_undo_advanced_last_avail_idx()) are missing from this
 * extraction.
 */
453  virtio_net_hdr_mrg_rxbuf_t * hdr,
454  u16 * n_descs_processed)
455 {
456  u16 i;
457
458  *n_descs_processed -= (hdr->num_buffers - 1);
459  for (i = 0; i < hdr->num_buffers - 1; i++)
461 }
462 
/*
 * vhost_user_dequeue_chained_descs: roll back all processed descriptors
 * of a chain, decrementing the count to zero.
 * NOTE(review): the first signature line and the statement at original
 * line 469 (presumably vhost_user_undo_advanced_last_avail_idx()) are
 * missing from this extraction.
 */
465  u16 * n_descs_processed)
466 {
467  while (*n_descs_processed)
468  {
470  (*n_descs_processed)--;
471  }
472 }
473 
/*
 * vhost_user_advance_last_used_idx body: advance last_used_idx by one;
 * on wrapping past the end of the ring, flip used_wrap_counter and reset
 * the index to 0 (packed-ring wrap semantics).
 * NOTE(review): signature line missing from this extraction.
 */
476 {
477  vring->last_used_idx++;
478  if (PREDICT_FALSE ((vring->last_used_idx & vring->qsz_mask) == 0))
479  {
480  vring->used_wrap_counter ^= 1;
481  vring->last_used_idx = 0;
482  }
483 }
484 
485 #endif
486 
487 /*
488  * fd.io coding-style-patch-verification: ON
489  *
490  * Local Variables:
491  * eval: (c-set-style "gnu")
492  * End:
493  */
u32 nregions
Definition: vhost_user.h:123
static_always_inline void vhost_user_send_call(vlib_main_t *vm, vhost_user_intf_t *vui, vhost_user_vring_t *vq)
#define CLIB_UNUSED(x)
Definition: clib.h:90
virtio_net_hdr_mrg_rxbuf_t hdr
Length of the first data descriptor.
Definition: vhost_user.h:306
vnet_hw_if_output_node_runtime_t * r
u64 region_guest_addr_hi[VHOST_MEMORY_MAX_NREGIONS]
Definition: vhost_user.h:261
#define CLIB_COMPILER_BARRIER()
Definition: clib.h:134
#define PREDICT_TRUE(x)
Definition: clib.h:125
unsigned long u64
Definition: types.h:89
vring_used_elem_t ring[0]
Definition: virtio_std.h:121
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:325
vring_packed_desc_t * packed_desc
Definition: vhost_user.h:190
vring_avail_t * avail
Definition: vhost_user.h:194
static_always_inline void vhost_user_send_call_event_idx(vlib_main_t *vm, vhost_user_vring_t *vq)
static u32 format_get_indent(u8 *s)
Definition: format.h:72
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
static_always_inline void vhost_user_advance_last_avail_idx(vhost_user_vring_t *vring)
#define pool_is_free(P, E)
Use free bitmap to query whether given element is free.
Definition: pool.h:294
vring_used_t * used
Definition: vhost_user.h:199
vhost_vring_addr_t addr
Definition: vhost_user.h:130
unsigned char u8
Definition: types.h:56
static_always_inline void vhost_user_dequeue_descs(vhost_user_vring_t *rxvq, virtio_net_hdr_mrg_rxbuf_t *hdr, u16 *n_descs_processed)
#define count_trailing_zeros(x)
Definition: clib.h:161
#define VIRTIO_FEATURE(X)
Definition: virtio_std.h:69
unsigned int u32
Definition: types.h:88
#define vu_log_debug(dev, f,...)
Definition: vhost_user.h:45
static_always_inline u64 vhost_user_is_event_idx_supported(vhost_user_intf_t *vui)
#define static_always_inline
Definition: clib.h:112
#define UNIX_GET_FD(unixfd_idx)
Definition: vhost_user.h:65
#define event_idx
Definition: tls_async.c:41
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
void * log_base_addr
Definition: vhost_user.h:278
static_always_inline void * map_guest_mem(vhost_user_intf_t *vui, uword addr, u32 *hint)
description fragment has unexpected format
Definition: map.api:433
vnet_main_t * vnet_get_main(void)
static_always_inline u8 * format_vhost_trace(u8 *s, va_list *va)
int __clib_unused rv
Definition: application.c:491
static_always_inline void vhost_user_kick(vlib_main_t *vm, vhost_user_vring_t *vq)
format_function_t format_vnet_sw_interface_name
static_always_inline void vhost_user_dequeue_chained_descs(vhost_user_vring_t *rxvq, u16 *n_descs_processed)
unsigned short u16
Definition: types.h:57
#define VHOST_LOG_PAGE
#define ELOG_DATA(em, f)
Definition: elog.h:484
#define PREDICT_FALSE(x)
Definition: clib.h:124
vhost_user_main_t vhost_user_main
Definition: vhost_user.c:55
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS]
Definition: vhost_user.h:258
static_always_inline u8 vui_is_link_up(vhost_user_intf_t *vui)
u8 len
Definition: ip_types.api:103
static_always_inline void vhost_user_update_gso_interface_count(vhost_user_intf_t *vui, u8 add)
The fine-grained event logger allows lightweight, thread-safe event logging at minimum cost...
u16 device_index
The interface queue index (Not the virtio vring idx)
Definition: vhost_user.h:303
vhost_user_intf_t * vhost_user_interfaces
Definition: vhost_user.h:332
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:261
static_always_inline u64 vhost_user_is_packed_ring_supported(vhost_user_intf_t *vui)
#define ELOG_TYPE_DECLARE(f)
Definition: elog.h:442
u16 first_desc_len
Runtime queue flags.
Definition: vhost_user.h:305
static_always_inline u8 vhost_user_packed_desc_available(vhost_user_vring_t *vring, u16 idx)
static_always_inline void vhost_user_advance_last_avail_table_idx(vhost_user_intf_t *vui, vhost_user_vring_t *vring, u8 chained)
static_always_inline void vhost_user_send_call_event_idx_packed(vlib_main_t *vm, vhost_user_vring_t *vq)
static_always_inline u16 vhost_user_used_event_idx(vhost_user_vring_t *vq)
#define ASSERT(truth)
static_always_inline void vhost_user_undo_advanced_last_avail_idx(vhost_user_vring_t *vring)
vring_desc_event_t * avail_event
Definition: vhost_user.h:195
static uword pointer_to_uword(const void *p)
Definition: types.h:131
#define VRING_DESC_F_NEXT
Definition: virtio_std.h:73
static_always_inline u16 vhost_user_need_event(u16 event_idx, u16 new_idx, u16 old_idx)
void * region_mmap_addr[VHOST_MEMORY_MAX_NREGIONS]
Definition: vhost_user.h:259
static_always_inline u16 vhost_user_avail_event_idx(vhost_user_vring_t *vq)
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
u64 uword
Definition: types.h:112
#define clib_unix_warning(format, args...)
Definition: error.h:68
#define VHOST_MEMORY_MAX_NREGIONS
Definition: vhost_user.h:23
static_always_inline void vhost_user_log_dirty_pages_2(vhost_user_intf_t *vui, u64 addr, u64 len, u8 is_host_address)
u64 region_guest_addr_lo[VHOST_MEMORY_MAX_NREGIONS]
Definition: vhost_user.h:260
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:137
static_always_inline void vhost_user_advance_last_used_idx(vhost_user_vring_t *vring)
static_always_inline void * map_user_mem(vhost_user_intf_t *vui, uword addr)
elog_main_t elog_main
Definition: main.h:300
vlib_global_main_t vlib_global_main
Definition: main.c:1786
#define VRING_DESC_F_AVAIL
Definition: virtio_std.h:77