/*
 * FD.io VPP v18.07-34-g55fbdb9 — Vector Packet Processing
 * vhost_user_inline.h — vhost-user inline helper functions
 * (recovered from generated documentation page)
 */
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #ifndef __VIRTIO_VHOST_USER_INLINE_H__
16 #define __VIRTIO_VHOST_USER_INLINE_H__
17 /* vhost-user inline functions */
18 
21 {
22  int i = *hint;
23  if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
24  ((vui->regions[i].guest_phys_addr +
25  vui->regions[i].memory_size) > addr)))
26  {
27  return (void *) (vui->region_mmap_addr[i] + addr -
28  vui->regions[i].guest_phys_addr);
29  }
30 #if __SSE4_2__
31  __m128i rl, rh, al, ah, r;
32  al = _mm_set1_epi64x (addr + 1);
33  ah = _mm_set1_epi64x (addr);
34 
35  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
36  rl = _mm_cmpgt_epi64 (al, rl);
37  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
38  rh = _mm_cmpgt_epi64 (rh, ah);
39  r = _mm_and_si128 (rl, rh);
40 
41  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
42  rl = _mm_cmpgt_epi64 (al, rl);
43  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
44  rh = _mm_cmpgt_epi64 (rh, ah);
45  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);
46 
47  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
48  rl = _mm_cmpgt_epi64 (al, rl);
49  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
50  rh = _mm_cmpgt_epi64 (rh, ah);
51  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);
52 
53  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
54  rl = _mm_cmpgt_epi64 (al, rl);
55  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
56  rh = _mm_cmpgt_epi64 (rh, ah);
57  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);
58 
59  r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
60  i = count_trailing_zeros (_mm_movemask_epi8 (r) |
62 
63  if (i < vui->nregions)
64  {
65  *hint = i;
66  return (void *) (vui->region_mmap_addr[i] + addr -
67  vui->regions[i].guest_phys_addr);
68  }
69 #elif __aarch64__ && __ARM_NEON
70  uint64x2_t al, ah, rl, rh, r;
71  uint32_t u32 = 0;
72 
73  al = vdupq_n_u64 (addr + 1);
74  ah = vdupq_n_u64 (addr);
75 
76  /*First Iteration */
77  rl = vld1q_u64 (&vui->region_guest_addr_lo[0]);
78  rl = vcgtq_u64 (al, rl);
79  rh = vld1q_u64 (&vui->region_guest_addr_hi[0]);
80  rh = vcgtq_u64 (rh, ah);
81  r = vandq_u64 (rl, rh);
82  u32 |= (vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1);
83  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 1);
84 
85  if (u32)
86  {
87  i = count_trailing_zeros (u32);
88  goto vhost_map_guest_mem_done;
89  }
90 
91  /*Second Iteration */
92  rl = vld1q_u64 (&vui->region_guest_addr_lo[2]);
93  rl = vcgtq_u64 (al, rl);
94  rh = vld1q_u64 (&vui->region_guest_addr_hi[2]);
95  rh = vcgtq_u64 (rh, ah);
96  r = vandq_u64 (rl, rh);
97  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 2);
98  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 3);
99 
100  if (u32)
101  {
102  i = count_trailing_zeros (u32);
103  goto vhost_map_guest_mem_done;
104  }
105 
106  /*Third Iteration */
107  rl = vld1q_u64 (&vui->region_guest_addr_lo[4]);
108  rl = vcgtq_u64 (al, rl);
109  rh = vld1q_u64 (&vui->region_guest_addr_hi[4]);
110  rh = vcgtq_u64 (rh, ah);
111  r = vandq_u64 (rl, rh);
112  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 6);
113  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 7);
114 
116 
117 vhost_map_guest_mem_done:
118  if (i < vui->nregions)
119  {
120  *hint = i;
121  return (void *) (vui->region_mmap_addr[i] + addr -
122  vui->regions[i].guest_phys_addr);
123  }
124 #else
125  for (i = 0; i < vui->nregions; i++)
126  {
127  if ((vui->regions[i].guest_phys_addr <= addr) &&
128  ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
129  addr))
130  {
131  *hint = i;
132  return (void *) (vui->region_mmap_addr[i] + addr -
133  vui->regions[i].guest_phys_addr);
134  }
135  }
136 #endif
137  DBG_VQ ("failed to map guest mem addr %llx", addr);
138  *hint = 0;
139  return 0;
140 }
141 
144 {
145  int i;
146  for (i = 0; i < vui->nregions; i++)
147  {
148  if ((vui->regions[i].userspace_addr <= addr) &&
149  ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
150  addr))
151  {
152  return (void *) (vui->region_mmap_addr[i] + addr -
153  vui->regions[i].userspace_addr);
154  }
155  }
156  return 0;
157 }
158 
159 #define VHOST_LOG_PAGE 0x1000
160 
163  u64 addr, u64 len, u8 is_host_address)
164 {
165  if (PREDICT_TRUE (vui->log_base_addr == 0
166  || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))
167  {
168  return;
169  }
170  if (is_host_address)
171  {
172  addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
173  }
174  if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
175  {
176  DBG_SOCK ("vhost_user_log_dirty_pages(): out of range\n");
177  return;
178  }
179 
181  u64 page = addr / VHOST_LOG_PAGE;
182  while (page * VHOST_LOG_PAGE < addr + len)
183  {
184  ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
185  page++;
186  }
187 }
188 
189 
/*
 * Log-dirty the page(s) covering one member of the vring's "used" ring
 * (address is guest-physical: log_guest_addr + offset of the member).
 * Wrapped in do { } while (0) so the macro is safe inside unbraced
 * if/else statements.
 */
#define vhost_user_log_dirty_ring(vui, vq, member) \
  do { \
    if (PREDICT_FALSE(vq->log_used)) { \
      vhost_user_log_dirty_pages_2(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \
				   sizeof(vq->used->member), 0); \
    } \
  } while (0)
195 
197 format_vhost_trace (u8 * s, va_list * va)
198 {
199  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
200  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
203  vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
205  t->device_index);
206 
208 
209  u32 indent = format_get_indent (s);
210 
211  s = format (s, "%U %U queue %d\n", format_white_space, indent,
212  format_vnet_sw_interface_name, vnm, sw, t->qid);
213 
214  s = format (s, "%U virtio flags:\n", format_white_space, indent);
215 #define _(n,i,st) \
216  if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
217  s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
219 #undef _
220  s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
221  format_white_space, indent, t->first_desc_len);
222 
223  s = format (s, "%U flags 0x%02x gso_type %u\n",
224  format_white_space, indent,
225  t->hdr.hdr.flags, t->hdr.hdr.gso_type);
226 
227  if (vui->virtio_net_hdr_sz == 12)
228  s = format (s, "%U num_buff %u",
229  format_white_space, indent, t->hdr.num_buffers);
230 
231  return s;
232 }
233 
236 {
238  u64 x = 1;
239  int fd = UNIX_GET_FD (vq->callfd_idx);
240  int rv;
241 
242  rv = write (fd, &x, sizeof (x));
243  if (rv <= 0)
244  {
246  ("Error: Could not write to unix socket for callfd %d", fd);
247  return;
248  }
249 
250  vq->n_since_last_int = 0;
251  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
252 }
253 
254 #endif
255 
256 /*
257  * fd.io coding-style-patch-verification: ON
258  *
259  * Local Variables:
260  * eval: (c-set-style "gnu")
261  * End:
262  */
/*
 * NOTE(review): everything below is Doxygen cross-reference residue from the
 * scraped documentation page, not part of the original source file.  It is
 * preserved here, fenced off in a comment, for reference only.
 *
 * u32 nregions
 * Definition: vhost_user.h:109
 * #define CLIB_UNUSED(x)
 * Definition: clib.h:79
 * #define DBG_VQ(args...)
 * Definition: vhost_user.h:60
 * virtio_net_hdr_mrg_rxbuf_t hdr
 * Length of the first data descriptor.
 * Definition: vhost_user.h:309
 * vnet_main_t * vnet_get_main(void)
 * Definition: misc.c:47
 * u64 region_guest_addr_hi[VHOST_MEMORY_MAX_NREGIONS]
 * Definition: vhost_user.h:275
 * #define PREDICT_TRUE(x)
 * Definition: clib.h:106
 * unsigned long u64
 * Definition: types.h:89
 * static f64 vlib_time_now(vlib_main_t *vm)
 * Definition: main.h:225
 * int i
 * static u32 format_get_indent(u8 *s)
 * Definition: format.h:72
 * static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
 * u8 * format(u8 *s, const char *fmt,...)
 * Definition: format.c:419
 * vhost_vring_addr_t addr
 * Definition: vhost_user.h:116
 * unsigned char u8
 * Definition: types.h:56
 * #define count_trailing_zeros(x)
 * Definition: clib.h:133
 * #define static_always_inline
 * Definition: clib.h:93
 * #define UNIX_GET_FD(unixfd_idx)
 * Definition: vhost_user.h:63
 * u8 * format_white_space(u8 *s, va_list *va)
 * Definition: std-formats.c:113
 * void * log_base_addr
 * Definition: vhost_user.h:285
 * static_always_inline void * map_guest_mem(vhost_user_intf_t *vui, uword addr, u32 *hint)
 * static_always_inline u8 * format_vhost_trace(u8 *s, va_list *va)
 * unsigned int u32
 * Definition: types.h:88
 * format_function_t format_vnet_sw_interface_name
 * #define pool_elt_at_index(p, i)
 * Returns pointer to element at given index.
 * Definition: pool.h:464
 * #define VHOST_LOG_PAGE
 * #define PREDICT_FALSE(x)
 * Definition: clib.h:105
 * vhost_user_main_t vhost_user_main
 * Definition: vhost_user.c:56
 * vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS]
 * Definition: vhost_user.h:272
 * u16 device_index
 * The interface queue index (Not the virtio vring idx)
 * Definition: vhost_user.h:306
 * vhost_user_intf_t * vhost_user_interfaces
 * Definition: vhost_user.h:333
 * #define DBG_SOCK(args...)
 * Definition: vhost_user.h:48
 * vlib_main_t * vm
 * Definition: buffer.c:294
 * u16 first_desc_len
 * Runtime queue flags.
 * Definition: vhost_user.h:308
 * static_always_inline void vhost_user_send_call(vlib_main_t *vm, vhost_user_vring_t *vq)
 * static uword pointer_to_uword(const void *p)
 * Definition: types.h:131
 * void * region_mmap_addr[VHOST_MEMORY_MAX_NREGIONS]
 * Definition: vhost_user.h:273
 * u64 uword
 * Definition: types.h:112
 * #define clib_unix_warning(format, args...)
 * Definition: error.h:68
 * #define VHOST_MEMORY_MAX_NREGIONS
 * Definition: vhost_user.h:19
 * static_always_inline void vhost_user_log_dirty_pages_2(vhost_user_intf_t *vui, u64 addr, u64 len, u8 is_host_address)
 * u64 region_guest_addr_lo[VHOST_MEMORY_MAX_NREGIONS]
 * Definition: vhost_user.h:274
 * #define CLIB_MEMORY_BARRIER()
 * Definition: clib.h:109
 * static_always_inline void * map_user_mem(vhost_user_intf_t *vui, uword addr)
 */