/*
 * FD.io VPP v18.10-32-g1161dda — Vector Packet Processing
 * device.c (memif plugin, TX path)
 */
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
17 
#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <memif/memif.h>
#include <memif/private.h>
/* TX error counters: each _(SYMBOL, "description") entry expands into an
 * enum value and a matching string below. */
#define foreach_memif_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(ROLLBACK, "no enough space in tx buffers")

typedef enum
{
#define _(f,s) MEMIF_TX_ERROR_##f,
  foreach_memif_tx_func_error
#undef _
    MEMIF_TX_N_ERROR,
} memif_tx_func_error_t;

/* Human-readable strings, indexed by memif_tx_func_error_t. */
static char *memif_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_memif_tx_func_error
#undef _
};
49 #ifndef CLIB_MARCH_VARIANT
50 u8 *
51 format_memif_device_name (u8 * s, va_list * args)
52 {
53  u32 dev_instance = va_arg (*args, u32);
54  memif_main_t *mm = &memif_main;
55  memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);
57 
59  s = format (s, "memif%lu/%lu", msf->socket_id, mif->id);
60  return s;
61 }
62 #endif
63 
64 static u8 *
65 format_memif_device (u8 * s, va_list * args)
66 {
67  u32 dev_instance = va_arg (*args, u32);
68  int verbose = va_arg (*args, int);
69  u32 indent = format_get_indent (s);
70 
71  s = format (s, "MEMIF interface");
72  if (verbose)
73  {
74  s = format (s, "\n%U instance %u", format_white_space, indent + 2,
75  dev_instance);
76  }
77  return s;
78 }
79 
80 static u8 *
81 format_memif_tx_trace (u8 * s, va_list * args)
82 {
83  s = format (s, "Unimplemented...");
84  return s;
85 }
86 
89  u16 buffer_offset, u16 buffer_vec_index)
90 {
91  memif_copy_op_t *co;
93  co->data = data;
94  co->data_len = len;
95  co->buffer_offset = buffer_offset;
96  co->buffer_vec_index = buffer_vec_index;
97 }
98 
101  vlib_frame_t * frame, memif_if_t * mif,
102  memif_ring_type_t type, memif_queue_t * mq,
104 {
105  memif_ring_t *ring;
106  u32 *buffers = vlib_frame_args (frame);
107  u32 n_left = frame->n_vectors;
108  u32 n_copy_op;
109  u16 ring_size, mask, slot, free_slots;
110  int n_retries = 5;
111  vlib_buffer_t *b0, *b1, *b2, *b3;
112  memif_copy_op_t *co;
113  memif_region_index_t last_region = ~0;
114  void *last_region_shm = 0;
115 
116  ring = mq->ring;
117  ring_size = 1 << mq->log2_ring_size;
118  mask = ring_size - 1;
119 
120 retry:
121 
122  free_slots = ring->tail - mq->last_tail;
123  mq->last_tail += free_slots;
124  slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;
125 
126  if (type == MEMIF_RING_S2M)
127  free_slots = ring_size - ring->head + mq->last_tail;
128  else
129  free_slots = ring->head - ring->tail;
130 
131  while (n_left && free_slots)
132  {
133  memif_desc_t *d0;
134  void *mb0;
135  i32 src_off;
136  u32 bi0, dst_off, src_left, dst_left, bytes_to_copy;
137  u32 saved_ptd_copy_ops_len = _vec_len (ptd->copy_ops);
138  u32 saved_ptd_buffers_len = _vec_len (ptd->buffers);
139  u16 saved_slot = slot;
140 
141  CLIB_PREFETCH (&ring->desc[(slot + 8) & mask], CLIB_CACHE_LINE_BYTES,
142  LOAD);
143 
144  d0 = &ring->desc[slot & mask];
145  if (PREDICT_FALSE (last_region != d0->region))
146  {
147  last_region_shm = mif->regions[d0->region].shm;
148  last_region = d0->region;
149  }
150  mb0 = last_region_shm + d0->offset;
151 
152  dst_off = 0;
153 
154  /* slave is the producer, so it should be able to reset buffer length */
155  dst_left = (type == MEMIF_RING_S2M) ? mif->run.buffer_size : d0->length;
156 
157  if (PREDICT_TRUE (n_left >= 4))
158  vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);
159  bi0 = buffers[0];
160 
161  next_in_chain:
162 
163  b0 = vlib_get_buffer (vm, bi0);
164  src_off = b0->current_data;
165  src_left = b0->current_length;
166 
167  while (src_left)
168  {
169  if (PREDICT_FALSE (dst_left == 0))
170  {
171  if (free_slots)
172  {
173  slot++;
174  free_slots--;
176  d0 = &ring->desc[slot & mask];
177  dst_off = 0;
178  dst_left =
179  (type ==
180  MEMIF_RING_S2M) ? mif->run.buffer_size : d0->length;
181 
182  if (PREDICT_FALSE (last_region != d0->region))
183  {
184  last_region_shm = mif->regions[d0->region].shm;
185  last_region = d0->region;
186  }
187  mb0 = last_region_shm + d0->offset;
188  }
189  else
190  {
191  /* we need to rollback vectors before bailing out */
192  _vec_len (ptd->buffers) = saved_ptd_buffers_len;
193  _vec_len (ptd->copy_ops) = saved_ptd_copy_ops_len;
194  vlib_error_count (vm, node->node_index,
195  MEMIF_TX_ERROR_ROLLBACK, 1);
196  slot = saved_slot;
197  goto no_free_slots;
198  }
199  }
200  bytes_to_copy = clib_min (src_left, dst_left);
201  memif_add_copy_op (ptd, mb0 + dst_off, bytes_to_copy, src_off,
202  vec_len (ptd->buffers));
204  src_off += bytes_to_copy;
205  dst_off += bytes_to_copy;
206  src_left -= bytes_to_copy;
207  dst_left -= bytes_to_copy;
208  }
209 
210  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
211  {
212  bi0 = b0->next_buffer;
213  goto next_in_chain;
214  }
215 
216  d0->length = dst_off;
217  d0->flags = 0;
218 
219  free_slots -= 1;
220  slot += 1;
221 
222  buffers++;
223  n_left--;
224  }
225 no_free_slots:
226 
227  /* copy data */
228  n_copy_op = vec_len (ptd->copy_ops);
229  co = ptd->copy_ops;
230  while (n_copy_op >= 8)
231  {
232  CLIB_PREFETCH (co[4].data, CLIB_CACHE_LINE_BYTES, LOAD);
233  CLIB_PREFETCH (co[5].data, CLIB_CACHE_LINE_BYTES, LOAD);
234  CLIB_PREFETCH (co[6].data, CLIB_CACHE_LINE_BYTES, LOAD);
235  CLIB_PREFETCH (co[7].data, CLIB_CACHE_LINE_BYTES, LOAD);
236 
237  b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
238  b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
239  b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
240  b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);
241 
242  clib_memcpy (co[0].data, b0->data + co[0].buffer_offset,
243  co[0].data_len);
244  clib_memcpy (co[1].data, b1->data + co[1].buffer_offset,
245  co[1].data_len);
246  clib_memcpy (co[2].data, b2->data + co[2].buffer_offset,
247  co[2].data_len);
248  clib_memcpy (co[3].data, b3->data + co[3].buffer_offset,
249  co[3].data_len);
250 
251  co += 4;
252  n_copy_op -= 4;
253  }
254  while (n_copy_op)
255  {
256  b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
257  clib_memcpy (co[0].data, b0->data + co[0].buffer_offset,
258  co[0].data_len);
259  co += 1;
260  n_copy_op -= 1;
261  }
262 
263  vec_reset_length (ptd->copy_ops);
264  vec_reset_length (ptd->buffers);
265 
267  if (type == MEMIF_RING_S2M)
268  ring->head = slot;
269  else
270  ring->tail = slot;
271 
272  if (n_left && n_retries--)
273  goto retry;
274 
276 
277  if (n_left)
278  {
279  vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
280  n_left);
281  }
282 
283  if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
284  {
285  u64 b = 1;
286  CLIB_UNUSED (int r) = write (mq->int_fd, &b, sizeof (b));
287  mq->int_count++;
288  }
289 
290  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
291 
292  return frame->n_vectors;
293 }
294 
297  vlib_frame_t * frame, memif_if_t * mif,
298  memif_queue_t * mq,
300 {
301  memif_ring_t *ring = mq->ring;
302  u32 *buffers = vlib_frame_args (frame);
303  u32 n_left = frame->n_vectors;
304  u16 slot, free_slots, n_free;
305  u16 ring_size = 1 << mq->log2_ring_size;
306  u16 mask = ring_size - 1;
307  int n_retries = 5;
308  vlib_buffer_t *b0;
309 
310 retry:
311  n_free = ring->tail - mq->last_tail;
312  if (n_free >= 16)
313  {
315  mq->last_tail & mask,
316  ring_size, n_free);
317  mq->last_tail += n_free;
318  }
319 
320  slot = ring->head;
321  free_slots = ring_size - ring->head + mq->last_tail;
322 
323  while (n_left && free_slots)
324  {
325  u16 s0;
326  u16 slots_in_packet = 1;
327  memif_desc_t *d0;
328  u32 bi0;
329 
330  CLIB_PREFETCH (&ring->desc[(slot + 8) & mask], CLIB_CACHE_LINE_BYTES,
331  STORE);
332 
333  if (PREDICT_TRUE (n_left >= 4))
334  vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);
335 
336  bi0 = buffers[0];
337 
338  next_in_chain:
339  s0 = slot & mask;
340  d0 = &ring->desc[s0];
341  mq->buffers[s0] = bi0;
342  b0 = vlib_get_buffer (vm, bi0);
343 
344  d0->region = b0->buffer_pool_index + 1;
345  d0->offset = (void *) b0->data + b0->current_data -
346  mif->regions[d0->region].shm;
347  d0->length = b0->current_length;
348 
349  free_slots--;
350  slot++;
351 
352  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
353  {
354  if (PREDICT_FALSE (free_slots == 0))
355  {
356  /* revert to last fully processed packet */
357  free_slots += slots_in_packet;
358  slot -= slots_in_packet;
359  goto no_free_slots;
360  }
361 
363  bi0 = b0->next_buffer;
364 
365  /* next */
366  slots_in_packet++;
367  goto next_in_chain;
368  }
369 
370  d0->flags = 0;
371 
372  /* next from */
373  buffers++;
374  n_left--;
375  }
376 no_free_slots:
377 
379  ring->head = slot;
380 
381  if (n_left && n_retries--)
382  goto retry;
383 
385 
386  if (n_left)
387  {
388  vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
389  n_left);
390  vlib_buffer_free (vm, buffers, n_left);
391  }
392 
393  if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
394  {
395  u64 b = 1;
396  CLIB_UNUSED (int r) = write (mq->int_fd, &b, sizeof (b));
397  mq->int_count++;
398  }
399 
400  return frame->n_vectors;
401 }
402 
404  vlib_node_runtime_t * node,
405  vlib_frame_t * frame)
406 {
407  memif_main_t *nm = &memif_main;
408  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
410  memif_queue_t *mq;
411  u32 thread_index = vm->thread_index;
413  thread_index);
414  u8 tx_queues = vec_len (mif->tx_queues);
415 
416  if (tx_queues < vec_len (vlib_mains))
417  {
418  ASSERT (tx_queues > 0);
419  mq = vec_elt_at_index (mif->tx_queues, thread_index % tx_queues);
420  clib_spinlock_lock_if_init (&mif->lockp);
421  }
422  else
423  mq = vec_elt_at_index (mif->tx_queues, thread_index);
424 
425  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
426  return memif_interface_tx_zc_inline (vm, node, frame, mif, mq, ptd);
427  else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
428  return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_S2M,
429  mq, ptd);
430  else
431  return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_M2S,
432  mq, ptd);
433 }
434 
435 static void
437  u32 node_index)
438 {
439  memif_main_t *apm = &memif_main;
440  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
442 
443  /* Shut off redirection */
444  if (node_index == ~0)
445  {
446  mif->per_interface_next_index = node_index;
447  return;
448  }
449 
451  vlib_node_add_next (vlib_get_main (), memif_input_node.index, node_index);
452 }
453 
454 static void
456 {
457  /* Nothing for now */
458 }
459 
460 static clib_error_t *
463 {
464  memif_main_t *mm = &memif_main;
465  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
467  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);
468 
471  else
473 
474  return 0;
475 }
476 
477 static clib_error_t *
479 {
480  memif_main_t *mm = &memif_main;
481  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
483  static clib_error_t *error = 0;
484 
486  mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
487  else
488  mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
489 
490  return error;
491 }
492 
493 static clib_error_t *
495  u32 hw_if_index,
496  struct vnet_sw_interface_t *st, int is_add)
497 {
498  /* Nothing for now */
499  return 0;
500 }
501 
502 /* *INDENT-OFF* */
504  .name = "memif",
505  .format_device_name = format_memif_device_name,
506  .format_device = format_memif_device,
507  .format_tx_trace = format_memif_tx_trace,
508  .tx_function_n_errors = MEMIF_TX_N_ERROR,
509  .tx_function_error_strings = memif_tx_func_error_strings,
510  .rx_redirect_to_node = memif_set_interface_next_node,
511  .clear_counters = memif_clear_hw_interface_counters,
512  .admin_up_down_function = memif_interface_admin_up_down,
513  .subif_add_del_function = memif_subif_add_del_function,
514  .rx_mode_change_function = memif_interface_rx_mode_change,
515 };
516 
517 /* *INDENT-ON* */
518 
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
memif_if_t * interfaces
Definition: private.h:238
#define clib_min(x, y)
Definition: clib.h:291
#define CLIB_UNUSED(x)
Definition: clib.h:81
memif_tx_func_error_t
Definition: device.c:35
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:547
static_always_inline uword memif_interface_tx_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, memif_ring_type_t type, memif_queue_t *mq, memif_per_thread_data_t *ptd)
Definition: device.c:100
#define PREDICT_TRUE(x)
Definition: clib.h:108
unsigned long u64
Definition: types.h:89
memif_socket_file_t * socket_files
Definition: private.h:241
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:114
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
#define vec_add2_aligned(V, P, N, A)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:574
static void memif_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: device.c:436
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
Definition: lock.h:98
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u32 thread_index
Definition: main.h:179
u8 buffer_pool_index
index of buffer pool this buffer belongs.
Definition: buffer.h:143
uint16_t memif_region_index_t
Definition: memif.h:60
VNET_DEVICE_CLASS_TX_FN() memif_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: device.c:403
static u32 format_get_indent(u8 *s)
Definition: format.h:72
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
u32 * buffers
Definition: private.h:124
vlib_main_t ** vlib_mains
Definition: buffer.c:303
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1118
unsigned char u8
Definition: types.h:56
static_always_inline void memif_add_copy_op(memif_per_thread_data_t *ptd, void *data, u32 len, u16 buffer_offset, u16 buffer_vec_index)
Definition: device.c:88
uint32_t length
Definition: memif.h:152
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
#define vec_add1_aligned(V, E, A)
Add 1 element to end of vector (alignment specified).
Definition: vec.h:533
vnet_hw_interface_rx_mode
Definition: interface.h:51
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:109
#define static_always_inline
Definition: clib.h:95
uword socket_file_index
Definition: private.h:166
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:113
u16 buffer_size
Definition: private.h:184
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:187
uint16_t flags
Definition: memif.h:149
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u32 per_interface_next_index
Definition: private.h:162
#define VNET_DEVICE_CLASS_TX_FN(devclass)
Definition: interface.h:286
unsigned int u32
Definition: types.h:88
void * shm
Definition: private.h:102
static clib_error_t * memif_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
Definition: device.c:494
static void memif_clear_hw_interface_counters(u32 instance)
Definition: device.c:455
memif_region_index_t region
Definition: memif.h:151
memif_copy_op_t * copy_ops
Definition: private.h:222
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:464
vlib_node_registration_t memif_input_node
(constructor) VLIB_REGISTER_NODE (memif_input_node)
Definition: node.c:889
static char * memif_tx_func_error_strings[]
Definition: device.c:43
memif_desc_t desc[0]
Definition: memif.h:173
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
clib_spinlock_t lockp
Definition: private.h:154
unsigned short u16
Definition: types.h:57
void * data
Definition: private.h:208
#define PREDICT_FALSE(x)
Definition: clib.h:107
u32 node_index
Node index.
Definition: node.h:494
static u8 * format_memif_tx_trace(u8 *s, va_list *args)
Definition: device.c:81
#define foreach_memif_tx_func_error
Definition: device.c:31
u32 flags
Definition: vhost_user.h:115
u16 n_vectors
Definition: node.h:401
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:79
vlib_main_t * vm
Definition: buffer.c:294
u16 buffer_vec_index
Definition: private.h:211
#define clib_memcpy(a, b, c)
Definition: string.h:75
static clib_error_t * memif_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: device.c:478
u16 last_tail
Definition: private.h:123
i16 buffer_offset
Definition: private.h:210
memif_region_t * regions
Definition: private.h:170
signed int i32
Definition: types.h:77
static clib_error_t * memif_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_interface_rx_mode mode)
Definition: device.c:461
#define ASSERT(truth)
#define MEMIF_DESC_FLAG_NEXT
Definition: memif.h:150
u32 flags
Definition: private.h:155
memif_ring_t * ring
Definition: private.h:117
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:129
static void * vlib_frame_args(vlib_frame_t *f)
Get pointer to frame scalar data.
Definition: node_funcs.h:284
VNET_DEVICE_CLASS(bond_dev_class)
#define MEMIF_RING_FLAG_MASK_INT
Definition: memif.h:168
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
u64 int_count
Definition: private.h:129
memif_region_offset_t offset
Definition: memif.h:153
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u64 uword
Definition: types.h:112
memif_interface_id_t id
Definition: private.h:156
memif_log2_ring_size_t log2_ring_size
Definition: private.h:118
static u8 * format_memif_device(u8 *s, va_list *args)
Definition: device.c:65
struct memif_if_t::@487 run
uint16_t flags
Definition: memif.h:167
memif_per_thread_data_t * per_thread_data
Definition: private.h:245
u8 data[0]
Packet data.
Definition: buffer.h:175
memif_ring_type_t
Definition: memif.h:47
volatile uint16_t head
Definition: memif.h:169
static_always_inline uword memif_interface_tx_zc_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, memif_queue_t *mq, memif_per_thread_data_t *ptd)
Definition: device.c:296
memif_queue_t * rx_queues
Definition: private.h:172
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:116
memif_main_t memif_main
Definition: memif.c:43
static void vlib_buffer_free_from_ring_no_next(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring without freeing tail buffers.
Definition: buffer_funcs.h:627
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
Definition: lock.h:82
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:58
volatile uint16_t tail
Definition: memif.h:171
u8 * format_memif_device_name(u8 *s, va_list *args)
Definition: device.c:51