FD.io VPP  v17.10-9-gd594711
Vector Packet Processing
device.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

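/*
 * TX (device output) path and device-class registration for the memif
 * shared-memory packet interface.
 */
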
#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <memif/memif.h>
#include <memif/private.h>

#define foreach_memif_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring") \
_(NO_TX_QUEUES, "no tx queues")

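/*
 * The error table above is expanded twice below: once into the
 * MEMIF_TX_ERROR_* counter enum and once into the matching human-readable
 * counter strings.
 */
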
typedef enum
{
#define _(f,s) MEMIF_TX_ERROR_##f,
  foreach_memif_tx_func_error
#undef _
    MEMIF_TX_N_ERROR,
} memif_tx_func_error_t;

static char *memif_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_memif_tx_func_error
#undef _
};

u8 *
format_memif_device_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  memif_main_t *mm = &memif_main;
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);

  s = format (s, "memif%lu/%lu", mif->socket_file_index, mif->id);
  return s;
}

static u8 *
format_memif_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  uword indent = format_get_indent (s);

  s = format (s, "MEMIF interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
    }
  return s;
}

static u8 *
format_memif_tx_trace (u8 * s, va_list * args)
{
  s = format (s, "Unimplemented...");
  return s;
}

static_always_inline void
memif_prefetch_buffer_and_data (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_prefetch_buffer_header (b, LOAD);
  CLIB_PREFETCH (vlib_buffer_get_current (b), CLIB_CACHE_LINE_BYTES, LOAD);
}

/**
 * @brief Copy buffer chain to the tx ring
 *
 * @param vm (in) vlib main
 * @param node (in) node runtime, used for error counters
 * @param mif (in) pointer to memif interface
 * @param bi (in) vlib buffer index of the chain head
 * @param ring (in) pointer to memif ring
 * @param head (in/out) ring head
 * @param mask (in) ring size - 1
 */
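/*
 * Chained vlib buffers are copied into consecutive ring descriptors: when a
 * segment no longer fits, the current descriptor is closed with
 * MEMIF_DESC_FLAG_NEXT and the copy continues in the next slot.  A segment
 * that does not fit even an empty descriptor is counted as TRUNC_PACKET and
 * the packet is truncated.
 */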
static_always_inline void
memif_copy_buffer_to_tx_ring (vlib_main_t * vm, vlib_node_runtime_t * node,
                              memif_if_t * mif, u32 bi, memif_ring_t * ring,
                              u16 * head, u16 mask)
{
  vlib_buffer_t *b0;
  void *mb0;
  u32 total = 0, len;

  mb0 = memif_get_buffer (mif, ring, *head);
  ring->desc[*head].flags = 0;
  do
    {
      b0 = vlib_get_buffer (vm, bi);
      len = b0->current_length;
      if (PREDICT_FALSE (ring->desc[*head].buffer_length < (total + len)))
        {
          if (PREDICT_TRUE (total))
            {
              ring->desc[*head].length = total;
              total = 0;
              ring->desc[*head].flags |= MEMIF_DESC_FLAG_NEXT;
              *head = (*head + 1) & mask;
              mb0 = memif_get_buffer (mif, ring, *head);
              ring->desc[*head].flags = 0;
            }
        }
      if (PREDICT_TRUE (ring->desc[*head].buffer_length >= (total + len)))
        {
          clib_memcpy (mb0 + total, vlib_buffer_get_current (b0),
                       CLIB_CACHE_LINE_BYTES);
          if (len > CLIB_CACHE_LINE_BYTES)
            clib_memcpy (mb0 + CLIB_CACHE_LINE_BYTES + total,
                         vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
                         len - CLIB_CACHE_LINE_BYTES);
          total += len;
        }
      else
        {
          vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_TRUNC_PACKET,
                            1);
          break;
        }
    }
  while ((bi = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) ? b0->next_buffer : 0));

  if (PREDICT_TRUE (total))
    {
      ring->desc[*head].length = total;
      *head = (*head + 1) & mask;
    }
}

static_always_inline uword
memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, memif_if_t * mif,
                           memif_ring_type_t type)
{
  u8 qid;
  memif_ring_t *ring;
  u32 *buffers = vlib_frame_args (frame);
  u32 n_left = frame->n_vectors;
  u16 ring_size, mask;
  u16 head, tail;
  u16 free_slots;
  u32 thread_index = vlib_get_thread_index ();
  u8 tx_queues = vec_len (mif->tx_queues);
  memif_queue_t *mq;

  if (PREDICT_FALSE (tx_queues == 0))
    {
      vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_TX_QUEUES,
                        n_left);
      goto error;
    }

  /* fewer tx queues than threads: queues are shared, serialize with the
     per-interface lock */
  if (tx_queues < vec_len (vlib_mains))
    {
      qid = thread_index % tx_queues;
      clib_spinlock_lock_if_init (&mif->lockp);
    }
  else
    {
      qid = thread_index;
    }
  mq = vec_elt_at_index (mif->tx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  /* free consumed buffers */

  head = ring->head;
  tail = ring->tail;

  if (tail > head)
    free_slots = tail - head;
  else
    free_slots = ring_size - head + tail;

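  /*
   * Main copy loop: two packets per iteration, prefetching the ring buffers
   * and descriptors a few slots ahead and the next two vlib buffers to hide
   * memory latency.  The n_left > 5 / free_slots > 1 bounds keep the
   * prefetch offsets and the two copies in range; leftovers are handled one
   * packet at a time below.
   */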
  while (n_left > 5 && free_slots > 1)
    {
      if (PREDICT_TRUE (head + 5 < ring_size))
        {
          CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 2),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 3),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[head + 4], CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[head + 5], CLIB_CACHE_LINE_BYTES, STORE);
        }
      else
        {
          CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 2) % mask),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 3) % mask),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[(head + 4) % mask],
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[(head + 5) % mask],
                         CLIB_CACHE_LINE_BYTES, STORE);
        }

      memif_prefetch_buffer_and_data (vm, buffers[2]);
      memif_prefetch_buffer_and_data (vm, buffers[3]);

      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
                                    mask);
      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[1], ring, &head,
                                    mask);

      buffers += 2;
      n_left -= 2;
      free_slots -= 2;
    }

  while (n_left && free_slots)
    {
      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
                                    mask);
      buffers++;
      n_left--;
      free_slots--;
    }
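
  /*
   * Publish the new head only after all descriptor and buffer writes above
   * are visible; the peer reads ring->head to learn how many descriptors
   * are valid.
   */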
  CLIB_MEMORY_STORE_BARRIER ();
  ring->head = head;

  clib_spinlock_unlock_if_init (&mif->lockp);

  if (n_left)
    {
      vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
                        n_left);
    }

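  /*
   * If the peer has not masked interrupts for this ring, kick it by writing
   * one event to the queue's interrupt file descriptor.
   */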
  if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
    {
      u64 b = 1;
      CLIB_UNUSED (int r) = write (mq->int_fd, &b, sizeof (b));
      mq->int_count++;
    }

error:
  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);

  return frame->n_vectors;
}

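/*
 * Per-interface TX entry point: a slave transmits on the slave-to-master
 * (S2M) rings, a master on the master-to-slave (M2S) rings.
 */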
static uword
memif_interface_tx (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  memif_main_t *nm = &memif_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  memif_if_t *mif = pool_elt_at_index (nm->interfaces, rund->dev_instance);

  if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
    return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_S2M);
  else
    return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_M2S);
}

static void
memif_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                               u32 node_index)
{
  memif_main_t *apm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      mif->per_interface_next_index = node_index;
      return;
    }

  mif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), memif_input_node.index, node_index);
}

static void
memif_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}

static clib_error_t *
memif_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                vnet_hw_interface_rx_mode mode)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
  else
    mq->ring->flags &= ~MEMIF_RING_FLAG_MASK_INT;

  return 0;
}

static clib_error_t *
memif_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  static clib_error_t *error = 0;

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
  else
    mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;

  return error;
}

static clib_error_t *
memif_subif_add_del_function (vnet_main_t * vnm,
                              u32 hw_if_index,
                              struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}

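/*
 * Device class registration: hooks the TX function, the format helpers and
 * the admin / sub-interface / rx-mode callbacks above into the vnet
 * interface layer.
 */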
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (memif_device_class) = {
  .name = "memif",
  .tx_function = memif_interface_tx,
  .format_device_name = format_memif_device_name,
  .format_device = format_memif_device,
  .format_tx_trace = format_memif_tx_trace,
  .tx_function_n_errors = MEMIF_TX_N_ERROR,
  .tx_function_error_strings = memif_tx_func_error_strings,
  .rx_redirect_to_node = memif_set_interface_next_node,
  .clear_counters = memif_clear_hw_interface_counters,
  .admin_up_down_function = memif_interface_admin_up_down,
  .subif_add_del_function = memif_subif_add_del_function,
  .rx_mode_change_function = memif_interface_rx_mode_change,
};

VLIB_DEVICE_TX_FUNCTION_MULTIARCH (memif_device_class,
                                   memif_interface_tx);
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */