FD.io VPP v18.01.1-37-g7ea3975
Vector Packet Processing
node.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
#include <memif/private.h>

#define foreach_memif_input_error \
  _(NOT_IP, "not ip packet")

typedef enum
{
#define _(f,s) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

static __clib_unused char *memif_input_error_strings[] = {
#define _(n,s) s,
  foreach_memif_input_error
#undef _
};
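
/* The _() X-macro expands foreach_memif_input_error twice, once for the
 * enum and once for the string table, so error ids and their counter
 * strings stay in lockstep. With the single NOT_IP entry the expansion is
 * equivalent to:
 *
 *   typedef enum { MEMIF_INPUT_ERROR_NOT_IP, MEMIF_INPUT_N_ERROR }
 *     memif_input_error_t;
 *   static char *memif_input_error_strings[] = { "not ip packet" };
 */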

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;

static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
              t->ring);
  return s;
}
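
/* A traced packet renders along these lines (values illustrative):
 *
 *   memif: hw_if_index 1 next-index 4
 *     slot: ring 0
 */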

static_always_inline void
memif_prefetch (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_prefetch_buffer_header (b, STORE);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, STORE);
}

static_always_inline void
memif_buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi,
                           u32 prev_bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);

  /* update first buffer */
  first_b->total_length_not_including_first_buffer += b->current_length;

  /* update previous buffer */
  prev_b->next_buffer = bi;
  prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

  /* update current buffer */
  b->next_buffer = 0;
}
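
/* A packet spanning three vlib buffers ends up chained as (sketch):
 *
 *   first_b -> next_buffer -> b1 -> next_buffer -> b2
 *
 * with VLIB_BUFFER_NEXT_PRESENT set on every non-terminal buffer and
 * first_b->total_length_not_including_first_buffer accumulating the
 * lengths of b1 and b2.
 */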

/**
 * @brief Copy buffer from rx ring
 *
 * @param * vm (in) pointer to vlib main
 * @param * mif (in) pointer to memif interface
 * @param * ring (in) pointer to memif ring
 * @param * mq (in) pointer to memif queue
 * @param ring_size (in) ring size
 * @param n_buffer_bytes (in) vlib buffer data size
 * @param * n_free_bufs (in/out) the number of free vlib buffers available
 * @param ** first_b (out) the first vlib buffer pointer
 * @param * first_bi (out) the first vlib buffer index
 * @param * bi (in/out) the current buffer index
 * @param * num_slots (in/out) the number of descriptors available to read
 *
 * @return total bytes read from rx ring and written to vlib buffers
 */
static_always_inline uword
memif_copy_buffer_from_rx_ring (vlib_main_t * vm, memif_if_t * mif,
                                memif_ring_t * ring, memif_queue_t * mq,
                                u16 ring_size, u32 n_buffer_bytes,
                                u32 * n_free_bufs, vlib_buffer_t ** first_b,
                                u32 * first_bi, u32 * bi, u16 * num_slots)
{
  memif_main_t *nm = &memif_main;
  u32 thread_index = vlib_get_thread_index ();
  u32 total_bytes = 0, offset = 0;
  u32 data_len;
  u32 bytes_to_copy;
  void *mb;
  vlib_buffer_t *b;
  u16 mask = ring_size - 1;
  u32 prev_bi;
  u16 last_head;

  while (*num_slots)
    {
      data_len = ring->desc[mq->last_head & mask].length;
      while (data_len && (*n_free_bufs))
        {
          /* get empty buffer */
          u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1;
          prev_bi = *bi;
          *bi = nm->rx_buffers[thread_index][last_buf];
          b = vlib_get_buffer (vm, *bi);
          /* Clear any stale error so that a downstream node which forgets
             to set b->error increments the null-node error counter instead
             of risking a crash on a dangling error index. */
          b->error = 0x0;
          _vec_len (nm->rx_buffers[thread_index]) = last_buf;
          (*n_free_bufs)--;
          if (PREDICT_FALSE (*n_free_bufs == 0))
            {
              *n_free_bufs +=
                vlib_buffer_alloc (vm,
                                   &nm->rx_buffers[thread_index]
                                   [*n_free_bufs], ring_size);
              _vec_len (nm->rx_buffers[thread_index]) = *n_free_bufs;
            }

          if (last_buf > 4)
            {
              memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 2]);
              memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 3]);
            }
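          /* rx_buffers is consumed from the tail, so last_buf - 2 and
             last_buf - 3 are the buffers the upcoming iterations will pop;
             prefetching their headers for STORE hides the metadata-write
             latency. The last_buf > 4 guard keeps the indices in range. */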

          /* copy buffer */
          bytes_to_copy =
            data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
          b->current_data = 0;
          mb = memif_get_buffer (mif, ring, mq->last_head & mask);
          clib_memcpy (vlib_buffer_get_current (b), mb + offset,
                       CLIB_CACHE_LINE_BYTES);
          if (bytes_to_copy > CLIB_CACHE_LINE_BYTES)
            clib_memcpy (vlib_buffer_get_current (b) + CLIB_CACHE_LINE_BYTES,
                         mb + CLIB_CACHE_LINE_BYTES + offset,
                         bytes_to_copy - CLIB_CACHE_LINE_BYTES);

          /* fill buffer header */
          b->current_length = bytes_to_copy;

          if (total_bytes == 0)
            {
              /* fill buffer metadata */
              b->total_length_not_including_first_buffer = 0;
              b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
              vnet_buffer (b)->sw_if_index[VLIB_RX] = mif->sw_if_index;
              vnet_buffer (b)->sw_if_index[VLIB_TX] = (u32) ~ 0;
              *first_bi = *bi;
              *first_b = vlib_get_buffer (vm, *first_bi);
            }
          else
            memif_buffer_add_to_chain (vm, *bi, *first_bi, prev_bi);

          offset += bytes_to_copy;
          total_bytes += bytes_to_copy;
          data_len -= bytes_to_copy;
        }
      last_head = mq->last_head;
      /* Advance to next descriptor */
      mq->last_head++;
      offset = 0;
      (*num_slots)--;
      if ((ring->desc[last_head & mask].flags & MEMIF_DESC_FLAG_NEXT) == 0)
        break;
    }

  return (total_bytes);
}
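
/* Copy strategy above: the first clib_memcpy unconditionally moves one
 * cache line (which may write past the packet length for short packets;
 * this appears harmless given the buffer's data size), and only payloads
 * longer than a cache line pay for the variable-length tail copy.
 * Descriptors larger than a vlib buffer are split across chained buffers
 * via the offset/bytes_to_copy bookkeeping. */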


static_always_inline u32
memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  u8 *ptr = vlib_buffer_get_current (b);
  u8 v = *ptr & 0xf0;

  if (PREDICT_TRUE (v == 0x40))
    return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
  else if (PREDICT_TRUE (v == 0x60))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
  return VNET_DEVICE_INPUT_NEXT_DROP;
}
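
/* The dispatch above keys on the IP version nibble of the first payload
 * byte: 0x4x selects the IPv4 path, 0x6x the IPv6 path, and anything else
 * is charged to the NOT_IP error counter and sent to the drop next. */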

static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, memif_if_t * mif,
                           memif_ring_type_t type, u16 qid,
                           memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_ring_t *ring;
  memif_queue_t *mq;
  u16 head;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  memif_main_t *nm = &memif_main;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 *to_next = 0;
  u32 n_free_bufs;
  u32 b0_total, b1_total;
  u32 thread_index = vlib_get_thread_index ();
  u16 ring_size, mask, num_slots;
  u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
                                                          VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  if (mode == MEMIF_INTERFACE_MODE_IP)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
    }
  else
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
    }

  n_free_bufs = vec_len (nm->rx_buffers[thread_index]);
  if (PREDICT_FALSE (n_free_bufs < ring_size))
    {
      vec_validate (nm->rx_buffers[thread_index],
                    ring_size + n_free_bufs - 1);
      n_free_bufs +=
        vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs],
                           ring_size);
      _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs;
    }

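  /* For an rx ring the peer is the producer: it advances ring->head as it
     fills slots, and this node returns slots by advancing ring->tail once
     it is done with them. mq->last_head tracks the consume position in
     between; the free-running u16 indices make the subtraction below
     wrap-safe, and the power-of-two ring size lets "& mask" recover the
     slot. */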
  head = ring->head;
  mq->last_head = ring->tail;
  if (head == mq->last_head)
    return 0;

  num_slots = head - mq->last_head;

  while (num_slots)
    {
      u32 n_left_to_next;
      u32 next0 = next_index;
      u32 next1 = next_index;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (num_slots > 11 && n_left_to_next > 2)
        {
          CLIB_PREFETCH (memif_get_buffer
                         (mif, ring, (mq->last_head + 2) & mask),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (memif_get_buffer
                         (mif, ring, (mq->last_head + 3) & mask),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&ring->desc[(mq->last_head + 4) & mask],
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&ring->desc[(mq->last_head + 5) & mask],
                         CLIB_CACHE_LINE_BYTES, LOAD);
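          /* Prefetch pipeline: packet data is pulled in two slots ahead
             and descriptors four to five slots ahead, so by the time the
             copy routine reaches a slot both its descriptor and payload
             are likely cache-resident. The num_slots > 11 bound leaves
             slack for this look-ahead. */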

          vlib_buffer_t *first_b0 = 0;
          u32 bi0 = 0, first_bi0 = 0;
          b0_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
                                                     ring_size,
                                                     n_buffer_bytes,
                                                     &n_free_bufs, &first_b0,
                                                     &first_bi0, &bi0,
                                                     &num_slots);

          vlib_buffer_t *first_b1 = 0;
          u32 bi1 = 0, first_bi1 = 0;
          b1_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
                                                     ring_size,
                                                     n_buffer_bytes,
                                                     &n_free_bufs, &first_b1,
                                                     &first_bi1, &bi1,
                                                     &num_slots);

          if (PREDICT_FALSE (!first_bi0 || !first_bi1))
            {
              goto _invalid_pkt01;
            }
          /* enqueue buffer */
          to_next[0] = first_bi0;
          to_next[1] = first_bi1;
          to_next += 2;
          n_left_to_next -= 2;

          if (mode == MEMIF_INTERFACE_MODE_IP)
            {
              next0 = memif_next_from_ip_hdr (node, first_b0);
              next1 = memif_next_from_ip_hdr (node, first_b1);
            }
          else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
            {
              if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
                next0 = next1 = mif->per_interface_next_index;
              else
                /* redirect if feature path enabled */
                vnet_feature_start_device_input_x2 (mif->sw_if_index,
                                                    &next0, &next1,
                                                    first_b0, first_b1);
            }

          /* trace */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b1);

          if (PREDICT_FALSE (n_trace > 0))
            {
              /* b0 */
              if (PREDICT_TRUE (first_b0 != 0))
                {
                  memif_input_trace_t *tr;
                  vlib_trace_buffer (vm, node, next0, first_b0,
                                     /* follow_chain */ 0);
                  vlib_set_trace_count (vm, node, --n_trace);
                  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
                  tr->next_index = next0;
                  tr->hw_if_index = mif->hw_if_index;
                  tr->ring = qid;
                }
              if (n_trace)
                {
                  /* b1 */
                  if (PREDICT_TRUE (first_b1 != 0))
                    {
                      memif_input_trace_t *tr;
                      vlib_trace_buffer (vm, node, next1, first_b1,
                                         /* follow_chain */ 0);
                      vlib_set_trace_count (vm, node, --n_trace);
                      tr = vlib_add_trace (vm, node, first_b1, sizeof (*tr));
                      tr->next_index = next1;
                      tr->hw_if_index = mif->hw_if_index;
                      tr->ring = qid;
                    }
                }
            }

          /* enqueue */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, first_bi0,
                                           first_bi1, next0, next1);

          /* next packet */
          n_rx_packets += 2;
          n_rx_bytes += b0_total + b1_total;

          continue;
        _invalid_pkt01:
          if (!first_bi0 && !first_bi1)
            {
              continue;
            }
          if (first_bi1)
            {
              first_bi0 = first_bi1;
              first_b0 = first_b1;
              bi0 = bi1;
              b0_total = b1_total;
            }

          if (mode == MEMIF_INTERFACE_MODE_IP)
            {
              next0 = memif_next_from_ip_hdr (node, first_b0);
            }
          else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
            {
              if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
                next0 = mif->per_interface_next_index;
              else
                /* redirect if feature path enabled */
                vnet_feature_start_device_input_x1 (mif->sw_if_index, &next0,
                                                    first_b0);
            }

          /* trace */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);

          if (PREDICT_FALSE (n_trace > 0))
            {
              if (PREDICT_TRUE (first_b0 != 0))
                {
                  memif_input_trace_t *tr;
                  vlib_trace_buffer (vm, node, next0, first_b0,
                                     /* follow_chain */ 0);
                  vlib_set_trace_count (vm, node, --n_trace);
                  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
                  tr->next_index = next0;
                  tr->hw_if_index = mif->hw_if_index;
                  tr->ring = qid;
                }
            }

          /* enqueue buffer */
          to_next[0] = first_bi0;
          to_next += 1;
          n_left_to_next--;

          /* enqueue */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, first_bi0, next0);

          /* next packet */
          n_rx_packets++;
          n_rx_bytes += b0_total;
        }
      while (num_slots && n_left_to_next)
        {
          vlib_buffer_t *first_b0 = 0;
          u32 bi0 = 0, first_bi0 = 0;
          b0_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
                                                     ring_size,
                                                     n_buffer_bytes,
                                                     &n_free_bufs, &first_b0,
                                                     &first_bi0, &bi0,
                                                     &num_slots);
          if (PREDICT_FALSE (!first_bi0))
            {
              goto _invalid_pkt0;
            }

          if (mode == MEMIF_INTERFACE_MODE_IP)
            {
              next0 = memif_next_from_ip_hdr (node, first_b0);
            }
          else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
            {
              if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
                next0 = mif->per_interface_next_index;
              else
                /* redirect if feature path enabled */
                vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                    &next0, first_b0);
            }

          /* trace */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);

          if (PREDICT_FALSE (n_trace > 0))
            {
              if (PREDICT_TRUE (first_b0 != 0))
                {
                  memif_input_trace_t *tr;
                  vlib_trace_buffer (vm, node, next0, first_b0,
                                     /* follow_chain */ 0);
                  vlib_set_trace_count (vm, node, --n_trace);
                  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
                  tr->next_index = next0;
                  tr->hw_if_index = mif->hw_if_index;
                  tr->ring = qid;
                }
            }

          /* enqueue buffer */
          to_next[0] = first_bi0;
          to_next += 1;
          n_left_to_next--;

          /* enqueue */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, first_bi0, next0);

          /* next packet */
          n_rx_packets++;
          n_rx_bytes += b0_total;
          continue;
        _invalid_pkt0:
          ;
        }
      if (PREDICT_TRUE (n_rx_packets != 0))
        {
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
        }
    }
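
  /* Hand the consumed slots back to the producer. The store barrier keeps
     all reads and copies of the slot data ordered before the tail store,
     so the peer cannot recycle a slot this node is still reading. */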
  CLIB_MEMORY_STORE_BARRIER ();
  ring->tail = mq->last_head;

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thread_index,
                                   mif->hw_if_index, n_rx_packets,
                                   n_rx_bytes);

  return n_rx_packets;
}

uword
CLIB_MULTIARCH_FN (memif_input_fn) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * frame)
{
  u32 n_rx = 0;
  memif_main_t *nm = &memif_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    memif_if_t *mif;
    mif = vec_elt_at_index (nm->interfaces, dq->dev_instance);
    if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
        (mif->flags & MEMIF_IF_FLAG_CONNECTED))
      {
        if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
          {
            if (mif->mode == MEMIF_INTERFACE_MODE_IP)
              n_rx += memif_device_input_inline (vm, node, frame, mif,
                                                 MEMIF_RING_M2S, dq->queue_id,
                                                 MEMIF_INTERFACE_MODE_IP);
            else
              n_rx += memif_device_input_inline (vm, node, frame, mif,
                                                 MEMIF_RING_M2S, dq->queue_id,
                                                 MEMIF_INTERFACE_MODE_ETHERNET);
          }
        else
          {
            if (mif->mode == MEMIF_INTERFACE_MODE_IP)
              n_rx += memif_device_input_inline (vm, node, frame, mif,
                                                 MEMIF_RING_S2M, dq->queue_id,
                                                 MEMIF_INTERFACE_MODE_IP);
            else
              n_rx += memif_device_input_inline (vm, node, frame, mif,
                                                 MEMIF_RING_S2M, dq->queue_id,
                                                 MEMIF_INTERFACE_MODE_ETHERNET);
          }
      }
  }

  return n_rx;
}
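
/* Each call site above passes the ring type and mode as compile-time
 * constants into the always-inlined worker, so the compiler emits a
 * specialized copy of memif_device_input_inline per (ring, mode)
 * combination: slaves read master-to-slave rings, masters read
 * slave-to-master rings. */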

#ifndef CLIB_MULTIARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (memif_input_node) = {
  .function = memif_input_fn,
  .name = "memif-input",
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_strings = memif_input_error_strings,
};
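
/* Registered as a sibling of device-input, the node inherits the standard
 * device-input next-node arcs that the inline paths above index into. It
 * starts in INTERRUPT state, so it runs when an rx event arrives rather
 * than spinning; a queue can typically be switched to polling through the
 * usual rx-mode machinery. */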

vlib_node_function_t __clib_weak memif_input_fn_avx512;
vlib_node_function_t __clib_weak memif_input_fn_avx2;

#if __x86_64__
static void __clib_constructor
memif_input_multiarch_select (void)
{
  if (memif_input_fn_avx512 && clib_cpu_supports_avx512f ())
    memif_input_node.function = memif_input_fn_avx512;
  else if (memif_input_fn_avx2 && clib_cpu_supports_avx2 ())
    memif_input_node.function = memif_input_fn_avx2;
}
#endif
#endif
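
/* Runtime ISA selection: the AVX-512 and AVX2 variants are declared
 * __clib_weak, so they resolve to null unless the corresponding multiarch
 * objects were compiled in, and the constructor above patches the best
 * variant supported by the host CPU into the node at startup. */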

/* *INDENT-ON* */


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */