FD.io VPP  v18.01-8-g0eacf49
Vector Packet Processing
node.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2016 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #define _GNU_SOURCE
19 #include <stdint.h>
20 #include <net/if.h>
21 #include <sys/ioctl.h>
22 #include <sys/uio.h>
23 
24 #include <vlib/vlib.h>
25 #include <vlib/unix/unix.h>
26 #include <vnet/ethernet/ethernet.h>
27 #include <vnet/devices/devices.h>
28 #include <vnet/feature/feature.h>
29 
30 #include <memif/memif.h>
31 #include <memif/private.h>
32 
/* Error counter list for this node: _(ENUM_SUFFIX, "description") */
#define foreach_memif_input_error \
  _(NOT_IP, "not ip packet")
35 
36 typedef enum
37 {
38 #define _(f,s) MEMIF_INPUT_ERROR_##f,
40 #undef _
43 
44 static __clib_unused char *memif_input_error_strings[] = {
45 #define _(n,s) s,
47 #undef _
48 };
49 
50 typedef struct
51 {
56 
57 static __clib_unused u8 *
58 format_memif_input_trace (u8 * s, va_list * args)
59 {
60  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
61  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
62  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
63  u32 indent = format_get_indent (s);
64 
65  s = format (s, "memif: hw_if_index %d next-index %d",
66  t->hw_if_index, t->next_index);
67  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
68  t->ring);
69  return s;
70 }
71 
74 {
75  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
76  vlib_prefetch_buffer_header (b, STORE);
78 }
79 
82  u32 prev_bi)
83 {
84  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
85  vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
86  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
87 
88  /* update first buffer */
90 
91  /* update previous buffer */
92  prev_b->next_buffer = bi;
94 
95  /* update current buffer */
96  b->next_buffer = 0;
97 }
98 
99 /**
100  * @brief Copy buffer from rx ring
101  *
102  * @param * vm (in)
103  * @param * mif (in) pointer to memif interface
104  * @param * ring (in) pointer to memif ring
105  * @param * rd (in) pointer to ring data
106  * @param ring_size (in) ring size
107  * @param * n_free_bufs (in/out) the number of free vlib buffers available
108  * @param ** first_b (out) the first vlib buffer pointer
109  * @param * first_bi (out) the first vlib buffer index
110  * @param * bi (in/out) the current buffer index
111  * #param * num_slots (in/out) the number of descriptors available to read
112  *
113  * @return total bytes read from rx ring also written to vlib buffers
114  */
117  memif_ring_t * ring, memif_queue_t * mq,
118  u16 ring_size, u32 n_buffer_bytes,
119  u32 * n_free_bufs, vlib_buffer_t ** first_b,
120  u32 * first_bi, u32 * bi, u16 * num_slots)
121 {
122  memif_main_t *nm = &memif_main;
123  u32 thread_index = vlib_get_thread_index ();
124  u32 total_bytes = 0, offset = 0;
125  u32 data_len;
126  u32 bytes_to_copy;
127  void *mb;
128  vlib_buffer_t *b;
129  u16 mask = ring_size - 1;
130  u32 prev_bi;
131  u16 last_head;
132 
133  while (*num_slots)
134  {
135  data_len = ring->desc[mq->last_head & mask].length;
136  while (data_len && (*n_free_bufs))
137  {
138  /* get empty buffer */
139  u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1;
140  prev_bi = *bi;
141  *bi = nm->rx_buffers[thread_index][last_buf];
142  b = vlib_get_buffer (vm, *bi);
143  _vec_len (nm->rx_buffers[thread_index]) = last_buf;
144  (*n_free_bufs)--;
145  if (PREDICT_FALSE (*n_free_bufs == 0))
146  {
147  *n_free_bufs +=
148  vlib_buffer_alloc (vm,
149  &nm->rx_buffers[thread_index]
150  [*n_free_bufs], ring_size);
151  _vec_len (nm->rx_buffers[thread_index]) = *n_free_bufs;
152  }
153 
154  if (last_buf > 4)
155  {
156  memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 2]);
157  memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 3]);
158  }
159 
160  /* copy buffer */
161  bytes_to_copy =
162  data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
163  b->current_data = 0;
164  mb = memif_get_buffer (mif, ring, mq->last_head & mask);
167  if (bytes_to_copy > CLIB_CACHE_LINE_BYTES)
170  bytes_to_copy - CLIB_CACHE_LINE_BYTES);
171 
172  /* fill buffer header */
173  b->current_length = bytes_to_copy;
174 
175  if (total_bytes == 0)
176  {
177  /* fill buffer metadata */
180  vnet_buffer (b)->sw_if_index[VLIB_RX] = mif->sw_if_index;
181  vnet_buffer (b)->sw_if_index[VLIB_TX] = (u32) ~ 0;
182  *first_bi = *bi;
183  *first_b = vlib_get_buffer (vm, *first_bi);
184  }
185  else
186  memif_buffer_add_to_chain (vm, *bi, *first_bi, prev_bi);
187 
188  offset += bytes_to_copy;
189  total_bytes += bytes_to_copy;
190  data_len -= bytes_to_copy;
191  }
192  last_head = mq->last_head;
193  /* Advance to next descriptor */
194  mq->last_head++;
195  offset = 0;
196  (*num_slots)--;
197  if ((ring->desc[last_head & mask].flags & MEMIF_DESC_FLAG_NEXT) == 0)
198  break;
199  }
200 
201  return (total_bytes);
202 }
203 
204 
207 {
208  u8 *ptr = vlib_buffer_get_current (b);
209  u8 v = *ptr & 0xf0;
210 
211  if (PREDICT_TRUE (v == 0x40))
213  else if (PREDICT_TRUE (v == 0x60))
215 
216  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
218 }
219 
222  vlib_frame_t * frame, memif_if_t * mif,
223  memif_ring_type_t type, u16 qid,
225 {
226  vnet_main_t *vnm = vnet_get_main ();
227  memif_ring_t *ring;
228  memif_queue_t *mq;
229  u16 head;
230  u32 next_index;
231  uword n_trace = vlib_get_trace_count (vm, node);
232  memif_main_t *nm = &memif_main;
233  u32 n_rx_packets = 0;
234  u32 n_rx_bytes = 0;
235  u32 *to_next = 0;
236  u32 n_free_bufs;
237  u32 b0_total, b1_total;
238  u32 thread_index = vlib_get_thread_index ();
239  u16 ring_size, mask, num_slots;
240  u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
242 
243  mq = vec_elt_at_index (mif->rx_queues, qid);
244  ring = mq->ring;
245  ring_size = 1 << mq->log2_ring_size;
246  mask = ring_size - 1;
247 
248  if (mode == MEMIF_INTERFACE_MODE_IP)
249  {
251  }
252  else
253  {
255  }
256 
257  n_free_bufs = vec_len (nm->rx_buffers[thread_index]);
258  if (PREDICT_FALSE (n_free_bufs < ring_size))
259  {
260  vec_validate (nm->rx_buffers[thread_index],
261  ring_size + n_free_bufs - 1);
262  n_free_bufs +=
263  vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs],
264  ring_size);
265  _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs;
266  }
267 
268  head = ring->head;
269  if (head == mq->last_head)
270  return 0;
271 
272  num_slots = head - mq->last_head;
273 
274  while (num_slots)
275  {
276  u32 n_left_to_next;
277  u32 next0 = next_index;
278  u32 next1 = next_index;
279  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
280 
281  while (num_slots > 11 && n_left_to_next > 2)
282  {
284  (mif, ring, (mq->last_head + 2) & mask),
285  CLIB_CACHE_LINE_BYTES, LOAD);
287  (mif, ring, (mq->last_head + 3) & mask),
288  CLIB_CACHE_LINE_BYTES, LOAD);
289  CLIB_PREFETCH (&ring->desc[(mq->last_head + 4) & mask],
290  CLIB_CACHE_LINE_BYTES, LOAD);
291  CLIB_PREFETCH (&ring->desc[(mq->last_head + 5) & mask],
292  CLIB_CACHE_LINE_BYTES, LOAD);
293 
294  vlib_buffer_t *first_b0 = 0;
295  u32 bi0 = 0, first_bi0 = 0;
296  b0_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
297  ring_size,
298  n_buffer_bytes,
299  &n_free_bufs, &first_b0,
300  &first_bi0, &bi0,
301  &num_slots);
302 
303  vlib_buffer_t *first_b1 = 0;
304  u32 bi1 = 0, first_bi1 = 0;
305  b1_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
306  ring_size,
307  n_buffer_bytes,
308  &n_free_bufs, &first_b1,
309  &first_bi1, &bi1,
310  &num_slots);
311 
312  /* enqueue buffer */
313  to_next[0] = first_bi0;
314  to_next[1] = first_bi1;
315  to_next += 2;
316  n_left_to_next -= 2;
317 
318 
319  if (mode == MEMIF_INTERFACE_MODE_IP)
320  {
321  next0 = memif_next_from_ip_hdr (node, first_b0);
322  next1 = memif_next_from_ip_hdr (node, first_b1);
323  }
324  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
325  {
326  if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
327  next0 = next1 = mif->per_interface_next_index;
328  else
329  /* redirect if feature path
330  * enabled */
332  &next0, &next1,
333  first_b0, first_b1);
334  }
335 
336  /* trace */
339 
340  if (PREDICT_FALSE (n_trace > 0))
341  {
342  /* b0 */
343  if (PREDICT_TRUE (first_b0 != 0))
344  {
346  vlib_trace_buffer (vm, node, next0, first_b0,
347  /* follow_chain */ 0);
348  vlib_set_trace_count (vm, node, --n_trace);
349  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
350  tr->next_index = next0;
351  tr->hw_if_index = mif->hw_if_index;
352  tr->ring = qid;
353  }
354  if (n_trace)
355  {
356  /* b1 */
357  if (PREDICT_TRUE (first_b1 != 0))
358  {
360  vlib_trace_buffer (vm, node, next1, first_b1,
361  /* follow_chain */ 0);
362  vlib_set_trace_count (vm, node, --n_trace);
363  tr = vlib_add_trace (vm, node, first_b1, sizeof (*tr));
364  tr->next_index = next1;
365  tr->hw_if_index = mif->hw_if_index;
366  tr->ring = qid;
367  }
368  }
369  }
370 
371  /* enqueue */
372  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
373  n_left_to_next, first_bi0,
374  first_bi1, next0, next1);
375 
376  /* next packet */
377  n_rx_packets += 2;
378  n_rx_bytes += b0_total + b1_total;
379  }
380  while (num_slots && n_left_to_next)
381  {
382  vlib_buffer_t *first_b0 = 0;
383  u32 bi0 = 0, first_bi0 = 0;
384  b0_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
385  ring_size,
386  n_buffer_bytes,
387  &n_free_bufs, &first_b0,
388  &first_bi0, &bi0,
389  &num_slots);
390 
391  if (mode == MEMIF_INTERFACE_MODE_IP)
392  {
393  next0 = memif_next_from_ip_hdr (node, first_b0);
394  }
395  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
396  {
397  if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
398  next0 = mif->per_interface_next_index;
399  else
400  /* redirect if feature path
401  * enabled */
403  &next0, first_b0);
404  }
405 
406  /* trace */
408 
409  if (PREDICT_FALSE (n_trace > 0))
410  {
411  if (PREDICT_TRUE (first_b0 != 0))
412  {
414  vlib_trace_buffer (vm, node, next0, first_b0,
415  /* follow_chain */ 0);
416  vlib_set_trace_count (vm, node, --n_trace);
417  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
418  tr->next_index = next0;
419  tr->hw_if_index = mif->hw_if_index;
420  tr->ring = qid;
421  }
422  }
423 
424  /* enqueue buffer */
425  to_next[0] = first_bi0;
426  to_next += 1;
427  n_left_to_next--;
428 
429  /* enqueue */
430  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
431  n_left_to_next, first_bi0, next0);
432 
433  /* next packet */
434  n_rx_packets++;
435  n_rx_bytes += b0_total;
436  }
437  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
438  }
440  ring->tail = head;
441 
443  + VNET_INTERFACE_COUNTER_RX, thread_index,
444  mif->hw_if_index, n_rx_packets,
445  n_rx_bytes);
446 
447  return n_rx_packets;
448 }
449 
450 uword
452  vlib_node_runtime_t * node,
453  vlib_frame_t * frame)
454 {
455  u32 n_rx = 0;
456  memif_main_t *nm = &memif_main;
457  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
459 
461  {
462  memif_if_t *mif;
463  mif = vec_elt_at_index (nm->interfaces, dq->dev_instance);
464  if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
465  (mif->flags & MEMIF_IF_FLAG_CONNECTED))
466  {
467  if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
468  {
469  if (mif->mode == MEMIF_INTERFACE_MODE_IP)
470  n_rx += memif_device_input_inline (vm, node, frame, mif,
471  MEMIF_RING_M2S, dq->queue_id,
473  else
474  n_rx += memif_device_input_inline (vm, node, frame, mif,
475  MEMIF_RING_M2S, dq->queue_id,
477  }
478  else
479  {
480  if (mif->mode == MEMIF_INTERFACE_MODE_IP)
481  n_rx += memif_device_input_inline (vm, node, frame, mif,
482  MEMIF_RING_S2M, dq->queue_id,
484  else
485  n_rx += memif_device_input_inline (vm, node, frame, mif,
486  MEMIF_RING_S2M, dq->queue_id,
488  }
489  }
490  }
491 
492  return n_rx;
493 }
494 
495 #ifndef CLIB_MULTIARCH_VARIANT
496 /* *INDENT-OFF* */
498  .function = memif_input_fn,
499  .name = "memif-input",
500  .sibling_of = "device-input",
501  .format_trace = format_memif_input_trace,
502  .type = VLIB_NODE_TYPE_INPUT,
503  .state = VLIB_NODE_STATE_INTERRUPT,
504  .n_errors = MEMIF_INPUT_N_ERROR,
505  .error_strings = memif_input_error_strings,
506 };
507 
510 
511 #if __x86_64__
512 static void __clib_constructor
514 {
515  if (memif_input_fn_avx512 && clib_cpu_supports_avx512f ())
517  else if (memif_input_fn_avx2 && clib_cpu_supports_avx2 ())
519 }
520 #endif
521 #endif
522 
523 /* *INDENT-ON* */
524 
525 
526 /*
527  * fd.io coding-style-patch-verification: ON
528  *
529  * Local Variables:
530  * eval: (c-set-style "gnu")
531  * End:
532  */
memif_if_t * interfaces
Definition: private.h:181
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:432
uword( vlib_node_function_t)(struct vlib_main_t *vm, struct vlib_node_runtime_t *node, struct vlib_frame_t *frame)
Definition: node.h:54
vnet_device_and_queue_t * devices_and_queues
Definition: devices.h:69
#define CLIB_UNUSED(x)
Definition: clib.h:79
static_always_inline uword memif_device_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, memif_ring_type_t type, u16 qid, memif_interface_mode_t mode)
Definition: node.c:221
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:143
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:211
vnet_main_t * vnet_get_main(void)
Definition: misc.c:47
vnet_interface_main_t interface_main
Definition: vnet.h:56
#define PREDICT_TRUE(x)
Definition: clib.h:106
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:112
memif_interface_mode_t
Definition: memif.h:53
static u32 format_get_indent(u8 *s)
Definition: format.h:72
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:415
uint32_t length
Definition: memif.h:152
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:104
static_always_inline uword memif_copy_buffer_from_rx_ring(vlib_main_t *vm, memif_if_t *mif, memif_ring_t *ring, memif_queue_t *mq, u16 ring_size, u32 n_buffer_bytes, u32 *n_free_bufs, vlib_buffer_t **first_b, u32 *first_bi, u32 *bi, u16 *num_slots)
Copy buffer from rx ring.
Definition: node.c:116
#define VLIB_BUFFER_NEXT_PRESENT
Definition: buffer.h:95
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:68
#define static_always_inline
Definition: clib.h:93
static_always_inline void memif_buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi)
Definition: node.c:81
static_always_inline u32 memif_next_from_ip_hdr(vlib_node_runtime_t *node, vlib_buffer_t *b)
Definition: node.c:206
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:672
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:113
#define VLIB_BUFFER_TOTAL_LENGTH_VALID
Definition: buffer.h:97
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:171
uint16_t flags
Definition: memif.h:148
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u32 per_interface_next_index
Definition: private.h:135
static __clib_unused char * memif_input_error_strings[]
Definition: node.c:44
u16 last_head
Definition: private.h:101
vlib_node_registration_t memif_input_node
(constructor) VLIB_REGISTER_NODE (memif_input_node)
Definition: node.c:497
memif_desc_t desc[0]
Definition: memif.h:174
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:72
uword dev_instance
Definition: private.h:132
#define v
Definition: acl.c:341
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:195
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX
Definition: buffer.h:434
#define PREDICT_FALSE(x)
Definition: clib.h:105
static u32 vlib_buffer_free_list_buffer_size(vlib_main_t *vm, u32 free_list_index)
Definition: buffer_funcs.h:465
#define foreach_memif_input_error
Definition: node.c:33
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:364
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:113
static_always_inline void vnet_feature_start_device_input_x2(u32 sw_if_index, u32 *next0, u32 *next1, vlib_buffer_t *b0, vlib_buffer_t *b1)
Definition: feature.h:259
vlib_node_function_t __clib_weak memif_input_fn_avx512
Definition: node.c:508
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:221
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:82
vlib_main_t * vm
Definition: buffer.c:283
static void __clib_constructor memif_input_multiarch_select(void)
Definition: node.c:513
#define clib_memcpy(a, b, c)
Definition: string.h:75
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:454
u32 ** rx_buffers
Definition: private.h:188
memif_input_error_t
Definition: node.c:36
unsigned int u32
Definition: types.h:88
static_always_inline void memif_prefetch(vlib_main_t *vm, u32 bi)
Definition: node.c:73
#define MEMIF_DESC_FLAG_NEXT
Definition: memif.h:149
static_always_inline void * memif_get_buffer(memif_if_t *mif, memif_ring_t *ring, u16 slot)
Definition: private.h:225
u32 flags
Definition: private.h:128
memif_ring_t * ring
Definition: private.h:96
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:109
u32 hw_if_index
Definition: private.h:130
uword CLIB_MULTIARCH_FN() memif_input_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: node.c:451
u64 uword
Definition: types.h:112
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:142
template key/value backing page structure
Definition: bihash_doc.h:44
#define foreach_device_and_queue(var, vec)
Definition: devices.h:156
Definition: defs.h:47
unsigned short u16
Definition: types.h:57
vlib_node_function_t __clib_weak memif_input_fn_avx2
Definition: node.c:509
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
unsigned char u8
Definition: types.h:56
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:534
memif_log2_ring_size_t log2_ring_size
Definition: private.h:97
#define vnet_buffer(b)
Definition: buffer.h:326
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:227
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:143
u8 data[0]
Packet data.
Definition: buffer.h:159
memif_ring_type_t
Definition: memif.h:47
volatile uint16_t head
Definition: memif.h:170
memif_queue_t * rx_queues
Definition: private.h:145
static __clib_unused u8 * format_memif_input_trace(u8 *s, va_list *args)
Definition: node.c:58
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:159
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:67
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:75
memif_main_t memif_main
Definition: memif.c:43
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:341
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
u32 sw_if_index
Definition: private.h:131
volatile uint16_t tail
Definition: memif.h:172
memif_interface_mode_t mode
Definition: private.h:133
Definition: defs.h:46
#define CLIB_MULTIARCH_FN(fn)
Definition: cpu.h:59