/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer_node.h: VLIB buffer handling node helper macros/inlines
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_buffer_node_h
#define included_vlib_buffer_node_h

/** \file
    vlib buffer/node functions
*/
/** \brief Finish enqueueing two buffers forward in the graph.
 Standard dual loop boilerplate element. This is a MACRO,
 with MULTIPLE SIDE EFFECTS. In the ideal case,
 <code>next_index == next0 == next1</code>,
 which means that the speculative enqueue at the top of the dual loop
 has correctly dealt with both packets. In that case, the macro does
 nothing at all.

 @param vm vlib_main_t pointer, varies by thread
 @param node current node vlib_node_runtime_t pointer
 @param next_index speculated next index used for both packets
 @param to_next speculated vector pointer used for both packets
 @param n_left_to_next number of slots left in speculated vector
 @param bi0 first buffer index
 @param bi1 second buffer index
 @param next0 actual next index to be used for the first packet
 @param next1 actual next index to be used for the second packet

 @return @c next_index -- speculative next index to be used for future packets
 @return @c to_next -- speculative frame to be used for future packets
 @return @c n_left_to_next -- number of slots left in speculative frame
*/

#define vlib_validate_buffer_enqueue_x2(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,next0,next1) \
do { \
  ASSERT (bi0 != 0); \
  ASSERT (bi1 != 0); \
  int enqueue_code = (next0 != next_index) + 2*(next1 != next_index); \
 \
  if (PREDICT_FALSE (enqueue_code != 0)) \
    { \
      switch (enqueue_code) \
        { \
        case 1: \
          /* A B A */ \
          to_next[-2] = bi1; \
          to_next -= 1; \
          n_left_to_next += 1; \
          vlib_set_next_frame_buffer (vm, node, next0, bi0); \
          break; \
 \
        case 2: \
          /* A A B */ \
          to_next -= 1; \
          n_left_to_next += 1; \
          vlib_set_next_frame_buffer (vm, node, next1, bi1); \
          break; \
 \
        case 3: \
          /* A B B or A B C */ \
          to_next -= 2; \
          n_left_to_next += 2; \
          vlib_set_next_frame_buffer (vm, node, next0, bi0); \
          vlib_set_next_frame_buffer (vm, node, next1, bi1); \
          if (next0 == next1) \
            { \
              vlib_put_next_frame (vm, node, next_index, \
                                   n_left_to_next); \
              next_index = next1; \
              vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); \
            } \
        } \
    } \
} while (0)
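
/*
 * Usage sketch (illustrative only; my_classify and MY_NEXT_DROP are
 * hypothetical names, not defined in this file). The macro is the tail
 * of the canonical dual loop: both buffer indices are speculatively
 * copied into the frame for next_index, the real next0/next1 are then
 * computed, and the macro patches things up only if the guess was wrong:
 *
 *   while (n_left_from >= 4 && n_left_to_next >= 2)
 *     {
 *       u32 bi0 = from[0], bi1 = from[1];
 *       u32 next0 = MY_NEXT_DROP, next1 = MY_NEXT_DROP;
 *
 *       to_next[0] = bi0;   // speculative enqueue to next_index
 *       to_next[1] = bi1;
 *       from += 2; to_next += 2;
 *       n_left_from -= 2; n_left_to_next -= 2;
 *
 *       // ... my_classify (b0, &next0); my_classify (b1, &next1); ...
 *
 *       vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
 *                                        to_next, n_left_to_next,
 *                                        bi0, bi1, next0, next1);
 *     }
 */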


/** \brief Finish enqueueing four buffers forward in the graph.
 Standard quad loop boilerplate element. This is a MACRO,
 with MULTIPLE SIDE EFFECTS. In the ideal case,
 <code>next_index == next0 == next1 == next2 == next3</code>,
 which means that the speculative enqueue at the top of the quad loop
 has correctly dealt with all four packets. In that case, the macro does
 nothing at all.

 @param vm vlib_main_t pointer, varies by thread
 @param node current node vlib_node_runtime_t pointer
 @param next_index speculated next index used for all four packets
 @param to_next speculated vector pointer used for all four packets
 @param n_left_to_next number of slots left in speculated vector
 @param bi0 first buffer index
 @param bi1 second buffer index
 @param bi2 third buffer index
 @param bi3 fourth buffer index
 @param next0 actual next index to be used for the first packet
 @param next1 actual next index to be used for the second packet
 @param next2 actual next index to be used for the third packet
 @param next3 actual next index to be used for the fourth packet

 @return @c next_index -- speculative next index to be used for future packets
 @return @c to_next -- speculative frame to be used for future packets
 @return @c n_left_to_next -- number of slots left in speculative frame
*/

#define vlib_validate_buffer_enqueue_x4(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,bi2,bi3,next0,next1,next2,next3) \
do { \
  ASSERT (bi0 != 0); \
  ASSERT (bi1 != 0); \
  ASSERT (bi2 != 0); \
  ASSERT (bi3 != 0); \
  /* After the fact: check the [speculative] enqueue to "next" */ \
  u32 fix_speculation = (next_index ^ next0) | (next_index ^ next1) \
    | (next_index ^ next2) | (next_index ^ next3); \
  if (PREDICT_FALSE (fix_speculation)) \
    { \
      /* rewind... */ \
      to_next -= 4; \
      n_left_to_next += 4; \
 \
      /* If bi0 belongs to "next", send it there */ \
      if (next_index == next0) \
        { \
          to_next[0] = bi0; \
          to_next++; \
          n_left_to_next--; \
        } \
      else /* send it where it needs to go */ \
        vlib_set_next_frame_buffer (vm, node, next0, bi0); \
 \
      if (next_index == next1) \
        { \
          to_next[0] = bi1; \
          to_next++; \
          n_left_to_next--; \
        } \
      else \
        vlib_set_next_frame_buffer (vm, node, next1, bi1); \
 \
      if (next_index == next2) \
        { \
          to_next[0] = bi2; \
          to_next++; \
          n_left_to_next--; \
        } \
      else \
        vlib_set_next_frame_buffer (vm, node, next2, bi2); \
 \
      if (next_index == next3) \
        { \
          to_next[0] = bi3; \
          to_next++; \
          n_left_to_next--; \
        } \
      else \
        { \
          vlib_set_next_frame_buffer (vm, node, next3, bi3); \
 \
          /* Change speculation: last 2 packets went to the same node */ \
          if (next2 == next3) \
            { \
              vlib_put_next_frame (vm, node, next_index, n_left_to_next); \
              next_index = next3; \
              vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); \
            } \
        } \
    } \
} while (0)
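
/*
 * Note on the fix_speculation test: (next_index ^ nextN) is nonzero
 * exactly when nextN differs from next_index, so OR-ing the four XORs
 * folds four comparisons into a single branch that is zero only in the
 * all-match fast path. Worked example: with next_index = 5 and
 * next0..next3 = {5, 5, 7, 5}, the XOR terms are {0, 0, 2, 0}, so
 * fix_speculation = 2 and the slow path rewinds the four speculative
 * slots before re-enqueueing each buffer individually.
 */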

/** \brief Finish enqueueing one buffer forward in the graph.
 Standard single loop boilerplate element. This is a MACRO,
 with MULTIPLE SIDE EFFECTS. In the ideal case,
 <code>next_index == next0</code>,
 which means that the speculative enqueue at the top of the single loop
 has correctly dealt with the packet in hand. In that case, the macro does
 nothing at all.

 @param vm vlib_main_t pointer, varies by thread
 @param node current node vlib_node_runtime_t pointer
 @param next_index speculated next index used for the packet
 @param to_next speculated vector pointer used for the packet
 @param n_left_to_next number of slots left in speculated vector
 @param bi0 buffer index
 @param next0 actual next index to be used for the packet

 @return @c next_index -- speculative next index to be used for future packets
 @return @c to_next -- speculative frame to be used for future packets
 @return @c n_left_to_next -- number of slots left in speculative frame
*/
#define vlib_validate_buffer_enqueue_x1(vm,node,next_index,to_next,n_left_to_next,bi0,next0) \
do { \
  ASSERT (bi0 != 0); \
  if (PREDICT_FALSE (next0 != next_index)) \
    { \
      vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1); \
      next_index = next0; \
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); \
 \
      to_next[0] = bi0; \
      to_next += 1; \
      n_left_to_next -= 1; \
    } \
} while (0)
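
/*
 * Note on the "n_left_to_next + 1": by the time this macro runs, the
 * caller has already consumed one slot for the speculative enqueue
 * (to_next += 1, n_left_to_next -= 1). On a mispredict the macro hands
 * the frame back with that unused slot restored, then switches
 * speculation to next0 and enqueues bi0 into the fresh frame.
 */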

always_inline uword
generic_buffer_node_inline (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * frame,
                            uword sizeof_trace,
                            void *opaque1,
                            uword opaque2,
                            void (*two_buffers) (vlib_main_t * vm,
                                                 void *opaque1,
                                                 uword opaque2,
                                                 vlib_buffer_t * b0,
                                                 vlib_buffer_t * b1,
                                                 u32 * next0, u32 * next1),
                            void (*one_buffer) (vlib_main_t * vm,
                                                void *opaque1, uword opaque2,
                                                vlib_buffer_t * b0,
                                                u32 * next0))
{
  u32 n_left_from, *from, *to_next;
  u32 next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
                                   /* stride */ 1, sizeof_trace);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *p0, *p1;
          u32 pi0, next0;
          u32 pi1, next1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 64, LOAD);
            CLIB_PREFETCH (p3->data, 64, LOAD);
          }

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          two_buffers (vm, opaque1, opaque2, p0, p1, &next0, &next1);

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *p0;
          u32 pi0, next0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          one_buffer (vm, opaque1, opaque2, p0, &next0);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
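
/*
 * Usage sketch (illustrative; my_node_fn, my_classify_x2, my_classify_x1
 * and my_trace_t are hypothetical names). A node built on this helper
 * supplies the per-call opaques plus a dual-buffer and a single-buffer
 * classification callback:
 *
 *   static uword
 *   my_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 *               vlib_frame_t * frame)
 *   {
 *     return generic_buffer_node_inline (vm, node, frame,
 *                                        sizeof (my_trace_t),
 *                                        my_opaque_ptr, my_opaque_word,
 *                                        my_classify_x2, my_classify_x1);
 *   }
 */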

static_always_inline void
vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
                             u32 * buffers, u16 * nexts, uword count)
{
  u32 *to_next, n_left_to_next, max;
  u16 next_index;

  next_index = nexts[0];
  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
  max = clib_min (n_left_to_next, count);

  while (count)
    {
      u32 n_enqueued;
      if ((nexts[0] != next_index) || n_left_to_next == 0)
        {
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
          next_index = nexts[0];
          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
          max = clib_min (n_left_to_next, count);
        }
#if defined(CLIB_HAVE_VEC512)
      u16x32 next32 = CLIB_MEM_OVERFLOW_LOAD (u16x32_load_unaligned, nexts);
      next32 = (next32 == u16x32_splat (next32[0]));
      u64 bitmap = u16x32_msb_mask (next32);
      n_enqueued = count_trailing_zeros (~bitmap);
#elif defined(CLIB_HAVE_VEC256)
      u16x16 next16 = CLIB_MEM_OVERFLOW_LOAD (u16x16_load_unaligned, nexts);
      next16 = (next16 == u16x16_splat (next16[0]));
      u64 bitmap = u8x32_msb_mask ((u8x32) next16);
      n_enqueued = count_trailing_zeros (~bitmap) / 2;
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
      u16x8 next8 = CLIB_MEM_OVERFLOW_LOAD (u16x8_load_unaligned, nexts);
      next8 = (next8 == u16x8_splat (next8[0]));
      u64 bitmap = u8x16_msb_mask ((u8x16) next8);
      n_enqueued = count_trailing_zeros (~bitmap) / 2;
#else
      u16 x = 0;
      if (count + 3 < max)
        {
          x |= next_index ^ nexts[1];
          x |= next_index ^ nexts[2];
          x |= next_index ^ nexts[3];
          n_enqueued = (x == 0) ? 4 : 1;
        }
      else
        n_enqueued = 1;
#endif

      if (PREDICT_FALSE (n_enqueued > max))
        n_enqueued = max;

#ifdef CLIB_HAVE_VEC512
      if (n_enqueued >= 32)
        {
          vlib_buffer_copy_indices (to_next, buffers, 32);
          nexts += 32;
          to_next += 32;
          buffers += 32;
          n_left_to_next -= 32;
          count -= 32;
          max -= 32;
          continue;
        }
#endif

#ifdef CLIB_HAVE_VEC256
      if (n_enqueued >= 16)
        {
          vlib_buffer_copy_indices (to_next, buffers, 16);
          nexts += 16;
          to_next += 16;
          buffers += 16;
          n_left_to_next -= 16;
          count -= 16;
          max -= 16;
          continue;
        }
#endif

#ifdef CLIB_HAVE_VEC128
      if (n_enqueued >= 8)
        {
          vlib_buffer_copy_indices (to_next, buffers, 8);
          nexts += 8;
          to_next += 8;
          buffers += 8;
          n_left_to_next -= 8;
          count -= 8;
          max -= 8;
          continue;
        }
#endif

      if (n_enqueued >= 4)
        {
          vlib_buffer_copy_indices (to_next, buffers, 4);
          nexts += 4;
          to_next += 4;
          buffers += 4;
          n_left_to_next -= 4;
          count -= 4;
          max -= 4;
          continue;
        }

      /* copy */
      to_next[0] = buffers[0];

      /* next */
      nexts += 1;
      to_next += 1;
      buffers += 1;
      n_left_to_next -= 1;
      count -= 1;
      max -= 1;
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
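
/*
 * Note on the SIMD paths above: each variant measures how many
 * consecutive entries of nexts[] match nexts[0]. In the AVX2 case, for
 * example, 16 u16 next indices are compared against a splat of
 * nexts[0]; matching lanes become 0xffff, u8x32_msb_mask() yields two
 * mask bits per lane, and count_trailing_zeros(~bitmap)/2 counts the
 * leading run of matches. Worked example: if nexts[] starts
 * {3, 3, 3, 5, ...}, the low mask bits are 0b111111, ~bitmap has six
 * trailing zeros, and n_enqueued = 3, so three buffer indices are
 * bulk-copied into the current frame in one shot.
 */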

static_always_inline void
vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
                                    vlib_node_runtime_t * node, u32 * buffers,
                                    u16 next_index, u32 count)
{
  u32 *to_next, n_left_to_next, n_enq;

  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (PREDICT_TRUE (n_left_to_next >= count))
    {
      vlib_buffer_copy_indices (to_next, buffers, count);
      n_left_to_next -= count;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      return;
    }

  n_enq = n_left_to_next;
next:
  vlib_buffer_copy_indices (to_next, buffers, n_enq);
  n_left_to_next -= n_enq;

  if (PREDICT_FALSE (count > n_enq))
    {
      count -= n_enq;
      buffers += n_enq;

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      n_enq = clib_min (n_left_to_next, count);
      goto next;
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
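
/*
 * Note: the common case is a single vlib_buffer_copy_indices() into the
 * current frame. Only when count exceeds the free slots does the goto
 * loop engage: it ships the full frame with vlib_put_next_frame(),
 * pulls a fresh one, and copies the remainder in
 * clib_min (n_left_to_next, count)-sized chunks until done.
 */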

static_always_inline u32
vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
                               u32 * buffer_indices, u16 * thread_indices,
                               u32 n_packets, int drop_on_congestion)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_frame_queue_main_t *fqm;
  vlib_frame_queue_per_thread_data_t *ptd;
  u32 n_left = n_packets;
  u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
  vlib_frame_queue_elt_t *hf = 0;
  u32 n_left_to_next_thread = 0, *to_next_thread = 0;
  u32 next_thread_index, current_thread_index = ~0;
  int i;

  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);

  while (n_left)
    {
      next_thread_index = thread_indices[0];

      if (next_thread_index != current_thread_index)
        {
          if (drop_on_congestion &&
              is_vlib_frame_queue_congested
              (frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
               ptd->congested_handoff_queue_by_thread_index))
            {
              dbi[0] = buffer_indices[0];
              dbi++;
              n_drop++;
              goto next;
            }

          if (hf)
            hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

          hf = vlib_get_worker_handoff_queue_elt (frame_queue_index,
                                                  next_thread_index,
                                                  ptd->handoff_queue_elt_by_thread_index);

          n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
          to_next_thread = &hf->buffer_index[hf->n_vectors];
          current_thread_index = next_thread_index;
        }

      to_next_thread[0] = buffer_indices[0];
      to_next_thread++;
      n_left_to_next_thread--;

      if (n_left_to_next_thread == 0)
        {
          hf->n_vectors = VLIB_FRAME_SIZE;
          vlib_put_frame_queue_elt (hf);
          vlib_mains[current_thread_index]->check_frame_queues = 1;
          current_thread_index = ~0;
          ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
          hf = 0;
        }

      /* next */
    next:
      thread_indices += 1;
      buffer_indices += 1;
      n_left -= 1;
    }

  if (hf)
    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

  /* Ship frames to the thread nodes */
  for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
    {
      if (ptd->handoff_queue_elt_by_thread_index[i])
        {
          hf = ptd->handoff_queue_elt_by_thread_index[i];
          /*
           * It works better to let the handoff node
           * rate-adapt, always ship the handoff queue element.
           */
          if (1 || hf->n_vectors == hf->last_n_vectors)
            {
              vlib_put_frame_queue_elt (hf);
              vlib_mains[i]->check_frame_queues = 1;
              ptd->handoff_queue_elt_by_thread_index[i] = 0;
            }
          else
            hf->last_n_vectors = hf->n_vectors;
        }
      ptd->congested_handoff_queue_by_thread_index[i] =
        (vlib_frame_queue_t *) (~0);
    }

  if (drop_on_congestion && n_drop)
    vlib_buffer_free (vm, drop_list, n_drop);

  return n_packets - n_drop;
}
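
/*
 * Usage sketch (illustrative; my_fq_index and the per-packet thread
 * selection are hypothetical). A handoff node computes a destination
 * thread for each buffer, then lets this helper batch the indices into
 * per-thread frame queues; the return value reveals congestion drops:
 *
 *   u16 thread_indices[VLIB_FRAME_SIZE];
 *   u32 *from = vlib_frame_vector_args (frame);
 *   // ... fill thread_indices[i] for each of frame->n_vectors buffers ...
 *   u32 n_enq = vlib_buffer_enqueue_to_thread (vm, my_fq_index, from,
 *                                              thread_indices,
 *                                              frame->n_vectors,
 *                                              1 (drop_on_congestion));
 *   // n_enq < frame->n_vectors means some packets were dropped.
 */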

#endif /* included_vlib_buffer_node_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */