/*
 * FD.io VPP v20.01-48-g3e0dafb74 — Vector Packet Processing
 * sample plugin: node.c (mac-swap sample graph node)
 */
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vppinfra/error.h>
#include <sample/sample.h>
21 
22 typedef struct
23 {
26  u8 new_src_mac[6];
27  u8 new_dst_mac[6];
29 
30 
31 /* packet trace format function */
32 static u8 *
33 format_sample_trace (u8 * s, va_list * args)
34 {
35  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
36  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
37  sample_trace_t *t = va_arg (*args, sample_trace_t *);
38 
39  s = format (s, "SAMPLE: sw_if_index %d, next index %d\n",
40  t->sw_if_index, t->next_index);
41  s = format (s, " new src %U -> new dst %U",
44 
45  return s;
46 }
47 
49 
50 #define foreach_sample_error \
51 _(SWAPPED, "Mac swap packets processed")
52 
53 typedef enum
54 {
55 #define _(sym,str) SAMPLE_ERROR_##sym,
57 #undef _
60 
61 static char *sample_error_strings[] = {
62 #define _(sym,string) string,
64 #undef _
65 };
66 
67 typedef enum
68 {
72 
73 /*
74  * Simple dual/single loop version, default version which will compile
75  * everywhere.
76  *
77  * Node costs 30 clocks/pkt at a vector size of 51
78  */
79 
80 #define VERSION_1 1
81 #ifdef VERSION_1
82 #define foreach_mac_address_offset \
83 _(0) \
84 _(1) \
85 _(2) \
86 _(3) \
87 _(4) \
88 _(5)
89 
92 {
93  u32 n_left_from, *from, *to_next;
94  sample_next_t next_index;
95  u32 pkts_swapped = 0;
96 
98  n_left_from = frame->n_vectors;
99  next_index = node->cached_next_index;
100 
101  while (n_left_from > 0)
102  {
103  u32 n_left_to_next;
104 
105  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
106 
107  while (n_left_from >= 4 && n_left_to_next >= 2)
108  {
111  u32 sw_if_index0, sw_if_index1;
112  u8 tmp0[6], tmp1[6];
113  ethernet_header_t *en0, *en1;
114  u32 bi0, bi1;
115  vlib_buffer_t *b0, *b1;
116 
117  /* Prefetch next iteration. */
118  {
119  vlib_buffer_t *p2, *p3;
120 
121  p2 = vlib_get_buffer (vm, from[2]);
122  p3 = vlib_get_buffer (vm, from[3]);
123 
124  vlib_prefetch_buffer_header (p2, LOAD);
125  vlib_prefetch_buffer_header (p3, LOAD);
126 
129  }
130 
131  /* speculatively enqueue b0 and b1 to the current next frame */
132  to_next[0] = bi0 = from[0];
133  to_next[1] = bi1 = from[1];
134  from += 2;
135  to_next += 2;
136  n_left_from -= 2;
137  n_left_to_next -= 2;
138 
139  b0 = vlib_get_buffer (vm, bi0);
140  b1 = vlib_get_buffer (vm, bi1);
141 
142  ASSERT (b0->current_data == 0);
143  ASSERT (b1->current_data == 0);
144 
145  en0 = vlib_buffer_get_current (b0);
146  en1 = vlib_buffer_get_current (b1);
147 
148  /* This is not the fastest way to swap src + dst mac addresses */
149 #define _(a) tmp0[a] = en0->src_address[a];
151 #undef _
152 #define _(a) en0->src_address[a] = en0->dst_address[a];
154 #undef _
155 #define _(a) en0->dst_address[a] = tmp0[a];
157 #undef _
158 
159 #define _(a) tmp1[a] = en1->src_address[a];
161 #undef _
162 #define _(a) en1->src_address[a] = en1->dst_address[a];
164 #undef _
165 #define _(a) en1->dst_address[a] = tmp1[a];
167 #undef _
168 
169  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
170  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
171 
172  /* Send pkt back out the RX interface */
173  vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
174  vnet_buffer (b1)->sw_if_index[VLIB_TX] = sw_if_index1;
175 
176  pkts_swapped += 2;
177 
178  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
179  {
180  if (b0->flags & VLIB_BUFFER_IS_TRACED)
181  {
182  sample_trace_t *t =
183  vlib_add_trace (vm, node, b0, sizeof (*t));
184  t->sw_if_index = sw_if_index0;
185  t->next_index = next0;
187  sizeof (t->new_src_mac));
189  sizeof (t->new_dst_mac));
190 
191  }
192  if (b1->flags & VLIB_BUFFER_IS_TRACED)
193  {
194  sample_trace_t *t =
195  vlib_add_trace (vm, node, b1, sizeof (*t));
196  t->sw_if_index = sw_if_index1;
197  t->next_index = next1;
199  sizeof (t->new_src_mac));
201  sizeof (t->new_dst_mac));
202  }
203  }
204 
205  /* verify speculative enqueues, maybe switch current next frame */
206  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
207  to_next, n_left_to_next,
208  bi0, bi1, next0, next1);
209  }
210 
211  while (n_left_from > 0 && n_left_to_next > 0)
212  {
213  u32 bi0;
214  vlib_buffer_t *b0;
216  u32 sw_if_index0;
217  u8 tmp0[6];
218  ethernet_header_t *en0;
219 
220  /* speculatively enqueue b0 to the current next frame */
221  bi0 = from[0];
222  to_next[0] = bi0;
223  from += 1;
224  to_next += 1;
225  n_left_from -= 1;
226  n_left_to_next -= 1;
227 
228  b0 = vlib_get_buffer (vm, bi0);
229  /*
230  * Direct from the driver, we should be at offset 0
231  * aka at &b0->data[0]
232  */
233  ASSERT (b0->current_data == 0);
234 
235  en0 = vlib_buffer_get_current (b0);
236 
237  /* This is not the fastest way to swap src + dst mac addresses */
238 #define _(a) tmp0[a] = en0->src_address[a];
240 #undef _
241 #define _(a) en0->src_address[a] = en0->dst_address[a];
243 #undef _
244 #define _(a) en0->dst_address[a] = tmp0[a];
246 #undef _
247 
248  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
249 
250  /* Send pkt back out the RX interface */
251  vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
252 
254  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
255  {
256  sample_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
257  t->sw_if_index = sw_if_index0;
258  t->next_index = next0;
260  sizeof (t->new_src_mac));
262  sizeof (t->new_dst_mac));
263  }
264 
265  pkts_swapped += 1;
266 
267  /* verify speculative enqueue, maybe switch current next frame */
268  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
269  to_next, n_left_to_next,
270  bi0, next0);
271  }
272 
273  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
274  }
275 
277  SAMPLE_ERROR_SWAPPED, pkts_swapped);
278  return frame->n_vectors;
279 }
280 #endif
281 
282 /*
283  * This version swaps mac addresses using an MMX vector shuffle
284  * Node costs about 17 clocks/pkt at a vector size of 26
285  */
286 #ifdef VERSION_2
289 {
290  u32 n_left_from, *from, *to_next;
291  sample_next_t next_index;
292  u32 pkts_swapped = 0;
293  /* Vector shuffle mask to swap src, dst */
294  u8x16 swapmac = { 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 12, 13, 14, 15 };
295 
296  from = vlib_frame_vector_args (frame);
297  n_left_from = frame->n_vectors;
298  next_index = node->cached_next_index;
299 
300  while (n_left_from > 0)
301  {
302  u32 n_left_to_next;
303 
304  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
305  while (n_left_from >= 4 && n_left_to_next >= 2)
306  {
309  u32 sw_if_index0, sw_if_index1;
310  u8x16 src_dst0, src_dst1;
311  ethernet_header_t *en0, *en1;
312  u32 bi0, bi1;
313  vlib_buffer_t *b0, *b1;
314 
315  /* Prefetch next iteration. */
316  {
317  vlib_buffer_t *p2, *p3;
318 
319  p2 = vlib_get_buffer (vm, from[2]);
320  p3 = vlib_get_buffer (vm, from[3]);
321 
322  vlib_prefetch_buffer_header (p2, LOAD);
323  vlib_prefetch_buffer_header (p3, LOAD);
324 
327  }
328 
329  /* speculatively enqueue b0 and b1 to the current next frame */
330  to_next[0] = bi0 = from[0];
331  to_next[1] = bi1 = from[1];
332  from += 2;
333  to_next += 2;
334  n_left_from -= 2;
335  n_left_to_next -= 2;
336 
337  b0 = vlib_get_buffer (vm, bi0);
338  b1 = vlib_get_buffer (vm, bi1);
339 
340  ASSERT (b0->current_data == 0);
341  ASSERT (b1->current_data == 0);
342 
343  en0 = vlib_buffer_get_current (b0);
344  en1 = vlib_buffer_get_current (b1);
345 
346  src_dst0 = ((u8x16 *) en0)[0];
347  src_dst1 = ((u8x16 *) en1)[0];
348  src_dst0 = u8x16_shuffle (src_dst0, swapmac);
349  src_dst1 = u8x16_shuffle (src_dst1, swapmac);
350  ((u8x16 *) en0)[0] = src_dst0;
351  ((u8x16 *) en1)[0] = src_dst1;
352 
353  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
354  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
355 
356  /* Send pkt back out the RX interface */
357  vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
358  vnet_buffer (b1)->sw_if_index[VLIB_TX] = sw_if_index1;
359 
360  pkts_swapped += 2;
361 
362  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
363  {
364  if (b0->flags & VLIB_BUFFER_IS_TRACED)
365  {
366  sample_trace_t *t =
367  vlib_add_trace (vm, node, b0, sizeof (*t));
368  t->sw_if_index = sw_if_index0;
369  t->next_index = next0;
371  sizeof (t->new_src_mac));
373  sizeof (t->new_dst_mac));
374 
375  }
376  if (b1->flags & VLIB_BUFFER_IS_TRACED)
377  {
378  sample_trace_t *t =
379  vlib_add_trace (vm, node, b1, sizeof (*t));
380  t->sw_if_index = sw_if_index1;
381  t->next_index = next1;
383  sizeof (t->new_src_mac));
385  sizeof (t->new_dst_mac));
386  }
387  }
388 
389  /* verify speculative enqueues, maybe switch current next frame */
390  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
391  to_next, n_left_to_next,
392  bi0, bi1, next0, next1);
393  }
394 
395  while (n_left_from > 0 && n_left_to_next > 0)
396  {
397  u32 bi0;
398  vlib_buffer_t *b0;
400  u32 sw_if_index0;
401  u8x16 src_dst0;
402  ethernet_header_t *en0;
403 
404  /* speculatively enqueue b0 to the current next frame */
405  bi0 = from[0];
406  to_next[0] = bi0;
407  from += 1;
408  to_next += 1;
409  n_left_from -= 1;
410  n_left_to_next -= 1;
411 
412  b0 = vlib_get_buffer (vm, bi0);
413  /*
414  * Direct from the driver, we should be at offset 0
415  * aka at &b0->data[0]
416  */
417  ASSERT (b0->current_data == 0);
418 
419  en0 = vlib_buffer_get_current (b0);
420  src_dst0 = ((u8x16 *) en0)[0];
421  src_dst0 = u8x16_shuffle (src_dst0, swapmac);
422  ((u8x16 *) en0)[0] = src_dst0;
423 
424  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
425 
426  /* Send pkt back out the RX interface */
427  vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
428 
430  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
431  {
432  sample_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
433  t->sw_if_index = sw_if_index0;
434  t->next_index = next0;
436  sizeof (t->new_src_mac));
438  sizeof (t->new_dst_mac));
439  }
440 
441  pkts_swapped += 1;
442 
443  /* verify speculative enqueue, maybe switch current next frame */
444  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
445  to_next, n_left_to_next,
446  bi0, next0);
447  }
448 
449  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
450  }
451 
453  SAMPLE_ERROR_SWAPPED, pkts_swapped);
454  return frame->n_vectors;
455 }
456 #endif
457 
458 
459 /*
460  * This version computes all of the buffer pointers in
461  * one motion, uses a quad/single loop model, and
462  * traces the entire frame in one motion.
463  *
464  * Node costs about 16 clocks/pkt at a vector size of 26
465  *
466  * Some compilation drama with u8x16_shuffle, so turned off by
467  * default.
468  */
469 
470 #ifdef VERSION_3
471 
472 #define u8x16_shuffle __builtin_shuffle
473 /* This would normally be a stack local, but since it's a constant... */
474 static const u16 nexts[VLIB_FRAME_SIZE] = { 0 };
475 
478 {
479  u32 n_left_from, *from;
480  u32 pkts_swapped = 0;
481  /* Vector shuffle mask to swap src, dst */
482  u8x16 swapmac = { 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 12, 13, 14, 15 };
483  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
484  /* See comment below about sending all pkts to the same place... */
485  u16 *next __attribute__ ((unused));
486 
487  from = vlib_frame_vector_args (frame);
488  n_left_from = frame->n_vectors;
489 
490  vlib_get_buffers (vm, from, bufs, n_left_from);
491  b = bufs;
492  // next = nexts;
493 
494  /*
495  * We send all pkts to SAMPLE_NEXT_INTERFACE_OUTPUT, aka
496  * graph arc 0. So the usual setting of next[0...3] is commented
497  * out below
498  */
499 
500  while (n_left_from >= 4)
501  {
502  u8x16 src_dst0, src_dst1, src_dst2, src_dst3;
503  /* Prefetch next iteration. */
504  if (PREDICT_TRUE (n_left_from >= 8))
505  {
506  vlib_prefetch_buffer_header (b[4], STORE);
507  vlib_prefetch_buffer_header (b[5], STORE);
508  vlib_prefetch_buffer_header (b[6], STORE);
509  vlib_prefetch_buffer_header (b[7], STORE);
510  CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, STORE);
511  CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, STORE);
512  CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, STORE);
513  CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, STORE);
514  }
515 
516  src_dst0 = ((u8x16 *) vlib_buffer_get_current (b[0]))[0];
517  src_dst1 = ((u8x16 *) vlib_buffer_get_current (b[1]))[0];
518  src_dst2 = ((u8x16 *) vlib_buffer_get_current (b[2]))[0];
519  src_dst3 = ((u8x16 *) vlib_buffer_get_current (b[3]))[0];
520 
521  src_dst0 = u8x16_shuffle (src_dst0, swapmac);
522  src_dst1 = u8x16_shuffle (src_dst1, swapmac);
523  src_dst2 = u8x16_shuffle (src_dst2, swapmac);
524  src_dst3 = u8x16_shuffle (src_dst3, swapmac);
525 
526  ((u8x16 *) vlib_buffer_get_current (b[0]))[0] = src_dst0;
527  ((u8x16 *) vlib_buffer_get_current (b[1]))[0] = src_dst1;
528  ((u8x16 *) vlib_buffer_get_current (b[2]))[0] = src_dst2;
529  ((u8x16 *) vlib_buffer_get_current (b[3]))[0] = src_dst3;
530 
531  vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
532  vnet_buffer (b[0])->sw_if_index[VLIB_RX];
533  vnet_buffer (b[1])->sw_if_index[VLIB_TX] =
534  vnet_buffer (b[1])->sw_if_index[VLIB_RX];
535  vnet_buffer (b[2])->sw_if_index[VLIB_TX] =
536  vnet_buffer (b[2])->sw_if_index[VLIB_RX];
537  vnet_buffer (b[3])->sw_if_index[VLIB_TX] =
538  vnet_buffer (b[3])->sw_if_index[VLIB_RX];
539 
540  // next[0] = SAMPLE_NEXT_INTERFACE_OUTPUT;
541  // next[1] = SAMPLE_NEXT_INTERFACE_OUTPUT;
542  // next[2] = SAMPLE_NEXT_INTERFACE_OUTPUT;
543  // next[3] = SAMPLE_NEXT_INTERFACE_OUTPUT;
544 
545  b += 4;
546  // next += 4;
547  n_left_from -= 4;
548  pkts_swapped += 4;
549  }
550 
551  while (n_left_from > 0)
552  {
553  u8x16 src_dst0;
554  src_dst0 = ((u8x16 *) vlib_buffer_get_current (b[0]))[0];
555  src_dst0 = u8x16_shuffle (src_dst0, swapmac);
556  ((u8x16 *) vlib_buffer_get_current (b[0]))[0] = src_dst0;
557  vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
558  vnet_buffer (b[0])->sw_if_index[VLIB_RX];
559  // next[0] = SAMPLE_NEXT_INTERFACE_OUTPUT;
560 
561  b += 1;
562  // next += 1;
563  n_left_from -= 1;
564  pkts_swapped += 1;
565 
566  }
567  vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts,
568  frame->n_vectors);
569 
571  SAMPLE_ERROR_SWAPPED, pkts_swapped);
572 
573  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
574  {
575  int i;
576  b = bufs;
577 
578  for (i = 0; i < frame->n_vectors; i++)
579  {
580  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
581  {
582  ethernet_header_t *en;
583  sample_trace_t *t =
584  vlib_add_trace (vm, node, b[0], sizeof (*t));
585  t->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
587  en = vlib_buffer_get_current (b[0]);
589  sizeof (t->new_src_mac));
591  sizeof (t->new_dst_mac));
592  b++;
593  }
594  else
595  break;
596  }
597  }
598  return frame->n_vectors;
599 }
600 #endif
601 
602 /*
603  * This version computes all of the buffer pointers in
604  * one motion, uses a fully pipelined loop model, and
605  * traces the entire frame in one motion.
606  *
607  * It's performance-competative with other coding paradigms,
608  * and it's the simplest way to write performant vpp code
609  */
610 
611 
612 #ifdef VERSION_4
613 
614 #define u8x16_shuffle __builtin_shuffle
615 
616 static u8x16 swapmac =
617  { 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 12, 13, 14, 15 };
618 
619 /* Final stage in the pipeline, do the mac swap */
620 static inline u32
622 {
623  u8x16 src_dst0;
624  src_dst0 = ((u8x16 *) vlib_buffer_get_current (b))[0];
625  src_dst0 = u8x16_shuffle (src_dst0, swapmac);
626  ((u8x16 *) vlib_buffer_get_current (b))[0] = src_dst0;
627  vnet_buffer (b)->sw_if_index[VLIB_TX] =
628  vnet_buffer (b)->sw_if_index[VLIB_RX];
629  /* set next-index[] to 0 for this buffer */
630  return 0;
631 }
632 
633 /*
634  * Add a couple of nil stages to increase the prefetch stride.
635  * For any specific platform, the optimal prefetch stride may differ.
636  */
637 static inline void
639 {
640 }
641 
642 static inline void
643 stage2 (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_t * b)
644 {
645 }
646 
647 #define NSTAGES 4
648 #define STAGE_INLINE inline __attribute__((__always_inline__))
649 
650 #define stage0 generic_stage0
651 
652 #include <vnet/pipeline.h>
653 
656 {
657  dispatch_pipeline (vm, node, frame);
658 
660  SAMPLE_ERROR_SWAPPED, frame->n_vectors);
661  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
662  {
663  int i;
664  b = bufs;
665 
666  for (i = 0; i < frame->n_vectors; i++)
667  {
668  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
669  {
670  ethernet_header_t *en;
671  sample_trace_t *t =
672  vlib_add_trace (vm, node, b[0], sizeof (*t));
673  t->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
675  en = vlib_buffer_get_current (b[0]);
677  sizeof (t->new_src_mac));
679  sizeof (t->new_dst_mac));
680  b++;
681  }
682  else
683  break;
684  }
685  }
686  return frame->n_vectors;
687 }
688 #endif
689 
690 /* *INDENT-OFF* */
692 {
693  .name = "sample",
694  .vector_size = sizeof (u32),
695  .format_trace = format_sample_trace,
697 
698  .n_errors = ARRAY_LEN(sample_error_strings),
699  .error_strings = sample_error_strings,
700 
701  .n_next_nodes = SAMPLE_N_NEXT,
702 
703  /* edit / add dispositions here */
704  .next_nodes = {
705  [SAMPLE_NEXT_INTERFACE_OUTPUT] = "interface-output",
706  },
707 };
708 /* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
/*
 * NOTE(review): the Doxygen cross-reference residue that trailed this file
 * (auto-generated symbol index from the scraped documentation page) was not
 * part of node.c and has been removed.
 */