FD.io VPP  v19.08-27-gf4dcae4
Vector Packet Processing
ip4_forward.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015-2019 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * ip/ip4_forward.h: IP v4 forwarding
17  *
18  * Copyright (c) 2008 Eliot Dresselhaus
19  *
20  * Permission is hereby granted, free of charge, to any person obtaining
21  * a copy of this software and associated documentation files (the
22  * "Software"), to deal in the Software without restriction, including
23  * without limitation the rights to use, copy, modify, merge, publish,
24  * distribute, sublicense, and/or sell copies of the Software, and to
25  * permit persons to whom the Software is furnished to do so, subject to
26  * the following conditions:
27  *
28  * The above copyright notice and this permission notice shall be
29  * included in all copies or substantial portions of the Software.
30  *
31  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38  */
39 
40 #ifndef __included_ip4_forward_h__
41 #define __included_ip4_forward_h__
42 
43 #include <vppinfra/cache.h>
44 #include <vnet/fib/ip4_fib.h>
46 
47 /**
48  * @file
49  * @brief IPv4 Forwarding.
50  *
51  * This file contains the source code for IPv4 forwarding.
52  */
53 
56  vlib_node_runtime_t * node, vlib_frame_t * frame)
57 {
58  ip4_main_t *im = &ip4_main;
60  u32 n_left, *from;
61  u32 thread_index = vm->thread_index;
63  vlib_buffer_t **b = bufs;
64  u16 nexts[VLIB_FRAME_SIZE], *next;
65 
66  from = vlib_frame_vector_args (frame);
67  n_left = frame->n_vectors;
68  next = nexts;
69  vlib_get_buffers (vm, from, bufs, n_left);
70 
71 #if (CLIB_N_PREFETCHES >= 8)
72  while (n_left >= 4)
73  {
74  ip4_header_t *ip0, *ip1, *ip2, *ip3;
75  const load_balance_t *lb0, *lb1, *lb2, *lb3;
76  ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
77  ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3;
78  ip4_address_t *dst_addr0, *dst_addr1, *dst_addr2, *dst_addr3;
79  u32 lb_index0, lb_index1, lb_index2, lb_index3;
80  flow_hash_config_t flow_hash_config0, flow_hash_config1;
81  flow_hash_config_t flow_hash_config2, flow_hash_config3;
82  u32 hash_c0, hash_c1, hash_c2, hash_c3;
83  const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
84 
85  /* Prefetch next iteration. */
86  if (n_left >= 8)
87  {
88  vlib_prefetch_buffer_header (b[4], LOAD);
89  vlib_prefetch_buffer_header (b[5], LOAD);
90  vlib_prefetch_buffer_header (b[6], LOAD);
91  vlib_prefetch_buffer_header (b[7], LOAD);
92 
93  CLIB_PREFETCH (b[4]->data, sizeof (ip0[0]), LOAD);
94  CLIB_PREFETCH (b[5]->data, sizeof (ip0[0]), LOAD);
95  CLIB_PREFETCH (b[6]->data, sizeof (ip0[0]), LOAD);
96  CLIB_PREFETCH (b[7]->data, sizeof (ip0[0]), LOAD);
97  }
98 
99  ip0 = vlib_buffer_get_current (b[0]);
100  ip1 = vlib_buffer_get_current (b[1]);
101  ip2 = vlib_buffer_get_current (b[2]);
102  ip3 = vlib_buffer_get_current (b[3]);
103 
104  dst_addr0 = &ip0->dst_address;
105  dst_addr1 = &ip1->dst_address;
106  dst_addr2 = &ip2->dst_address;
107  dst_addr3 = &ip3->dst_address;
108 
113 
114  mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
115  mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
116  mtrie2 = &ip4_fib_get (vnet_buffer (b[2])->ip.fib_index)->mtrie;
117  mtrie3 = &ip4_fib_get (vnet_buffer (b[3])->ip.fib_index)->mtrie;
118 
119  leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
120  leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1);
121  leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, dst_addr2);
122  leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, dst_addr3);
123 
124  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
125  leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);
126  leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 2);
127  leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 2);
128 
129  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
130  leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);
131  leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 3);
132  leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 3);
133 
134  lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
135  lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
136  lb_index2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2);
137  lb_index3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3);
138 
139  ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
140  lb0 = load_balance_get (lb_index0);
141  lb1 = load_balance_get (lb_index1);
142  lb2 = load_balance_get (lb_index2);
143  lb3 = load_balance_get (lb_index3);
144 
145  ASSERT (lb0->lb_n_buckets > 0);
146  ASSERT (is_pow2 (lb0->lb_n_buckets));
147  ASSERT (lb1->lb_n_buckets > 0);
148  ASSERT (is_pow2 (lb1->lb_n_buckets));
149  ASSERT (lb2->lb_n_buckets > 0);
150  ASSERT (is_pow2 (lb2->lb_n_buckets));
151  ASSERT (lb3->lb_n_buckets > 0);
152  ASSERT (is_pow2 (lb3->lb_n_buckets));
153 
154  /* Use flow hash to compute multipath adjacency. */
155  hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
156  hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
157  hash_c2 = vnet_buffer (b[2])->ip.flow_hash = 0;
158  hash_c3 = vnet_buffer (b[3])->ip.flow_hash = 0;
159  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
160  {
161  flow_hash_config0 = lb0->lb_hash_config;
162  hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
163  ip4_compute_flow_hash (ip0, flow_hash_config0);
164  dpo0 =
166  (hash_c0 &
167  (lb0->lb_n_buckets_minus_1)));
168  }
169  else
170  {
171  dpo0 = load_balance_get_bucket_i (lb0, 0);
172  }
173  if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
174  {
175  flow_hash_config1 = lb1->lb_hash_config;
176  hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
177  ip4_compute_flow_hash (ip1, flow_hash_config1);
178  dpo1 =
180  (hash_c1 &
181  (lb1->lb_n_buckets_minus_1)));
182  }
183  else
184  {
185  dpo1 = load_balance_get_bucket_i (lb1, 0);
186  }
187  if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
188  {
189  flow_hash_config2 = lb2->lb_hash_config;
190  hash_c2 = vnet_buffer (b[2])->ip.flow_hash =
191  ip4_compute_flow_hash (ip2, flow_hash_config2);
192  dpo2 =
194  (hash_c2 &
195  (lb2->lb_n_buckets_minus_1)));
196  }
197  else
198  {
199  dpo2 = load_balance_get_bucket_i (lb2, 0);
200  }
201  if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
202  {
203  flow_hash_config3 = lb3->lb_hash_config;
204  hash_c3 = vnet_buffer (b[3])->ip.flow_hash =
205  ip4_compute_flow_hash (ip3, flow_hash_config3);
206  dpo3 =
208  (hash_c3 &
209  (lb3->lb_n_buckets_minus_1)));
210  }
211  else
212  {
213  dpo3 = load_balance_get_bucket_i (lb3, 0);
214  }
215 
216  next[0] = dpo0->dpoi_next_node;
217  vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
218  next[1] = dpo1->dpoi_next_node;
219  vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
220  next[2] = dpo2->dpoi_next_node;
221  vnet_buffer (b[2])->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
222  next[3] = dpo3->dpoi_next_node;
223  vnet_buffer (b[3])->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
224 
226  (cm, thread_index, lb_index0, 1,
227  vlib_buffer_length_in_chain (vm, b[0]));
229  (cm, thread_index, lb_index1, 1,
230  vlib_buffer_length_in_chain (vm, b[1]));
232  (cm, thread_index, lb_index2, 1,
233  vlib_buffer_length_in_chain (vm, b[2]));
235  (cm, thread_index, lb_index3, 1,
236  vlib_buffer_length_in_chain (vm, b[3]));
237 
238  b += 4;
239  next += 4;
240  n_left -= 4;
241  }
242 #elif (CLIB_N_PREFETCHES >= 4)
243  while (n_left >= 4)
244  {
245  ip4_header_t *ip0, *ip1;
246  const load_balance_t *lb0, *lb1;
247  ip4_fib_mtrie_t *mtrie0, *mtrie1;
248  ip4_fib_mtrie_leaf_t leaf0, leaf1;
249  ip4_address_t *dst_addr0, *dst_addr1;
250  u32 lb_index0, lb_index1;
251  flow_hash_config_t flow_hash_config0, flow_hash_config1;
252  u32 hash_c0, hash_c1;
253  const dpo_id_t *dpo0, *dpo1;
254 
255  /* Prefetch next iteration. */
256  {
257  vlib_prefetch_buffer_header (b[2], LOAD);
258  vlib_prefetch_buffer_header (b[3], LOAD);
259 
260  CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), LOAD);
261  CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), LOAD);
262  }
263 
264  ip0 = vlib_buffer_get_current (b[0]);
265  ip1 = vlib_buffer_get_current (b[1]);
266 
267  dst_addr0 = &ip0->dst_address;
268  dst_addr1 = &ip1->dst_address;
269 
272 
273  mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
274  mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
275 
276  leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
277  leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1);
278 
279  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
280  leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);
281 
282  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
283  leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);
284 
285  lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
286  lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
287 
288  ASSERT (lb_index0 && lb_index1);
289  lb0 = load_balance_get (lb_index0);
290  lb1 = load_balance_get (lb_index1);
291 
292  ASSERT (lb0->lb_n_buckets > 0);
293  ASSERT (is_pow2 (lb0->lb_n_buckets));
294  ASSERT (lb1->lb_n_buckets > 0);
295  ASSERT (is_pow2 (lb1->lb_n_buckets));
296 
297  /* Use flow hash to compute multipath adjacency. */
298  hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
299  hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
300  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
301  {
302  flow_hash_config0 = lb0->lb_hash_config;
303  hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
304  ip4_compute_flow_hash (ip0, flow_hash_config0);
305  dpo0 =
307  (hash_c0 &
308  (lb0->lb_n_buckets_minus_1)));
309  }
310  else
311  {
312  dpo0 = load_balance_get_bucket_i (lb0, 0);
313  }
314  if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
315  {
316  flow_hash_config1 = lb1->lb_hash_config;
317  hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
318  ip4_compute_flow_hash (ip1, flow_hash_config1);
319  dpo1 =
321  (hash_c1 &
322  (lb1->lb_n_buckets_minus_1)));
323  }
324  else
325  {
326  dpo1 = load_balance_get_bucket_i (lb1, 0);
327  }
328 
329  next[0] = dpo0->dpoi_next_node;
330  vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
331  next[1] = dpo1->dpoi_next_node;
332  vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
333 
335  (cm, thread_index, lb_index0, 1,
336  vlib_buffer_length_in_chain (vm, b[0]));
338  (cm, thread_index, lb_index1, 1,
339  vlib_buffer_length_in_chain (vm, b[1]));
340 
341  b += 2;
342  next += 2;
343  n_left -= 2;
344  }
345 #endif
346  while (n_left > 0)
347  {
348  ip4_header_t *ip0;
349  const load_balance_t *lb0;
350  ip4_fib_mtrie_t *mtrie0;
351  ip4_fib_mtrie_leaf_t leaf0;
352  ip4_address_t *dst_addr0;
353  u32 lbi0;
354  flow_hash_config_t flow_hash_config0;
355  const dpo_id_t *dpo0;
356  u32 hash_c0;
357 
358  ip0 = vlib_buffer_get_current (b[0]);
359  dst_addr0 = &ip0->dst_address;
361 
362  mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
363  leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
364  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
365  leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
366  lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
367 
368  ASSERT (lbi0);
369  lb0 = load_balance_get (lbi0);
370 
371  ASSERT (lb0->lb_n_buckets > 0);
372  ASSERT (is_pow2 (lb0->lb_n_buckets));
373 
374  /* Use flow hash to compute multipath adjacency. */
375  hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
376  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
377  {
378  flow_hash_config0 = lb0->lb_hash_config;
379 
380  hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
381  ip4_compute_flow_hash (ip0, flow_hash_config0);
382  dpo0 =
384  (hash_c0 &
385  (lb0->lb_n_buckets_minus_1)));
386  }
387  else
388  {
389  dpo0 = load_balance_get_bucket_i (lb0, 0);
390  }
391 
392  next[0] = dpo0->dpoi_next_node;
393  vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
394 
395  vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
397  b[0]));
398 
399  b += 1;
400  next += 1;
401  n_left -= 1;
402  }
403 
404  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
405 
406  if (node->flags & VLIB_NODE_FLAG_TRACE)
407  ip4_forward_next_trace (vm, node, frame, VLIB_TX);
408 
409  return frame->n_vectors;
410 }
411 
412 #endif /* __included_ip4_forward_h__ */
413 
414 /*
415  * fd.io coding-style-patch-verification: ON
416  *
417  * Local Variables:
418  * eval: (c-set-style "gnu")
419  * End:
420  */
u16 lb_n_buckets
number of buckets in the load-balance.
Definition: load_balance.h:116
vlib_combined_counter_main_t lbm_to_counters
Definition: load_balance.h:46
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
The multiway-TRIE.
Definition: ip4_mtrie.h:129
static ip4_fib_mtrie_leaf_t ip4_fib_mtrie_lookup_step(const ip4_fib_mtrie_t *m, ip4_fib_mtrie_leaf_t current_leaf, const ip4_address_t *dst_address, u32 dst_address_byte_index)
Lookup step.
Definition: ip4_mtrie.h:202
static u32 ip4_compute_flow_hash(const ip4_header_t *ip, flow_hash_config_t flow_hash_config)
Definition: ip4.h:313
flow_hash_config_t lb_hash_config
the hash config to use when selecting a bucket.
Definition: load_balance.h:161
static const dpo_id_t * load_balance_get_fwd_bucket(const load_balance_t *lb, u16 bucket)
u32 thread_index
Definition: main.h:197
static uword ip4_lookup_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_forward.h:55
u32 * fib_index_by_sw_if_index
Table index indexed by software interface.
Definition: ip4.h:121
u8 data[128]
Definition: ipsec.api:249
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:366
#define always_inline
Definition: clib.h:98
u16 lb_n_buckets_minus_1
number of buckets in the load-balance - 1.
Definition: load_balance.h:121
ip4_address_t dst_address
Definition: ip4_packet.h:170
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:203
u32 ip4_fib_mtrie_leaf_t
Definition: ip4_mtrie.h:52
unsigned int u32
Definition: types.h:88
#define VLIB_FRAME_SIZE
Definition: node.h:376
static u32 ip4_fib_mtrie_leaf_get_adj_index(ip4_fib_mtrie_leaf_t n)
From the stored slot value extract the LB index value.
Definition: ip4_mtrie.h:192
The identity of a DPO is a combination of its type and its instance number/index of objects of that type.
Definition: dpo.h:170
vnet_crypto_main_t * cm
Definition: quic_crypto.c:41
static void ip_lookup_set_buffer_fib_index(u32 *fib_index_by_sw_if_index, vlib_buffer_t *b)
Definition: lookup.h:218
static const dpo_id_t * load_balance_get_bucket_i(const load_balance_t *lb, u32 bucket)
Definition: load_balance.h:229
unsigned short u16
Definition: types.h:57
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
The FIB DPO provides:
Definition: load_balance.h:106
#define PREDICT_FALSE(x)
Definition: clib.h:111
load_balance_main_t load_balance_main
The one instance of load-balance main.
Definition: load_balance.c:56
ip4_fib_mtrie_t mtrie
Mtrie for fast lookups.
Definition: ip4_fib.h:48
u16 n_vectors
Definition: node.h:395
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
vlib_main_t * vm
Definition: buffer.c:312
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:332
static ip4_fib_t * ip4_fib_get(u32 index)
Get the FIB at the given index.
Definition: ip4_fib.h:113
#define ASSERT(truth)
static ip4_fib_mtrie_leaf_t ip4_fib_mtrie_lookup_step_one(const ip4_fib_mtrie_t *m, const ip4_address_t *dst_address)
Lookup step number 1.
Definition: ip4_mtrie.h:224
static load_balance_t * load_balance_get(index_t lbi)
Definition: load_balance.h:220
IPv4 main type.
Definition: ip4.h:105
u32 flow_hash_config_t
A flow hash configuration is a mask of the flow hash options.
Definition: lookup.h:84
static uword is_pow2(uword x)
Definition: clib.h:235
Definition: defs.h:47
index_t dpoi_index
the index of objects of that type
Definition: dpo.h:186
vl_api_address_t ip
Definition: l2.api:489
void ip4_forward_next_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vlib_rx_or_tx_t which_adj_index)
Definition: ip4_forward.c:1056
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:244
A collection of combined counters.
Definition: counter.h:188
#define vnet_buffer(b)
Definition: buffer.h:361
ip4_main_t ip4_main
Global ip4 main structure.
Definition: ip4_forward.c:921
u16 flags
Copy of main node flags.
Definition: node.h:507
u16 dpoi_next_node
The next VLIB node to follow.
Definition: dpo.h:182
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:244
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:301