FD.io VPP v21.06
Vector Packet Processing
nat44_ei_handoff.c
/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/handoff.h>
#include <vnet/fib/ip4_fib.h>
#include <vppinfra/error.h>

#include <nat/nat44-ei/nat44_ei.h>

typedef struct
{
  u32 next_worker_index;
  u32 trace_index;
  u8 in2out;
  u8 output;
} nat44_ei_handoff_trace_t;

#define foreach_nat44_ei_handoff_error                                       \
  _ (CONGESTION_DROP, "congestion drop")                                     \
  _ (SAME_WORKER, "same worker")                                             \
  _ (DO_HANDOFF, "do handoff")

typedef enum
{
#define _(sym, str) NAT44_EI_HANDOFF_ERROR_##sym,
  foreach_nat44_ei_handoff_error
#undef _
    NAT44_EI_HANDOFF_N_ERROR,
} nat44_ei_handoff_error_t;

static char *nat44_ei_handoff_error_strings[] = {
#define _(sym, string) string,
  foreach_nat44_ei_handoff_error
#undef _
};

static u8 *
format_nat44_ei_handoff_trace (u8 *s, va_list *args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  nat44_ei_handoff_trace_t *t = va_arg (*args, nat44_ei_handoff_trace_t *);
  char *tag, *output;

  tag = t->in2out ? "IN2OUT" : "OUT2IN";
  output = t->output ? "OUTPUT-FEATURE" : "";
  s =
    format (s, "NAT44_EI_%s_WORKER_HANDOFF %s: next-worker %d trace index %d",
            tag, output, t->next_worker_index, t->trace_index);

  return s;
}

static inline uword
nat44_ei_worker_handoff_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                                   vlib_frame_t *frame, u8 is_output,
                                   u8 is_in2out)
{
  u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;

  u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  nat44_ei_main_t *nm = &nat44_ei_main;

  u32 fq_index, thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left_from);

  // TODO: move to nm
  // TODO: remove callbacks and use inlines that should be moved here
  if (is_in2out)
    {
      fq_index = is_output ? nm->fq_in2out_output_index : nm->fq_in2out_index;
    }
  else
    {
      fq_index = nm->fq_out2in_index;
    }

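  /* Main loop: process buffers four at a time.  Buffers four to seven
     ahead are prefetched, the IPv4 header is located (skipping the saved
     rewrite on the output feature arc), the next node on the feature arc
     is stashed in the buffer metadata, and the destination worker thread
     is computed for every packet. */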
  while (n_left_from >= 4)
    {
      u32 arc_next0, arc_next1, arc_next2, arc_next3;
      u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
      u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3;
      u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0;
      ip4_header_t *ip0, *ip1, *ip2, *ip3;

      if (PREDICT_TRUE (n_left_from >= 8))
        {
          vlib_prefetch_buffer_header (b[4], LOAD);
          vlib_prefetch_buffer_header (b[5], LOAD);
          vlib_prefetch_buffer_header (b[6], LOAD);
          vlib_prefetch_buffer_header (b[7], LOAD);
          CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, LOAD);
        }

      if (is_output)
        {
          iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
          iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length;
          iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length;
          iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length;
        }

      ip0 =
        (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) + iph_offset0);
      ip1 =
        (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) + iph_offset1);
      ip2 =
        (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) + iph_offset2);
      ip3 =
        (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) + iph_offset3);

      vnet_feature_next (&arc_next0, b[0]);
      vnet_feature_next (&arc_next1, b[1]);
      vnet_feature_next (&arc_next2, b[2]);
      vnet_feature_next (&arc_next3, b[3]);

      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
      vnet_buffer2 (b[1])->nat.arc_next = arc_next1;
      vnet_buffer2 (b[2])->nat.arc_next = arc_next2;
      vnet_buffer2 (b[3])->nat.arc_next = arc_next3;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
      rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
      rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
      rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);

      if (is_in2out)
        {
          ti[0] =
            nat44_ei_get_in2out_worker_index (ip0, rx_fib_index0, is_output);
          ti[1] =
            nat44_ei_get_in2out_worker_index (ip1, rx_fib_index1, is_output);
          ti[2] =
            nat44_ei_get_in2out_worker_index (ip2, rx_fib_index2, is_output);
          ti[3] =
            nat44_ei_get_in2out_worker_index (ip3, rx_fib_index3, is_output);
        }
      else
        {
          ti[0] = nat44_ei_get_out2in_worker_index (b[0], ip0, rx_fib_index0,
                                                    is_output);
          ti[1] = nat44_ei_get_out2in_worker_index (b[1], ip1, rx_fib_index1,
                                                    is_output);
          ti[2] = nat44_ei_get_out2in_worker_index (b[2], ip2, rx_fib_index2,
                                                    is_output);
          ti[3] = nat44_ei_get_out2in_worker_index (b[3], ip3, rx_fib_index3,
                                                    is_output);
        }

      if (ti[0] == thread_index)
        same_worker++;
      else
        do_handoff++;

      if (ti[1] == thread_index)
        same_worker++;
      else
        do_handoff++;

      if (ti[2] == thread_index)
        same_worker++;
      else
        do_handoff++;

      if (ti[3] == thread_index)
        same_worker++;
      else
        do_handoff++;

      b += 4;
      ti += 4;
      n_left_from -= 4;
    }

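  /* Tail loop: the remaining (fewer than four) buffers are handled one at
     a time with the same per-packet worker selection. */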
  while (n_left_from > 0)
    {
      u32 arc_next0;
      u32 sw_if_index0;
      u32 rx_fib_index0;
      u32 iph_offset0 = 0;
      ip4_header_t *ip0;

      if (is_output)
        iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;

      ip0 =
        (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) + iph_offset0);

      vnet_feature_next (&arc_next0, b[0]);
      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);

      if (is_in2out)
        {
          ti[0] =
            nat44_ei_get_in2out_worker_index (ip0, rx_fib_index0, is_output);
        }
      else
        {
          ti[0] = nat44_ei_get_out2in_worker_index (b[0], ip0, rx_fib_index0,
                                                    is_output);
        }

      if (ti[0] == thread_index)
        same_worker++;
      else
        do_handoff++;

      b += 1;
      ti += 1;
      n_left_from -= 1;
    }

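  /* When tracing is enabled, record the chosen worker for each traced
     buffer; the scan stops at the first untraced buffer. */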
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      u32 i;
      b = bufs;
      ti = thread_indices;

      for (i = 0; i < frame->n_vectors; i++)
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              nat44_ei_handoff_trace_t *t =
                vlib_add_trace (vm, node, b[0], sizeof (*t));
              t->next_worker_index = ti[0];
              t->trace_index = vlib_buffer_get_trace_index (b[0]);
              t->in2out = is_in2out;
              t->output = is_output;

              b += 1;
              ti += 1;
            }
          else
            break;
        }
    }

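  /* Hand off: enqueue every buffer to the frame queue of its target
     worker.  Buffers that cannot be enqueued are dropped and accounted as
     congestion drops; the same-worker / do-handoff counters are updated
     for statistics. */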
  n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
                                         thread_indices, frame->n_vectors, 1);

  if (n_enq < frame->n_vectors)
    {
      vlib_node_increment_counter (vm, node->node_index,
                                   NAT44_EI_HANDOFF_ERROR_CONGESTION_DROP,
                                   frame->n_vectors - n_enq);
    }

  vlib_node_increment_counter (
    vm, node->node_index, NAT44_EI_HANDOFF_ERROR_SAME_WORKER, same_worker);
  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_EI_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
  return frame->n_vectors;
}

VLIB_NODE_FN (nat44_ei_in2out_worker_handoff_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return nat44_ei_worker_handoff_fn_inline (vm, node, frame, 0, 1);
}

VLIB_NODE_FN (nat44_ei_in2out_output_worker_handoff_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return nat44_ei_worker_handoff_fn_inline (vm, node, frame, 1, 1);
}

VLIB_NODE_FN (nat44_ei_out2in_worker_handoff_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return nat44_ei_worker_handoff_fn_inline (vm, node, frame, 0, 0);
}

VLIB_REGISTER_NODE (nat44_ei_in2out_output_worker_handoff_node) = {
  .name = "nat44-ei-in2out-output-worker-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_nat44_ei_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nat44_ei_handoff_error_strings),
  .error_strings = nat44_ei_handoff_error_strings,
};

VLIB_REGISTER_NODE (nat44_ei_in2out_worker_handoff_node) = {
  .name = "nat44-ei-in2out-worker-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_nat44_ei_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nat44_ei_handoff_error_strings),
  .error_strings = nat44_ei_handoff_error_strings,
};

VLIB_REGISTER_NODE (nat44_ei_out2in_worker_handoff_node) = {
  .name = "nat44-ei-out2in-worker-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_nat44_ei_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nat44_ei_handoff_error_strings),
  .error_strings = nat44_ei_handoff_error_strings,
};


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
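The per-packet worker selection above is delegated to nat44_ei_get_in2out_worker_index() and nat44_ei_get_out2in_worker_index(), which are defined in nat44_ei.c. As a rough illustration of the idea, here is a minimal sketch, assuming the same includes as this file and hypothetical workers / first_worker_index stand-ins for the fields kept in nat44_ei_main_t: the inside source address is hashed so that all traffic from one inside host lands on the same worker, keeping its NAT session state local to a single thread.

/* Illustrative sketch only, not the plugin's actual helper. */
static inline u32
example_pick_in2out_worker (ip4_header_t *ip, u32 *workers,
                            u32 first_worker_index)
{
  u32 a = clib_net_to_host_u32 (ip->src_address.as_u32);
  /* Fold the address into a small hash; one inside host always maps to
     the same worker, so its NAT sessions stay on one thread. */
  u32 hash = a + (a >> 8) + (a >> 16) + (a >> 24);
  return first_worker_index + workers[hash % vec_len (workers)];
}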