/* FD.io VPP v21.06 — Vector Packet Processing — nat44_ed_handoff.c */
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief NAT44 worker handoff
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/handoff.h>
#include <vnet/fib/ip4_fib.h>
#include <vppinfra/error.h>

#include <nat/nat44-ed/nat44_ed.h>

29 typedef struct
30 {
36 
37 #define foreach_nat44_handoff_error \
38  _ (CONGESTION_DROP, "congestion drop") \
39  _ (SAME_WORKER, "same worker") \
40  _ (DO_HANDOFF, "do handoff")
41 
42 typedef enum
43 {
44 #define _(sym, str) NAT44_HANDOFF_ERROR_##sym,
46 #undef _
49 
50 static char *nat44_handoff_error_strings[] = {
51 #define _(sym,string) string,
53 #undef _
54 };
55 
56 static u8 *
57 format_nat44_handoff_trace (u8 * s, va_list * args)
58 {
59  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
60  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
61  nat44_handoff_trace_t *t = va_arg (*args, nat44_handoff_trace_t *);
62  char *tag, *output;
63 
64  tag = t->in2out ? "IN2OUT" : "OUT2IN";
65  output = t->output ? "OUTPUT-FEATURE" : "";
66  s =
67  format (s, "NAT44_%s_WORKER_HANDOFF %s: next-worker %d trace index %d",
68  tag, output, t->next_worker_index, t->trace_index);
69 
70  return s;
71 }
72 
73 static inline uword
76  vlib_frame_t * frame, u8 is_output,
77  u8 is_in2out)
78 {
79  u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;
80 
81  u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
83  snat_main_t *sm = &snat_main;
84 
85  u32 fq_index, thread_index = vm->thread_index;
86 
87  from = vlib_frame_vector_args (frame);
88  n_left_from = frame->n_vectors;
89 
90  vlib_get_buffers (vm, from, b, n_left_from);
91 
92  if (is_in2out)
93  {
94  fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
95  }
96  else
97  {
98  fq_index = sm->fq_out2in_index;
99  }
100 
101  while (n_left_from >= 4)
102  {
103  u32 arc_next0, arc_next1, arc_next2, arc_next3;
104  u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
105  u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3;
106  u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0;
107  ip4_header_t *ip0, *ip1, *ip2, *ip3;
108 
109  if (PREDICT_TRUE (n_left_from >= 8))
110  {
111  vlib_prefetch_buffer_header (b[4], LOAD);
112  vlib_prefetch_buffer_header (b[5], LOAD);
113  vlib_prefetch_buffer_header (b[6], LOAD);
114  vlib_prefetch_buffer_header (b[7], LOAD);
115  CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
116  CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
117  CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, LOAD);
118  CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, LOAD);
119  }
120 
121  if (is_output)
122  {
123  iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
124  iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length;
125  iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length;
126  iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length;
127  }
128 
129  ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
130  iph_offset0);
131  ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) +
132  iph_offset1);
133  ip2 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) +
134  iph_offset2);
135  ip3 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) +
136  iph_offset3);
137 
138  vnet_feature_next (&arc_next0, b[0]);
139  vnet_feature_next (&arc_next1, b[1]);
140  vnet_feature_next (&arc_next2, b[2]);
141  vnet_feature_next (&arc_next3, b[3]);
142 
143  vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
144  vnet_buffer2 (b[1])->nat.arc_next = arc_next1;
145  vnet_buffer2 (b[2])->nat.arc_next = arc_next2;
146  vnet_buffer2 (b[3])->nat.arc_next = arc_next3;
147 
148  sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
149  sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
150  sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
151  sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
152 
153  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
154  rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
155  rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
156  rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);
157 
158  if (is_in2out)
159  {
160  ti[0] = nat44_ed_get_in2out_worker_index (b[0], ip0, rx_fib_index0,
161  is_output);
162  ti[1] = nat44_ed_get_in2out_worker_index (b[1], ip1, rx_fib_index1,
163  is_output);
164  ti[2] = nat44_ed_get_in2out_worker_index (b[2], ip2, rx_fib_index2,
165  is_output);
166  ti[3] = nat44_ed_get_in2out_worker_index (b[3], ip3, rx_fib_index3,
167  is_output);
168  }
169  else
170  {
171  ti[0] = nat44_ed_get_out2in_worker_index (b[0], ip0, rx_fib_index0,
172  is_output);
173  ti[1] = nat44_ed_get_out2in_worker_index (b[1], ip1, rx_fib_index1,
174  is_output);
175  ti[2] = nat44_ed_get_out2in_worker_index (b[2], ip2, rx_fib_index2,
176  is_output);
177  ti[3] = nat44_ed_get_out2in_worker_index (b[3], ip3, rx_fib_index3,
178  is_output);
179  }
180 
181  if (ti[0] == thread_index)
182  same_worker++;
183  else
184  do_handoff++;
185 
186  if (ti[1] == thread_index)
187  same_worker++;
188  else
189  do_handoff++;
190 
191  if (ti[2] == thread_index)
192  same_worker++;
193  else
194  do_handoff++;
195 
196  if (ti[3] == thread_index)
197  same_worker++;
198  else
199  do_handoff++;
200 
201  b += 4;
202  ti += 4;
203  n_left_from -= 4;
204  }
205 
206  while (n_left_from > 0)
207  {
208  u32 arc_next0;
209  u32 sw_if_index0;
210  u32 rx_fib_index0;
211  u32 iph_offset0 = 0;
212  ip4_header_t *ip0;
213 
214 
215  if (is_output)
216  iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
217 
218  ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
219  iph_offset0);
220 
221  vnet_feature_next (&arc_next0, b[0]);
222  vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
223 
224  sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
225  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
226 
227  if (is_in2out)
228  {
229  ti[0] = nat44_ed_get_in2out_worker_index (b[0], ip0, rx_fib_index0,
230  is_output);
231  }
232  else
233  {
234  ti[0] = nat44_ed_get_out2in_worker_index (b[0], ip0, rx_fib_index0,
235  is_output);
236  }
237 
238  if (ti[0] == thread_index)
239  same_worker++;
240  else
241  do_handoff++;
242 
243  b += 1;
244  ti += 1;
245  n_left_from -= 1;
246  }
247 
248  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
249  {
250  u32 i;
251  b = bufs;
252  ti = thread_indices;
253 
254  for (i = 0; i < frame->n_vectors; i++)
255  {
256  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
257  {
259  vlib_add_trace (vm, node, b[0], sizeof (*t));
260  t->next_worker_index = ti[0];
262  t->in2out = is_in2out;
263  t->output = is_output;
264 
265  b += 1;
266  ti += 1;
267  }
268  else
269  break;
270  }
271  }
272 
273  n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
274  thread_indices, frame->n_vectors, 1);
275 
276  if (n_enq < frame->n_vectors)
277  {
279  NAT44_HANDOFF_ERROR_CONGESTION_DROP,
280  frame->n_vectors - n_enq);
281  }
282 
284  NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker);
286  NAT44_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
287  return frame->n_vectors;
288 }
289 
293 {
294  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1);
295 }
296 
297 /* *INDENT-OFF* */
299  .name = "nat44-in2out-worker-handoff",
300  .vector_size = sizeof (u32),
301  .sibling_of = "nat-default",
302  .format_trace = format_nat44_handoff_trace,
305  .error_strings = nat44_handoff_error_strings,
306 };
307 /* *INDENT-ON* */
308 
311  node,
313 {
314  return nat44_worker_handoff_fn_inline (vm, node, frame, 1, 1);
315 }
316 
317 /* *INDENT-OFF* */
319  .name = "nat44-in2out-output-worker-handoff",
320  .vector_size = sizeof (u32),
321  .sibling_of = "nat-default",
322  .format_trace = format_nat44_handoff_trace,
325  .error_strings = nat44_handoff_error_strings,
326 };
327 /* *INDENT-ON* */
328 
332 {
333  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 0);
334 }
335 
336 /* *INDENT-OFF* */
338  .name = "nat44-out2in-worker-handoff",
339  .vector_size = sizeof (u32),
340  .sibling_of = "nat-default",
341  .format_trace = format_nat44_handoff_trace,
344  .error_strings = nat44_handoff_error_strings,
345 };
346 /* *INDENT-ON* */
347 
348 /*
349  * fd.io coding-style-patch-verification: ON
350  *
351  * Local Variables:
352  * eval: (c-set-style "gnu")
353  * End:
354  */
u32 nat44_ed_get_in2out_worker_index(vlib_buffer_t *b, ip4_header_t *ip, u32 rx_fib_index, u8 is_output)
Definition: nat44_ed.c:2503
#define CLIB_UNUSED(x)
Definition: clib.h:90
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
static u32 vlib_buffer_get_trace_index(vlib_buffer_t *b)
Extract the trace (pool) index from a trace handle.
Definition: buffer.h:416
#define vnet_buffer2(b)
Definition: buffer.h:499
u32 fq_in2out_output_index
Definition: nat44_ed.h:576
u32 thread_index
#define PREDICT_TRUE(x)
Definition: clib.h:125
u32 thread_index
Definition: main.h:213
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: nat44_ei.c:3048
#define VLIB_NODE_FN(node)
Definition: node.h:202
unsigned char u8
Definition: types.h:56
vlib_buffer_t ** b
u8 data[128]
Definition: ipsec_types.api:92
vlib_node_registration_t snat_in2out_output_worker_handoff_node
(constructor) VLIB_REGISTER_NODE (snat_in2out_output_worker_handoff_node)
unsigned int u32
Definition: types.h:88
vlib_get_buffers(vm, from, b, n_left_from)
return frame n_vectors
description fragment has unexpected format
Definition: map.api:433
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:231
u32 ip4_fib_table_get_index_for_sw_if_index(u32 sw_if_index)
vlib_node_registration_t snat_in2out_worker_handoff_node
(constructor) VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node)
#define VLIB_FRAME_SIZE
Definition: node.h:369
nat44_handoff_error_t
#define foreach_nat44_handoff_error
static uword nat44_worker_handoff_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_output, u8 is_in2out)
vl_api_fib_path_type_t type
Definition: fib_types.api:123
snat_main_t snat_main
Definition: nat44_ed.c:41
unsigned short u16
Definition: types.h:57
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:257
#define PREDICT_FALSE(x)
Definition: clib.h:124
vlib_node_registration_t snat_out2in_worker_handoff_node
(constructor) VLIB_REGISTER_NODE (snat_out2in_worker_handoff_node)
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
u32 node_index
Node index.
Definition: node.h:479
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1244
u32 fq_out2in_index
Definition: nat44_ed.h:577
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
u16 n_vectors
Definition: node.h:388
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
u32 ti
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:261
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:322
#define ARRAY_LEN(x)
Definition: clib.h:70
u32 fq_in2out_index
Definition: nat44_ed.h:575
u32 nat44_ed_get_out2in_worker_index(vlib_buffer_t *b, ip4_header_t *ip, u32 rx_fib_index, u8 is_output)
Definition: nat44_ed.c:2606
static u8 * format_nat44_handoff_trace(u8 *s, va_list *args)
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
VLIB buffer representation.
Definition: buffer.h:111
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
#define vnet_buffer(b)
Definition: buffer.h:437
u16 flags
Copy of main node flags.
Definition: node.h:492
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:628
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:292
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, vlib_node_runtime_t *node, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
Definition: buffer_node.h:358
Definition: defs.h:46
static char * nat44_handoff_error_strings[]