/* nat44_handoff.c — FD.io VPP v20.09-64-g4f7b92f0a (Vector Packet Processing) */
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief NAT44 worker handoff
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/handoff.h>
#include <vnet/fib/ip4_fib.h>
#include <vppinfra/error.h>
#include <nat/nat.h>
#include <nat/nat_inlines.h>
27 
28 typedef struct
29 {
35 
/* Error/counter definitions shared by the enum and the string table below. */
#define foreach_nat44_handoff_error                       \
_(CONGESTION_DROP, "congestion drop")                     \
_(SAME_WORKER, "same worker")                             \
_(DO_HANDOFF, "do handoff")

41 typedef enum
42 {
43 #define _(sym,str) NAT44_HANDOFF_ERROR_##sym,
45 #undef _
48 
49 static char *nat44_handoff_error_strings[] = {
50 #define _(sym,string) string,
52 #undef _
53 };
54 
55 
56 static u8 *
57 format_nat44_handoff_trace (u8 * s, va_list * args)
58 {
59  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
60  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
61  nat44_handoff_trace_t *t = va_arg (*args, nat44_handoff_trace_t *);
62  char *tag, *output;
63 
64  tag = t->in2out ? "IN2OUT" : "OUT2IN";
65  output = t->output ? "OUTPUT-FEATURE" : "";
66  s =
67  format (s, "NAT44_%s_WORKER_HANDOFF %s: next-worker %d trace index %d",
68  tag, output, t->next_worker_index, t->trace_index);
69 
70  return s;
71 }
72 
73 static inline uword
76  vlib_frame_t * frame, u8 is_output,
77  u8 is_in2out)
78 {
79  u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;
80 
81  u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
82  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
83  snat_main_t *sm = &snat_main;
84 
85  u32 fq_index, thread_index = vm->thread_index;
86 
87  from = vlib_frame_vector_args (frame);
88  n_left_from = frame->n_vectors;
89 
90  vlib_get_buffers (vm, from, b, n_left_from);
91 
92  if (is_in2out)
93  {
94  fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
95  }
96  else
97  {
98  fq_index = sm->fq_out2in_index;
99  }
100 
101  while (n_left_from >= 4)
102  {
103  u32 arc_next0, arc_next1, arc_next2, arc_next3;
104  u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
105  u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3;
106  u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0;
107  ip4_header_t *ip0, *ip1, *ip2, *ip3;
108 
109  if (PREDICT_TRUE (n_left_from >= 8))
110  {
111  vlib_prefetch_buffer_header (b[4], LOAD);
112  vlib_prefetch_buffer_header (b[5], LOAD);
113  vlib_prefetch_buffer_header (b[6], LOAD);
114  vlib_prefetch_buffer_header (b[7], LOAD);
115  CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
116  CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
117  CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, LOAD);
118  CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, LOAD);
119  }
120 
121  if (is_output)
122  {
123  iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
124  iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length;
125  iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length;
126  iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length;
127  }
128 
129  ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
130  iph_offset0);
131  ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) +
132  iph_offset1);
133  ip2 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) +
134  iph_offset2);
135  ip3 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) +
136  iph_offset3);
137 
138  vnet_feature_next (&arc_next0, b[0]);
139  vnet_feature_next (&arc_next1, b[1]);
140  vnet_feature_next (&arc_next2, b[2]);
141  vnet_feature_next (&arc_next3, b[3]);
142 
143  vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
144  vnet_buffer2 (b[1])->nat.arc_next = arc_next1;
145  vnet_buffer2 (b[2])->nat.arc_next = arc_next2;
146  vnet_buffer2 (b[3])->nat.arc_next = arc_next3;
147 
148  sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
149  sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
150  sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
151  sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
152 
153  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
154  rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
155  rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
156  rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);
157 
158  if (is_in2out)
159  {
160  ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, is_output);
161  ti[1] = sm->worker_in2out_cb (ip1, rx_fib_index1, is_output);
162  ti[2] = sm->worker_in2out_cb (ip2, rx_fib_index2, is_output);
163  ti[3] = sm->worker_in2out_cb (ip3, rx_fib_index3, is_output);
164  }
165  else
166  {
167  ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output);
168  ti[1] = sm->worker_out2in_cb (b[1], ip1, rx_fib_index1, is_output);
169  ti[2] = sm->worker_out2in_cb (b[2], ip2, rx_fib_index2, is_output);
170  ti[3] = sm->worker_out2in_cb (b[3], ip3, rx_fib_index3, is_output);
171  }
172 
173  if (ti[0] == thread_index)
174  same_worker++;
175  else
176  do_handoff++;
177 
178  if (ti[1] == thread_index)
179  same_worker++;
180  else
181  do_handoff++;
182 
183  if (ti[2] == thread_index)
184  same_worker++;
185  else
186  do_handoff++;
187 
188  if (ti[3] == thread_index)
189  same_worker++;
190  else
191  do_handoff++;
192 
193  b += 4;
194  ti += 4;
195  n_left_from -= 4;
196  }
197 
198  while (n_left_from > 0)
199  {
200  u32 arc_next0;
201  u32 sw_if_index0;
202  u32 rx_fib_index0;
203  u32 iph_offset0 = 0;
204  ip4_header_t *ip0;
205 
206 
207  if (is_output)
208  iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
209 
210  ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
211  iph_offset0);
212 
213  vnet_feature_next (&arc_next0, b[0]);
214  vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
215 
216  sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
217  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
218 
219  if (is_in2out)
220  {
221  ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, is_output);
222  }
223  else
224  {
225  ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output);
226  }
227 
228  if (ti[0] == thread_index)
229  same_worker++;
230  else
231  do_handoff++;
232 
233  b += 1;
234  ti += 1;
235  n_left_from -= 1;
236  }
237 
238  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
239  {
240  u32 i;
241  b = bufs;
242  ti = thread_indices;
243 
244  for (i = 0; i < frame->n_vectors; i++)
245  {
246  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
247  {
249  vlib_add_trace (vm, node, b[0], sizeof (*t));
250  t->next_worker_index = ti[0];
252  t->in2out = is_in2out;
253  t->output = is_output;
254 
255  b += 1;
256  ti += 1;
257  }
258  else
259  break;
260  }
261  }
262 
263  n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
264  frame->n_vectors, 1);
265 
266  if (n_enq < frame->n_vectors)
267  {
269  NAT44_HANDOFF_ERROR_CONGESTION_DROP,
270  frame->n_vectors - n_enq);
271  }
272 
274  NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker);
276  NAT44_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
277  return frame->n_vectors;
278 }
279 
283 {
284  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1);
285 }
286 
287 /* *INDENT-OFF* */
289  .name = "nat44-in2out-worker-handoff",
290  .vector_size = sizeof (u32),
291  .sibling_of = "nat-default",
292  .format_trace = format_nat44_handoff_trace,
295  .error_strings = nat44_handoff_error_strings,
296 };
297 /* *INDENT-ON* */
298 
301  node,
303 {
304  return nat44_worker_handoff_fn_inline (vm, node, frame, 1, 1);
305 }
306 
307 /* *INDENT-OFF* */
309  .name = "nat44-in2out-output-worker-handoff",
310  .vector_size = sizeof (u32),
311  .sibling_of = "nat-default",
312  .format_trace = format_nat44_handoff_trace,
315  .error_strings = nat44_handoff_error_strings,
316 };
317 /* *INDENT-ON* */
318 
322 {
323  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 0);
324 }
325 
326 /* *INDENT-OFF* */
328  .name = "nat44-out2in-worker-handoff",
329  .vector_size = sizeof (u32),
330  .sibling_of = "nat-default",
331  .format_trace = format_nat44_handoff_trace,
334  .error_strings = nat44_handoff_error_strings,
335 };
336 /* *INDENT-ON* */
337 
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
#define CLIB_UNUSED(x)
Definition: clib.h:87
static char * nat44_handoff_error_strings[]
Definition: nat44_handoff.c:49
#define foreach_nat44_handoff_error
Definition: nat44_handoff.c:36
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:103
static u32 vlib_buffer_get_trace_index(vlib_buffer_t *b)
Extract the trace (pool) index from a trace handle.
Definition: buffer.h:388
#define vnet_buffer2(b)
Definition: buffer.h:482
u32 fq_in2out_output_index
Definition: nat.h:557
#define PREDICT_TRUE(x)
Definition: clib.h:121
u32 thread_index
Definition: main.h:249
vlib_node_registration_t snat_in2out_worker_handoff_node
(constructor) VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node)
vlib_main_t * vm
Definition: in2out_ed.c:1582
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
#define VLIB_NODE_FN(node)
Definition: node.h:202
unsigned char u8
Definition: types.h:56
u8 data[128]
Definition: ipsec_types.api:89
u32 ip4_fib_table_get_index_for_sw_if_index(u32 sw_if_index)
Definition: ip4_fib.c:230
snat_get_worker_out2in_function_t * worker_out2in_cb
Definition: nat.h:500
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:203
unsigned int u32
Definition: types.h:88
#define VLIB_FRAME_SIZE
Definition: node.h:377
vl_api_fib_path_type_t type
Definition: fib_types.api:123
unsigned short u16
Definition: types.h:57
nat44_handoff_error_t
Definition: nat44_handoff.c:41
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
#define PREDICT_FALSE(x)
Definition: clib.h:120
u32 node_index
Node index.
Definition: node.h:487
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1231
u32 fq_out2in_index
Definition: nat.h:558
snat_main_t snat_main
Definition: nat.c:39
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
u16 n_vectors
Definition: node.h:396
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:322
#define ARRAY_LEN(x)
Definition: clib.h:67
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1582
u32 fq_in2out_index
Definition: nat.h:556
static uword nat44_worker_handoff_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_output, u8 is_in2out)
Definition: nat44_handoff.c:74
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1583
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:297
vlib_node_registration_t snat_out2in_worker_handoff_node
(constructor) VLIB_REGISTER_NODE (snat_out2in_worker_handoff_node)
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
Definition: buffer_node.h:494
snat_get_worker_in2out_function_t * worker_in2out_cb
Definition: nat.h:499
#define vnet_buffer(b)
Definition: buffer.h:417
vlib_node_registration_t snat_in2out_output_worker_handoff_node
(constructor) VLIB_REGISTER_NODE (snat_in2out_output_worker_handoff_node)
u16 flags
Copy of main node flags.
Definition: node.h:500
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:577
static u8 * format_nat44_handoff_trace(u8 *s, va_list *args)
Definition: nat44_handoff.c:57
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:280
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:301
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
Definition: defs.h:46