/*
 * FD.io VPP v19.08-27-gf4dcae4 — Vector Packet Processing
 * nat44_handoff.c
 */
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief NAT44 worker handoff
18  */
19 
20 #include <vlib/vlib.h>
21 #include <vnet/vnet.h>
22 #include <vnet/handoff.h>
23 #include <vnet/fib/ip4_fib.h>
24 #include <vppinfra/error.h>
25 #include <nat/nat.h>
26 
27 typedef struct
28 {
34 
/* Error/statistics counters exposed by the handoff nodes:
 * _(symbol, human-readable counter string) */
#define foreach_nat44_handoff_error                       \
_(CONGESTION_DROP, "congestion drop")                     \
_(SAME_WORKER, "same worker")                             \
_(DO_HANDOFF, "do handoff")
39 
40 typedef enum
41 {
42 #define _(sym,str) NAT44_HANDOFF_ERROR_##sym,
44 #undef _
47 
48 static char *nat44_handoff_error_strings[] = {
49 #define _(sym,string) string,
51 #undef _
52 };
53 
54 
55 static u8 *
56 format_nat44_handoff_trace (u8 * s, va_list * args)
57 {
58  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60  nat44_handoff_trace_t *t = va_arg (*args, nat44_handoff_trace_t *);
61  char *tag, *output;
62 
63  tag = t->in2out ? "IN2OUT" : "OUT2IN";
64  output = t->output ? "OUTPUT-FEATURE" : "";
65  s =
66  format (s, "NAT44_%s_WORKER_HANDOFF %s: next-worker %d trace index %d",
67  tag, output, t->next_worker_index, t->trace_index);
68 
69  return s;
70 }
71 
72 static inline uword
74  vlib_node_runtime_t * node,
75  vlib_frame_t * frame, u8 is_output,
76  u8 is_in2out)
77 {
78  u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;
79 
80  u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
81  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
82  snat_main_t *sm = &snat_main;
83 
84  snat_get_worker_function_t *get_worker;
85  u32 fq_index, thread_index = vm->thread_index;
86 
87  from = vlib_frame_vector_args (frame);
88  n_left_from = frame->n_vectors;
89 
90  vlib_get_buffers (vm, from, b, n_left_from);
91 
92  if (is_in2out)
93  {
94  fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
95  get_worker = sm->worker_in2out_cb;
96  }
97  else
98  {
99  fq_index = sm->fq_out2in_index;
100  get_worker = sm->worker_out2in_cb;
101  }
102 
103  while (n_left_from >= 4)
104  {
105  u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
106  u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3;
107  u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0;
108  ip4_header_t *ip0, *ip1, *ip2, *ip3;
109 
110  if (PREDICT_TRUE (n_left_from >= 8))
111  {
112  vlib_prefetch_buffer_header (b[4], STORE);
113  vlib_prefetch_buffer_header (b[5], STORE);
114  vlib_prefetch_buffer_header (b[6], STORE);
115  vlib_prefetch_buffer_header (b[7], STORE);
116  CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, STORE);
117  CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, STORE);
118  CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, STORE);
119  CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, STORE);
120  }
121 
122  if (is_output)
123  {
124  iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
125  iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length;
126  iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length;
127  iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length;
128  }
129 
130  ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
131  iph_offset0);
132  ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) +
133  iph_offset1);
134  ip2 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) +
135  iph_offset2);
136  ip3 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) +
137  iph_offset3);
138 
139  sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
140  sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
141  sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
142  sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
143 
144  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
145  rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
146  rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
147  rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);
148 
149  ti[0] = get_worker (ip0, rx_fib_index0, is_output);
150  ti[1] = get_worker (ip1, rx_fib_index1, is_output);
151  ti[2] = get_worker (ip2, rx_fib_index2, is_output);
152  ti[3] = get_worker (ip3, rx_fib_index3, is_output);
153 
154  if (ti[0] == thread_index)
155  same_worker++;
156  else
157  do_handoff++;
158 
159  if (ti[1] == thread_index)
160  same_worker++;
161  else
162  do_handoff++;
163 
164  if (ti[2] == thread_index)
165  same_worker++;
166  else
167  do_handoff++;
168 
169  if (ti[3] == thread_index)
170  same_worker++;
171  else
172  do_handoff++;
173 
174  b += 4;
175  ti += 4;
176  n_left_from -= 4;
177  }
178 
179  while (n_left_from > 0)
180  {
181  u32 sw_if_index0;
182  u32 rx_fib_index0;
183  u32 iph_offset0 = 0;
184  ip4_header_t *ip0;
185 
186 
187  if (is_output)
188  iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
189 
190  ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
191  iph_offset0);
192 
193  sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
194  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
195 
196  ti[0] = get_worker (ip0, rx_fib_index0, is_output);
197 
198  if (ti[0] == thread_index)
199  same_worker++;
200  else
201  do_handoff++;
202 
203  b += 1;
204  ti += 1;
205  n_left_from -= 1;
206  }
207 
208  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
209  {
210  u32 i;
211  b = bufs;
212  ti = thread_indices;
213 
214  for (i = 0; i < frame->n_vectors; i++)
215  {
216  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
217  {
219  vlib_add_trace (vm, node, b[0], sizeof (*t));
220  t->next_worker_index = ti[0];
222  t->in2out = is_in2out;
223  t->output = is_output;
224 
225  b += 1;
226  ti += 1;
227  }
228  else
229  break;
230  }
231  }
232 
233  n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
234  frame->n_vectors, 1);
235 
236  if (n_enq < frame->n_vectors)
237  {
239  NAT44_HANDOFF_ERROR_CONGESTION_DROP,
240  frame->n_vectors - n_enq);
241  }
242 
244  NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker);
246  NAT44_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
247  return frame->n_vectors;
248 }
249 
250 
251 
253  vlib_node_runtime_t * node,
254  vlib_frame_t * frame)
255 {
256  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1);
257 }
258 
259 /* *INDENT-OFF* */
261  .name = "nat44-in2out-worker-handoff",
262  .vector_size = sizeof (u32),
263  .format_trace = format_nat44_handoff_trace,
266  .error_strings = nat44_handoff_error_strings,
267  .n_next_nodes = 1,
268  .next_nodes = {
269  [0] = "error-drop",
270  },
271 };
272 /* *INDENT-ON* */
273 
276  node,
277  vlib_frame_t * frame)
278 {
279  return nat44_worker_handoff_fn_inline (vm, node, frame, 1, 1);
280 }
281 
282 /* *INDENT-OFF* */
284  .name = "nat44-in2out-output-worker-handoff",
285  .vector_size = sizeof (u32),
286  .format_trace = format_nat44_handoff_trace,
289  .error_strings = nat44_handoff_error_strings,
290  .n_next_nodes = 1,
291  .next_nodes = {
292  [0] = "error-drop",
293  },
294 };
295 /* *INDENT-ON* */
296 
298  vlib_node_runtime_t * node,
299  vlib_frame_t * frame)
300 {
301  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 0);
302 }
303 
304 /* *INDENT-OFF* */
306  .name = "nat44-out2in-worker-handoff",
307  .vector_size = sizeof (u32),
308  .format_trace = format_nat44_handoff_trace,
311  .error_strings = nat44_handoff_error_strings,
312  .n_next_nodes = 1,
313  .next_nodes = {
314  [0] = "error-drop",
315  },
316 };
317 /* *INDENT-ON* */
318 
319 /*
320  * fd.io coding-style-patch-verification: ON
321  *
322  * Local Variables:
323  * eval: (c-set-style "gnu")
324  * End:
325  */
u32 flags
Definition: vhost_user.h:141
#define CLIB_UNUSED(x)
Definition: clib.h:82
static char * nat44_handoff_error_strings[]
Definition: nat44_handoff.c:48
#define foreach_nat44_handoff_error
Definition: nat44_handoff.c:35
static u32 vlib_buffer_get_trace_index(vlib_buffer_t *b)
Extract the trace (pool) index from a trace handle.
Definition: buffer.h:389
u32 fq_in2out_output_index
Definition: nat.h:572
#define PREDICT_TRUE(x)
Definition: clib.h:112
u32 thread_index
Definition: main.h:197
int i
vlib_node_registration_t snat_in2out_worker_handoff_node
(constructor) VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node)
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
u8 data[128]
Definition: ipsec.api:249
#define VLIB_NODE_FN(node)
Definition: node.h:201
unsigned char u8
Definition: types.h:56
u32 ip4_fib_table_get_index_for_sw_if_index(u32 sw_if_index)
Definition: ip4_fib.c:224
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:203
unsigned int u32
Definition: types.h:88
#define VLIB_FRAME_SIZE
Definition: node.h:376
vl_api_fib_path_type_t type
Definition: fib_types.api:123
unsigned short u16
Definition: types.h:57
nat44_handoff_error_t
Definition: nat44_handoff.c:40
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
#define PREDICT_FALSE(x)
Definition: clib.h:111
u32 node_index
Node index.
Definition: node.h:494
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1150
u32 fq_out2in_index
Definition: nat.h:573
snat_main_t snat_main
Definition: nat.c:39
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
u16 n_vectors
Definition: node.h:395
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
vlib_main_t * vm
Definition: buffer.c:312
snat_get_worker_function_t * worker_in2out_cb
Definition: nat.h:519
u32( snat_get_worker_function_t)(ip4_header_t *ip, u32 rx_fib_index, u8 is_output)
Definition: nat.h:497
#define ARRAY_LEN(x)
Definition: clib.h:62
snat_get_worker_function_t * worker_out2in_cb
Definition: nat.h:520
u32 fq_in2out_index
Definition: nat.h:571
static uword nat44_worker_handoff_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_output, u8 is_in2out)
Definition: nat44_handoff.c:73
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:244
vlib_node_registration_t snat_out2in_worker_handoff_node
(constructor) VLIB_REGISTER_NODE (snat_out2in_worker_handoff_node)
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
Definition: buffer_node.h:487
#define vnet_buffer(b)
Definition: buffer.h:361
vlib_node_registration_t snat_in2out_output_worker_handoff_node
(constructor) VLIB_REGISTER_NODE (snat_in2out_output_worker_handoff_node)
u16 flags
Copy of main node flags.
Definition: node.h:507
static u8 * format_nat44_handoff_trace(u8 *s, va_list *args)
Definition: nat44_handoff.c:56
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:244
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:301
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
Definition: defs.h:46