FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
nat64_in2out.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief NAT64 IPv6 to IPv4 translation (inside to outside network)
18  */
19 
20 #include <nat/nat64.h>
21 #include <nat/nat_inlines.h>
22 #include <vnet/ip/ip6_to_ip4.h>
23 #include <vnet/fib/fib_table.h>
24 #include <nat/lib/nat_inlines.h>
25 
26 typedef struct
27 {
32 
33 static u8 *
34 format_nat64_in2out_trace (u8 * s, va_list * args)
35 {
36  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
37  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
38  nat64_in2out_trace_t *t = va_arg (*args, nat64_in2out_trace_t *);
39  char *tag;
40 
41  tag = t->is_slow_path ? "NAT64-in2out-slowpath" : "NAT64-in2out";
42 
43  s =
44  format (s, "%s: sw_if_index %d, next index %d", tag, t->sw_if_index,
45  t->next_index);
46 
47  return s;
48 }
49 
/* Error counters for the NAT64 in2out nodes: each _(SYMBOL, "string")
 * pair expands into an enum member and its counter description below. */
#define foreach_nat64_in2out_error \
_(UNSUPPORTED_PROTOCOL, "unsupported protocol") \
_(NO_TRANSLATION, "no translation") \
_(UNKNOWN, "unknown")
54 
55 
56 typedef enum
57 {
58 #define _(sym,str) NAT64_IN2OUT_ERROR_##sym,
60 #undef _
63 
64 static char *nat64_in2out_error_strings[] = {
65 #define _(sym,string) string,
67 #undef _
68 };
69 
70 typedef enum
71 {
78 
80 {
85 
/**
 * @brief Decide whether a packet should bypass NAT64 translation.
 *
 * Walks every IPv6 address assigned to @a sw_if_index and returns 1 on an
 * exact match with @a ip6_addr, i.e. the packet is addressed to the router
 * itself and must not be translated.
 *
 * @returns 1 when the destination is a local interface address, 0 otherwise.
 */
static inline u8
nat64_not_translate (u32 sw_if_index, ip6_address_t ip6_addr)
{
  ip6_address_t *addr;
  ip6_main_t *im6 = &ip6_main;
  ip_lookup_main_t *lm6 = &im6->lookup_main;
  ip_interface_address_t *ia = 0;

  /* *INDENT-OFF* */
  foreach_ip_interface_address (lm6, ia, sw_if_index, 0,
  ({
    addr = ip_interface_address_get_address (lm6, ia);
    /* early return from inside the iteration macro on first match */
    if (0 == ip6_address_compare (addr, &ip6_addr))
      return 1;
  }));
  /* *INDENT-ON* */

  return 0;
}
105 
106 /**
107  * @brief Check whether is a hairpinning.
108  *
109  * If the destination IP address of the packet is an IPv4 address assigned to
110  * the NAT64 itself, then the packet is a hairpin packet.
111  *
 * @param dst_addr Destination address of the packet.
113  *
114  * @returns 1 if hairpinning, otherwise 0.
115  */
117 is_hairpinning (ip6_address_t * dst_addr)
118 {
119  nat64_main_t *nm = &nat64_main;
120  int i;
121 
122  for (i = 0; i < vec_len (nm->addr_pool); i++)
123  {
124  if (nm->addr_pool[i].addr.as_u32 == dst_addr->as_u32[3])
125  return 1;
126  }
127 
128  return 0;
129 }
130 
/*
 * Translate an inside (IPv6) TCP/UDP packet to outside (IPv4) in place:
 * rewrite the IP header over the buffer, find or create the BIB and
 * session-table entries for the flow, and incrementally fix the L4
 * checksum.  Returns 0 on success, -1 when no translation can be made.
 *
 * NOTE(review): doxygen scrape — lines marked "[gap]" were elided by
 * extraction, including the one carrying the function name and leading
 * parameters (presumably vlib_main_t *vm, vlib_buffer_t *p, u16 l4_offset
 * — TODO confirm against the real source).
 */
static int
  /* [gap: function name + leading parameters elided] */
  u16 frag_hdr_offset, nat64_in2out_set_ctx_t * ctx)
{
  ip6_header_t *ip6;
  ip_csum_t csum = 0;
  ip4_header_t *ip4;
  u16 fragment_id;
  u8 frag_more;
  u16 frag_offset;
  nat64_main_t *nm = &nat64_main;
  nat64_db_bib_entry_t *bibe;
  nat64_db_st_entry_t *ste;
  ip46_address_t old_saddr, old_daddr;
  ip4_address_t new_daddr;
  u32 sw_if_index, fib_index;
  /* L4 metadata comes from shallow virtual reassembly, so it is valid
     even for non-first fragments */
  u8 proto = vnet_buffer (p)->ip.reass.ip_proto;
  u16 sport = vnet_buffer (p)->ip.reass.l4_src_port;
  u16 dport = vnet_buffer (p)->ip.reass.l4_dst_port;
  nat64_db_t *db = &nm->db[ctx->thread_index];

  ip6 = vlib_buffer_get_current (p);

  /* overlay the smaller IPv4 header over the tail of the IPv6 header */
  vlib_buffer_advance (p, l4_offset - sizeof (*ip4));
  ip4 = vlib_buffer_get_current (p);

  u32 ip_version_traffic_class_and_flow_label =
    /* [gap: right-hand side elided] */
  u16 payload_length = ip6->payload_length;
  u8 hop_limit = ip6->hop_limit;

  old_saddr.as_u64[0] = ip6->src_address.as_u64[0];
  old_saddr.as_u64[1] = ip6->src_address.as_u64[1];
  old_daddr.as_u64[0] = ip6->dst_address.as_u64[0];
  old_daddr.as_u64[1] = ip6->dst_address.as_u64[1];

  if (PREDICT_FALSE (frag_hdr_offset))
    {
      //Only the first fragment
      ip6_frag_hdr_t *hdr =
	(ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_hdr_offset);
      fragment_id = frag_id_6to4 (hdr->identification);
      frag_more = ip6_frag_hdr_more (hdr);
      frag_offset = ip6_frag_hdr_offset (hdr);
    }
  else
    {
      fragment_id = 0;
      frag_offset = 0;
      frag_more = 0;
    }

  /* [gap: ip4 version/header-length setup lines elided] */
  ip4->tos = ip6_translate_tos (ip_version_traffic_class_and_flow_label);
  ip4->length =
    u16_net_add (payload_length, sizeof (*ip4) + sizeof (*ip6) - l4_offset);
  ip4->fragment_id = fragment_id;
  /* [gap: lvalue of this assignment elided] */
    clib_host_to_net_u16 (frag_offset |
			  (frag_more ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0));
  ip4->ttl = hop_limit;
  ip4->protocol = (proto == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : proto;

  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
  fib_index =
    /* [gap: fib table lookup elided] */

  ste =
    nat64_db_st_entry_find (db, &old_saddr, &old_daddr, sport, dport, proto,
			    fib_index, 1);

  if (ste)
    {
      bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
      if (!bibe)
	return -1;
    }
  else
    {
      bibe =
	nat64_db_bib_entry_find (db, &old_saddr, sport, proto, fib_index, 1);

      if (!bibe)
	{
	  u16 out_port;
	  ip4_address_t out_addr;
	  /* [gap: outside address/port allocation call elided] */
	    (fib_index, ip_proto_to_nat_proto (proto), &out_addr,
	     &out_port, ctx->thread_index))
	    return -1;

	  bibe =
	    /* [gap: BIB entry create call elided] */
				       &old_saddr.ip6, &out_addr, sport,
				       out_port, fib_index, proto, 0);
	  if (!bibe)
	    return -1;

	  /* [gap: total-BIBs counter update elided] */
				   db->bib.bib_entries_num);
	}

      nat64_extract_ip4 (&old_daddr.ip6, &new_daddr, fib_index);
      ste =
	nat64_db_st_entry_create (ctx->thread_index, db, bibe,
				  &old_daddr.ip6, &new_daddr, dport);
      if (!ste)
	return -1;

      /* [gap: total-sessions counter update elided] */
			       db->st.st_entries_num);
    }

  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;

  ip4->checksum = ip4_header_checksum (ip4);

  if (!vnet_buffer (p)->ip.reass.is_non_first_fragment)
    {
      udp_header_t *udp = (udp_header_t *) (ip4 + 1);
      udp->src_port = bibe->out_port;

      //UDP checksum is optional over IPv4
      if (proto == IP_PROTOCOL_UDP)
	{
	  udp->checksum = 0;
	}
      else
	{
	  /* TCP: adjust checksum incrementally for the rewritten
	     addresses and source port, then apply MSS clamping */
	  tcp_header_t *tcp = (tcp_header_t *) (ip4 + 1);
	  csum = ip_csum_sub_even (tcp->checksum, old_saddr.as_u64[0]);
	  csum = ip_csum_sub_even (csum, old_saddr.as_u64[1]);
	  csum = ip_csum_sub_even (csum, old_daddr.as_u64[0]);
	  csum = ip_csum_sub_even (csum, old_daddr.as_u64[1]);
	  csum = ip_csum_add_even (csum, ip4->dst_address.as_u32);
	  csum = ip_csum_add_even (csum, ip4->src_address.as_u32);
	  csum = ip_csum_sub_even (csum, sport);
	  csum = ip_csum_add_even (csum, udp->src_port);
	  mss_clamping (nm->sm->mss_clamping, tcp, &csum);
	  tcp->checksum = ip_csum_fold (csum);

	  nat64_tcp_session_set_state (ste, tcp, 1);
	}
    }

  nat64_session_reset_timeout (ste, ctx->vm);

  return 0;
}
282 
/*
 * icmp6_to_icmp callback for plain (non-error) ICMP in2out translation:
 * for echo request/reply, find or create the ICMP BIB/session entries
 * keyed by the ICMP identifier and rewrite the IPv4 addresses + id;
 * other ICMP types get the first pool address.  Returns 0 / -1.
 *
 * NOTE(review): doxygen scrape — "[gap]" marks elided source lines,
 * including the name/parameter line (presumably (ip6_header_t *ip6,
 * ip4_header_t *ip4, void *arg) — TODO confirm) and the ctx declaration.
 */
static int
/* [gap: function name and parameter list elided] */
{
  nat64_main_t *nm = &nat64_main;
  /* [gap: ctx declaration elided] */
  nat64_db_bib_entry_t *bibe;
  nat64_db_st_entry_t *ste;
  ip46_address_t saddr, daddr;
  u32 sw_if_index, fib_index;
  icmp46_header_t *icmp = ip6_next_header (ip6);
  nat64_db_t *db = &nm->db[ctx->thread_index];

  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
  fib_index =
    /* [gap: fib table lookup elided] */

  saddr.as_u64[0] = ip6->src_address.as_u64[0];
  saddr.as_u64[1] = ip6->src_address.as_u64[1];
  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
  daddr.as_u64[1] = ip6->dst_address.as_u64[1];

  if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
    {
      /* ICMP identifier sits at byte offset 4 of the ICMP header */
      u16 in_id = ((u16 *) (icmp))[2];
      ste =
	nat64_db_st_entry_find (db, &saddr, &daddr, in_id, 0,
				IP_PROTOCOL_ICMP, fib_index, 1);

      if (ste)
	{
	  bibe =
	    nat64_db_bib_entry_by_index (db, IP_PROTOCOL_ICMP,
					 ste->bibe_index);
	  if (!bibe)
	    return -1;
	}
      else
	{
	  bibe =
	    nat64_db_bib_entry_find (db, &saddr, in_id,
				     IP_PROTOCOL_ICMP, fib_index, 1);

	  if (!bibe)
	    {
	      u16 out_id;
	      ip4_address_t out_addr;
	      /* [gap: outside address/id allocation call elided] */
		(fib_index, NAT_PROTOCOL_ICMP, &out_addr, &out_id,
		 ctx->thread_index))
		return -1;

	      bibe =
		/* [gap: BIB entry create call elided] */
					   &ip6->src_address, &out_addr,
					   in_id, out_id, fib_index,
					   IP_PROTOCOL_ICMP, 0);
	      if (!bibe)
		return -1;

	      /* [gap: total-BIBs counter update elided] */
				       db->bib.bib_entries_num);
	    }

	  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
	  ste =
	    nat64_db_st_entry_create (ctx->thread_index, db, bibe,
				      &ip6->dst_address, &daddr.ip4, 0);
	  if (!ste)
	    return -1;

	  /* [gap: total-sessions counter update elided] */
				   db->st.st_entries_num);
	}

      nat64_session_reset_timeout (ste, ctx->vm);

      ip4->src_address.as_u32 = bibe->out_addr.as_u32;
      ((u16 *) (icmp))[2] = bibe->out_port;

      ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
    }
  else
    {
      if (!vec_len (nm->addr_pool))
	return -1;

      ip4->src_address.as_u32 = nm->addr_pool[0].addr.as_u32;
      nat64_extract_ip4 (&ip6->dst_address, &ip4->dst_address, fib_index);
    }

  return 0;
}
375 
/*
 * Callback translating the INNER packet embedded in an ICMPv6 error
 * message: looks up the existing session in the reverse direction
 * (daddr/saddr swapped) and rewrites the inner addresses, ports/ids and
 * L4 checksum.  Returns 0 on success, -1 when no session exists.
 *
 * NOTE(review): doxygen scrape — "[gap]" marks elided lines, including
 * the name/leading-parameter line and the ctx declaration.
 */
static int
/* [gap: function name and leading parameters elided] */
		void *arg)
{
  nat64_main_t *nm = &nat64_main;
  /* [gap: ctx declaration elided] */
  nat64_db_st_entry_t *ste;
  nat64_db_bib_entry_t *bibe;
  ip46_address_t saddr, daddr;
  u32 sw_if_index, fib_index;
  u8 proto = ip6->protocol;
  nat64_db_t *db = &nm->db[ctx->thread_index];

  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
  fib_index =
    /* [gap: fib table lookup elided] */

  saddr.as_u64[0] = ip6->src_address.as_u64[0];
  saddr.as_u64[1] = ip6->src_address.as_u64[1];
  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
  daddr.as_u64[1] = ip6->dst_address.as_u64[1];

  if (proto == IP_PROTOCOL_ICMP6)
    {
      icmp46_header_t *icmp = ip6_next_header (ip6);
      u16 in_id = ((u16 *) (icmp))[2];
      proto = IP_PROTOCOL_ICMP;

      /* only echo request/reply can appear inside an error message here */
      if (!
	  (icmp->type == ICMP4_echo_request
	   || icmp->type == ICMP4_echo_reply))
	return -1;

      /* reverse lookup: inner packet runs opposite to the outer flow */
      ste =
	nat64_db_st_entry_find (db, &daddr, &saddr, in_id, 0, proto,
				fib_index, 1);
      if (!ste)
	return -1;

      bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
      if (!bibe)
	return -1;

      ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
      ((u16 *) (icmp))[2] = bibe->out_port;
      ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
    }
  else
    {
      udp_header_t *udp = ip6_next_header (ip6);
      tcp_header_t *tcp = ip6_next_header (ip6);
      u16 *checksum;
      ip_csum_t csum;

      u16 sport = udp->src_port;
      u16 dport = udp->dst_port;

      ste =
	nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
				fib_index, 1);
      if (!ste)
	return -1;

      bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
      if (!bibe)
	return -1;

      ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
      udp->dst_port = bibe->out_port;
      ip4->src_address.as_u32 = ste->out_r_addr.as_u32;

      if (proto == IP_PROTOCOL_TCP)
	checksum = &tcp->checksum;
      else
	checksum = &udp->checksum;
      /* only the dst port changed — adjust checksum for that delta */
      csum = ip_csum_sub_even (*checksum, dport);
      csum = ip_csum_add_even (csum, udp->dst_port);
      *checksum = ip_csum_fold (csum);
    }

  return 0;
}
458 
460 {
461  ip6_address_t src_addr;
462  ip6_address_t dst_addr;
468 
/*
 * Session-table walk callback used when translating an unknown (non
 * TCP/UDP/ICMP) protocol: try to reuse the outside address already chosen
 * for an existing TCP/UDP session between the same inside src and dst.
 * Returns 1 (stop walk) when a reusable address was stored in ctx,
 * 0 to continue, -1 on inconsistent DB state.
 *
 * NOTE(review): doxygen scrape — "[gap]" marks elided lines, including
 * the ctx declaration (presumably cast of arg — TODO confirm).
 */
static int
unk_proto_st_walk (nat64_db_st_entry_t * ste, void *arg)
{
  nat64_main_t *nm = &nat64_main;
  /* [gap: ctx declaration elided] */
  nat64_db_bib_entry_t *bibe;
  ip46_address_t saddr, daddr;
  nat64_db_t *db = &nm->db[ctx->thread_index];

  if (ip6_address_is_equal (&ste->in_r_addr, &ctx->dst_addr))
    {
      bibe = nat64_db_bib_entry_by_index (db, ste->proto, ste->bibe_index);
      if (!bibe)
	return -1;

      if (ip6_address_is_equal (&bibe->in_addr, &ctx->src_addr)
	  && bibe->fib_index == ctx->fib_index)
	{
	  clib_memset (&saddr, 0, sizeof (saddr));
	  saddr.ip4.as_u32 = bibe->out_addr.as_u32;
	  clib_memset (&daddr, 0, sizeof (daddr));
	  nat64_extract_ip4 (&ctx->dst_addr, &daddr.ip4, ctx->fib_index);

	  /* [gap: session lookup call elided] */
	      (db, &daddr, &saddr, 0, 0, ctx->proto, ctx->fib_index, 0))
	    return -1;

	  ctx->out_addr.as_u32 = bibe->out_addr.as_u32;
	  return 1;
	}
    }

  return 0;
}
503 
/*
 * Translate an in2out packet of an unknown (non TCP/UDP/ICMP) L4 protocol:
 * these have no ports, so BIB/session entries are keyed on addresses only
 * and the outside address is reused from an existing TCP/UDP session to
 * the same destination when possible.  Returns 0 on success, -1 otherwise.
 *
 * NOTE(review): doxygen scrape — "[gap]" marks elided lines, including
 * the name/leading-parameter line.
 */
static int
/* [gap: function name and leading parameters elided] */
			   u16 l4_offset, u16 frag_hdr_offset,
			   nat64_in2out_set_ctx_t * s_ctx)
{
  ip6_header_t *ip6;
  ip4_header_t *ip4;
  u16 fragment_id;
  u16 frag_offset;
  u8 frag_more;

  ip6 = vlib_buffer_get_current (p);

  /* overlay the smaller IPv4 header over the tail of the IPv6 header */
  ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4));

  vlib_buffer_advance (p, l4_offset - sizeof (*ip4));

  if (PREDICT_FALSE (frag_hdr_offset))
    {
      //Only the first fragment
      ip6_frag_hdr_t *hdr =
	(ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_hdr_offset);
      fragment_id = frag_id_6to4 (hdr->identification);
      frag_offset = ip6_frag_hdr_offset (hdr);
      frag_more = ip6_frag_hdr_more (hdr);
    }
  else
    {
      fragment_id = 0;
      frag_offset = 0;
      frag_more = 0;
    }

  nat64_main_t *nm = &nat64_main;
  nat64_db_bib_entry_t *bibe;
  nat64_db_st_entry_t *ste;
  ip46_address_t saddr, daddr, addr;
  u32 sw_if_index, fib_index;
  int i;
  nat64_db_t *db = &nm->db[s_ctx->thread_index];

  sw_if_index = vnet_buffer (s_ctx->b)->sw_if_index[VLIB_RX];
  fib_index =
    /* [gap: fib table lookup elided] */

  saddr.as_u64[0] = ip6->src_address.as_u64[0];
  saddr.as_u64[1] = ip6->src_address.as_u64[1];
  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
  daddr.as_u64[1] = ip6->dst_address.as_u64[1];

  ste =
    nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, l4_protocol, fib_index,
			    1);

  if (ste)
    {
      bibe = nat64_db_bib_entry_by_index (db, l4_protocol, ste->bibe_index);
      if (!bibe)
	return -1;
    }
  else
    {
      bibe =
	nat64_db_bib_entry_find (db, &saddr, 0, l4_protocol, fib_index, 1);

      if (!bibe)
	{
	  /* Choose same out address as for TCP/UDP session to same dst */
	  /* [gap: walk-context declaration opener elided] */
	    .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
	    .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
	    .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
	    .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
	    .out_addr.as_u32 = 0,
	    .fib_index = fib_index,
	    .proto = l4_protocol,
	    .thread_index = s_ctx->thread_index,
	  };

	  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);

	  if (!ctx.out_addr.as_u32)
	    nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);

	  /* Verify if out address is not already in use for protocol */
	  clib_memset (&addr, 0, sizeof (addr));
	  addr.ip4.as_u32 = ctx.out_addr.as_u32;
	  if (nat64_db_bib_entry_find (db, &addr, 0, l4_protocol, 0, 0))
	    ctx.out_addr.as_u32 = 0;

	  if (!ctx.out_addr.as_u32)
	    {
	      /* fall back to the first pool address free for this proto */
	      for (i = 0; i < vec_len (nm->addr_pool); i++)
		{
		  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
		  /* [gap: negated BIB lookup call elided] */
		      (db, &addr, 0, l4_protocol, 0, 0))
		    break;
		}
	    }

	  if (!ctx.out_addr.as_u32)
	    return -1;

	  bibe =
	    /* [gap: BIB entry create call elided] */
				       &ip6->src_address, &ctx.out_addr,
				       0, 0, fib_index, l4_protocol, 0);
	  if (!bibe)
	    return -1;

	  /* [gap: total-BIBs counter update elided] */
				   db->bib.bib_entries_num);
	}

      nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
      ste =
	nat64_db_st_entry_create (s_ctx->thread_index, db, bibe,
				  &ip6->dst_address, &daddr.ip4, 0);
      if (!ste)
	return -1;

      /* [gap: total-sessions counter update elided] */
			       db->st.st_entries_num);
    }

  nat64_session_reset_timeout (ste, s_ctx->vm);

  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;

  /* [gap: ip4 version/header-length + tos setup lines elided] */
  ip4->length = u16_net_add (ip6->payload_length,
			     sizeof (*ip4) + sizeof (*ip6) - l4_offset);
  ip4->fragment_id = fragment_id;
  /* [gap: lvalue of this assignment elided] */
    clib_host_to_net_u16 (frag_offset |
			  (frag_more ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0));
  ip4->ttl = ip6->hop_limit;
  ip4->protocol = l4_protocol;
  ip4->checksum = ip4_header_checksum (ip4);

  return 0;
}
650 
/*
 * Hairpinning for TCP/UDP: the packet's destination is a NAT64 pool
 * address, so after the normal in2out BIB/session setup the destination
 * is mapped back to the inside host owning that outside binding, and the
 * packet stays IPv6.  Returns 0 on success, -1 otherwise.
 *
 * NOTE(review): doxygen scrape — "[gap]" marks elided lines, including
 * the name/leading-parameter and trailing-parameter lines (presumably
 * (vlib_main_t *vm, vlib_buffer_t *b, ..., u32 thread_index) — confirm).
 */
static int
/* [gap: function name and leading parameters elided] */
				  ip6_header_t * ip6, u32 l4_offset,
				  /* [gap: trailing parameter(s) elided] */
{
  nat64_main_t *nm = &nat64_main;
  nat64_db_bib_entry_t *bibe;
  nat64_db_st_entry_t *ste;
  ip46_address_t saddr, daddr;
  u32 sw_if_index, fib_index;
  udp_header_t *udp = (udp_header_t *) u8_ptr_add (ip6, l4_offset);
  tcp_header_t *tcp = (tcp_header_t *) u8_ptr_add (ip6, l4_offset);
  u8 proto = vnet_buffer (b)->ip.reass.ip_proto;
  u16 sport = vnet_buffer (b)->ip.reass.l4_src_port;
  u16 dport = vnet_buffer (b)->ip.reass.l4_dst_port;
  u16 *checksum = NULL;
  ip_csum_t csum = 0;
  nat64_db_t *db = &nm->db[thread_index];

  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
  fib_index =
    /* [gap: fib table lookup elided] */

  saddr.as_u64[0] = ip6->src_address.as_u64[0];
  saddr.as_u64[1] = ip6->src_address.as_u64[1];
  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
  daddr.as_u64[1] = ip6->dst_address.as_u64[1];

  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
    {
      /* back out the old pseudo-header contribution now; the new one is
         added after both addresses have been rewritten below */
      if (proto == IP_PROTOCOL_UDP)
	checksum = &udp->checksum;
      else
	checksum = &tcp->checksum;
      csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
      csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
      csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
      csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
    }

  ste =
    nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
			    fib_index, 1);

  if (ste)
    {
      bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
      if (!bibe)
	return -1;
    }
  else
    {
      bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);

      if (!bibe)
	{
	  u16 out_port;
	  ip4_address_t out_addr;
	  /* [gap: outside address/port allocation call elided] */
	    (fib_index, ip_proto_to_nat_proto (proto), &out_addr,
	     &out_port, thread_index))
	    return -1;

	  bibe =
	    nat64_db_bib_entry_create (thread_index, db, &ip6->src_address,
				       &out_addr, sport, out_port, fib_index,
				       proto, 0);
	  if (!bibe)
	    return -1;

	  vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
				   db->bib.bib_entries_num);
	}

      nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
      ste =
	nat64_db_st_entry_create (thread_index, db, bibe, &ip6->dst_address,
				  &daddr.ip4, dport);
      if (!ste)
	return -1;

      vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
			       db->st.st_entries_num);
    }

  if (proto == IP_PROTOCOL_TCP)
    nat64_tcp_session_set_state (ste, tcp, 1);

  nat64_session_reset_timeout (ste, vm);

  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
    {
      udp->src_port = bibe->out_port;
    }

  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);

  clib_memset (&daddr, 0, sizeof (daddr));
  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;

  /* find which inside host owns the hairpinned destination — search the
     BIB of every worker's DB */
  bibe = 0;
  /* *INDENT-OFF* */
  vec_foreach (db, nm->db)
    {
      bibe = nat64_db_bib_entry_find (db, &daddr, dport, proto, 0, 0);

      if (bibe)
	break;
    }
  /* *INDENT-ON* */

  if (!bibe)
    return -1;

  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];

  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
    {
      csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
      csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
      csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
      csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
      csum = ip_csum_sub_even (csum, sport);
      csum = ip_csum_sub_even (csum, dport);
      udp->dst_port = bibe->in_port;
      csum = ip_csum_add_even (csum, udp->src_port);
      csum = ip_csum_add_even (csum, udp->dst_port);
      *checksum = ip_csum_fold (csum);
    }

  return 0;
}
784 
/*
 * Hairpinning for ICMPv6 error messages: translate the embedded (inner)
 * packet of the error against existing sessions, rewrite both inner and
 * outer IPv6 headers, and recompute the ICMPv6 checksum over the new
 * pseudo-header.  Echo request/reply is rejected here (handled elsewhere).
 * Returns 0 on success, -1 otherwise.
 *
 * NOTE(review): doxygen scrape — "[gap]" marks elided lines, including
 * the two name/parameter lines (presumably (vlib_main_t *vm,
 * vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index) — confirm).
 */
static int
/* [gap: function name and parameter list elided] */
{
  nat64_main_t *nm = &nat64_main;
  nat64_db_bib_entry_t *bibe;
  nat64_db_st_entry_t *ste;
  icmp46_header_t *icmp = ip6_next_header (ip6);
  ip6_header_t *inner_ip6;
  ip46_address_t saddr, daddr;
  u32 sw_if_index, fib_index;
  u8 proto;
  udp_header_t *udp;
  tcp_header_t *tcp;
  u16 *checksum, sport, dport;
  ip_csum_t csum;
  nat64_db_t *db = &nm->db[thread_index];

  if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
    return -1;

  /* embedded original packet starts after the 8-byte ICMP header */
  inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);

  proto = inner_ip6->protocol;

  if (proto == IP_PROTOCOL_ICMP6)
    return -1;

  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
  fib_index =
    /* [gap: fib table lookup elided] */

  saddr.as_u64[0] = inner_ip6->src_address.as_u64[0];
  saddr.as_u64[1] = inner_ip6->src_address.as_u64[1];
  daddr.as_u64[0] = inner_ip6->dst_address.as_u64[0];
  daddr.as_u64[1] = inner_ip6->dst_address.as_u64[1];

  udp = ip6_next_header (inner_ip6);
  tcp = ip6_next_header (inner_ip6);

  sport = udp->src_port;
  dport = udp->dst_port;

  if (proto == IP_PROTOCOL_UDP)
    checksum = &udp->checksum;
  else
    checksum = &tcp->checksum;

  csum = ip_csum_sub_even (*checksum, inner_ip6->src_address.as_u64[0]);
  csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]);
  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]);
  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]);
  csum = ip_csum_sub_even (csum, sport);
  csum = ip_csum_sub_even (csum, dport);

  /* inner packet runs opposite to the outer flow: reversed lookup */
  ste =
    nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
			    fib_index, 1);
  if (!ste)
    return -1;

  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
  if (!bibe)
    return -1;

  dport = udp->dst_port = bibe->out_port;
  nat64_compose_ip6 (&inner_ip6->dst_address, &bibe->out_addr, fib_index);

  clib_memset (&saddr, 0, sizeof (saddr));
  clib_memset (&daddr, 0, sizeof (daddr));
  saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
  daddr.ip4.as_u32 = bibe->out_addr.as_u32;

  /* locate the hairpinned peer session in any worker's DB */
  ste = 0;
  /* *INDENT-OFF* */
  vec_foreach (db, nm->db)
    {
      ste = nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
				    0, 0);

      if (ste)
	break;
    }
  /* *INDENT-ON* */

  if (!ste)
    return -1;

  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
  if (!bibe)
    return -1;

  inner_ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
  inner_ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
  udp->src_port = bibe->in_port;

  csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]);
  csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]);
  csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]);
  csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]);
  csum = ip_csum_add_even (csum, udp->src_port);
  csum = ip_csum_add_even (csum, udp->dst_port);
  *checksum = ip_csum_fold (csum);

  if (!vec_len (nm->addr_pool))
    return -1;

  /* outer header: source becomes the NAT64 pool address, destination the
     inside host the error is being hairpinned to */
  nat64_compose_ip6 (&ip6->src_address, &nm->addr_pool[0].addr, fib_index);
  ip6->dst_address.as_u64[0] = inner_ip6->src_address.as_u64[0];
  ip6->dst_address.as_u64[1] = inner_ip6->src_address.as_u64[1];

  /* full ICMPv6 checksum recompute over the new pseudo-header + payload */
  icmp->checksum = 0;
  csum = ip_csum_with_carry (0, ip6->payload_length);
  csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol));
  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]);
  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]);
  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]);
  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]);
  csum =
    ip_incremental_checksum (csum, icmp,
			     clib_net_to_host_u16 (ip6->payload_length));
  icmp->checksum = ~ip_csum_fold (csum);

  return 0;
}
910 
/*
 * Hairpinning for unknown (non TCP/UDP/ICMP) protocols: address-only
 * BIB/session handling as in the non-hairpin case, then the destination
 * is mapped back to the inside host owning the outside binding and the
 * packet stays IPv6.  Returns 0 on success, -1 otherwise.
 *
 * NOTE(review): doxygen scrape — "[gap]" marks elided lines, including
 * the two name/parameter lines.
 */
static int
/* [gap: function name and parameter list elided] */
{
  nat64_main_t *nm = &nat64_main;
  nat64_db_bib_entry_t *bibe;
  nat64_db_st_entry_t *ste;
  ip46_address_t saddr, daddr, addr;
  u32 sw_if_index, fib_index;
  u8 proto = ip6->protocol;
  int i;
  nat64_db_t *db = &nm->db[thread_index];

  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
  fib_index =
    /* [gap: fib table lookup elided] */

  saddr.as_u64[0] = ip6->src_address.as_u64[0];
  saddr.as_u64[1] = ip6->src_address.as_u64[1];
  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
  daddr.as_u64[1] = ip6->dst_address.as_u64[1];

  ste =
    nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);

  if (ste)
    {
      bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
      if (!bibe)
	return -1;
    }
  else
    {
      bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);

      if (!bibe)
	{
	  /* Choose same out address as for TCP/UDP session to same dst */
	  /* [gap: walk-context declaration opener elided] */
	    .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
	    .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
	    .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
	    .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
	    .out_addr.as_u32 = 0,
	    .fib_index = fib_index,
	    .proto = proto,
	    .thread_index = thread_index,
	  };

	  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);

	  if (!ctx.out_addr.as_u32)
	    nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);

	  /* Verify if out address is not already in use for protocol */
	  clib_memset (&addr, 0, sizeof (addr));
	  addr.ip4.as_u32 = ctx.out_addr.as_u32;
	  if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
	    ctx.out_addr.as_u32 = 0;

	  if (!ctx.out_addr.as_u32)
	    {
	      /* fall back to the first pool address free for this proto */
	      for (i = 0; i < vec_len (nm->addr_pool); i++)
		{
		  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
		  if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
		    break;
		}
	    }

	  if (!ctx.out_addr.as_u32)
	    return -1;

	  bibe =
	    nat64_db_bib_entry_create (thread_index, db, &ip6->src_address,
				       &ctx.out_addr, 0, 0, fib_index, proto,
				       0);
	  if (!bibe)
	    return -1;

	  vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
				   db->bib.bib_entries_num);
	}

      nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
      ste =
	nat64_db_st_entry_create (thread_index, db, bibe, &ip6->dst_address,
				  &daddr.ip4, 0);
      if (!ste)
	return -1;

      vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
			       db->st.st_entries_num);
    }

  nat64_session_reset_timeout (ste, vm);

  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);

  clib_memset (&daddr, 0, sizeof (daddr));
  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;

  /* find which inside host owns the hairpinned destination — search the
     BIB of every worker's DB */
  bibe = 0;
  /* *INDENT-OFF* */
  vec_foreach (db, nm->db)
    {
      bibe = nat64_db_bib_entry_find (db, &daddr, 0, proto, 0, 0);

      if (bibe)
	break;
    }
  /* *INDENT-ON* */

  if (!bibe)
    return -1;

  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];

  return 0;
}
1032 
/*
 * Shared dispatch loop for the nat64-in2out fast-path and slow-path graph
 * nodes (name confirmed by the wrapper call below): classifies each buffer
 * by L4 protocol, routes hairpinned traffic and "other"-protocol traffic
 * to the appropriate translator, and drops untranslatable packets.
 * Returns the number of processed vectors.
 *
 * NOTE(review): doxygen scrape — "[gap]" marks elided lines throughout
 * (the signature line, thread-index/ctx declarations, counter increments,
 * and the next0 next-node assignments).
 */
static inline uword
/* [gap: function name and leading parameters elided] */
			     vlib_frame_t * frame, u8 is_slow_path)
{
  u32 n_left_from, *from, *to_next;
  nat64_in2out_next_t next_index;
  /* [gap: thread index declaration elided] */
  nat64_main_t *nm = &nat64_main;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u32 next0;
	  ip6_header_t *ip60;
	  u16 l4_offset0, frag_hdr_offset0;
	  u8 l4_protocol0;
	  u32 proto0;
	  /* [gap: ctx0 declaration elided] */
	  u32 sw_if_index0;

	  /* speculatively enqueue b0 to the current next frame */
	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  ip60 = vlib_buffer_get_current (b0);

	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

	  ctx0.b = b0;
	  ctx0.vm = vm;
	  ctx0.thread_index = thread_index;

	  /* [gap: default next0 assignment elided] */

	  if (PREDICT_FALSE
	      (ip6_parse
	       (vm, b0, ip60, b0->current_length, &l4_protocol0, &l4_offset0,
		&frag_hdr_offset0)))
	    {
	      next0 = NAT64_IN2OUT_NEXT_DROP;
	      b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
	      goto trace0;
	    }

	  /* packets addressed to the router itself bypass translation */
	  if (nat64_not_translate (sw_if_index0, ip60->dst_address))
	    {
	      /* [gap: next0 assignment elided] */
	      goto trace0;
	    }

	  proto0 = ip_proto_to_nat_proto (l4_protocol0);

	  if (is_slow_path)
	    {
	      /* slow path handles only "other" (non TCP/UDP/ICMP) protos */
	      if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_OTHER))
		{
		  /* [gap: per-interface counter increment elided] */
					     thread_index, sw_if_index0,
					     1);
		  if (is_hairpinning (&ip60->dst_address))
		    {
		      /* [gap: hairpinning call elided] */
			  (vm, b0, ip60, thread_index))
			{
			  next0 = NAT64_IN2OUT_NEXT_DROP;
			  b0->error =
			    node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
			}
		      goto trace0;
		    }

		  /* [gap: unknown-proto translate call elided] */
		      (vm, b0, l4_protocol0, l4_offset0, frag_hdr_offset0,
		       &ctx0))
		    {
		      next0 = NAT64_IN2OUT_NEXT_DROP;
		      b0->error =
			node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
		      goto trace0;
		    }
		}
	      goto trace0;
	    }
	  else
	    {
	      /* fast path punts "other" protocols to the slow-path node */
	      if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
		{
		  /* [gap: next0 slowpath assignment elided] */
		  goto trace0;
		}
	    }

	  if (proto0 == NAT_PROTOCOL_ICMP)
	    {
	      /* [gap: counter increment elided] */
					 thread_index, sw_if_index0, 1);
	      if (is_hairpinning (&ip60->dst_address))
		{
		  /* [gap: ICMP hairpinning call elided] */
		      (vm, b0, ip60, thread_index))
		    {
		      next0 = NAT64_IN2OUT_NEXT_DROP;
		      b0->error =
			node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
		    }
		  goto trace0;
		}

	      if (icmp6_to_icmp
		  (vm, b0, nat64_in2out_icmp_set_cb, &ctx0,
		   /* [gap: inner-packet callback arguments elided] */
		{
		  next0 = NAT64_IN2OUT_NEXT_DROP;
		  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
		  goto trace0;
		}
	    }
	  else if (proto0 == NAT_PROTOCOL_TCP || proto0 == NAT_PROTOCOL_UDP)
	    {
	      if (proto0 == NAT_PROTOCOL_TCP)
		/* [gap: TCP counter increment elided] */
					   thread_index, sw_if_index0, 1);
	      else
		/* [gap: UDP counter increment elided] */
					   thread_index, sw_if_index0, 1);

	      if (is_hairpinning (&ip60->dst_address))
		{
		  /* [gap: TCP/UDP hairpinning call elided] */
		      (vm, b0, ip60, l4_offset0, thread_index))
		    {
		      next0 = NAT64_IN2OUT_NEXT_DROP;
		      b0->error =
			node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
		    }
		  goto trace0;
		}

	      /* [gap: TCP/UDP translate call elided] */
		  (vm, b0, l4_offset0, frag_hdr_offset0, &ctx0))
		{
		  next0 = NAT64_IN2OUT_NEXT_DROP;
		  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
		  goto trace0;
		}
	    }

	trace0:
	  /* [gap: trace-enabled predicate elided] */
	      && (b0->flags & VLIB_BUFFER_IS_TRACED)))
	    {
	      /* [gap: trace record declaration elided] */
		vlib_add_trace (vm, node, b0, sizeof (*t));
	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	      t->next_index = next0;
	      t->is_slow_path = is_slow_path;
	    }

	  if (next0 == NAT64_IN2OUT_NEXT_DROP)
	    {
	      /* [gap: drop counter increment elided] */
					 thread_index, sw_if_index0, 1);
	    }


	  /* verify speculative enqueue, maybe switch current next frame */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
1226 
1229  vlib_frame_t * frame)
1230 {
1231  return nat64_in2out_node_fn_inline (vm, node, frame, 0);
1232 }
1233 
1234 /* *INDENT-OFF* */
1236  .name = "nat64-in2out",
1237  .vector_size = sizeof (u32),
1238  .format_trace = format_nat64_in2out_trace,
1240  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1241  .error_strings = nat64_in2out_error_strings,
1242  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1243  /* edit / add dispositions here */
1244  .next_nodes = {
1245  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1246  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1247  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1248  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1249  },
1250 };
1251 /* *INDENT-ON* */
1252 
1255  vlib_frame_t * frame)
1256 {
1257  return nat64_in2out_node_fn_inline (vm, node, frame, 1);
1258 }
1259 
1260 /* *INDENT-OFF* */
1262  .name = "nat64-in2out-slowpath",
1263  .vector_size = sizeof (u32),
1264  .format_trace = format_nat64_in2out_trace,
1266  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1267  .error_strings = nat64_in2out_error_strings,
1268  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1269  /* edit / add dispositions here */
1270  .next_nodes = {
1271  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1272  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1273  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1274  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1275  },
1276 };
1277 /* *INDENT-ON* */
1278 
1280 {
1288 
1289 
1290 #define foreach_nat64_in2out_handoff_error \
1291 _(CONGESTION_DROP, "congestion drop") \
1292 _(SAME_WORKER, "same worker") \
1293 _(DO_HANDOFF, "do handoff")
1294 
1295 typedef enum
1296 {
1297 #define _(sym,str) NAT64_IN2OUT_HANDOFF_ERROR_##sym,
1299 #undef _
1302 
1304 #define _(sym,string) string,
1306 #undef _
1307 };
1308 
1309 typedef struct
1310 {
1313 
1314 static u8 *
1316 {
1317  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1318  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1320  va_arg (*args, nat64_in2out_handoff_trace_t *);
1321 
1322  s =
1323  format (s, "NAT64-IN2OUT-HANDOFF: next-worker %d", t->next_worker_index);
1324 
1325  return s;
1326 }
1327 
1330  vlib_frame_t * frame)
1331 {
1332  nat64_main_t *nm = &nat64_main;
1333  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1334  u32 n_enq, n_left_from, *from;
1335  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1336  u32 fq_index;
1338  u32 do_handoff = 0, same_worker = 0;
1339 
1340  from = vlib_frame_vector_args (frame);
1341  n_left_from = frame->n_vectors;
1342  vlib_get_buffers (vm, from, bufs, n_left_from);
1343 
1344  b = bufs;
1345  ti = thread_indices;
1346 
1347  fq_index = nm->fq_in2out_index;
1348 
1349  while (n_left_from > 0)
1350  {
1351  ip6_header_t *ip0;
1352 
1353  ip0 = vlib_buffer_get_current (b[0]);
1354  ti[0] = nat64_get_worker_in2out (&ip0->src_address);
1355 
1356  if (ti[0] != thread_index)
1357  do_handoff++;
1358  else
1359  same_worker++;
1360 
1361  if (PREDICT_FALSE
1362  ((node->flags & VLIB_NODE_FLAG_TRACE)
1363  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1364  {
1366  vlib_add_trace (vm, node, b[0], sizeof (*t));
1367  t->next_worker_index = ti[0];
1368  }
1369 
1370  n_left_from -= 1;
1371  ti += 1;
1372  b += 1;
1373  }
1374 
1375  n_enq =
1376  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1377  frame->n_vectors, 1);
1378 
1379  if (n_enq < frame->n_vectors)
1380  vlib_node_increment_counter (vm, node->node_index,
1381  NAT64_IN2OUT_HANDOFF_ERROR_CONGESTION_DROP,
1382  frame->n_vectors - n_enq);
1383  vlib_node_increment_counter (vm, node->node_index,
1384  NAT64_IN2OUT_HANDOFF_ERROR_SAME_WORKER,
1385  same_worker);
1386  vlib_node_increment_counter (vm, node->node_index,
1387  NAT64_IN2OUT_HANDOFF_ERROR_DO_HANDOFF,
1388  do_handoff);
1389 
1390  return frame->n_vectors;
1391 }
1392 
1393 /* *INDENT-OFF* */
1395  .name = "nat64-in2out-handoff",
1396  .vector_size = sizeof (u32),
1397  .format_trace = format_nat64_in2out_handoff_trace,
1400  .error_strings = nat64_in2out_handoff_error_strings,
1401 
1402  .n_next_nodes = 1,
1403 
1404  .next_nodes = {
1405  [0] = "error-drop",
1406  },
1407 };
1408 /* *INDENT-ON* */
1409 
1410 /*
1411  * fd.io coding-style-patch-verification: ON
1412  *
1413  * Local Variables:
1414  * eval: (c-set-style "gnu")
1415  * End:
1416  */
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
nat64_db_t * db
BIB and session DB per thread.
Definition: nat64.h:83
static int unk_proto_st_walk(nat64_db_st_entry_t *ste, void *arg)
Definition: nat64_in2out.c:470
struct nat64_main_t::@102::@103 in2out
int nat64_alloc_out_addr_and_port(u32 fib_index, nat_protocol_t proto, ip4_address_t *addr, u16 *port, u32 thread_index)
Alloce IPv4 address and port pair from NAT64 pool.
Definition: nat64.c:547
#define CLIB_UNUSED(x)
Definition: clib.h:87
ip4_address_t src_address
Definition: ip4_packet.h:125
snat_address_t * addr_pool
Address pool vector.
Definition: nat64.h:74
static int nat64_in2out_tcp_udp(vlib_main_t *vm, vlib_buffer_t *p, u16 l4_offset, u16 frag_hdr_offset, nat64_in2out_set_ctx_t *ctx)
Definition: nat64_in2out.c:132
void nat64_extract_ip4(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Extract IPv4 address from the IPv4-embedded IPv6 addresses.
Definition: nat64.c:1119
#define PREDICT_TRUE(x)
Definition: clib.h:121
vlib_node_registration_t nat64_in2out_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_node)
static void * ip_interface_address_get_address(ip_lookup_main_t *lm, ip_interface_address_t *a)
Definition: ip_interface.h:43
static int nat64_in2out_tcp_udp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 l4_offset, u32 thread_index)
Definition: nat64_in2out.c:652
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
nat64_in2out_next_t
Definition: nat64_in2out.c:70
u32 fib_table_get_index_for_sw_if_index(fib_protocol_t proto, u32 sw_if_index)
Get the index of the FIB bound to the interface.
Definition: fib_table.c:989
u32 thread_index
Definition: main.h:249
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
nat64_db_bib_entry_t * nat64_db_bib_entry_find(nat64_db_t *db, ip46_address_t *addr, u16 port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 BIB entry.
Definition: nat64_db.c:209
uword ip_csum_t
Definition: ip_packet.h:244
static ip_csum_t ip_csum_with_carry(ip_csum_t sum, ip_csum_t x)
Definition: ip_packet.h:247
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
Definition: counter.h:78
u16 flags_and_fragment_offset
Definition: ip4_packet.h:106
#define VLIB_NODE_FN(node)
Definition: node.h:202
static u8 * format_nat64_in2out_trace(u8 *s, va_list *args)
Definition: nat64_in2out.c:34
nat64_db_bib_t bib
Definition: nat64_db.h:138
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:469
vlib_node_registration_t nat64_in2out_handoff_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_handoff_node)
struct _tcp_header tcp_header_t
vhost_vring_addr_t addr
Definition: vhost_user.h:111
ip6_address_t src_address
Definition: ip6_packet.h:310
unsigned char u8
Definition: types.h:56
u32 st_entries_num
Definition: nat64_db.h:123
#define u8_ptr_add(ptr, index)
Definition: ip_types.h:43
u32 nat64_get_worker_in2out(ip6_address_t *addr)
Get worker thread index for NAT64 in2out.
Definition: nat64.c:105
#define static_always_inline
Definition: clib.h:108
static nat_protocol_t ip_proto_to_nat_proto(u8 ip_proto)
Common NAT inline functions.
Definition: inlines.h:22
nat64_db_st_entry_t * nat64_db_st_entry_create(u32 thread_index, nat64_db_t *db, nat64_db_bib_entry_t *bibe, ip6_address_t *in_r_addr, ip4_address_t *out_r_addr, u16 r_port)
Create new NAT64 session table entry.
Definition: nat64_db.c:376
vl_api_ip6_address_t ip6
Definition: one.api:424
ip4_address_t dst_address
Definition: ip4_packet.h:125
static int nat64_in2out_unk_proto(vlib_main_t *vm, vlib_buffer_t *p, u8 l4_protocol, u16 l4_offset, u16 frag_hdr_offset, nat64_in2out_set_ctx_t *s_ctx)
Definition: nat64_in2out.c:505
struct nat64_main_t::@102 counters
snat_main_t * sm
Definition: nat64.h:122
static_always_inline u8 ip6_translate_tos(u32 ip_version_traffic_class_and_flow_label)
Translate TOS value from IPv6 to IPv4.
Definition: ip6_to_ip4.h:332
unsigned int u32
Definition: types.h:88
struct unk_proto_st_walk_ctx_t_ unk_proto_st_walk_ctx_t
void nat64_tcp_session_set_state(nat64_db_st_entry_t *ste, tcp_header_t *tcp, u8 is_ip6)
Set NAT64 TCP session state.
Definition: nat64.c:919
#define VLIB_FRAME_SIZE
Definition: node.h:377
#define frag_id_6to4(id)
Definition: ip6_to_ip4.h:49
vl_api_fib_path_type_t type
Definition: fib_types.api:123
#define ip6_frag_hdr_more(hdr)
Definition: ip6_packet.h:670
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
void nat64_session_reset_timeout(nat64_db_st_entry_t *ste, vlib_main_t *vm)
Reset NAT64 session timeout.
Definition: nat64.c:880
vl_api_ip_proto_t proto
Definition: acl_types.api:50
long ctx[MAX_CONNS]
Definition: main.c:144
u16 mss_clamping
Definition: nat.h:625
unsigned short u16
Definition: types.h:57
vlib_buffer_t * b
Definition: nat64_in2out.c:81
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
static void mss_clamping(u16 mss_clamping, tcp_header_t *tcp, ip_csum_t *sum)
Definition: nat_inlines.h:20
int ip6_address_compare(ip6_address_t *a1, ip6_address_t *a2)
Definition: ip46_cli.c:60
nat64_db_st_t st
Definition: nat64_db.h:139
#define PREDICT_FALSE(x)
Definition: clib.h:120
vl_api_ip4_address_t ip4
Definition: one.api:376
static int nat64_in2out_inner_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
Definition: nat64_in2out.c:377
ip6_main_t ip6_main
Definition: ip6_forward.c:2781
static void vlib_set_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 value)
Set a simple counter.
Definition: counter.h:94
static char * nat64_in2out_error_strings[]
Definition: nat64_in2out.c:64
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:391
void nat64_compose_ip6(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Compose IPv4-embedded IPv6 addresses.
Definition: nat64.c:1049
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1231
vlib_simple_counter_main_t total_sessions
Definition: nat64.h:111
static u8 * format_nat64_in2out_handoff_trace(u8 *s, va_list *args)
vl_api_mac_address_t dst_addr
Definition: flow_types.api:65
#define IP4_HEADER_FLAG_MORE_FRAGMENTS
Definition: ip4_packet.h:107
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static int nat64_in2out_unk_proto_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
Definition: nat64_in2out.c:912
static int nat64_in2out_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
Definition: nat64_in2out.c:284
u16 n_vectors
Definition: node.h:396
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
vlib_node_registration_t nat64_in2out_slowpath_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_slowpath_node)
#define foreach_nat64_in2out_handoff_error
#define ARRAY_LEN(x)
Definition: clib.h:67
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:483
nat64_main_t nat64_main
Definition: nat64.c:29
static void * ip6_next_header(ip6_header_t *i)
Definition: ip6_packet.h:371
nat64_db_bib_entry_t * nat64_db_bib_entry_create(u32 thread_index, nat64_db_t *db, ip6_address_t *in_addr, ip4_address_t *out_addr, u16 in_port, u16 out_port, u32 fib_index, u8 proto, u8 is_static)
Create new NAT64 BIB entry.
Definition: nat64_db.c:53
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1582
#define ip6_frag_hdr_offset(hdr)
Definition: ip6_packet.h:664
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:510
nat64_db_st_entry_t * nat64_db_st_entry_find(nat64_db_t *db, ip46_address_t *l_addr, ip46_address_t *r_addr, u16 l_port, u16 r_port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 session table entry.
Definition: nat64_db.c:557
nat64_in2out_handoff_error_t
nat64_in2out_error_t
Definition: nat64_in2out.c:56
void nat64_db_st_walk(nat64_db_t *db, u8 proto, nat64_db_st_walk_fn_t fn, void *ctx)
Walk NAT64 session table.
Definition: nat64_db.c:325
static int nat64_in2out_icmp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
Definition: nat64_in2out.c:786
ip_lookup_main_t lookup_main
Definition: ip6.h:181
u32 fq_in2out_index
Worker handoff.
Definition: nat64.h:86
ip_dscp_t tos
Definition: ip4_packet.h:96
static u8 nat64_not_translate(u32 sw_if_index, ip6_address_t ip6_addr)
Definition: nat64_in2out.c:87
static ip_csum_t ip_csum_sub_even(ip_csum_t c, ip_csum_t x)
Definition: ip_packet.h:272
nat64_db_bib_entry_t * nat64_db_bib_entry_by_index(nat64_db_t *db, u8 proto, u32 bibe_index)
Get BIB entry by index and protocol.
Definition: nat64_db.c:302
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
u32 bib_entries_num
Definition: nat64_db.h:73
IPv6 to IPv4 translation.
static uword ip6_address_is_equal(const ip6_address_t *a, const ip6_address_t *b)
Definition: ip6_packet.h:167
ip4_address_t addr
Definition: nat.h:296
NAT64 global declarations.
u32 ip_version_traffic_class_and_flow_label
Definition: ip6_packet.h:297
u16 payload_length
Definition: ip6_packet.h:301
vl_api_address_t ip
Definition: l2.api:501
static char * nat64_in2out_handoff_error_strings[]
static int icmp6_to_icmp(vlib_main_t *vm, vlib_buffer_t *p, ip6_to_ip4_icmp_set_fn_t fn, void *ctx, ip6_to_ip4_icmp_set_fn_t inner_fn, void *inner_ctx)
Translate ICMP6 packet to ICMP4.
Definition: ip6_to_ip4.h:350
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1583
#define foreach_ip_interface_address(lm, a, sw_if_index, loop, body)
Definition: ip_interface.h:57
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:297
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
Definition: buffer_node.h:494
static uword nat64_in2out_node_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_slow_path)
#define vnet_buffer(b)
Definition: buffer.h:417
static_always_inline int is_hairpinning(ip6_address_t *dst_addr)
Check whether is a hairpinning.
Definition: nat64_in2out.c:117
#define vec_foreach(var, vec)
Vector iterator.
#define IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS
Definition: ip4_packet.h:149
u16 flags
Copy of main node flags.
Definition: node.h:500
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:577
#define u16_net_add(u, val)
Definition: ip_types.h:44
static_always_inline int ip6_parse(vlib_main_t *vm, vlib_buffer_t *b, const ip6_header_t *ip6, u32 buff_len, u8 *l4_protocol, u16 *l4_offset, u16 *frag_hdr_offset)
Parse some useful information from IPv6 header.
Definition: ip6_to_ip4.h:65
u8 ip_version_and_header_length
Definition: ip4_packet.h:93
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:280
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:301
#define foreach_nat64_in2out_error
Definition: nat64_in2out.c:50
struct nat64_in2out_set_ctx_t_ nat64_in2out_set_ctx_t
static ip_csum_t ip_incremental_checksum(ip_csum_t sum, void *_data, uword n_bytes)
Definition: ip_packet.h:318
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
static u16 ip4_header_checksum(ip4_header_t *i)
Definition: ip4_packet.h:314
static u16 ip_csum_fold(ip_csum_t c)
Definition: ip_packet.h:300
struct nat64_in2out_frag_set_ctx_t_ nat64_in2out_frag_set_ctx_t
vl_api_interface_index_t sw_if_index
Definition: wireguard.api:33
Definition: defs.h:46
static ip_csum_t ip_csum_add_even(ip_csum_t c, ip_csum_t x)
Definition: ip_packet.h:255
ip6_address_t dst_address
Definition: ip6_packet.h:310
vlib_simple_counter_main_t total_bibs
Definition: nat64.h:110