FD.io VPP v19.01.3-6-g70449b9b9 (Vector Packet Processing)
nat64_in2out.c
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief NAT64 IPv6 to IPv4 translation (inside to outside network)
18  */
19 
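/*
 * This file implements the NAT64 in2out (IPv6 to IPv4) data-plane nodes:
 * nat64-in2out (fast path), nat64-in2out-slowpath (unknown protocols and
 * hairpinning), nat64-in2out-reass (fragments) and nat64-in2out-handoff
 * (worker thread handoff).
 */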
20 #include <nat/nat64.h>
21 #include <nat/nat_reass.h>
22 #include <nat/nat_inlines.h>
23 #include <vnet/ip/ip6_to_ip4.h>
24 #include <vnet/fib/fib_table.h>
25 
26 typedef struct
27 {
28   u32 sw_if_index;
29   u32 next_index;
30   u8 is_slow_path;
31 } nat64_in2out_trace_t;
32 
33 static u8 *
34 format_nat64_in2out_trace (u8 * s, va_list * args)
35 {
36  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
37  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
38  nat64_in2out_trace_t *t = va_arg (*args, nat64_in2out_trace_t *);
39  char *tag;
40 
41  tag = t->is_slow_path ? "NAT64-in2out-slowpath" : "NAT64-in2out";
42 
43  s =
44  format (s, "%s: sw_if_index %d, next index %d", tag, t->sw_if_index,
45  t->next_index);
46 
47  return s;
48 }
49 
50 typedef struct
51 {
52   u32 sw_if_index;
53   u32 next_index;
54   u8 cached;
55 } nat64_in2out_reass_trace_t;
56 
57 static u8 *
58 format_nat64_in2out_reass_trace (u8 * s, va_list * args)
59 {
60  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
61  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
62   nat64_in2out_reass_trace_t *t =
63   va_arg (*args, nat64_in2out_reass_trace_t *);
64 
65  s =
66  format (s, "NAT64-in2out-reass: sw_if_index %d, next index %d, status %s",
67  t->sw_if_index, t->next_index,
68  t->cached ? "cached" : "translated");
69 
70  return s;
71 }
72 
73 vlib_node_registration_t nat64_in2out_node;
74 vlib_node_registration_t nat64_in2out_slowpath_node;
75 vlib_node_registration_t nat64_in2out_reass_node;
76 vlib_node_registration_t nat64_in2out_handoff_node;
77 
78 #define foreach_nat64_in2out_error \
79 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \
80 _(IN2OUT_PACKETS, "good in2out packets processed") \
81 _(NO_TRANSLATION, "no translation") \
82 _(UNKNOWN, "unknown") \
83 _(DROP_FRAGMENT, "drop fragment") \
84 _(MAX_REASS, "maximum reassemblies exceeded") \
85 _(MAX_FRAG, "maximum fragments per reassembly exceeded") \
86 _(TCP_PACKETS, "TCP packets") \
87 _(UDP_PACKETS, "UDP packets") \
88 _(ICMP_PACKETS, "ICMP packets") \
89 _(OTHER_PACKETS, "other protocol packets") \
90 _(FRAGMENTS, "fragments") \
91 _(CACHED_FRAGMENTS, "cached fragments") \
92 _(PROCESSED_FRAGMENTS, "processed fragments")
93 
94 
95 typedef enum
96 {
97 #define _(sym,str) NAT64_IN2OUT_ERROR_##sym,
98   foreach_nat64_in2out_error
99 #undef _
100   NAT64_IN2OUT_N_ERROR,
101 } nat64_in2out_error_t;
102 
103 static char *nat64_in2out_error_strings[] = {
104 #define _(sym,string) string,
105   foreach_nat64_in2out_error
106 #undef _
107 };
108 
109 typedef enum
110 {
111   NAT64_IN2OUT_NEXT_IP4_LOOKUP,
112   NAT64_IN2OUT_NEXT_IP6_LOOKUP,
113   NAT64_IN2OUT_NEXT_DROP,
114   NAT64_IN2OUT_NEXT_SLOWPATH,
115   NAT64_IN2OUT_NEXT_REASS,
116   NAT64_IN2OUT_N_NEXT,
117 } nat64_in2out_next_t;
118 
119 typedef struct nat64_in2out_set_ctx_t_
120 {
121   vlib_buffer_t *b;
122   vlib_main_t *vm;
123   u32 thread_index;
124 } nat64_in2out_set_ctx_t;
125 
126 static inline u8
127 nat64_not_translate (u32 sw_if_index, ip6_address_t ip6_addr)
128 {
129   ip6_address_t *addr;
130  ip6_main_t *im6 = &ip6_main;
131  ip_lookup_main_t *lm6 = &im6->lookup_main;
132  ip_interface_address_t *ia = 0;
133 
134  /* *INDENT-OFF* */
135  foreach_ip_interface_address (lm6, ia, sw_if_index, 0,
136  ({
137  addr = ip_interface_address_get_address (lm6, ia);
138  if (0 == ip6_address_compare (addr, &ip6_addr))
139  return 1;
140  }));
141  /* *INDENT-ON* */
142 
143  return 0;
144 }
145 
146 /**
147  * @brief Check whether the packet is hairpinning.
148  *
149  * If the destination IP address of the packet is an IPv4 address assigned to
150  * the NAT64 itself, then the packet is a hairpin packet.
151  *
152  * @param dst_addr Destination address of the packet.
153  *
154  * @returns 1 if hairpinning, otherwise 0.
155  */
156 static_always_inline int
157 is_hairpinning (ip6_address_t * dst_addr)
158 {
159  nat64_main_t *nm = &nat64_main;
160  int i;
161 
162  for (i = 0; i < vec_len (nm->addr_pool); i++)
163  {
164  if (nm->addr_pool[i].addr.as_u32 == dst_addr->as_u32[3])
165  return 1;
166  }
167 
168  return 0;
169 }
170 
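/*
 * ip6_to_ip4_tcp_udp() callback: find or create the BIB and session entries
 * for a TCP/UDP packet and rewrite the outer IPv4 addresses and source port.
 */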
171 static int
172 nat64_in2out_tcp_udp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
173  void *arg)
174 {
175  nat64_main_t *nm = &nat64_main;
176   nat64_in2out_set_ctx_t *ctx = arg;
177   nat64_db_bib_entry_t *bibe;
178  nat64_db_st_entry_t *ste;
179  ip46_address_t saddr, daddr;
180  u32 sw_if_index, fib_index;
181  udp_header_t *udp = ip6_next_header (ip6);
182  u8 proto = ip6->protocol;
183  u16 sport = udp->src_port;
184  u16 dport = udp->dst_port;
185  nat64_db_t *db = &nm->db[ctx->thread_index];
186 
187  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
188  fib_index =
189   fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
190 
191  saddr.as_u64[0] = ip6->src_address.as_u64[0];
192  saddr.as_u64[1] = ip6->src_address.as_u64[1];
193  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
194  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
195 
196  ste =
197  nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
198  fib_index, 1);
199 
200  if (ste)
201  {
202  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
203  if (!bibe)
204  return -1;
205  }
206  else
207  {
208  bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);
209 
210  if (!bibe)
211  {
212  u16 out_port;
213  ip4_address_t out_addr;
214   if (nat64_alloc_out_addr_and_port
215   (fib_index, ip_proto_to_snat_proto (proto), &out_addr,
216  &out_port, ctx->thread_index))
217  return -1;
218 
219  bibe =
220  nat64_db_bib_entry_create (db, &ip6->src_address, &out_addr,
221  sport, out_port, fib_index, proto, 0);
222  if (!bibe)
223  return -1;
224 
225   vlib_set_simple_counter (&nm->total_bibs, ctx->thread_index, 0,
226   db->bib.bib_entries_num);
227  }
228 
229  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
230  ste =
231  nat64_db_st_entry_create (db, bibe, &ip6->dst_address,
232  &daddr.ip4, dport);
233  if (!ste)
234  return -1;
235 
236   vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
237   db->st.st_entries_num);
238  }
239 
240  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
241  udp->src_port = bibe->out_port;
242 
243  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
244 
245  if (proto == IP_PROTOCOL_TCP)
246  {
247  u16 *checksum;
248  ip_csum_t csum;
249  tcp_header_t *tcp = ip6_next_header (ip6);
250 
251  nat64_tcp_session_set_state (ste, tcp, 1);
252  checksum = &tcp->checksum;
253  csum = ip_csum_sub_even (*checksum, sport);
254  csum = ip_csum_add_even (csum, udp->src_port);
255  mss_clamping (nm->sm, tcp, &csum);
256  *checksum = ip_csum_fold (csum);
257  }
258 
259  nat64_session_reset_timeout (ste, ctx->vm);
260 
261  return 0;
262 }
263 
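/*
 * icmp6_to_icmp() callback for the outer ICMP header: echo request/reply
 * identifiers are mapped through the BIB; other ICMP messages are sourced
 * from the first pool address.
 */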
264 static int
265 nat64_in2out_icmp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
266 {
267  nat64_main_t *nm = &nat64_main;
268   nat64_in2out_set_ctx_t *ctx = arg;
269   nat64_db_bib_entry_t *bibe;
270  nat64_db_st_entry_t *ste;
271  ip46_address_t saddr, daddr;
272  u32 sw_if_index, fib_index;
273  icmp46_header_t *icmp = ip6_next_header (ip6);
274  nat64_db_t *db = &nm->db[ctx->thread_index];
275 
276  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
277  fib_index =
278   fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
279 
280  saddr.as_u64[0] = ip6->src_address.as_u64[0];
281  saddr.as_u64[1] = ip6->src_address.as_u64[1];
282  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
283  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
284 
285  if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
286  {
287  u16 in_id = ((u16 *) (icmp))[2];
288  ste =
289  nat64_db_st_entry_find (db, &saddr, &daddr, in_id, 0,
290  IP_PROTOCOL_ICMP, fib_index, 1);
291 
292  if (ste)
293  {
294  bibe =
295  nat64_db_bib_entry_by_index (db, IP_PROTOCOL_ICMP,
296  ste->bibe_index);
297  if (!bibe)
298  return -1;
299  }
300  else
301  {
302  bibe =
303  nat64_db_bib_entry_find (db, &saddr, in_id,
304  IP_PROTOCOL_ICMP, fib_index, 1);
305 
306  if (!bibe)
307  {
308  u16 out_id;
309  ip4_address_t out_addr;
310   if (nat64_alloc_out_addr_and_port
311   (fib_index, SNAT_PROTOCOL_ICMP, &out_addr, &out_id,
312  ctx->thread_index))
313  return -1;
314 
315  bibe =
316   nat64_db_bib_entry_create (db, &ip6->src_address,
317   &out_addr, in_id, out_id,
318  fib_index, IP_PROTOCOL_ICMP, 0);
319  if (!bibe)
320  return -1;
321 
322   vlib_set_simple_counter (&nm->total_bibs, ctx->thread_index, 0,
323   db->bib.bib_entries_num);
324  }
325 
326  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
327  ste =
328  nat64_db_st_entry_create (db, bibe, &ip6->dst_address,
329  &daddr.ip4, 0);
330  if (!ste)
331  return -1;
332 
333   vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
334   db->st.st_entries_num);
335  }
336 
337  nat64_session_reset_timeout (ste, ctx->vm);
338 
339  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
340  ((u16 *) (icmp))[2] = bibe->out_port;
341 
342  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
343  }
344  else
345  {
346  if (!vec_len (nm->addr_pool))
347  return -1;
348 
349  ip4->src_address.as_u32 = nm->addr_pool[0].addr.as_u32;
350  nat64_extract_ip4 (&ip6->dst_address, &ip4->dst_address, fib_index);
351  }
352 
353  return 0;
354 }
355 
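/*
 * icmp6_to_icmp() callback for the embedded (inner) packet of an ICMP error:
 * the session lookup is done with source and destination reversed.
 */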
356 static int
357 nat64_in2out_inner_icmp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
358  void *arg)
359 {
360  nat64_main_t *nm = &nat64_main;
361   nat64_in2out_set_ctx_t *ctx = arg;
362   nat64_db_st_entry_t *ste;
363  nat64_db_bib_entry_t *bibe;
364  ip46_address_t saddr, daddr;
365  u32 sw_if_index, fib_index;
366  u8 proto = ip6->protocol;
367  nat64_db_t *db = &nm->db[ctx->thread_index];
368 
369  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
370  fib_index =
371   fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
372 
373  saddr.as_u64[0] = ip6->src_address.as_u64[0];
374  saddr.as_u64[1] = ip6->src_address.as_u64[1];
375  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
376  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
377 
378  if (proto == IP_PROTOCOL_ICMP6)
379  {
380  icmp46_header_t *icmp = ip6_next_header (ip6);
381  u16 in_id = ((u16 *) (icmp))[2];
382  proto = IP_PROTOCOL_ICMP;
383 
384  if (!
385  (icmp->type == ICMP4_echo_request
386  || icmp->type == ICMP4_echo_reply))
387  return -1;
388 
389  ste =
390  nat64_db_st_entry_find (db, &daddr, &saddr, in_id, 0, proto,
391  fib_index, 1);
392  if (!ste)
393  return -1;
394 
395  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
396  if (!bibe)
397  return -1;
398 
399  ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
400  ((u16 *) (icmp))[2] = bibe->out_port;
401  ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
402  }
403  else
404  {
405  udp_header_t *udp = ip6_next_header (ip6);
406  tcp_header_t *tcp = ip6_next_header (ip6);
407  u16 *checksum;
408  ip_csum_t csum;
409 
410  u16 sport = udp->src_port;
411  u16 dport = udp->dst_port;
412 
413  ste =
414  nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
415  fib_index, 1);
416  if (!ste)
417  return -1;
418 
419  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
420  if (!bibe)
421  return -1;
422 
423  ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
424  udp->dst_port = bibe->out_port;
425  ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
426 
427  if (proto == IP_PROTOCOL_TCP)
428  checksum = &tcp->checksum;
429  else
430  checksum = &udp->checksum;
431  csum = ip_csum_sub_even (*checksum, dport);
432  csum = ip_csum_add_even (csum, udp->dst_port);
433  *checksum = ip_csum_fold (csum);
434  }
435 
436  return 0;
437 }
438 
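/*
 * Session-table walk context and callback used to reuse the outside address
 * already chosen for a TCP/UDP session towards the same destination.
 */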
439 typedef struct unk_proto_st_walk_ctx_t_
440 {
441   ip6_address_t src_addr;
442   ip6_address_t dst_addr;
443   ip4_address_t out_addr;
444   u32 fib_index;
445   u32 thread_index;
446   u8 proto;
447 } unk_proto_st_walk_ctx_t;
448 
449 static int
450 unk_proto_st_walk (nat64_db_st_entry_t * ste, void *arg)
451 {
452  nat64_main_t *nm = &nat64_main;
453   unk_proto_st_walk_ctx_t *ctx = arg;
454   nat64_db_bib_entry_t *bibe;
455  ip46_address_t saddr, daddr;
456  nat64_db_t *db = &nm->db[ctx->thread_index];
457 
458  if (ip46_address_is_equal (&ste->in_r_addr, &ctx->dst_addr))
459  {
460  bibe = nat64_db_bib_entry_by_index (db, ste->proto, ste->bibe_index);
461  if (!bibe)
462  return -1;
463 
464  if (ip46_address_is_equal (&bibe->in_addr, &ctx->src_addr)
465  && bibe->fib_index == ctx->fib_index)
466  {
467  clib_memset (&saddr, 0, sizeof (saddr));
468  saddr.ip4.as_u32 = bibe->out_addr.as_u32;
469  clib_memset (&daddr, 0, sizeof (daddr));
470  nat64_extract_ip4 (&ctx->dst_addr, &daddr.ip4, ctx->fib_index);
471 
472   if (nat64_db_st_entry_find
473   (db, &daddr, &saddr, 0, 0, ctx->proto, ctx->fib_index, 0))
474  return -1;
475 
476  ctx->out_addr.as_u32 = bibe->out_addr.as_u32;
477  return 1;
478  }
479  }
480 
481  return 0;
482 }
483 
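/*
 * ip6_to_ip4() callback for protocols other than TCP/UDP/ICMP: sessions are
 * keyed on addresses only (no ports).
 */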
484 static int
485 nat64_in2out_unk_proto_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
486  void *arg)
487 {
488  nat64_main_t *nm = &nat64_main;
489  nat64_in2out_set_ctx_t *s_ctx = arg;
490  nat64_db_bib_entry_t *bibe;
491  nat64_db_st_entry_t *ste;
492  ip46_address_t saddr, daddr, addr;
493  u32 sw_if_index, fib_index;
494  u8 proto = ip6->protocol;
495  int i;
496  nat64_db_t *db = &nm->db[s_ctx->thread_index];
497 
498  sw_if_index = vnet_buffer (s_ctx->b)->sw_if_index[VLIB_RX];
499  fib_index =
500   fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
501 
502  saddr.as_u64[0] = ip6->src_address.as_u64[0];
503  saddr.as_u64[1] = ip6->src_address.as_u64[1];
504  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
505  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
506 
507  ste =
508  nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);
509 
510  if (ste)
511  {
512  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
513  if (!bibe)
514  return -1;
515  }
516  else
517  {
518  bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);
519 
520  if (!bibe)
521  {
522  /* Choose same out address as for TCP/UDP session to same dst */
523   unk_proto_st_walk_ctx_t ctx = {
524   .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
525  .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
526  .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
527  .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
528  .out_addr.as_u32 = 0,
529  .fib_index = fib_index,
530  .proto = proto,
531  .thread_index = s_ctx->thread_index,
532  };
533 
534  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
535 
536  if (!ctx.out_addr.as_u32)
537  nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
538 
539  /* Verify if out address is not already in use for protocol */
540  clib_memset (&addr, 0, sizeof (addr));
541  addr.ip4.as_u32 = ctx.out_addr.as_u32;
542  if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
543  ctx.out_addr.as_u32 = 0;
544 
545  if (!ctx.out_addr.as_u32)
546  {
547  for (i = 0; i < vec_len (nm->addr_pool); i++)
548  {
549  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
550  if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
551  break;
552  }
553  }
554 
555  if (!ctx.out_addr.as_u32)
556  return -1;
557 
558  bibe =
559   nat64_db_bib_entry_create (db, &ip6->src_address,
560   &ctx.out_addr, 0, 0, fib_index, proto,
561  0);
562  if (!bibe)
563  return -1;
564 
565   vlib_set_simple_counter (&nm->total_bibs, s_ctx->thread_index, 0,
566   db->bib.bib_entries_num);
567  }
568 
569  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
570  ste =
571  nat64_db_st_entry_create (db, bibe, &ip6->dst_address, &daddr.ip4, 0);
572  if (!ste)
573  return -1;
574 
575   vlib_set_simple_counter (&nm->total_sessions, s_ctx->thread_index, 0,
576   db->st.st_entries_num);
577  }
578 
579  nat64_session_reset_timeout (ste, s_ctx->vm);
580 
581  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
582  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
583 
584  return 0;
585 }
586 
587 
588 
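/*
 * Hairpinning for TCP/UDP: the destination is one of the NAT64 pool
 * addresses, so the packet is translated back to IPv6 towards the inside
 * host owning the matching BIB entry.
 */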
589 static int
590 nat64_in2out_tcp_udp_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
591  ip6_header_t * ip6, u32 thread_index)
592 {
593  nat64_main_t *nm = &nat64_main;
594  nat64_db_bib_entry_t *bibe;
595  nat64_db_st_entry_t *ste;
596  ip46_address_t saddr, daddr;
597  u32 sw_if_index, fib_index;
598  udp_header_t *udp = ip6_next_header (ip6);
599  tcp_header_t *tcp = ip6_next_header (ip6);
600  u8 proto = ip6->protocol;
601  u16 sport = udp->src_port;
602  u16 dport = udp->dst_port;
603  u16 *checksum;
604  ip_csum_t csum;
605  nat64_db_t *db = &nm->db[thread_index];
606 
607  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
608  fib_index =
609   fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
610 
611  saddr.as_u64[0] = ip6->src_address.as_u64[0];
612  saddr.as_u64[1] = ip6->src_address.as_u64[1];
613  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
614  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
615 
616  if (proto == IP_PROTOCOL_UDP)
617  checksum = &udp->checksum;
618  else
619  checksum = &tcp->checksum;
620 
621  csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
622  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
623  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
624  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
625  csum = ip_csum_sub_even (csum, sport);
626  csum = ip_csum_sub_even (csum, dport);
627 
628  ste =
629  nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
630  fib_index, 1);
631 
632  if (ste)
633  {
634  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
635  if (!bibe)
636  return -1;
637  }
638  else
639  {
640  bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);
641 
642  if (!bibe)
643  {
644  u16 out_port;
645  ip4_address_t out_addr;
646   if (nat64_alloc_out_addr_and_port
647   (fib_index, ip_proto_to_snat_proto (proto), &out_addr,
648  &out_port, thread_index))
649  return -1;
650 
651  bibe =
652  nat64_db_bib_entry_create (db, &ip6->src_address, &out_addr,
653  sport, out_port, fib_index, proto, 0);
654  if (!bibe)
655  return -1;
656 
657  vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
658  db->bib.bib_entries_num);
659  }
660 
661  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
662  ste =
663  nat64_db_st_entry_create (db, bibe, &ip6->dst_address,
664  &daddr.ip4, dport);
665  if (!ste)
666  return -1;
667 
668  vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
669  db->st.st_entries_num);
670  }
671 
672  if (proto == IP_PROTOCOL_TCP)
673  nat64_tcp_session_set_state (ste, tcp, 1);
674 
675  nat64_session_reset_timeout (ste, vm);
676 
677  sport = udp->src_port = bibe->out_port;
678  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
679 
680  clib_memset (&daddr, 0, sizeof (daddr));
681  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
682 
683  bibe = 0;
684  /* *INDENT-OFF* */
685  vec_foreach (db, nm->db)
686  {
687  bibe = nat64_db_bib_entry_find (db, &daddr, dport, proto, 0, 0);
688 
689  if (bibe)
690  break;
691  }
692  /* *INDENT-ON* */
693 
694  if (!bibe)
695  return -1;
696 
697  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
698  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
699  udp->dst_port = bibe->in_port;
700 
701  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
702  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
703  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
704  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
705  csum = ip_csum_add_even (csum, udp->src_port);
706  csum = ip_csum_add_even (csum, udp->dst_port);
707  *checksum = ip_csum_fold (csum);
708 
709  return 0;
710 }
711 
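/*
 * Hairpinning for ICMP error messages: rewrites both the outer and the
 * embedded headers and recomputes the ICMP checksum.
 */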
712 static int
713 nat64_in2out_icmp_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
714  ip6_header_t * ip6, u32 thread_index)
715 {
716  nat64_main_t *nm = &nat64_main;
717  nat64_db_bib_entry_t *bibe;
718  nat64_db_st_entry_t *ste;
719  icmp46_header_t *icmp = ip6_next_header (ip6);
720  ip6_header_t *inner_ip6;
721  ip46_address_t saddr, daddr;
722  u32 sw_if_index, fib_index;
723  u8 proto;
724  udp_header_t *udp;
725  tcp_header_t *tcp;
726  u16 *checksum, sport, dport;
727  ip_csum_t csum;
728  nat64_db_t *db = &nm->db[thread_index];
729 
730  if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
731  return -1;
732 
733  inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
734 
735  proto = inner_ip6->protocol;
736 
737  if (proto == IP_PROTOCOL_ICMP6)
738  return -1;
739 
740  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
741  fib_index =
742   fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
743 
744  saddr.as_u64[0] = inner_ip6->src_address.as_u64[0];
745  saddr.as_u64[1] = inner_ip6->src_address.as_u64[1];
746  daddr.as_u64[0] = inner_ip6->dst_address.as_u64[0];
747  daddr.as_u64[1] = inner_ip6->dst_address.as_u64[1];
748 
749  udp = ip6_next_header (inner_ip6);
750  tcp = ip6_next_header (inner_ip6);
751 
752  sport = udp->src_port;
753  dport = udp->dst_port;
754 
755  if (proto == IP_PROTOCOL_UDP)
756  checksum = &udp->checksum;
757  else
758  checksum = &tcp->checksum;
759 
760  csum = ip_csum_sub_even (*checksum, inner_ip6->src_address.as_u64[0]);
761  csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]);
762  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]);
763  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]);
764  csum = ip_csum_sub_even (csum, sport);
765  csum = ip_csum_sub_even (csum, dport);
766 
767  ste =
768  nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
769  fib_index, 1);
770  if (!ste)
771  return -1;
772 
773  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
774  if (!bibe)
775  return -1;
776 
777  dport = udp->dst_port = bibe->out_port;
778  nat64_compose_ip6 (&inner_ip6->dst_address, &bibe->out_addr, fib_index);
779 
780  clib_memset (&saddr, 0, sizeof (saddr));
781  clib_memset (&daddr, 0, sizeof (daddr));
782  saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
783  daddr.ip4.as_u32 = bibe->out_addr.as_u32;
784 
785  ste = 0;
786  /* *INDENT-OFF* */
787  vec_foreach (db, nm->db)
788  {
789  ste = nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
790  0, 0);
791 
792  if (ste)
793  break;
794  }
795  /* *INDENT-ON* */
796 
797  if (!ste)
798  return -1;
799 
800  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
801  if (!bibe)
802  return -1;
803 
804  inner_ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
805  inner_ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
806  udp->src_port = bibe->in_port;
807 
808  csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]);
809  csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]);
810  csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]);
811  csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]);
812  csum = ip_csum_add_even (csum, udp->src_port);
813  csum = ip_csum_add_even (csum, udp->dst_port);
814  *checksum = ip_csum_fold (csum);
815 
816  if (!vec_len (nm->addr_pool))
817  return -1;
818 
819  nat64_compose_ip6 (&ip6->src_address, &nm->addr_pool[0].addr, fib_index);
820  ip6->dst_address.as_u64[0] = inner_ip6->src_address.as_u64[0];
821  ip6->dst_address.as_u64[1] = inner_ip6->src_address.as_u64[1];
822 
823  icmp->checksum = 0;
824  csum = ip_csum_with_carry (0, ip6->payload_length);
825  csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol));
826  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]);
827  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]);
828  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]);
829  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]);
830  csum =
831  ip_incremental_checksum (csum, icmp,
832  clib_net_to_host_u16 (ip6->payload_length));
833  icmp->checksum = ~ip_csum_fold (csum);
834 
835  return 0;
836 }
837 
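/* Hairpinning for protocols other than TCP/UDP/ICMP (no ports involved). */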
838 static int
839 nat64_in2out_unk_proto_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
840  ip6_header_t * ip6, u32 thread_index)
841 {
842  nat64_main_t *nm = &nat64_main;
843  nat64_db_bib_entry_t *bibe;
844  nat64_db_st_entry_t *ste;
845  ip46_address_t saddr, daddr, addr;
846  u32 sw_if_index, fib_index;
847  u8 proto = ip6->protocol;
848  int i;
849  nat64_db_t *db = &nm->db[thread_index];
850 
851  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
852  fib_index =
853   fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
854 
855  saddr.as_u64[0] = ip6->src_address.as_u64[0];
856  saddr.as_u64[1] = ip6->src_address.as_u64[1];
857  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
858  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
859 
860  ste =
861  nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);
862 
863  if (ste)
864  {
865  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
866  if (!bibe)
867  return -1;
868  }
869  else
870  {
871  bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);
872 
873  if (!bibe)
874  {
875  /* Choose same out address as for TCP/UDP session to same dst */
876   unk_proto_st_walk_ctx_t ctx = {
877   .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
878  .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
879  .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
880  .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
881  .out_addr.as_u32 = 0,
882  .fib_index = fib_index,
883  .proto = proto,
884  .thread_index = thread_index,
885  };
886 
887  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
888 
889  if (!ctx.out_addr.as_u32)
890  nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
891 
892  /* Verify if out address is not already in use for protocol */
893  clib_memset (&addr, 0, sizeof (addr));
894  addr.ip4.as_u32 = ctx.out_addr.as_u32;
895  if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
896  ctx.out_addr.as_u32 = 0;
897 
898  if (!ctx.out_addr.as_u32)
899  {
900  for (i = 0; i < vec_len (nm->addr_pool); i++)
901  {
902  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
903  if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
904  break;
905  }
906  }
907 
908  if (!ctx.out_addr.as_u32)
909  return -1;
910 
911  bibe =
912   nat64_db_bib_entry_create (db, &ip6->src_address,
913   &ctx.out_addr, 0, 0, fib_index, proto,
914  0);
915  if (!bibe)
916  return -1;
917 
918  vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
919  db->bib.bib_entries_num);
920  }
921 
922  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
923  ste =
924  nat64_db_st_entry_create (db, bibe, &ip6->dst_address, &daddr.ip4, 0);
925  if (!ste)
926  return -1;
927 
928  vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
929  db->st.st_entries_num);
930  }
931 
932  nat64_session_reset_timeout (ste, vm);
933 
934  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
935 
936  clib_memset (&daddr, 0, sizeof (daddr));
937  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
938 
939  bibe = 0;
940  /* *INDENT-OFF* */
941  vec_foreach (db, nm->db)
942  {
943  bibe = nat64_db_bib_entry_find (db, &daddr, 0, proto, 0, 0);
944 
945  if (bibe)
946  break;
947  }
948  /* *INDENT-ON* */
949 
950  if (!bibe)
951  return -1;
952 
953  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
954  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
955 
956  return 0;
957 }
958 
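/*
 * Common fast/slow path loop: parses the IPv6 header, dispatches per L4
 * protocol and rewrites the packet in place; fragments go to the reass node
 * and unknown protocols to the slow path.
 */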
959 static inline uword
960 nat64_in2out_node_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
961  vlib_frame_t * frame, u8 is_slow_path)
962 {
963  u32 n_left_from, *from, *to_next;
964  nat64_in2out_next_t next_index;
965  u32 pkts_processed = 0;
966  u32 stats_node_index;
967   u32 thread_index = vm->thread_index;
968   u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets =
969  0, fragments = 0;
970 
971  stats_node_index =
972  is_slow_path ? nat64_in2out_slowpath_node.index : nat64_in2out_node.index;
973 
974  from = vlib_frame_vector_args (frame);
975  n_left_from = frame->n_vectors;
976  next_index = node->cached_next_index;
977 
978  while (n_left_from > 0)
979  {
980  u32 n_left_to_next;
981 
982  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
983 
984  while (n_left_from > 0 && n_left_to_next > 0)
985  {
986  u32 bi0;
987  vlib_buffer_t *b0;
988  u32 next0;
989  ip6_header_t *ip60;
990  u16 l4_offset0, frag_offset0;
991  u8 l4_protocol0;
992  u32 proto0;
993   nat64_in2out_set_ctx_t ctx0;
994   u32 sw_if_index0;
995 
996  /* speculatively enqueue b0 to the current next frame */
997  bi0 = from[0];
998  to_next[0] = bi0;
999  from += 1;
1000  to_next += 1;
1001  n_left_from -= 1;
1002  n_left_to_next -= 1;
1003 
1004  b0 = vlib_get_buffer (vm, bi0);
1005  ip60 = vlib_buffer_get_current (b0);
1006 
1007  ctx0.b = b0;
1008  ctx0.vm = vm;
1009  ctx0.thread_index = thread_index;
1010 
1011   next0 = NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1012 
1013  if (PREDICT_FALSE
1014  (ip6_parse
1015  (ip60, b0->current_length, &l4_protocol0, &l4_offset0,
1016  &frag_offset0)))
1017  {
1018  next0 = NAT64_IN2OUT_NEXT_DROP;
1019  b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1020  goto trace0;
1021  }
1022 
1023  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1024 
1025  if (nat64_not_translate (sw_if_index0, ip60->dst_address))
1026  {
1027   next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1028   goto trace0;
1029  }
1030 
1031  proto0 = ip_proto_to_snat_proto (l4_protocol0);
1032 
1033  if (is_slow_path)
1034  {
1035  if (PREDICT_TRUE (proto0 == ~0))
1036  {
1037  other_packets++;
1038  if (is_hairpinning (&ip60->dst_address))
1039  {
1040   next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1041   if (nat64_in2out_unk_proto_hairpinning
1042   (vm, b0, ip60, thread_index))
1043  {
1044  next0 = NAT64_IN2OUT_NEXT_DROP;
1045  b0->error =
1046  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1047  }
1048  goto trace0;
1049  }
1050 
1051  if (ip6_to_ip4 (b0, nat64_in2out_unk_proto_set_cb, &ctx0))
1052  {
1053  next0 = NAT64_IN2OUT_NEXT_DROP;
1054  b0->error =
1055  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1056  goto trace0;
1057  }
1058  }
1059  goto trace0;
1060  }
1061  else
1062  {
1063  if (PREDICT_FALSE (proto0 == ~0))
1064  {
1065   next0 = NAT64_IN2OUT_NEXT_SLOWPATH;
1066   goto trace0;
1067  }
1068  }
1069 
1070  if (PREDICT_FALSE
1071  (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION))
1072  {
1073  next0 = NAT64_IN2OUT_NEXT_REASS;
1074  fragments++;
1075  goto trace0;
1076  }
1077 
1078  if (proto0 == SNAT_PROTOCOL_ICMP)
1079  {
1080  icmp_packets++;
1081  if (is_hairpinning (&ip60->dst_address))
1082  {
1083   next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1084   if (nat64_in2out_icmp_hairpinning
1085   (vm, b0, ip60, thread_index))
1086  {
1087  next0 = NAT64_IN2OUT_NEXT_DROP;
1088  b0->error =
1089  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1090  }
1091  goto trace0;
1092  }
1093 
1094  if (icmp6_to_icmp
1095  (b0, nat64_in2out_icmp_set_cb, &ctx0,
1096   nat64_in2out_inner_icmp_set_cb, &ctx0))
1097   {
1098  next0 = NAT64_IN2OUT_NEXT_DROP;
1099  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1100  goto trace0;
1101  }
1102  }
1103  else if (proto0 == SNAT_PROTOCOL_TCP || proto0 == SNAT_PROTOCOL_UDP)
1104  {
1105  if (proto0 == SNAT_PROTOCOL_TCP)
1106  tcp_packets++;
1107  else
1108  udp_packets++;
1109 
1110  if (is_hairpinning (&ip60->dst_address))
1111  {
1112   next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1113   if (nat64_in2out_tcp_udp_hairpinning
1114   (vm, b0, ip60, thread_index))
1115  {
1116  next0 = NAT64_IN2OUT_NEXT_DROP;
1117  b0->error =
1118  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1119  }
1120  goto trace0;
1121  }
1122 
1123  if (ip6_to_ip4_tcp_udp
1124  (b0, nat64_in2out_tcp_udp_set_cb, &ctx0, 0))
1125  {
1126  next0 = NAT64_IN2OUT_NEXT_DROP;
1127  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1128  goto trace0;
1129  }
1130  }
1131 
1132  trace0:
1133   if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
1134   && (b0->flags & VLIB_BUFFER_IS_TRACED)))
1135  {
1136   nat64_in2out_trace_t *t =
1137   vlib_add_trace (vm, node, b0, sizeof (*t));
1138  t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1139  t->next_index = next0;
1140  t->is_slow_path = is_slow_path;
1141  }
1142 
1143  pkts_processed += next0 == NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1144 
1145  /* verify speculative enqueue, maybe switch current next frame */
1146  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1147  n_left_to_next, bi0, next0);
1148  }
1149  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1150  }
1151  vlib_node_increment_counter (vm, stats_node_index,
1152  NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
1153  pkts_processed);
1154  vlib_node_increment_counter (vm, stats_node_index,
1155  NAT64_IN2OUT_ERROR_TCP_PACKETS, tcp_packets);
1156  vlib_node_increment_counter (vm, stats_node_index,
1157  NAT64_IN2OUT_ERROR_UDP_PACKETS, udp_packets);
1158  vlib_node_increment_counter (vm, stats_node_index,
1159  NAT64_IN2OUT_ERROR_ICMP_PACKETS, icmp_packets);
1160  vlib_node_increment_counter (vm, stats_node_index,
1161  NAT64_IN2OUT_ERROR_OTHER_PACKETS,
1162  other_packets);
1163  vlib_node_increment_counter (vm, stats_node_index,
1164  NAT64_IN2OUT_ERROR_FRAGMENTS, fragments);
1165 
1166  return frame->n_vectors;
1167 }
1168 
1169 static uword
1170 nat64_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1171  vlib_frame_t * frame)
1172 {
1173  return nat64_in2out_node_fn_inline (vm, node, frame, 0);
1174 }
1175 
1176 /* *INDENT-OFF* */
1177 VLIB_REGISTER_NODE (nat64_in2out_node) = {
1178  .function = nat64_in2out_node_fn,
1179  .name = "nat64-in2out",
1180  .vector_size = sizeof (u32),
1181  .format_trace = format_nat64_in2out_trace,
1182  .type = VLIB_NODE_TYPE_INTERNAL,
1183  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1184  .error_strings = nat64_in2out_error_strings,
1185  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1186  /* edit / add dispositions here */
1187  .next_nodes = {
1188  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1189  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1190  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1191  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1192  [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1193  },
1194 };
1195 /* *INDENT-ON* */
1196 
1197 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_node, nat64_in2out_node_fn);
1198 
1199 static uword
1200 nat64_in2out_slowpath_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1201  vlib_frame_t * frame)
1202 {
1203  return nat64_in2out_node_fn_inline (vm, node, frame, 1);
1204 }
1205 
1206 /* *INDENT-OFF* */
1207 VLIB_REGISTER_NODE (nat64_in2out_slowpath_node) = {
1208  .function = nat64_in2out_slowpath_node_fn,
1209  .name = "nat64-in2out-slowpath",
1210  .vector_size = sizeof (u32),
1211  .format_trace = format_nat64_in2out_trace,
1212  .type = VLIB_NODE_TYPE_INTERNAL,
1213  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1214  .error_strings = nat64_in2out_error_strings,
1215  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1216  /* edit / add dispositions here */
1217  .next_nodes = {
1218  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1219  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1220  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1221  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1222  [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1223  },
1224 };
1225 /* *INDENT-ON* */
1226 
1227 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_slowpath_node,
1228  nat64_in2out_slowpath_node_fn);
1229 
1230 typedef struct nat64_in2out_frag_set_ctx_t_
1231 {
1232   vlib_main_t *vm;
1233   u32 sess_index;
1234   u32 thread_index;
1235   u16 l4_offset;
1236   u8 proto;
1237   u8 first_frag;
1238 } nat64_in2out_frag_set_ctx_t;
1239 
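/*
 * ip6_to_ip4_fragmented() callback: translate a fragment using the session
 * selected when the first fragment of the chain was seen.
 */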
1240 static int
1241 nat64_in2out_frag_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
1242 {
1243  nat64_main_t *nm = &nat64_main;
1244   nat64_in2out_frag_set_ctx_t *ctx = arg;
1245   nat64_db_st_entry_t *ste;
1246  nat64_db_bib_entry_t *bibe;
1247  udp_header_t *udp;
1248  nat64_db_t *db = &nm->db[ctx->thread_index];
1249 
1250  ste = nat64_db_st_entry_by_index (db, ctx->proto, ctx->sess_index);
1251  if (!ste)
1252  return -1;
1253 
1254  bibe = nat64_db_bib_entry_by_index (db, ctx->proto, ste->bibe_index);
1255  if (!bibe)
1256  return -1;
1257 
1258  nat64_session_reset_timeout (ste, ctx->vm);
1259 
1260  if (ctx->first_frag)
1261  {
1262  udp = (udp_header_t *) u8_ptr_add (ip6, ctx->l4_offset);
1263 
1264  if (ctx->proto == IP_PROTOCOL_TCP)
1265  {
1266  u16 *checksum;
1267  ip_csum_t csum;
1268  tcp_header_t *tcp = (tcp_header_t *) udp;
1269 
1270  nat64_tcp_session_set_state (ste, tcp, 1);
1271  checksum = &tcp->checksum;
1272  csum = ip_csum_sub_even (*checksum, tcp->src_port);
1273  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[0]);
1274  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
1275  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
1276  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
1277  csum = ip_csum_add_even (csum, bibe->out_port);
1278  csum = ip_csum_add_even (csum, bibe->out_addr.as_u32);
1279  csum = ip_csum_add_even (csum, ste->out_r_addr.as_u32);
1280  *checksum = ip_csum_fold (csum);
1281  }
1282 
1283  udp->src_port = bibe->out_port;
1284  }
1285 
1286  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
1287  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
1288 
1289  return 0;
1290 }
1291 
1292 static int
1293 nat64_in2out_frag_hairpinning (vlib_buffer_t * b, ip6_header_t * ip6,
1294  nat64_in2out_frag_set_ctx_t * ctx)
1295 {
1296  nat64_main_t *nm = &nat64_main;
1297  nat64_db_st_entry_t *ste;
1298  nat64_db_bib_entry_t *bibe;
1299  udp_header_t *udp = (udp_header_t *) u8_ptr_add (ip6, ctx->l4_offset);
1300  tcp_header_t *tcp = (tcp_header_t *) udp;
1301  u16 sport = udp->src_port;
1302  u16 dport = udp->dst_port;
1303  u16 *checksum;
1304  ip_csum_t csum;
1305  ip46_address_t daddr;
1306  nat64_db_t *db = &nm->db[ctx->thread_index];
1307 
1308  if (ctx->first_frag)
1309  {
1310  if (ctx->proto == IP_PROTOCOL_UDP)
1311  checksum = &udp->checksum;
1312  else
1313  checksum = &tcp->checksum;
1314 
1315  csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
1316  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
1317  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
1318  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
1319  csum = ip_csum_sub_even (csum, sport);
1320  csum = ip_csum_sub_even (csum, dport);
1321  }
1322 
1323  ste = nat64_db_st_entry_by_index (db, ctx->proto, ctx->sess_index);
1324  if (!ste)
1325  return -1;
1326 
1327  bibe = nat64_db_bib_entry_by_index (db, ctx->proto, ste->bibe_index);
1328  if (!bibe)
1329  return -1;
1330 
1331  if (ctx->proto == IP_PROTOCOL_TCP)
1332  nat64_tcp_session_set_state (ste, tcp, 1);
1333 
1334  nat64_session_reset_timeout (ste, ctx->vm);
1335 
1336  sport = bibe->out_port;
1337  dport = ste->r_port;
1338 
1339  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, bibe->fib_index);
1340 
1341  clib_memset (&daddr, 0, sizeof (daddr));
1342  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
1343 
1344  bibe = 0;
1345  /* *INDENT-OFF* */
1346  vec_foreach (db, nm->db)
1347  {
1348  bibe = nat64_db_bib_entry_find (db, &daddr, dport, ctx->proto, 0, 0);
1349 
1350  if (bibe)
1351  break;
1352  }
1353  /* *INDENT-ON* */
1354 
1355  if (!bibe)
1356  return -1;
1357 
1358  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
1359  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
1360 
1361  if (ctx->first_frag)
1362  {
1363  udp->dst_port = bibe->in_port;
1364  udp->src_port = sport;
1365  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
1366  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
1367  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
1368  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
1369  csum = ip_csum_add_even (csum, udp->src_port);
1370  csum = ip_csum_add_even (csum, udp->dst_port);
1371  *checksum = ip_csum_fold (csum);
1372  }
1373 
1374  return 0;
1375 }
1376 
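/*
 * Reassembly node: the first fragment creates/finds the session and releases
 * any cached fragments; non-first fragments arriving before the first one
 * are cached and later looped back through this node.
 */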
1377 static uword
1378 nat64_in2out_reass_node_fn (vlib_main_t * vm,
1379  vlib_node_runtime_t * node, vlib_frame_t * frame)
1380 {
1381  u32 n_left_from, *from, *to_next;
1382  nat64_in2out_next_t next_index;
1383  u32 pkts_processed = 0, cached_fragments = 0;
1384  u32 *fragments_to_drop = 0;
1385  u32 *fragments_to_loopback = 0;
1386  nat64_main_t *nm = &nat64_main;
1387   u32 thread_index = vm->thread_index;
1388 
1389  from = vlib_frame_vector_args (frame);
1390  n_left_from = frame->n_vectors;
1391  next_index = node->cached_next_index;
1392 
1393  while (n_left_from > 0)
1394  {
1395  u32 n_left_to_next;
1396 
1397  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1398 
1399  while (n_left_from > 0 && n_left_to_next > 0)
1400  {
1401  u32 bi0;
1402  vlib_buffer_t *b0;
1403  u32 next0;
1404  u8 cached0 = 0;
1405  ip6_header_t *ip60;
1406  u16 l4_offset0, frag_offset0;
1407  u8 l4_protocol0;
1408  nat_reass_ip6_t *reass0;
1409  ip6_frag_hdr_t *frag0;
1410  nat64_db_bib_entry_t *bibe0;
1411  nat64_db_st_entry_t *ste0;
1412  udp_header_t *udp0;
1413  snat_protocol_t proto0;
1414  u32 sw_if_index0, fib_index0;
1415  ip46_address_t saddr0, daddr0;
1416   nat64_in2out_frag_set_ctx_t ctx0;
1417   nat64_db_t *db = &nm->db[thread_index];
1418 
1419  /* speculatively enqueue b0 to the current next frame */
1420  bi0 = from[0];
1421  to_next[0] = bi0;
1422  from += 1;
1423  to_next += 1;
1424  n_left_from -= 1;
1425  n_left_to_next -= 1;
1426 
1427  b0 = vlib_get_buffer (vm, bi0);
1428   next0 = NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1429 
1430  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1431  fib_index0 =
1432   fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
1433   sw_if_index0);
1434 
1435  ctx0.thread_index = thread_index;
1436 
1437   if (PREDICT_FALSE (nat_reass_is_drop_frag (1)))
1438   {
1439  next0 = NAT64_IN2OUT_NEXT_DROP;
1440  b0->error = node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
1441  goto trace0;
1442  }
1443 
1444  ip60 = (ip6_header_t *) vlib_buffer_get_current (b0);
1445 
1446  if (PREDICT_FALSE
1447  (ip6_parse
1448  (ip60, b0->current_length, &l4_protocol0, &l4_offset0,
1449  &frag_offset0)))
1450  {
1451  next0 = NAT64_IN2OUT_NEXT_DROP;
1452  b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1453  goto trace0;
1454  }
1455 
1456  if (PREDICT_FALSE
1457  (!(l4_protocol0 == IP_PROTOCOL_TCP
1458  || l4_protocol0 == IP_PROTOCOL_UDP)))
1459  {
1460  next0 = NAT64_IN2OUT_NEXT_DROP;
1461  b0->error = node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
1462  goto trace0;
1463  }
1464 
1465  udp0 = (udp_header_t *) u8_ptr_add (ip60, l4_offset0);
1466  frag0 = (ip6_frag_hdr_t *) u8_ptr_add (ip60, frag_offset0);
1467  proto0 = ip_proto_to_snat_proto (l4_protocol0);
1468 
1469  reass0 = nat_ip6_reass_find_or_create (ip60->src_address,
1470  ip60->dst_address,
1471  frag0->identification,
1472  l4_protocol0,
1473  1, &fragments_to_drop);
1474 
1475  if (PREDICT_FALSE (!reass0))
1476  {
1477  next0 = NAT64_IN2OUT_NEXT_DROP;
1478  b0->error = node->errors[NAT64_IN2OUT_ERROR_MAX_REASS];
1479  goto trace0;
1480  }
1481 
1482  if (PREDICT_TRUE (ip6_frag_hdr_offset (frag0)))
1483  {
1484  ctx0.first_frag = 0;
1485  if (PREDICT_FALSE (reass0->sess_index == (u32) ~ 0))
1486  {
1487   if (nat_ip6_reass_add_fragment
1488   (reass0, bi0, &fragments_to_drop))
1489  {
1490  b0->error = node->errors[NAT64_IN2OUT_ERROR_MAX_FRAG];
1491  next0 = NAT64_IN2OUT_NEXT_DROP;
1492  goto trace0;
1493  }
1494  cached0 = 1;
1495  goto trace0;
1496  }
1497  }
1498  else
1499  {
1500  ctx0.first_frag = 1;
1501 
1502  saddr0.as_u64[0] = ip60->src_address.as_u64[0];
1503  saddr0.as_u64[1] = ip60->src_address.as_u64[1];
1504  daddr0.as_u64[0] = ip60->dst_address.as_u64[0];
1505  daddr0.as_u64[1] = ip60->dst_address.as_u64[1];
1506 
1507  ste0 =
1508  nat64_db_st_entry_find (db, &saddr0, &daddr0,
1509  udp0->src_port, udp0->dst_port,
1510  l4_protocol0, fib_index0, 1);
1511  if (!ste0)
1512  {
1513  bibe0 =
1514  nat64_db_bib_entry_find (db, &saddr0, udp0->src_port,
1515  l4_protocol0, fib_index0, 1);
1516  if (!bibe0)
1517  {
1518  u16 out_port0;
1519  ip4_address_t out_addr0;
1520   if (nat64_alloc_out_addr_and_port
1521   (fib_index0, proto0, &out_addr0, &out_port0,
1522  thread_index))
1523  {
1524  next0 = NAT64_IN2OUT_NEXT_DROP;
1525  b0->error =
1526  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1527  goto trace0;
1528  }
1529 
1530  bibe0 =
1531   nat64_db_bib_entry_create (db,
1532   &ip60->src_address,
1533  &out_addr0, udp0->src_port,
1534  out_port0, fib_index0,
1535  l4_protocol0, 0);
1536  if (!bibe0)
1537  {
1538  next0 = NAT64_IN2OUT_NEXT_DROP;
1539  b0->error =
1540  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1541  goto trace0;
1542  }
1543  vlib_set_simple_counter (&nm->total_bibs, thread_index,
1544  0, db->bib.bib_entries_num);
1545  }
1546  nat64_extract_ip4 (&ip60->dst_address, &daddr0.ip4,
1547  fib_index0);
1548  ste0 =
1549  nat64_db_st_entry_create (db, bibe0,
1550  &ip60->dst_address, &daddr0.ip4,
1551  udp0->dst_port);
1552  if (!ste0)
1553  {
1554  next0 = NAT64_IN2OUT_NEXT_DROP;
1555  b0->error =
1556  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1557  goto trace0;
1558  }
1559 
1560  vlib_set_simple_counter (&nm->total_sessions, thread_index,
1561  0, db->st.st_entries_num);
1562  }
1563  reass0->sess_index = nat64_db_st_entry_get_index (db, ste0);
1564 
1565  nat_ip6_reass_get_frags (reass0, &fragments_to_loopback);
1566  }
1567 
1568  ctx0.sess_index = reass0->sess_index;
1569  ctx0.proto = l4_protocol0;
1570  ctx0.vm = vm;
1571  ctx0.l4_offset = l4_offset0;
1572 
1573  if (PREDICT_FALSE (is_hairpinning (&ip60->dst_address)))
1574  {
1575   next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1576   if (nat64_in2out_frag_hairpinning (b0, ip60, &ctx0))
1577  {
1578  next0 = NAT64_IN2OUT_NEXT_DROP;
1579  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1580  }
1581  goto trace0;
1582  }
1583  else
1584  {
1585   if (ip6_to_ip4_fragmented (b0, nat64_in2out_frag_set_cb, &ctx0))
1586   {
1587  next0 = NAT64_IN2OUT_NEXT_DROP;
1588  b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1589  goto trace0;
1590  }
1591  }
1592 
1593  trace0:
1594  if (PREDICT_FALSE
1595  ((node->flags & VLIB_NODE_FLAG_TRACE)
1596  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
1597  {
1598   nat64_in2out_reass_trace_t *t =
1599   vlib_add_trace (vm, node, b0, sizeof (*t));
1600  t->cached = cached0;
1601  t->sw_if_index = sw_if_index0;
1602  t->next_index = next0;
1603  }
1604 
1605  if (cached0)
1606  {
1607  n_left_to_next++;
1608  to_next--;
1609  cached_fragments++;
1610  }
1611  else
1612  {
1613  pkts_processed += next0 != NAT64_IN2OUT_NEXT_DROP;
1614 
1615  /* verify speculative enqueue, maybe switch current next frame */
1616  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1617  to_next, n_left_to_next,
1618  bi0, next0);
1619  }
1620 
1621  if (n_left_from == 0 && vec_len (fragments_to_loopback))
1622  {
1623  from = vlib_frame_vector_args (frame);
1624  u32 len = vec_len (fragments_to_loopback);
1625  if (len <= VLIB_FRAME_SIZE)
1626  {
1627  clib_memcpy_fast (from, fragments_to_loopback,
1628  sizeof (u32) * len);
1629  n_left_from = len;
1630  vec_reset_length (fragments_to_loopback);
1631  }
1632  else
1633  {
1634  clib_memcpy_fast (from, fragments_to_loopback +
1635  (len - VLIB_FRAME_SIZE),
1636  sizeof (u32) * VLIB_FRAME_SIZE);
1637  n_left_from = VLIB_FRAME_SIZE;
1638  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
1639  }
1640  }
1641  }
1642 
1643  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1644  }
1645 
1646   vlib_node_increment_counter (vm, nat64_in2out_reass_node.index,
1647   NAT64_IN2OUT_ERROR_PROCESSED_FRAGMENTS,
1648  pkts_processed);
1649   vlib_node_increment_counter (vm, nat64_in2out_reass_node.index,
1650   NAT64_IN2OUT_ERROR_CACHED_FRAGMENTS,
1651  cached_fragments);
1652 
1653  nat_send_all_to_node (vm, fragments_to_drop, node,
1654  &node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT],
1655   NAT64_IN2OUT_NEXT_DROP);
1656 
1657  vec_free (fragments_to_drop);
1658  vec_free (fragments_to_loopback);
1659  return frame->n_vectors;
1660 }
1661 
1662 /* *INDENT-OFF* */
1663 VLIB_REGISTER_NODE (nat64_in2out_reass_node) = {
1664  .function = nat64_in2out_reass_node_fn,
1665  .name = "nat64-in2out-reass",
1666  .vector_size = sizeof (u32),
1667  .format_trace = format_nat64_in2out_reass_trace,
1668  .type = VLIB_NODE_TYPE_INTERNAL,
1669  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1670  .error_strings = nat64_in2out_error_strings,
1671  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1672  /* edit / add dispositions here */
1673  .next_nodes = {
1674  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1675  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1676  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1677  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1678  [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1679  },
1680 };
1681 /* *INDENT-ON* */
1682 
1683 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_reass_node,
1684  nat64_in2out_reass_node_fn);
1685 
1686 #define foreach_nat64_in2out_handoff_error \
1687 _(CONGESTION_DROP, "congestion drop") \
1688 _(SAME_WORKER, "same worker") \
1689 _(DO_HANDOFF, "do handoff")
1690 
1691 typedef enum
1692 {
1693 #define _(sym,str) NAT64_IN2OUT_HANDOFF_ERROR_##sym,
1694   foreach_nat64_in2out_handoff_error
1695 #undef _
1696   NAT64_IN2OUT_HANDOFF_N_ERROR,
1697 } nat64_in2out_handoff_error_t;
1698 
1699 static char *nat64_in2out_handoff_error_strings[] = {
1700 #define _(sym,string) string,
1701   foreach_nat64_in2out_handoff_error
1702 #undef _
1703 };
1704 
1705 typedef struct
1706 {
1707   u32 next_worker_index;
1708 } nat64_in2out_handoff_trace_t;
1709 
1710 static u8 *
1711 format_nat64_in2out_handoff_trace (u8 * s, va_list * args)
1712 {
1713  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1714  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1715   nat64_in2out_handoff_trace_t *t =
1716   va_arg (*args, nat64_in2out_handoff_trace_t *);
1717 
1718  s =
1719  format (s, "NAT64-IN2OUT-HANDOFF: next-worker %d", t->next_worker_index);
1720 
1721  return s;
1722 }
1723 
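/*
 * Worker handoff node: picks the worker thread from the IPv6 source address
 * and enqueues the buffer to that thread's in2out frame queue.
 */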
1724 static inline uword
1725 nat64_in2out_handoff_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1726  vlib_frame_t * frame)
1727 {
1728  nat64_main_t *nm = &nat64_main;
1729  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1730  u32 n_enq, n_left_from, *from;
1731  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1732  u32 fq_index;
1733   u32 thread_index = vm->thread_index;
1734   u32 do_handoff = 0, same_worker = 0;
1735 
1736  from = vlib_frame_vector_args (frame);
1737  n_left_from = frame->n_vectors;
1738  vlib_get_buffers (vm, from, bufs, n_left_from);
1739 
1740  b = bufs;
1741  ti = thread_indices;
1742 
1743  fq_index = nm->fq_in2out_index;
1744 
1745  while (n_left_from > 0)
1746  {
1747  ip6_header_t *ip0;
1748 
1749  ip0 = vlib_buffer_get_current (b[0]);
1750  ti[0] = nat64_get_worker_in2out (&ip0->src_address);
1751 
1752  if (ti[0] != thread_index)
1753  do_handoff++;
1754  else
1755  same_worker++;
1756 
1757  if (PREDICT_FALSE
1758  ((node->flags & VLIB_NODE_FLAG_TRACE)
1759  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1760  {
1761   nat64_in2out_handoff_trace_t *t =
1762   vlib_add_trace (vm, node, b[0], sizeof (*t));
1763  t->next_worker_index = ti[0];
1764  }
1765 
1766  n_left_from -= 1;
1767  ti += 1;
1768  b += 1;
1769  }
1770 
1771  n_enq =
1772  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1773  frame->n_vectors, 1);
1774 
1775  if (n_enq < frame->n_vectors)
1776   vlib_node_increment_counter (vm, node->node_index,
1777   NAT64_IN2OUT_HANDOFF_ERROR_CONGESTION_DROP,
1778  frame->n_vectors - n_enq);
1779   vlib_node_increment_counter (vm, node->node_index,
1780   NAT64_IN2OUT_HANDOFF_ERROR_SAME_WORKER,
1781  same_worker);
1782   vlib_node_increment_counter (vm, node->node_index,
1783   NAT64_IN2OUT_HANDOFF_ERROR_DO_HANDOFF,
1784  do_handoff);
1785 
1786  return frame->n_vectors;
1787 }
1788 
1789 /* *INDENT-OFF* */
1790 VLIB_REGISTER_NODE (nat64_in2out_handoff_node) = {
1791  .function = nat64_in2out_handoff_node_fn,
1792  .name = "nat64-in2out-handoff",
1793  .vector_size = sizeof (u32),
1794  .format_trace = format_nat64_in2out_handoff_trace,
1795  .type = VLIB_NODE_TYPE_INTERNAL,
1796  .n_errors = ARRAY_LEN(nat64_in2out_handoff_error_strings),
1797  .error_strings = nat64_in2out_handoff_error_strings,
1798 
1799  .n_next_nodes = 1,
1800 
1801  .next_nodes = {
1802  [0] = "error-drop",
1803  },
1804 };
1805 /* *INDENT-ON* */
1806 
1809 
1810 /*
1811  * fd.io coding-style-patch-verification: ON
1812  *
1813  * Local Variables:
1814  * eval: (c-set-style "gnu")
1815  * End:
1816  */