FD.io VPP  v19.01.3-6-g70449b9b9
Vector Packet Processing
nat64_out2in.c
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief NAT64 IPv4 to IPv6 translation (outside to inside network)
18  */
19 
20 #include <nat/nat64.h>
21 #include <nat/nat_reass.h>
22 #include <nat/nat_inlines.h>
23 #include <vnet/ip/ip4_to_ip6.h>
24 #include <vnet/fib/ip4_fib.h>
25 #include <vnet/udp/udp.h>
26 
27 typedef struct
28 {
29  u32 sw_if_index;
30  u32 next_index;
31 } nat64_out2in_trace_t;
32 
33 static u8 *
34 format_nat64_out2in_trace (u8 * s, va_list * args)
35 {
36  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
37  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
38  nat64_out2in_trace_t *t = va_arg (*args, nat64_out2in_trace_t *);
39 
40  s =
41  format (s, "NAT64-out2in: sw_if_index %d, next index %d", t->sw_if_index,
42  t->next_index);
43 
44  return s;
45 }
46 
47 typedef struct
48 {
49  u32 sw_if_index;
50  u32 next_index;
51  u8 cached;
52 } nat64_out2in_reass_trace_t;
53 
54 static u8 *
55 format_nat64_out2in_reass_trace (u8 * s, va_list * args)
56 {
57  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
58  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
59  nat64_out2in_reass_trace_t *t =
60  va_arg (*args, nat64_out2in_reass_trace_t *);
61 
62  s =
63  format (s, "NAT64-out2in-reass: sw_if_index %d, next index %d, status %s",
64  t->sw_if_index, t->next_index,
65  t->cached ? "cached" : "translated");
66 
67  return s;
68 }
69 
70 vlib_node_registration_t nat64_out2in_node;
71 vlib_node_registration_t nat64_out2in_reass_node;
72 vlib_node_registration_t nat64_out2in_handoff_node;
73 
74 #define foreach_nat64_out2in_error \
75 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \
76 _(OUT2IN_PACKETS, "good out2in packets processed") \
77 _(NO_TRANSLATION, "no translation") \
78 _(UNKNOWN, "unknown") \
79 _(DROP_FRAGMENT, "drop fragment") \
80 _(MAX_REASS, "maximum reassemblies exceeded") \
81 _(MAX_FRAG, "maximum fragments per reassembly exceeded") \
82 _(TCP_PACKETS, "TCP packets") \
83 _(UDP_PACKETS, "UDP packets") \
84 _(ICMP_PACKETS, "ICMP packets") \
85 _(OTHER_PACKETS, "other protocol packets") \
86 _(FRAGMENTS, "fragments") \
87 _(CACHED_FRAGMENTS, "cached fragments") \
88 _(PROCESSED_FRAGMENTS, "processed fragments")
89 
90 
91 typedef enum
92 {
93 #define _(sym,str) NAT64_OUT2IN_ERROR_##sym,
94  foreach_nat64_out2in_error
95 #undef _
96  NAT64_OUT2IN_N_ERROR,
97 } nat64_out2in_error_t;
98 
99 static char *nat64_out2in_error_strings[] = {
100 #define _(sym,string) string,
101  foreach_nat64_out2in_error
102 #undef _
103 };
104 
105 typedef enum
106 {
107  NAT64_OUT2IN_NEXT_IP6_LOOKUP,
108  NAT64_OUT2IN_NEXT_IP4_LOOKUP,
109  NAT64_OUT2IN_NEXT_DROP,
110  NAT64_OUT2IN_NEXT_REASS,
111  NAT64_OUT2IN_N_NEXT,
112 } nat64_out2in_next_t;
113 
114 typedef struct nat64_out2in_set_ctx_t_
115 {
116  vlib_buffer_t *b;
117  vlib_main_t *vm;
118  u32 thread_index;
119 } nat64_out2in_set_ctx_t;
120 
121 static int
122 nat64_out2in_tcp_udp_set_cb (ip4_header_t * ip4, ip6_header_t * ip6,
123  void *arg)
124 {
125  nat64_main_t *nm = &nat64_main;
126  nat64_out2in_set_ctx_t *ctx = arg;
127  nat64_db_bib_entry_t *bibe;
128  nat64_db_st_entry_t *ste;
129  ip46_address_t saddr, daddr;
130  ip6_address_t ip6_saddr;
131  udp_header_t *udp = ip4_next_header (ip4);
132  tcp_header_t *tcp = ip4_next_header (ip4);
133  u8 proto = ip4->protocol;
134  u16 dport = udp->dst_port;
135  u16 sport = udp->src_port;
136  u32 sw_if_index, fib_index;
137  u16 *checksum;
138  ip_csum_t csum;
139  nat64_db_t *db = &nm->db[ctx->thread_index];
140 
141  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
142  fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
143 
144  clib_memset (&saddr, 0, sizeof (saddr));
145  saddr.ip4.as_u32 = ip4->src_address.as_u32;
146  clib_memset (&daddr, 0, sizeof (daddr));
147  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
148 
149  ste =
150  nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
151  fib_index, 0);
152  if (ste)
153  {
154  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
155  if (!bibe)
156  return -1;
157  }
158  else
159  {
160  bibe = nat64_db_bib_entry_find (db, &daddr, dport, proto, fib_index, 0);
161 
162  if (!bibe)
163  return -1;
164 
165  nat64_compose_ip6 (&ip6_saddr, &ip4->src_address, bibe->fib_index);
166  ste =
167  nat64_db_st_entry_create (db, bibe, &ip6_saddr, &saddr.ip4, sport);
168 
169  if (!ste)
170  return -1;
171 
172  vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
173  db->st.st_entries_num);
174  }
175 
176  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
177  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
178 
179  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
180  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
181  udp->dst_port = bibe->in_port;
182 
183  if (proto == IP_PROTOCOL_UDP)
184  checksum = &udp->checksum;
185  else
186  {
187  checksum = &tcp->checksum;
188  nat64_tcp_session_set_state (ste, tcp, 0);
189  }
190 
191  csum = ip_csum_sub_even (*checksum, dport);
192  csum = ip_csum_add_even (csum, udp->dst_port);
193  *checksum = ip_csum_fold (csum);
194 
195  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
196 
197  nat64_session_reset_timeout (ste, ctx->vm);
198 
199  return 0;
200 }
201 
202 static int
203 nat64_out2in_icmp_set_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg)
204 {
205  nat64_main_t *nm = &nat64_main;
206  nat64_out2in_set_ctx_t *ctx = arg;
207  nat64_db_bib_entry_t *bibe;
208  nat64_db_st_entry_t *ste;
209  ip46_address_t saddr, daddr;
210  ip6_address_t ip6_saddr;
211  u32 sw_if_index, fib_index;
212  icmp46_header_t *icmp = ip4_next_header (ip4);
213  nat64_db_t *db = &nm->db[ctx->thread_index];
214 
215  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
216  fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
217 
218  clib_memset (&saddr, 0, sizeof (saddr));
219  saddr.ip4.as_u32 = ip4->src_address.as_u32;
220  clib_memset (&daddr, 0, sizeof (daddr));
221  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
222 
223  if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
224  {
225  u16 out_id = ((u16 *) (icmp))[2];
226  ste =
227  nat64_db_st_entry_find (db, &daddr, &saddr, out_id, 0,
228  IP_PROTOCOL_ICMP, fib_index, 0);
229 
230  if (ste)
231  {
232  bibe =
233  nat64_db_bib_entry_by_index (db, IP_PROTOCOL_ICMP,
234  ste->bibe_index);
235  if (!bibe)
236  return -1;
237  }
238  else
239  {
240  bibe =
241  nat64_db_bib_entry_find (db, &daddr, out_id,
242  IP_PROTOCOL_ICMP, fib_index, 0);
243  if (!bibe)
244  return -1;
245 
246  nat64_compose_ip6 (&ip6_saddr, &ip4->src_address, bibe->fib_index);
247  ste =
248  nat64_db_st_entry_create (db, bibe, &ip6_saddr, &saddr.ip4, 0);
249 
250  if (!ste)
251  return -1;
252 
253  vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
254  db->st.st_entries_num);
255  }
256 
257  nat64_session_reset_timeout (ste, ctx->vm);
258 
259  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
260  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
261 
262  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
263  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
264  ((u16 *) (icmp))[2] = bibe->in_port;
265 
266  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
267  }
268  else
269  {
270  ip6_header_t *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
271 
272  nat64_compose_ip6 (&ip6->src_address, &ip4->src_address,
273  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX]);
274  ip6->dst_address.as_u64[0] = inner_ip6->src_address.as_u64[0];
275  ip6->dst_address.as_u64[1] = inner_ip6->src_address.as_u64[1];
276  }
277 
278  return 0;
279 }
280 
281 static int
282 nat64_out2in_inner_icmp_set_cb (ip4_header_t * ip4, ip6_header_t * ip6,
283  void *arg)
284 {
285  nat64_main_t *nm = &nat64_main;
286  nat64_out2in_set_ctx_t *ctx = arg;
287  nat64_db_bib_entry_t *bibe;
288  nat64_db_st_entry_t *ste;
289  ip46_address_t saddr, daddr;
290  u32 sw_if_index, fib_index;
291  u8 proto = ip4->protocol;
292  nat64_db_t *db = &nm->db[ctx->thread_index];
293 
294  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
295  fib_index =
296  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
297 
298  clib_memset (&saddr, 0, sizeof (saddr));
299  saddr.ip4.as_u32 = ip4->src_address.as_u32;
300  clib_memset (&daddr, 0, sizeof (daddr));
301  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
302 
303  if (proto == IP_PROTOCOL_ICMP6)
304  {
305  icmp46_header_t *icmp = ip4_next_header (ip4);
306  u16 out_id = ((u16 *) (icmp))[2];
307  proto = IP_PROTOCOL_ICMP;
308 
309  if (!
310  (icmp->type == ICMP6_echo_request
311  || icmp->type == ICMP6_echo_reply))
312  return -1;
313 
314  ste =
315  nat64_db_st_entry_find (db, &saddr, &daddr, out_id, 0, proto,
316  fib_index, 0);
317  if (!ste)
318  return -1;
319 
320  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
321  if (!bibe)
322  return -1;
323 
324  ip6->dst_address.as_u64[0] = ste->in_r_addr.as_u64[0];
325  ip6->dst_address.as_u64[1] = ste->in_r_addr.as_u64[1];
326  ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
327  ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
328  ((u16 *) (icmp))[2] = bibe->in_port;
329 
330  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
331  }
332  else
333  {
334  udp_header_t *udp = ip4_next_header (ip4);
335  tcp_header_t *tcp = ip4_next_header (ip4);
336  u16 dport = udp->dst_port;
337  u16 sport = udp->src_port;
338  u16 *checksum;
339  ip_csum_t csum;
340 
341  ste =
342  nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
343  fib_index, 0);
344  if (!ste)
345  return -1;
346 
347  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
348  if (!bibe)
349  return -1;
350 
351  nat64_compose_ip6 (&ip6->dst_address, &daddr.ip4, bibe->fib_index);
352  ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
353  ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
354  udp->src_port = bibe->in_port;
355 
356  if (proto == IP_PROTOCOL_UDP)
357  checksum = &udp->checksum;
358  else
359  checksum = &tcp->checksum;
360  if (*checksum)
361  {
362  csum = ip_csum_sub_even (*checksum, sport);
363  csum = ip_csum_add_even (csum, udp->src_port);
364  *checksum = ip_csum_fold (csum);
365  }
366 
367  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
368  }
369 
370  return 0;
371 }
372 
373 static int
374 nat64_out2in_unk_proto_set_cb (ip4_header_t * ip4, ip6_header_t * ip6,
375  void *arg)
376 {
377  nat64_main_t *nm = &nat64_main;
378  nat64_out2in_set_ctx_t *ctx = arg;
379  nat64_db_bib_entry_t *bibe;
380  nat64_db_st_entry_t *ste;
381  ip46_address_t saddr, daddr;
382  ip6_address_t ip6_saddr;
383  u32 sw_if_index, fib_index;
384  u8 proto = ip4->protocol;
385  nat64_db_t *db = &nm->db[ctx->thread_index];
386 
387  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
388  fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
389 
390  clib_memset (&saddr, 0, sizeof (saddr));
391  saddr.ip4.as_u32 = ip4->src_address.as_u32;
392  clib_memset (&daddr, 0, sizeof (daddr));
393  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
394 
395  ste =
396  nat64_db_st_entry_find (db, &daddr, &saddr, 0, 0, proto, fib_index, 0);
397  if (ste)
398  {
399  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
400  if (!bibe)
401  return -1;
402  }
403  else
404  {
405  bibe = nat64_db_bib_entry_find (db, &daddr, 0, proto, fib_index, 0);
406 
407  if (!bibe)
408  return -1;
409 
410  nat64_compose_ip6 (&ip6_saddr, &ip4->src_address, bibe->fib_index);
411  ste = nat64_db_st_entry_create (db, bibe, &ip6_saddr, &saddr.ip4, 0);
412 
413  if (!ste)
414  return -1;
415 
416  vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
417  db->st.st_entries_num);
418  }
419 
420  nat64_session_reset_timeout (ste, ctx->vm);
421 
422  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
423  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
424 
425  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
426  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
427 
428  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
429 
430  return 0;
431 }
432 
433 static uword
434 nat64_out2in_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
435  vlib_frame_t * frame)
436 {
437  u32 n_left_from, *from, *to_next;
438  nat64_out2in_next_t next_index;
439  u32 pkts_processed = 0;
440  u32 thread_index = vm->thread_index;
441  u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets =
442  0, fragments = 0;
443 
444  from = vlib_frame_vector_args (frame);
445  n_left_from = frame->n_vectors;
446  next_index = node->cached_next_index;
447  while (n_left_from > 0)
448  {
449  u32 n_left_to_next;
450 
451  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
452 
453  while (n_left_from > 0 && n_left_to_next > 0)
454  {
455  u32 bi0;
456  vlib_buffer_t *b0;
457  u32 next0;
458  ip4_header_t *ip40;
459  u32 proto0;
460  nat64_out2in_set_ctx_t ctx0;
461  udp_header_t *udp0;
462 
463  /* speculatively enqueue b0 to the current next frame */
464  bi0 = from[0];
465  to_next[0] = bi0;
466  from += 1;
467  to_next += 1;
468  n_left_from -= 1;
469  n_left_to_next -= 1;
470 
471  b0 = vlib_get_buffer (vm, bi0);
472  ip40 = vlib_buffer_get_current (b0);
473 
474  ctx0.b = b0;
475  ctx0.vm = vm;
476  ctx0.thread_index = thread_index;
477 
478  next0 = NAT64_OUT2IN_NEXT_IP6_LOOKUP;
479 
480  proto0 = ip_proto_to_snat_proto (ip40->protocol);
481 
482  if (PREDICT_FALSE (proto0 == ~0))
483  {
484  if (ip4_to_ip6 (b0, nat64_out2in_unk_proto_set_cb, &ctx0))
485  {
486  next0 = NAT64_OUT2IN_NEXT_DROP;
487  b0->error = node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
488  }
489  other_packets++;
490  goto trace0;
491  }
492 
493  if (PREDICT_FALSE (ip4_is_fragment (ip40)))
494  {
495  next0 = NAT64_OUT2IN_NEXT_REASS;
496  fragments++;
497  goto trace0;
498  }
499 
500  if (proto0 == SNAT_PROTOCOL_ICMP)
501  {
502  icmp_packets++;
503  if (icmp_to_icmp6
504  (b0, nat64_out2in_icmp_set_cb, &ctx0,
505  nat64_out2in_inner_icmp_set_cb, &ctx0))
506  {
507  next0 = NAT64_OUT2IN_NEXT_DROP;
508  b0->error = node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
509  goto trace0;
510  }
511  }
512  else
513  {
514  if (proto0 == SNAT_PROTOCOL_TCP)
515  tcp_packets++;
516  else
517  udp_packets++;
518 
519  if (ip4_to_ip6_tcp_udp (b0, nat64_out2in_tcp_udp_set_cb, &ctx0))
520  {
521  udp0 = ip4_next_header (ip40);
522  /*
523  * Send DHCP packets to the ipv4 stack, or we won't
524  * be able to use dhcp client on the outside interface
525  */
526  if ((proto0 == SNAT_PROTOCOL_UDP)
527  && (udp0->dst_port ==
528  clib_host_to_net_u16 (UDP_DST_PORT_dhcp_to_client)))
529  {
530  next0 = NAT64_OUT2IN_NEXT_IP4_LOOKUP;
531  goto trace0;
532  }
533  next0 = NAT64_OUT2IN_NEXT_DROP;
534  b0->error = node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
535  goto trace0;
536  }
537  }
538 
539  trace0:
540  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
541  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
542  {
543  nat64_out2in_trace_t *t =
544  vlib_add_trace (vm, node, b0, sizeof (*t));
545  t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
546  t->next_index = next0;
547  }
548 
549  pkts_processed += next0 == NAT64_OUT2IN_NEXT_IP6_LOOKUP;
550 
551  /* verify speculative enqueue, maybe switch current next frame */
552  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
553  n_left_to_next, bi0, next0);
554  }
555  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
556  }
557  vlib_node_increment_counter (vm, nat64_out2in_node.index,
558  NAT64_OUT2IN_ERROR_OUT2IN_PACKETS,
559  pkts_processed);
560  vlib_node_increment_counter (vm, nat64_out2in_node.index,
561  NAT64_OUT2IN_ERROR_TCP_PACKETS, tcp_packets);
562  vlib_node_increment_counter (vm, nat64_out2in_node.index,
563  NAT64_OUT2IN_ERROR_UDP_PACKETS, udp_packets);
564  vlib_node_increment_counter (vm, nat64_out2in_node.index,
565  NAT64_OUT2IN_ERROR_ICMP_PACKETS, icmp_packets);
566  vlib_node_increment_counter (vm, nat64_out2in_node.index,
567  NAT64_OUT2IN_ERROR_OTHER_PACKETS,
568  other_packets);
569  vlib_node_increment_counter (vm, nat64_out2in_node.index,
570  NAT64_OUT2IN_ERROR_FRAGMENTS, fragments);
571 
572  return frame->n_vectors;
573 }
574 
575 /* *INDENT-OFF* */
576 VLIB_REGISTER_NODE (nat64_out2in_node) = {
577  .function = nat64_out2in_node_fn,
578  .name = "nat64-out2in",
579  .vector_size = sizeof (u32),
580  .format_trace = format_nat64_out2in_trace,
581  .type = VLIB_NODE_TYPE_INTERNAL,
582  .n_errors = ARRAY_LEN (nat64_out2in_error_strings),
583  .error_strings = nat64_out2in_error_strings,
584  .n_next_nodes = NAT64_OUT2IN_N_NEXT,
585  /* edit / add dispositions here */
586  .next_nodes = {
587  [NAT64_OUT2IN_NEXT_DROP] = "error-drop",
588  [NAT64_OUT2IN_NEXT_IP6_LOOKUP] = "ip6-lookup",
589  [NAT64_OUT2IN_NEXT_IP4_LOOKUP] = "ip4-lookup",
590  [NAT64_OUT2IN_NEXT_REASS] = "nat64-out2in-reass",
591  },
592 };
593 /* *INDENT-ON* */
594 
595 VLIB_NODE_FUNCTION_MULTIARCH (nat64_out2in_node, nat64_out2in_node_fn);
596 
597 typedef struct nat64_out2in_frag_set_ctx_t_
598 {
599  vlib_buffer_t *b;
600  vlib_main_t *vm;
601  u32 sess_index;
602  u32 thread_index;
603  u8 proto;
604  u8 first_frag;
605 } nat64_out2in_frag_set_ctx_t;
606 
607 static int
608 nat64_out2in_frag_set_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg)
609 {
610  nat64_main_t *nm = &nat64_main;
611  nat64_out2in_frag_set_ctx_t *ctx = arg;
612  nat64_db_st_entry_t *ste;
613  nat64_db_bib_entry_t *bibe;
614  udp_header_t *udp = ip4_next_header (ip4);
615  ip_csum_t csum;
616  u16 *checksum;
617  nat64_db_t *db = &nm->db[ctx->thread_index];
618 
619  ste = nat64_db_st_entry_by_index (db, ctx->proto, ctx->sess_index);
620  if (!ste)
621  return -1;
622 
623  bibe = nat64_db_bib_entry_by_index (db, ctx->proto, ste->bibe_index);
624  if (!bibe)
625  return -1;
626 
627  if (ctx->first_frag)
628  {
629  udp->dst_port = bibe->in_port;
630 
631  if (ip4->protocol == IP_PROTOCOL_UDP)
632  {
633  checksum = &udp->checksum;
634 
635  if (!*checksum)
636  {
637  u16 udp_len =
638  clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
639  csum = ip_incremental_checksum (0, udp, udp_len);
640  csum =
641  ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
642  csum =
643  ip_csum_with_carry (csum,
644  clib_host_to_net_u16 (IP_PROTOCOL_UDP));
645  csum = ip_csum_with_carry (csum, ste->in_r_addr.as_u64[0]);
646  csum = ip_csum_with_carry (csum, ste->in_r_addr.as_u64[1]);
647  csum = ip_csum_with_carry (csum, bibe->in_addr.as_u64[0]);
648  csum = ip_csum_with_carry (csum, bibe->in_addr.as_u64[1]);
649  *checksum = ~ip_csum_fold (csum);
650  }
651  else
652  {
653  csum = ip_csum_sub_even (*checksum, bibe->out_addr.as_u32);
654  csum = ip_csum_sub_even (csum, ste->out_r_addr.as_u32);
655  csum = ip_csum_sub_even (csum, bibe->out_port);
656  csum = ip_csum_add_even (csum, ste->in_r_addr.as_u64[0]);
657  csum = ip_csum_add_even (csum, ste->in_r_addr.as_u64[1]);
658  csum = ip_csum_add_even (csum, bibe->in_addr.as_u64[0]);
659  csum = ip_csum_add_even (csum, bibe->in_addr.as_u64[1]);
660  csum = ip_csum_add_even (csum, bibe->in_port);
661  *checksum = ip_csum_fold (csum);
662  }
663  }
664  else
665  {
666  tcp_header_t *tcp = ip4_next_header (ip4);
667  nat64_tcp_session_set_state (ste, tcp, 0);
668  checksum = &tcp->checksum;
669  csum = ip_csum_sub_even (*checksum, bibe->out_addr.as_u32);
670  csum = ip_csum_sub_even (csum, ste->out_r_addr.as_u32);
671  csum = ip_csum_sub_even (csum, bibe->out_port);
672  csum = ip_csum_add_even (csum, ste->in_r_addr.as_u64[0]);
673  csum = ip_csum_add_even (csum, ste->in_r_addr.as_u64[1]);
674  csum = ip_csum_add_even (csum, bibe->in_addr.as_u64[0]);
675  csum = ip_csum_add_even (csum, bibe->in_addr.as_u64[1]);
676  csum = ip_csum_add_even (csum, bibe->in_port);
677  *checksum = ip_csum_fold (csum);
678  }
679 
680  }
681 
682  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
683  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
684 
685  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
686  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
687 
688  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
689 
690  nat64_session_reset_timeout (ste, ctx->vm);
691 
692  return 0;
693 }
694 
695 static uword
696 nat64_out2in_reass_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
697  vlib_frame_t * frame)
698 {
699  u32 n_left_from, *from, *to_next;
700  nat64_out2in_next_t next_index;
701  u32 pkts_processed = 0, cached_fragments = 0;
702  u32 *fragments_to_drop = 0;
703  u32 *fragments_to_loopback = 0;
704  nat64_main_t *nm = &nat64_main;
705  u32 thread_index = vm->thread_index;
706 
707  from = vlib_frame_vector_args (frame);
708  n_left_from = frame->n_vectors;
709  next_index = node->cached_next_index;
710 
711  while (n_left_from > 0)
712  {
713  u32 n_left_to_next;
714 
715  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
716 
717  while (n_left_from > 0 && n_left_to_next > 0)
718  {
719  u32 bi0;
720  vlib_buffer_t *b0;
721  u32 next0;
722  ip4_header_t *ip40;
723  u8 cached0 = 0;
724  u32 sw_if_index0, fib_index0;
725  udp_header_t *udp0;
726  nat_reass_ip4_t *reass0;
727  ip46_address_t saddr0, daddr0;
728  nat64_db_st_entry_t *ste0;
729  nat64_db_bib_entry_t *bibe0;
730  ip6_address_t ip6_saddr0;
731  nat64_out2in_frag_set_ctx_t ctx0;
732  nat64_db_t *db = &nm->db[thread_index];
733 
734  /* speculatively enqueue b0 to the current next frame */
735  bi0 = from[0];
736  to_next[0] = bi0;
737  from += 1;
738  to_next += 1;
739  n_left_from -= 1;
740  n_left_to_next -= 1;
741 
742  b0 = vlib_get_buffer (vm, bi0);
743  next0 = NAT64_OUT2IN_NEXT_IP6_LOOKUP;
744 
745  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
746  fib_index0 =
747  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
748  sw_if_index0);
749 
750  ctx0.thread_index = thread_index;
751 
752  if (PREDICT_FALSE (nat_reass_is_drop_frag (0)))
753  {
754  next0 = NAT64_OUT2IN_NEXT_DROP;
755  b0->error = node->errors[NAT64_OUT2IN_ERROR_DROP_FRAGMENT];
756  goto trace0;
757  }
758 
759  ip40 = vlib_buffer_get_current (b0);
760 
761  if (PREDICT_FALSE (!(ip40->protocol == IP_PROTOCOL_TCP
762  || ip40->protocol == IP_PROTOCOL_UDP)))
763  {
764  next0 = NAT64_OUT2IN_NEXT_DROP;
765  b0->error = node->errors[NAT64_OUT2IN_ERROR_DROP_FRAGMENT];
766  goto trace0;
767  }
768 
769  udp0 = ip4_next_header (ip40);
770 
771  reass0 = nat_ip4_reass_find_or_create (ip40->src_address,
772  ip40->dst_address,
773  ip40->fragment_id,
774  ip40->protocol,
775  1, &fragments_to_drop);
776 
777  if (PREDICT_FALSE (!reass0))
778  {
779  next0 = NAT64_OUT2IN_NEXT_DROP;
780  b0->error = node->errors[NAT64_OUT2IN_ERROR_MAX_REASS];
781  goto trace0;
782  }
783 
784  if (PREDICT_FALSE (ip4_is_first_fragment (ip40)))
785  {
786  ctx0.first_frag = 1;
787 
788  clib_memset (&saddr0, 0, sizeof (saddr0));
789  saddr0.ip4.as_u32 = ip40->src_address.as_u32;
790  clib_memset (&daddr0, 0, sizeof (daddr0));
791  daddr0.ip4.as_u32 = ip40->dst_address.as_u32;
792 
793  ste0 =
794  nat64_db_st_entry_find (db, &daddr0, &saddr0,
795  udp0->dst_port, udp0->src_port,
796  ip40->protocol, fib_index0, 0);
797  if (!ste0)
798  {
799  bibe0 =
800  nat64_db_bib_entry_find (db, &daddr0, udp0->dst_port,
801  ip40->protocol, fib_index0, 0);
802  if (!bibe0)
803  {
804  next0 = NAT64_OUT2IN_NEXT_DROP;
805  b0->error =
806  node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
807  goto trace0;
808  }
809 
810  nat64_compose_ip6 (&ip6_saddr0, &ip40->src_address,
811  bibe0->fib_index);
812  ste0 =
813  nat64_db_st_entry_create (db, bibe0, &ip6_saddr0,
814  &saddr0.ip4, udp0->src_port);
815 
816  if (!ste0)
817  {
818  next0 = NAT64_OUT2IN_NEXT_DROP;
819  b0->error =
820  node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
821  goto trace0;
822  }
823 
824  vlib_set_simple_counter (&nm->total_sessions, thread_index,
825  0, db->st.st_entries_num);
826  }
827  reass0->sess_index = nat64_db_st_entry_get_index (db, ste0);
828  reass0->thread_index = thread_index;
829 
830  nat_ip4_reass_get_frags (reass0, &fragments_to_loopback);
831  }
832  else
833  {
834  ctx0.first_frag = 0;
835 
836  if (PREDICT_FALSE (reass0->sess_index == (u32) ~ 0))
837  {
838  if (nat_ip4_reass_add_fragment
839  (reass0, bi0, &fragments_to_drop))
840  {
841  b0->error = node->errors[NAT64_OUT2IN_ERROR_MAX_FRAG];
842  next0 = NAT64_OUT2IN_NEXT_DROP;
843  goto trace0;
844  }
845  cached0 = 1;
846  goto trace0;
847  }
848  }
849 
850  ctx0.sess_index = reass0->sess_index;
851  ctx0.proto = ip40->protocol;
852  ctx0.vm = vm;
853  ctx0.b = b0;
854 
855  if (ip4_to_ip6_fragmented (b0, nat64_out2in_frag_set_cb, &ctx0))
856  {
857  next0 = NAT64_OUT2IN_NEXT_DROP;
858  b0->error = node->errors[NAT64_OUT2IN_ERROR_UNKNOWN];
859  goto trace0;
860  }
861 
862  trace0:
863  if (PREDICT_FALSE
864  ((node->flags & VLIB_NODE_FLAG_TRACE)
865  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
866  {
867  nat64_out2in_reass_trace_t *t =
868  vlib_add_trace (vm, node, b0, sizeof (*t));
869  t->cached = cached0;
870  t->sw_if_index = sw_if_index0;
871  t->next_index = next0;
872  }
873 
874  if (cached0)
875  {
876  n_left_to_next++;
877  to_next--;
878  cached_fragments++;
879  }
880  else
881  {
882  pkts_processed += next0 != NAT64_OUT2IN_NEXT_DROP;
883 
884  /* verify speculative enqueue, maybe switch current next frame */
885  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
886  to_next, n_left_to_next,
887  bi0, next0);
888  }
889 
890  if (n_left_from == 0 && vec_len (fragments_to_loopback))
891  {
892  from = vlib_frame_vector_args (frame);
893  u32 len = vec_len (fragments_to_loopback);
894  if (len <= VLIB_FRAME_SIZE)
895  {
896  clib_memcpy_fast (from, fragments_to_loopback,
897  sizeof (u32) * len);
898  n_left_from = len;
899  vec_reset_length (fragments_to_loopback);
900  }
901  else
902  {
903  clib_memcpy_fast (from, fragments_to_loopback +
904  (len - VLIB_FRAME_SIZE),
905  sizeof (u32) * VLIB_FRAME_SIZE);
906  n_left_from = VLIB_FRAME_SIZE;
907  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
908  }
909  }
910  }
911 
912  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
913  }
914 
915  vlib_node_increment_counter (vm, nat64_out2in_reass_node.index,
916  NAT64_OUT2IN_ERROR_PROCESSED_FRAGMENTS,
917  pkts_processed);
918  vlib_node_increment_counter (vm, nat64_out2in_reass_node.index,
919  NAT64_OUT2IN_ERROR_CACHED_FRAGMENTS,
920  cached_fragments);
921 
922  nat_send_all_to_node (vm, fragments_to_drop, node,
923  &node->errors[NAT64_OUT2IN_ERROR_DROP_FRAGMENT],
924  NAT64_OUT2IN_NEXT_DROP);
925 
926  vec_free (fragments_to_drop);
927  vec_free (fragments_to_loopback);
928  return frame->n_vectors;
929 }
930 
931 /* *INDENT-OFF* */
932 VLIB_REGISTER_NODE (nat64_out2in_reass_node) = {
933  .function = nat64_out2in_reass_node_fn,
934  .name = "nat64-out2in-reass",
935  .vector_size = sizeof (u32),
936  .format_trace = format_nat64_out2in_reass_trace,
937  .type = VLIB_NODE_TYPE_INTERNAL,
938  .n_errors = ARRAY_LEN (nat64_out2in_error_strings),
939  .error_strings = nat64_out2in_error_strings,
940  .n_next_nodes = NAT64_OUT2IN_N_NEXT,
941  /* edit / add dispositions here */
942  .next_nodes = {
943  [NAT64_OUT2IN_NEXT_DROP] = "error-drop",
944  [NAT64_OUT2IN_NEXT_IP6_LOOKUP] = "ip6-lookup",
945  [NAT64_OUT2IN_NEXT_IP4_LOOKUP] = "ip4-lookup",
946  [NAT64_OUT2IN_NEXT_REASS] = "nat64-out2in-reass",
947  },
948 };
949 /* *INDENT-ON* */
950 
951 VLIB_NODE_FUNCTION_MULTIARCH (nat64_out2in_reass_node,
952  nat64_out2in_reass_node_fn);
953 
954 #define foreach_nat64_out2in_handoff_error \
955 _(CONGESTION_DROP, "congestion drop") \
956 _(SAME_WORKER, "same worker") \
957 _(DO_HANDOFF, "do handoff")
958 
959 typedef enum
960 {
961 #define _(sym,str) NAT64_OUT2IN_HANDOFF_ERROR_##sym,
962  foreach_nat64_out2in_handoff_error
963 #undef _
964  NAT64_OUT2IN_HANDOFF_N_ERROR,
965 } nat64_out2in_handoff_error_t;
966 
967 static char *nat64_out2in_handoff_error_strings[] = {
968 #define _(sym,string) string,
969  foreach_nat64_out2in_handoff_error
970 #undef _
971 };
972 
973 typedef struct
974 {
975  u32 next_worker_index;
976 } nat64_out2in_handoff_trace_t;
977 
978 static u8 *
979 format_nat64_out2in_handoff_trace (u8 * s, va_list * args)
980 {
981  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
982  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
983  nat64_out2in_handoff_trace_t *t =
984  va_arg (*args, nat64_out2in_handoff_trace_t *);
985 
986  s =
987  format (s, "NAT64-OUT2IN-HANDOFF: next-worker %d", t->next_worker_index);
988 
989  return s;
990 }
991 
992 static inline uword
993 nat64_out2in_handoff_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
994  vlib_frame_t * frame)
995 {
996  nat64_main_t *nm = &nat64_main;
997  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
998  u32 n_enq, n_left_from, *from;
999  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1000  u32 fq_index;
1001  u32 thread_index = vm->thread_index;
1002  u32 do_handoff = 0, same_worker = 0;
1003 
1004  from = vlib_frame_vector_args (frame);
1005  n_left_from = frame->n_vectors;
1006  vlib_get_buffers (vm, from, bufs, n_left_from);
1007 
1008  b = bufs;
1009  ti = thread_indices;
1010 
1011  fq_index = nm->fq_out2in_index;
1012 
1013  while (n_left_from > 0)
1014  {
1015  ip4_header_t *ip0;
1016 
1017  ip0 = vlib_buffer_get_current (b[0]);
1018  ti[0] = nat64_get_worker_out2in (ip0);
1019 
1020  if (ti[0] != thread_index)
1021  do_handoff++;
1022  else
1023  same_worker++;
1024 
1025  if (PREDICT_FALSE
1026  ((node->flags & VLIB_NODE_FLAG_TRACE)
1027  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1028  {
1029  nat64_out2in_handoff_trace_t *t =
1030  vlib_add_trace (vm, node, b[0], sizeof (*t));
1031  t->next_worker_index = ti[0];
1032  }
1033 
1034  n_left_from -= 1;
1035  ti += 1;
1036  b += 1;
1037  }
1038 
1039  n_enq =
1040  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1041  frame->n_vectors, 1);
1042 
1043  if (n_enq < frame->n_vectors)
1044  vlib_node_increment_counter (vm, node->node_index,
1045  NAT64_OUT2IN_HANDOFF_ERROR_CONGESTION_DROP,
1046  frame->n_vectors - n_enq);
1047  vlib_node_increment_counter (vm, node->node_index,
1048  NAT64_OUT2IN_HANDOFF_ERROR_SAME_WORKER,
1049  same_worker);
1050  vlib_node_increment_counter (vm, node->node_index,
1051  NAT64_OUT2IN_HANDOFF_ERROR_DO_HANDOFF,
1052  do_handoff);
1053 
1054  return frame->n_vectors;
1055 }
1056 
1057 /* *INDENT-OFF* */
1058 VLIB_REGISTER_NODE (nat64_out2in_handoff_node) = {
1059  .function = nat64_out2in_handoff_node_fn,
1060  .name = "nat64-out2in-handoff",
1061  .vector_size = sizeof (u32),
1062  .format_trace = format_nat64_out2in_handoff_trace,
1063  .type = VLIB_NODE_TYPE_INTERNAL,
1064  .n_errors = ARRAY_LEN (nat64_out2in_handoff_error_strings),
1065  .error_strings = nat64_out2in_handoff_error_strings,
1066 
1067  .n_next_nodes = 1,
1068 
1069  .next_nodes = {
1070  [0] = "error-drop",
1071  },
1072 };
1073 /* *INDENT-ON* */
1074 
1075 VLIB_NODE_FUNCTION_MULTIARCH (nat64_out2in_handoff_node,
1076  nat64_out2in_handoff_node_fn);
1077 /*
1078  * fd.io coding-style-patch-verification: ON
1079  *
1080  * Local Variables:
1081  * eval: (c-set-style "gnu")
1082  * End:
1083  */