41 tag = t->
is_slow_path ?
"NAT64-in2out-slowpath" :
"NAT64-in2out";
66 format (s,
"NAT64-in2out-reass: sw_if_index %d, next index %d, status %s",
68 t->
cached ?
"cached" :
"translated");
78 #define foreach_nat64_in2out_error \ 79 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \ 80 _(IN2OUT_PACKETS, "good in2out packets processed") \ 81 _(NO_TRANSLATION, "no translation") \ 82 _(UNKNOWN, "unknown") \ 83 _(DROP_FRAGMENT, "Drop fragment") \ 84 _(MAX_REASS, "Maximum reassemblies exceeded") \ 85 _(MAX_FRAG, "Maximum fragments per reassembly exceeded") 90 #define _(sym,str) NAT64_IN2OUT_ERROR_##sym, 97 #define _(sym,string) string, 170 nat64_db_bib_entry_t *bibe;
171 nat64_db_st_entry_t *ste;
172 ip46_address_t saddr, daddr;
173 u32 sw_if_index, fib_index;
214 sport, out_port, fib_index, proto, 0);
234 if (proto == IP_PROTOCOL_TCP)
240 checksum = &tcp->checksum;
254 nat64_db_bib_entry_t *bibe;
255 nat64_db_st_entry_t *ste;
256 ip46_address_t saddr, daddr;
257 u32 sw_if_index, fib_index;
270 if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
272 u16 in_id = ((
u16 *) (icmp))[2];
275 IP_PROTOCOL_ICMP, fib_index, 1);
289 IP_PROTOCOL_ICMP, fib_index, 1);
296 (fib_index, SNAT_PROTOCOL_ICMP, &out_addr, &out_id,
302 &out_addr, in_id, out_id,
303 fib_index, IP_PROTOCOL_ICMP, 0);
319 ((
u16 *) (icmp))[2] = bibe->out_port;
341 nat64_db_st_entry_t *ste;
342 nat64_db_bib_entry_t *bibe;
343 ip46_address_t saddr, daddr;
344 u32 sw_if_index, fib_index;
357 if (proto == IP_PROTOCOL_ICMP6)
360 u16 in_id = ((
u16 *) (icmp))[2];
361 proto = IP_PROTOCOL_ICMP;
364 (icmp->type == ICMP4_echo_request
365 || icmp->type == ICMP4_echo_reply))
379 ((
u16 *) (icmp))[2] = bibe->out_port;
406 if (proto == IP_PROTOCOL_TCP)
407 checksum = &tcp->checksum;
433 nat64_db_bib_entry_t *bibe;
434 ip46_address_t saddr, daddr;
446 memset (&saddr, 0,
sizeof (saddr));
447 saddr.ip4.as_u32 = bibe->out_addr.as_u32;
448 memset (&daddr, 0,
sizeof (daddr));
469 nat64_db_bib_entry_t *bibe;
470 nat64_db_st_entry_t *ste;
471 ip46_address_t saddr, daddr,
addr;
472 u32 sw_if_index, fib_index;
507 .out_addr.as_u32 = 0,
508 .fib_index = fib_index,
519 memset (&addr, 0,
sizeof (addr));
539 &ctx.
out_addr, 0, 0, fib_index, proto,
567 nat64_db_bib_entry_t *bibe;
568 nat64_db_st_entry_t *ste;
569 ip46_address_t saddr, daddr;
570 u32 sw_if_index, fib_index;
589 if (proto == IP_PROTOCOL_UDP)
592 checksum = &tcp->checksum;
621 &out_port, thread_index))
626 sport, out_port, fib_index, proto, 0);
641 sport = udp->
src_port = bibe->out_port;
644 memset (&daddr, 0,
sizeof (daddr));
645 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
681 nat64_db_bib_entry_t *bibe;
682 nat64_db_st_entry_t *ste;
685 ip46_address_t saddr, daddr;
686 u32 sw_if_index, fib_index;
690 u16 *checksum, sport, dport;
694 if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
701 if (proto == IP_PROTOCOL_ICMP6)
719 if (proto == IP_PROTOCOL_UDP)
722 checksum = &tcp->checksum;
741 dport = udp->
dst_port = bibe->out_port;
744 memset (&saddr, 0,
sizeof (saddr));
745 memset (&daddr, 0,
sizeof (daddr));
746 saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
747 daddr.ip4.as_u32 = bibe->out_addr.as_u32;
807 nat64_db_bib_entry_t *bibe;
808 nat64_db_st_entry_t *ste;
809 ip46_address_t saddr, daddr,
addr;
810 u32 sw_if_index, fib_index;
845 .out_addr.as_u32 = 0,
846 .fib_index = fib_index,
857 memset (&addr, 0,
sizeof (addr));
877 &ctx.
out_addr, 0, 0, fib_index, proto,
894 memset (&daddr, 0,
sizeof (daddr));
895 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
921 u32 n_left_from, *from, *to_next;
923 u32 pkts_processed = 0;
924 u32 stats_node_index;
934 while (n_left_from > 0)
940 while (n_left_from > 0 && n_left_to_next > 0)
946 u16 l4_offset0, frag_offset0;
975 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_UNKNOWN];
997 (vm, b0, ip60, thread_index))
1001 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1010 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1026 (ip60->
protocol == IP_PROTOCOL_IPV6_FRAGMENTATION))
1032 if (proto0 == SNAT_PROTOCOL_ICMP)
1038 (vm, b0, ip60, thread_index))
1042 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1052 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1056 else if (proto0 == SNAT_PROTOCOL_TCP || proto0 == SNAT_PROTOCOL_UDP)
1062 (vm, b0, ip60, thread_index))
1066 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1075 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1082 && (b0->
flags & VLIB_BUFFER_IS_TRACED)))
1095 n_left_to_next, bi0, next0);
1100 NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
1115 .name =
"nat64-in2out",
1116 .vector_size =
sizeof (
u32),
1145 .name =
"nat64-in2out-slowpath",
1146 .vector_size =
sizeof (
u32),
1181 nat64_db_st_entry_t *ste;
1182 nat64_db_bib_entry_t *bibe;
1200 if (ctx->
proto == IP_PROTOCOL_TCP)
1232 nat64_db_st_entry_t *ste;
1233 nat64_db_bib_entry_t *bibe;
1240 ip46_address_t daddr;
1245 if (ctx->
proto == IP_PROTOCOL_UDP)
1248 checksum = &tcp->checksum;
1268 sport = bibe->out_port;
1269 dport = ste->r_port;
1273 memset (&daddr, 0,
sizeof (daddr));
1274 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
1313 u32 n_left_from, *from, *to_next;
1315 u32 pkts_processed = 0;
1316 u32 *fragments_to_drop = 0;
1317 u32 *fragments_to_loopback = 0;
1325 while (n_left_from > 0)
1331 while (n_left_from > 0 && n_left_to_next > 0)
1338 u16 l4_offset0, frag_offset0;
1340 nat_reass_ip6_t *reass0;
1341 ip6_frag_hdr_t *frag0;
1342 nat64_db_bib_entry_t *bibe0;
1343 nat64_db_st_entry_t *ste0;
1346 u32 sw_if_index0, fib_index0;
1347 ip46_address_t saddr0, daddr0;
1357 n_left_to_next -= 1;
1372 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
1384 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1389 (!(l4_protocol0 == IP_PROTOCOL_TCP
1390 || l4_protocol0 == IP_PROTOCOL_UDP)))
1393 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
1398 frag0 = (ip6_frag_hdr_t *)
u8_ptr_add (ip60, frag_offset0);
1403 frag0->identification,
1405 1, &fragments_to_drop);
1410 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_MAX_REASS];
1420 (reass0, bi0, &fragments_to_drop))
1422 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_MAX_FRAG];
1442 l4_protocol0, fib_index0, 1);
1447 l4_protocol0, fib_index0, 1);
1453 (fib_index0, proto0, &out_addr0, &out_port0,
1458 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1466 out_port0, fib_index0,
1472 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1486 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1496 ctx0.
proto = l4_protocol0;
1506 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1515 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1523 && (b0->
flags & VLIB_BUFFER_IS_TRACED)))
1543 to_next, n_left_to_next,
1547 if (n_left_from == 0 &&
vec_len (fragments_to_loopback))
1554 sizeof (
u32) * len);
1561 fragments_to_loopback + (len -
1574 NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
1578 &node->
errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT],
1589 .name =
"nat64-in2out-reass",
1590 .vector_size =
sizeof (
u32),
1625 m = t->
do_handoff ?
"next worker" :
"same worker";
1637 u32 n_left_from, *from, *to_next = 0, *to_next_drop = 0;
1645 u32 n_left_to_next_worker = 0, *to_next_worker = 0;
1646 u32 next_worker_index = 0;
1647 u32 current_worker_index = ~0;
1667 while (n_left_from > 0)
1688 if (next_worker_index != current_worker_index)
1693 congested_handoff_queue_by_worker_index);
1704 to_next_drop[0] = bi0;
1716 handoff_queue_elt_by_worker_index);
1719 current_worker_index = next_worker_index;
1722 ASSERT (to_next_worker != 0);
1725 to_next_worker[0] = bi0;
1727 n_left_to_next_worker--;
1729 if (n_left_to_next_worker == 0)
1733 current_worker_index = ~0;
1734 handoff_queue_elt_by_worker_index[next_worker_index] = 0;
1756 && (b0->
flags & VLIB_BUFFER_IS_TRACED)))
1775 for (i = 0; i <
vec_len (handoff_queue_elt_by_worker_index); i++)
1777 if (handoff_queue_elt_by_worker_index[i])
1779 hf = handoff_queue_elt_by_worker_index[
i];
1787 handoff_queue_elt_by_worker_index[
i] = 0;
1792 congested_handoff_queue_by_worker_index[
i] =
1796 current_worker_index = ~0;
1803 .name =
"nat64-in2out-handoff",
1804 .vector_size =
sizeof (
u32),
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
#define foreach_ip_interface_address(lm, a, sw_if_index, loop, body)
nat64_db_t * db
BIB and session DB per thread.
static int unk_proto_st_walk(nat64_db_st_entry_t *ste, void *arg)
static int nat64_in2out_frag_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
static int nat64_in2out_frag_hairpinning(vlib_buffer_t *b, ip6_header_t *ip6, nat64_in2out_frag_set_ctx_t *ctx)
snat_address_t * addr_pool
Address pool vector.
static int icmp6_to_icmp(vlib_buffer_t *p, ip6_to_ip4_set_fn_t fn, void *ctx, ip6_to_ip4_set_fn_t inner_fn, void *inner_ctx)
Translate ICMP6 packet to ICMP4.
void nat64_extract_ip4(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Extract IPv4 address from the IPv4-embedded IPv6 addresses.
vlib_node_registration_t nat64_in2out_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_node)
nat64_db_st_entry_t * nat64_db_st_entry_create(nat64_db_t *db, nat64_db_bib_entry_t *bibe, ip6_address_t *in_r_addr, ip4_address_t *out_r_addr, u16 r_port)
Create new NAT64 session table entry.
u32 fib_table_get_index_for_sw_if_index(fib_protocol_t proto, u32 sw_if_index)
Get the index of the FIB bound to the interface.
nat64_db_bib_entry_t * nat64_db_bib_entry_create(nat64_db_t *db, ip6_address_t *in_addr, ip4_address_t *out_addr, u16 in_port, u16 out_port, u32 fib_index, u8 proto, u8 is_static)
Create new NAT64 BIB entry.
void nat_ip6_reass_get_frags(nat_reass_ip6_t *reass, u32 **bi)
Get cached fragments.
nat64_db_bib_entry_t * nat64_db_bib_entry_find(nat64_db_t *db, ip46_address_t *addr, u16 port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 BIB entry.
static ip_csum_t ip_csum_with_carry(ip_csum_t sum, ip_csum_t x)
static u8 * format_nat64_in2out_trace(u8 *s, va_list *args)
nat64_db_st_entry_t * nat64_db_st_entry_by_index(nat64_db_t *db, u8 proto, u32 ste_index)
Get ST entry by index and protocol.
u32 buffer_index[VLIB_FRAME_SIZE]
vlib_error_t * errors
Vector of errors for this node.
vlib_node_registration_t nat64_in2out_handoff_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_handoff_node)
struct _tcp_header tcp_header_t
static vlib_frame_queue_t * is_vlib_frame_queue_congested(u32 frame_queue_index, u32 index, u32 queue_hi_thresh, vlib_frame_queue_t **handoff_queue_by_worker_index)
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
static int nat64_in2out_tcp_udp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
u32 nat64_get_worker_in2out(ip6_address_t *addr)
Get worker thread index for NAT64 in2out.
#define static_always_inline
vlib_node_registration_t nat64_in2out_reass_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_reass_node)
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
static_always_inline int ip6_parse(const ip6_header_t *ip6, u32 buff_len, u8 *l4_protocol, u16 *l4_offset, u16 *frag_hdr_offset)
Parse some useful information from IPv6 header.
struct unk_proto_st_walk_ctx_t_ unk_proto_st_walk_ctx_t
static int ip6_to_ip4_tcp_udp(vlib_buffer_t *p, ip6_to_ip4_set_fn_t fn, void *ctx, u8 udp_checksum)
Translate IPv6 UDP/TCP packet to IPv4.
u16 current_length
Number of bytes between the current data pointer and the end of this buffer.
void nat64_session_reset_timeout(nat64_db_st_entry_t *ste, vlib_main_t *vm)
Reset NAT64 session timeout.
static uword nat64_in2out_slowpath_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
int nat64_alloc_out_addr_and_port(u32 fib_index, snat_protocol_t proto, ip4_address_t *addr, u16 *port, u32 thread_index)
Allocate an IPv4 address and port pair from the NAT64 pool.
static u8 * format_nat64_in2out_reass_trace(u8 *s, va_list *args)
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
int ip6_address_compare(ip6_address_t *a1, ip6_address_t *a2)
static vlib_frame_queue_elt_t * vlib_get_worker_handoff_queue_elt(u32 frame_queue_index, u32 vlib_worker_index, vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index)
static int nat64_in2out_inner_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
static char * nat64_in2out_error_strings[]
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
void nat64_compose_ip6(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Compose IPv4-embedded IPv6 addresses.
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
static u8 * format_nat64_in2out_handoff_trace(u8 *s, va_list *args)
#define VLIB_REGISTER_NODE(x,...)
static int nat64_in2out_unk_proto_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
static int nat64_in2out_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
u8 nat_reass_is_drop_frag(u8 is_ip6)
Get status of virtual fragmentation reassembly.
static int nat64_in2out_tcp_udp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
int nat_ip6_reass_add_fragment(nat_reass_ip6_t *reass, u32 bi, u32 **bi_to_drop)
Cache fragment.
#define vec_free(V)
Free vector's memory (no header).
static ip_csum_t ip_incremental_checksum(ip_csum_t sum, void *_data, uword n_bytes)
vlib_node_registration_t nat64_in2out_slowpath_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_slowpath_node)
#define clib_memcpy(a, b, c)
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
static void * ip6_next_header(ip6_header_t *i)
#define ip6_frag_hdr_offset(hdr)
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
nat64_db_st_entry_t * nat64_db_st_entry_find(nat64_db_t *db, ip46_address_t *l_addr, ip46_address_t *r_addr, u16 l_port, u16 r_port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 session table entry.
void nat64_db_st_walk(nat64_db_t *db, u8 proto, nat64_db_st_walk_fn_t fn, void *ctx)
Walk NAT64 session table.
static int nat64_in2out_icmp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
ip_lookup_main_t lookup_main
u32 fq_in2out_index
Worker handoff.
u32 nat64_db_st_entry_get_index(nat64_db_t *db, nat64_db_st_entry_t *ste)
static u8 nat64_not_translate(u32 sw_if_index, ip6_address_t ip6_addr)
static ip_csum_t ip_csum_sub_even(ip_csum_t c, ip_csum_t x)
nat64_db_bib_entry_t * nat64_db_bib_entry_by_index(nat64_db_t *db, u8 proto, u32 bibe_index)
Get BIB entry by index and protocol.
IPv6 to IPv4 translation.
static uword nat64_in2out_reass_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
#define u8_ptr_add(ptr, index)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
struct _vlib_node_registration vlib_node_registration_t
NAT64 global declarations.
static u32 ip_proto_to_snat_proto(u8 ip_proto)
The NAT inline functions.
VLIB_NODE_FUNCTION_MULTIARCH(nat64_in2out_node, nat64_in2out_node_fn)
#define ip46_address_is_equal(a1, a2)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
nat_reass_ip6_t * nat_ip6_reass_find_or_create(ip6_address_t src, ip6_address_t dst, u32 frag_id, u8 proto, u8 reset_timeout, u32 **bi_to_drop)
Find or create reassembly.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static uword nat64_in2out_handoff_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static void vlib_put_frame_queue_elt(vlib_frame_queue_elt_t *hf)
static uword nat64_in2out_node_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_slow_path)
static int nat64_in2out_unk_proto_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
static vlib_thread_main_t * vlib_get_thread_main()
static_always_inline int is_hairpinning(ip6_address_t *dst_addr)
Check whether is a hairpinning.
#define vec_foreach(var, vec)
Vector iterator.
static uword nat64_in2out_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
u16 flags
Copy of main node flags.
static void nat_send_all_to_node(vlib_main_t *vm, u32 *bi_vector, vlib_node_runtime_t *node, vlib_error_t *error, u32 next)
static void * ip_interface_address_get_address(ip_lookup_main_t *lm, ip_interface_address_t *a)
NAT plugin virtual fragmentation reassembly.
#define VLIB_NODE_FLAG_TRACE
#define foreach_nat64_in2out_error
#define vec_validate_init_empty(V, I, INIT)
Make sure vector is long enough for given index and initialize empty space (no header, unspecified alignment)
struct nat64_in2out_set_ctx_t_ nat64_in2out_set_ctx_t
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static int ip6_to_ip4(vlib_buffer_t *p, ip6_to_ip4_set_fn_t fn, void *ctx)
Translate IPv6 packet to IPv4 (IP header only).
static u16 ip_csum_fold(ip_csum_t c)
struct nat64_in2out_frag_set_ctx_t_ nat64_in2out_frag_set_ctx_t
static ip_csum_t ip_csum_add_even(ip_csum_t c, ip_csum_t x)
static int ip6_to_ip4_fragmented(vlib_buffer_t *p, ip6_to_ip4_set_fn_t fn, void *ctx)
Translate IPv6 fragmented packet to IPv4.