ip4_map_t.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "map.h"

#include <vnet/ip/ip_frag.h>
#include <vnet/ip/ip4_to_ip6.h>

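/*
 * Per-node next indices.  Each enum below mirrors the next_nodes table of
 * the corresponding VLIB_REGISTER_NODE block at the end of this file.
 */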
typedef enum
{
  IP4_MAPT_NEXT_MAPT_TCP_UDP,
  IP4_MAPT_NEXT_MAPT_ICMP,
  IP4_MAPT_NEXT_MAPT_FRAGMENTED,
  IP4_MAPT_NEXT_ICMP_ERROR,
  IP4_MAPT_NEXT_DROP,
  IP4_MAPT_N_NEXT
} ip4_mapt_next_t;

typedef enum
{
  IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
  IP4_MAPT_ICMP_NEXT_IP6_REWRITE,
  IP4_MAPT_ICMP_NEXT_IP6_FRAG,
  IP4_MAPT_ICMP_NEXT_DROP,
  IP4_MAPT_ICMP_N_NEXT
} ip4_mapt_icmp_next_t;

typedef enum
{
  IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
  IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE,
  IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
  IP4_MAPT_TCP_UDP_NEXT_DROP,
  IP4_MAPT_TCP_UDP_N_NEXT
} ip4_mapt_tcp_udp_next_t;

typedef enum
{
  IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
  IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE,
  IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
  IP4_MAPT_FRAGMENTED_NEXT_DROP,
  IP4_MAPT_FRAGMENTED_N_NEXT
} ip4_mapt_fragmented_next_t;

//This is used to pass information within the buffer data.
//The buffer metadata is too small to carry a structure this large.
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip6_address_t daddr;
  ip6_address_t saddr;
  //IPv6 header + Fragmentation header will be here
  //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
  u8 unused[28];
}) ip4_mapt_pseudo_header_t;
/* *INDENT-ON* */
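/*
 * sizeof(ip6_header_t) + sizeof(ip6_frag_hdr_t) - sizeof(ip4_header_t)
 * = 40 + 8 - 20 = 28 bytes of reserved headroom.  ip4-map-t prepends this
 * pseudo-header; the TCP/UDP and fragmented nodes strip it and expand the
 * IPv4 header in place into an IPv6 (+ fragment) header in that reserved
 * space, while the ICMP node simply skips it.
 */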

typedef struct
{
  map_domain_t *d;
  u16 recv_port;
} icmp_to_icmp6_ctx_t;

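/*
 * Address-rewrite callbacks for icmp_to_icmp6(): the first handles the
 * outer ICMP header, the second the IPv4 packet embedded in an ICMP error.
 * Both derive the IPv6 addresses from the MAP domain rules and the receive
 * port stored in icmp_to_icmp6_ctx_t.
 */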
static int
ip4_to_ip6_set_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
                        ip6_header_t * ip6, void *arg)
{
  icmp_to_icmp6_ctx_t *ctx = arg;

  ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
  ip6->dst_address.as_u64[0] =
    map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
  ip6->dst_address.as_u64[1] =
    map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);

  return 0;
}

static int
ip4_to_ip6_set_inner_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
                              ip6_header_t * ip6, void *arg)
{
  icmp_to_icmp6_ctx_t *ctx = arg;
  ip4_address_t old_src, old_dst;

  old_src.as_u32 = ip4->src_address.as_u32;
  old_dst.as_u32 = ip4->dst_address.as_u32;

  //Note that the source address is within the domain
  //while the destination address is the one outside the domain
  ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &old_dst);
  ip6->src_address.as_u64[0] =
    map_get_pfx_net (ctx->d, old_src.as_u32, ctx->recv_port);
  ip6->src_address.as_u64[1] =
    map_get_sfx_net (ctx->d, old_src.as_u32, ctx->recv_port);

  return 0;
}

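/*
 * ip4-map-t-icmp: translate IPv4 ICMP to ICMPv6 using the callbacks above,
 * update the per-domain RX counters, and hand the packet to ip6-lookup,
 * the load-balance bypass, IPv6 fragmentation or drop.
 */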
static uword
ip4_map_t_icmp (vlib_main_t * vm, vlib_node_runtime_t * node,
                vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_icmp_next_t next0;
          u8 error0;
          map_domain_t *d0;
          u16 len0;
          icmp_to_icmp6_ctx_t ctx0;
          ip4_header_t *ip40;

          next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t));  //The pseudo-header is not used
          len0 =
            clib_net_to_host_u16 (((ip4_header_t *)
                                   vlib_buffer_get_current (p0))->length);
          d0 =
            pool_elt_at_index (map_main.domains,
                               vnet_buffer (p0)->map_t.map_domain_index);

          ip40 = vlib_buffer_get_current (p0);
          ctx0.recv_port = ip4_get_port (ip40, 0);
          ctx0.d = d0;
          if (ctx0.recv_port == 0)
            {
              // In case of 1:1 mapping, we don't care about the port
              if (!(d0->ea_bits_len == 0 && d0->rules))
                {
                  error0 = MAP_ERROR_ICMP;
                  goto err0;
                }
            }

          if (icmp_to_icmp6
              (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
               ip4_to_ip6_set_inner_icmp_cb, &ctx0))
            {
              error0 = MAP_ERROR_ICMP;
              goto err0;
            }

          if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
            {
              vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
              vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP6_LOOKUP;
              next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
            }
          else
            {
              next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
                IP4_MAPT_ICMP_NEXT_IP6_REWRITE : IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
            }
        err0:
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               len0);
            }
          else
            {
              next0 = IP4_MAPT_ICMP_NEXT_DROP;
            }
          p0->error = error_node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

/*
 * Translate fragmented IPv4 UDP/TCP packet to IPv6.
 */
always_inline int
map_ip4_to_ip6_fragmented (vlib_buffer_t * p,
                           ip4_mapt_pseudo_header_t * pheader)
{
  ip4_header_t *ip4;
  ip6_header_t *ip6;
  ip6_frag_hdr_t *frag;

  ip4 = vlib_buffer_get_current (p);
  frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
  ip6 =
    (ip6_header_t *) u8_ptr_add (ip4,
                                 sizeof (*ip4) - sizeof (*frag) -
                                 sizeof (*ip6));
  vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));

  //We know that the protocol was one of ICMP, TCP or UDP
  //because the first fragment was found and cached
  frag->next_hdr =
    (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol;
  frag->identification = frag_id_4to6 (ip4->fragment_id);
  frag->rsv = 0;
  frag->fragment_offset_and_more =
    ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip4),
                                  clib_net_to_host_u16
                                  (ip4->flags_and_fragment_offset) &
                                  IP4_HEADER_FLAG_MORE_FRAGMENTS);

  ip6->ip_version_traffic_class_and_flow_label =
    clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
  ip6->payload_length =
    clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
                          sizeof (*ip4) + sizeof (*frag));
  ip6->hop_limit = ip4->ttl;
  ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;

  ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
  ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
  ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
  ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];

  return 0;
}
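
/*
 * The IPv6 and fragment headers are written immediately in front of the
 * IPv4 payload: the vlib_buffer_advance by sizeof(ip4) - sizeof(ip6) -
 * sizeof(frag) rewinds the buffer 28 bytes into the unused[28] tail of
 * the pseudo-header that ip4-map-t reserved for exactly this purpose.
 */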

static uword
ip4_map_t_fragmented (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_pseudo_header_t *pheader0;
          ip4_mapt_fragmented_next_t next0;

          next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          vlib_buffer_advance (p0, sizeof (*pheader0));

          if (map_ip4_to_ip6_fragmented (p0, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
              next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
                }
              else
                {
                  next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
                    IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE :
                    IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
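
/*
 * ip4-map-t-fragmented handles non-first fragments, which carry no L4
 * header of their own: the translation relies entirely on the IPv6
 * addresses that ip4-map-t pre-computed (from the port cached by shallow
 * virtual reassembly) and stored in the pseudo-header.
 */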

/*
 * Translate IPv4 UDP/TCP packet to IPv6.
 */
always_inline int
map_ip4_to_ip6_tcp_udp (vlib_buffer_t * p, ip4_mapt_pseudo_header_t * pheader)
{
  map_main_t *mm = &map_main;
  ip4_header_t *ip4;
  ip6_header_t *ip6;
  ip_csum_t csum;
  u16 *checksum;
  ip6_frag_hdr_t *frag;
  u32 frag_id;
  ip4_address_t old_src, old_dst;

  ip4 = vlib_buffer_get_current (p);

  if (ip4->protocol == IP_PROTOCOL_UDP)
    {
      udp_header_t *udp = ip4_next_header (ip4);
      checksum = &udp->checksum;

      /*
       * UDP checksum is optional over IPv4 but mandatory for IPv6.  We
       * do not check udp->length sanity but use our safe computed
       * value instead
       */
      if (PREDICT_FALSE (!*checksum))
        {
          u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
          csum = ip_incremental_checksum (0, udp, udp_len);
          csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
          csum =
            ip_csum_with_carry (csum, clib_host_to_net_u16 (IP_PROTOCOL_UDP));
          csum = ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
          *checksum = ~ip_csum_fold (csum);
        }
    }
  else
    {
      tcp_header_t *tcp = ip4_next_header (ip4);
      if (mm->tcp_mss > 0)
        {
          csum = tcp->checksum;
          map_mss_clamping (tcp, &csum, mm->tcp_mss);
          tcp->checksum = ip_csum_fold (csum);
        }
      checksum = &tcp->checksum;
    }

  old_src.as_u32 = ip4->src_address.as_u32;
  old_dst.as_u32 = ip4->dst_address.as_u32;

  /* Deal with fragmented packets */
  if (PREDICT_FALSE (ip4->flags_and_fragment_offset &
                     clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
    {
      ip6 =
        (ip6_header_t *) u8_ptr_add (ip4,
                                     sizeof (*ip4) - sizeof (*ip6) -
                                     sizeof (*frag));
      frag =
        (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
      frag_id = frag_id_4to6 (ip4->fragment_id);
      vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
    }
  else
    {
      ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
      vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
      frag = NULL;
    }

  ip6->ip_version_traffic_class_and_flow_label =
    clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
  ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
  ip6->hop_limit = ip4->ttl;
  ip6->protocol = ip4->protocol;
  if (PREDICT_FALSE (frag != NULL))
    {
      frag->next_hdr = ip6->protocol;
      frag->identification = frag_id;
      frag->rsv = 0;
      frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more (0, 1);
      ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
      ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
    }

  ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
  ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
  ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
  ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];

  csum = ip_csum_sub_even (*checksum, old_src.as_u32);
  csum = ip_csum_sub_even (csum, old_dst.as_u32);
  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
  *checksum = ip_csum_fold (csum);

  return 0;
}
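
/*
 * Only the pseudo-header address contribution changes during translation,
 * so the L4 checksum is fixed up incrementally: subtract the two old IPv4
 * addresses, then add the four 64-bit halves of the new IPv6 source and
 * destination addresses.
 */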

static uword
ip4_map_t_tcp_udp (vlib_main_t * vm, vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);


  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_pseudo_header_t *pheader0;
          ip4_mapt_tcp_udp_next_t next0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          p0 = vlib_get_buffer (vm, pi0);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          vlib_buffer_advance (p0, sizeof (*pheader0));

          if (map_ip4_to_ip6_tcp_udp (p0, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
              else
                {
                  next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
                    IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE :
                    IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
                }
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static_always_inline void
ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
                    ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
                    u8 * error0, ip4_mapt_next_t * next0, u16 l4_dst_port)
{
  if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
    {
      *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
      if (d0->ea_bits_len == 0 && d0->rules)
        {
          *dst_port0 = 0;
        }
      else
        {
          *dst_port0 = l4_dst_port;
          *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
        }
    }
  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
    {
      vnet_buffer (p0)->map_t.checksum_offset = 36;
      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
      *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
      *dst_port0 = l4_dst_port;
    }
  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
    {
      vnet_buffer (p0)->map_t.checksum_offset = 26;
      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
      *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
      *dst_port0 = l4_dst_port;
    }
  else if (ip40->protocol == IP_PROTOCOL_ICMP)
    {
      *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
      if (d0->ea_bits_len == 0 && d0->rules)
        *dst_port0 = 0;
      else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->type
               == ICMP4_echo_reply
               || ((icmp46_header_t *)
                   u8_ptr_add (ip40,
                               sizeof (*ip40)))->type == ICMP4_echo_request)
        *dst_port0 = l4_dst_port;
    }
  else
    {
      *error0 = MAP_ERROR_BAD_PROTOCOL;
    }
}
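
/*
 * checksum_offset is relative to the start of the IPv4 header:
 * 20 (IPv4 header) + 16 (TCP checksum offset) = 36 for TCP, and
 * 20 + 6 (UDP checksum offset) = 26 for UDP.  The minimum-length checks
 * (40 = 20 + 20-byte TCP header, 28 = 20 + 8-byte UDP header) guarantee
 * the packet actually covers those fields.
 */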

static uword
ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_header_t *ip40;
          map_domain_t *d0;
          ip4_mapt_next_t next0 = 0;
          u16 ip4_len0;
          u8 error0;
          i32 dst_port0;
          ip4_mapt_pseudo_header_t *pheader0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);

          u16 l4_dst_port = vnet_buffer (p0)->ip.reass.l4_dst_port;

          ip40 = vlib_buffer_get_current (p0);
          ip4_len0 = clib_host_to_net_u16 (ip40->length);
          if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
                             ip40->ip_version_and_header_length != 0x45))
            {
              error0 = MAP_ERROR_UNKNOWN;
            }

          d0 = ip4_map_get_domain (&ip40->dst_address,
                                   &vnet_buffer (p0)->map_t.map_domain_index,
                                   &error0);

          if (!d0)
            {                   /* Guess it wasn't for us */
              vnet_feature_next (&next0, p0);
              goto exit;
            }

          dst_port0 = -1;

          if (PREDICT_FALSE (ip40->ttl == 1))
            {
              icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
                                           ICMP4_time_exceeded_ttl_exceeded_in_transit,
                                           0);
              p0->error = error_node->errors[MAP_ERROR_TIME_EXCEEDED];
              next0 = IP4_MAPT_NEXT_ICMP_ERROR;
              goto trace;
            }

          bool df0 =
            ip40->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);

          vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;

          if (PREDICT_FALSE
              (df0 && !map_main.frag_ignore_df
               &&
               ((ip4_len0 +
                 (sizeof (ip6_header_t) - sizeof (ip4_header_t))) >
                vnet_buffer (p0)->map_t.mtu)))
            {
              icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
                                           ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
                                           vnet_buffer (p0)->map_t.mtu -
                                           (sizeof (ip6_header_t) -
                                            sizeof (ip4_header_t)));
              p0->error = error_node->errors[MAP_ERROR_DF_SET];
              next0 = IP4_MAPT_NEXT_ICMP_ERROR;
              goto trace;
            }

          ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
                              &next0, l4_dst_port);

          /* Verify that port is not among the well-known ports */
          if ((d0->psid_length > 0 && d0->psid_offset > 0)
              && (clib_net_to_host_u16 (dst_port0) <
                  (0x1 << (16 - d0->psid_offset))))
            {
              error0 = MAP_ERROR_SEC_CHECK;
            }

          //Add MAP-T pseudo header in front of the packet
          vlib_buffer_advance (p0, -sizeof (*pheader0));
          pheader0 = vlib_buffer_get_current (p0);

          //Save addresses within the packet
          ip4_map_t_embedded_address (d0, &pheader0->saddr,
                                      &ip40->src_address);
          pheader0->daddr.as_u64[0] =
            map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
          pheader0->daddr.as_u64[1] =
            map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);

          if (PREDICT_TRUE
              (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               clib_net_to_host_u16
                                               (ip40->length));
            }

          next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
          p0->error = error_node->errors[error0];
        trace:
          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_add_trace (vm, node, p0, d0 - map_main.domains, dst_port0);
            }
        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
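
/*
 * ip4-map-t is the feature-arc entry point: it validates the IPv4 header,
 * looks up the MAP domain, classifies the packet, prepends the pseudo-header
 * carrying the translated addresses, and dispatches to the per-protocol
 * nodes.  ICMP packets are not counted here; their RX counter is updated in
 * ip4-map-t-icmp once translation has succeeded.
 */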

static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};

/* *INDENT-OFF* */
VNET_FEATURE_INIT (ip4_map_t_feature, static) = {
  .arc_name = "ip4-unicast",
  .node_name = "ip4-map-t",
  .runs_before = VNET_FEATURES ("ip4-flow-classify"),
  .runs_after = VNET_FEATURES ("ip4-sv-reassembly-feature"),
};
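
/*
 * Running after ip4-sv-reassembly-feature guarantees that
 * vnet_buffer (b)->ip.reass.l4_dst_port has been populated by shallow
 * virtual reassembly, including for non-first fragments, before ip4-map-t
 * runs.
 */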

VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
  .function = ip4_map_t_fragmented,
  .name = "ip4-map-t-fragmented",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE] = "ip6-load-balance",
      [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
  .function = ip4_map_t_icmp,
  .name = "ip4-map-t-icmp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_ICMP_NEXT_IP6_REWRITE] = "ip6-load-balance",
      [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
  .function = ip4_map_t_tcp_udp,
  .name = "ip4-map-t-tcp-udp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE] = "ip6-load-balance",
      [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_node) = {
  .function = ip4_map_t,
  .name = "ip4-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
      [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
      [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
      [IP4_MAPT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
      [IP4_MAPT_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */