FD.io VPP  v19.01.1-17-ge106252
Vector Packet Processing
ip4_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
19 
20 typedef enum
21 {
22  IP4_MAPT_NEXT_MAPT_TCP_UDP,
23  IP4_MAPT_NEXT_MAPT_ICMP,
24  IP4_MAPT_NEXT_MAPT_FRAGMENTED,
25  IP4_MAPT_NEXT_DROP,
26  IP4_MAPT_N_NEXT
27 } ip4_mapt_next_t;
28 
29 typedef enum
30 {
31  IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
32  IP4_MAPT_ICMP_NEXT_IP6_FRAG,
33  IP4_MAPT_ICMP_NEXT_DROP,
34  IP4_MAPT_ICMP_N_NEXT
35 } ip4_mapt_icmp_next_t;
36 
37 typedef enum
38 {
39  IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
40  IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
41  IP4_MAPT_TCP_UDP_NEXT_DROP,
42  IP4_MAPT_TCP_UDP_N_NEXT
43 } ip4_mapt_tcp_udp_next_t;
44 
45 typedef enum
46 {
47  IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
48  IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
49  IP4_MAPT_FRAGMENTED_NEXT_DROP,
50  IP4_MAPT_FRAGMENTED_N_NEXT
51 } ip4_mapt_fragmented_next_t;
52 
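/* Each per-protocol translation node can hand the packet to ip6-lookup,
 * divert it to IPv6 fragmentation when it exceeds the domain MTU, or drop
 * it; the corresponding next-node names are wired up in the
 * VLIB_REGISTER_NODE blocks at the end of this file. */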
53 //This is used to pass information within the buffer data;
54 //the buffer metadata structure is too small to carry structures this large.
55 /* *INDENT-OFF* */
56 typedef CLIB_PACKED (struct {
57  ip6_address_t daddr;
58  ip6_address_t saddr;
59  //The IPv6 header + fragmentation header will be built here:
60  //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4) = 40 + 8 - 20 = 28 bytes
61  u8 unused[28];
62 }) ip4_mapt_pseudo_header_t;
63 /* *INDENT-ON* */
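/* The ip4-map-t node prepends this pseudo-header in front of the IPv4
 * header after computing the MAP IPv6 source/destination addresses.
 * The per-protocol nodes (tcp-udp, icmp, fragmented) read the addresses,
 * skip the pseudo-header, and then build the IPv6 (and, for fragments,
 * the IPv6 fragmentation) header in place: the 28 unused bytes are exactly
 * the extra room an IPv6 + fragment header needs compared with the
 * 20-byte IPv4 header. */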
64 
65 
66 static_always_inline int
67 ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
68 {
69  u32 *ignore = NULL;
70  map_ip4_reass_lock ();
71  map_ip4_reass_t *r =
72  map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
73  ip4->fragment_id,
74  (ip4->protocol ==
75  IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
76  &ignore);
77  if (r)
78  r->port = port;
79 
80  map_ip4_reass_unlock ();
81  return !r;
82 }
83 
84 static_always_inline i32
85 ip4_map_fragment_get_port (ip4_header_t * ip4)
86 {
87  u32 *ignore = NULL;
88  map_ip4_reass_lock ();
89  map_ip4_reass_t *r =
90  map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
91  ip4->fragment_id,
92  (ip4->protocol ==
93  IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
94  &ignore);
95  i32 ret = r ? r->port : -1;
96  map_ip4_reass_unlock ();
97  return ret;
98 }
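/* MAP-T cannot translate a non-first fragment on its own because only the
 * first fragment carries the L4 header. ip4-map-t therefore caches the
 * destination port of the first fragment in the MAP reassembly table,
 * keyed by source, destination, fragment id and protocol; later fragments
 * of the same datagram look the port up here. A return value of -1 means
 * no cached entry exists and the fragment is dropped. */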
99 
100 typedef struct
101 {
102  map_domain_t *d;
103  u16 recv_port;
104 } icmp_to_icmp6_ctx_t;
105 
106 static int
107 ip4_to_ip6_set_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg)
108 {
109  icmp_to_icmp6_ctx_t *ctx = arg;
110 
111  ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
112  ip6->dst_address.as_u64[0] =
113  map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
114  ip6->dst_address.as_u64[1] =
115  map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
116 
117  return 0;
118 }
119 
120 static int
121 ip4_to_ip6_set_inner_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6,
122  void *arg)
123 {
124  icmp_to_icmp6_ctx_t *ctx = arg;
125 
126  //Note that the source address is within the domain
127  //while the destination address is the one outside the domain
128  ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &ip4->dst_address);
129  ip6->src_address.as_u64[0] =
130  map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
131  ip6->src_address.as_u64[1] =
132  map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
133 
134  return 0;
135 }
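/* icmp_to_icmp6() uses the first callback to rewrite the outer IP header
 * and the second one for the IPv4 header embedded in the ICMP payload,
 * where the source/destination roles are reversed: the address inside the
 * MAP domain is derived with map_get_pfx_net()/map_get_sfx_net() and the
 * address outside the domain is embedded with ip4_map_t_embedded_address(). */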
136 
137 static uword
138 ip4_map_t_icmp (vlib_main_t * vm,
139  vlib_node_runtime_t * node, vlib_frame_t * frame)
140 {
141  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
142  vlib_node_runtime_t *error_node =
143  vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
144  from = vlib_frame_vector_args (frame);
145  n_left_from = frame->n_vectors;
146  next_index = node->cached_next_index;
147  vlib_combined_counter_main_t *cm = map_main.domain_counters;
148  u32 thread_index = vm->thread_index;
149 
150  while (n_left_from > 0)
151  {
152  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
153 
154  while (n_left_from > 0 && n_left_to_next > 0)
155  {
156  u32 pi0;
157  vlib_buffer_t *p0;
158  ip4_mapt_icmp_next_t next0;
159  u8 error0;
160  map_domain_t *d0;
161  u16 len0;
162  icmp_to_icmp6_ctx_t ctx0;
163  ip4_header_t *ip40;
164 
165  next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
166  pi0 = to_next[0] = from[0];
167  from += 1;
168  n_left_from -= 1;
169  to_next += 1;
170  n_left_to_next -= 1;
171  error0 = MAP_ERROR_NONE;
172 
173  p0 = vlib_get_buffer (vm, pi0);
174  vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
175  len0 =
176  clib_net_to_host_u16 (((ip4_header_t *)
177  vlib_buffer_get_current (p0))->length);
178  d0 =
179  pool_elt_at_index (map_main.domains,
180  vnet_buffer (p0)->map_t.map_domain_index);
181 
182  ip40 = vlib_buffer_get_current (p0);
183  ctx0.recv_port = ip4_get_port (ip40, 1);
184  ctx0.d = d0;
185  if (ctx0.recv_port == 0)
186  {
187  // In case of 1:1 mapping, we don't care about the port
188  if (!(d0->ea_bits_len == 0 && d0->rules))
189  {
190  error0 = MAP_ERROR_ICMP;
191  goto err0;
192  }
193  }
194 
195  if (icmp_to_icmp6
196  (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
197  ip4_to_ip6_set_inner_icmp_cb, &ctx0))
198  {
199  error0 = MAP_ERROR_ICMP;
200  goto err0;
201  }
202 
203  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
204  {
205  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
206  vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
207  next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
208  }
209  err0:
210  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
211  {
212  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
213  thread_index,
214  vnet_buffer (p0)->
215  map_t.map_domain_index, 1,
216  len0);
217  }
218  else
219  {
220  next0 = IP4_MAPT_ICMP_NEXT_DROP;
221  }
222  p0->error = error_node->errors[error0];
223  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
224  to_next, n_left_to_next, pi0,
225  next0);
226  }
227  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
228  }
229  return frame->n_vectors;
230 }
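/* ICMP packets do not reuse the pseudo-header: the node skips it, rebuilds
 * the addresses from the MAP domain via the callbacks above, translates the
 * ICMP message with icmp_to_icmp6(), updates the per-domain TX counters and,
 * if the result exceeds the domain MTU, diverts it to IPv6 fragmentation. */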
231 
232 /*
233  * Translate fragmented IPv4 UDP/TCP packet to IPv6.
234  */
235 always_inline int
236 map_ip4_to_ip6_fragmented (vlib_buffer_t * p,
237  ip4_mapt_pseudo_header_t * pheader)
238 {
239  ip4_header_t *ip4;
240  ip6_header_t *ip6;
241  ip6_frag_hdr_t *frag;
242 
243  ip4 = vlib_buffer_get_current (p);
244  frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
245  ip6 =
246  (ip6_header_t *) u8_ptr_add (ip4,
247  sizeof (*ip4) - sizeof (*frag) -
248  sizeof (*ip6));
249  vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
250 
251  //We know that the protocol was one of ICMP, TCP or UDP
252  //because the first fragment was found and cached
253  frag->next_hdr =
254  (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol;
255  frag->identification = frag_id_4to6 (ip4->fragment_id);
256  frag->rsv = 0;
257  frag->fragment_offset_and_more =
258  ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip4),
259  clib_net_to_host_u16
260  (ip4->flags_and_fragment_offset) &
261  IP4_HEADER_FLAG_MORE_FRAGMENTS);
262 
263  ip6->ip_version_traffic_class_and_flow_label =
264  clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
265  ip6->payload_length =
266  clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
267  sizeof (*ip4) + sizeof (*frag));
268  ip6->hop_limit = ip4->ttl;
269  ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
270 
271  ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
272  ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
273  ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
274  ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
275 
276  return 0;
277 }
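/* The rewrite happens in place: the 20-byte IPv4 header is replaced by a
 * 40-byte IPv6 header plus an 8-byte fragmentation header, so the buffer
 * is advanced by sizeof(ip4) - sizeof(ip6) - sizeof(frag) = 20 - 40 - 8 =
 * -28 bytes (into the pseudo-header padding) and the IPv6 payload length
 * is the IPv4 total length minus 20 plus the 8-byte fragment header. */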
278 
279 static uword
280 ip4_map_t_fragmented (vlib_main_t * vm,
281  vlib_node_runtime_t * node, vlib_frame_t * frame)
282 {
283  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
284  from = vlib_frame_vector_args (frame);
285  n_left_from = frame->n_vectors;
286  next_index = node->cached_next_index;
287  vlib_node_runtime_t *error_node =
288  vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);
289 
290  while (n_left_from > 0)
291  {
292  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
293 
294  while (n_left_from > 0 && n_left_to_next > 0)
295  {
296  u32 pi0;
297  vlib_buffer_t *p0;
298  ip4_mapt_pseudo_header_t *pheader0;
299  ip4_mapt_fragmented_next_t next0;
300 
301  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
302  pi0 = to_next[0] = from[0];
303  from += 1;
304  n_left_from -= 1;
305  to_next += 1;
306  n_left_to_next -= 1;
307 
308  p0 = vlib_get_buffer (vm, pi0);
309 
310  //Accessing pseudo header
311  pheader0 = vlib_buffer_get_current (p0);
312  vlib_buffer_advance (p0, sizeof (*pheader0));
313 
314  if (map_ip4_to_ip6_fragmented (p0, pheader0))
315  {
316  p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
317  next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
318  }
319  else
320  {
321  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
322  {
323  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
324  vnet_buffer (p0)->ip_frag.next_index =
325  IP6_FRAG_NEXT_IP6_LOOKUP;
326  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
327  }
328  }
329 
330  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
331  to_next, n_left_to_next, pi0,
332  next0);
333  }
334  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
335  }
336  return frame->n_vectors;
337 }
338 
339 /*
340  * Translate IPv4 UDP/TCP packet to IPv6.
341  */
342 always_inline int
343 map_ip4_to_ip6_tcp_udp (vlib_buffer_t * p, ip4_mapt_pseudo_header_t * pheader)
344 {
345  map_main_t *mm = &map_main;
346  ip4_header_t *ip4;
347  ip6_header_t *ip6;
348  ip_csum_t csum;
349  u16 *checksum;
350  ip6_frag_hdr_t *frag;
351  u32 frag_id;
352  ip4_address_t old_src, old_dst;
353 
354  ip4 = vlib_buffer_get_current (p);
355 
356  if (ip4->protocol == IP_PROTOCOL_UDP)
357  {
358  udp_header_t *udp = ip4_next_header (ip4);
359  checksum = &udp->checksum;
360 
361  /*
362  * The UDP checksum is optional over IPv4 but mandatory for IPv6. We
363  * do not sanity-check udp->length but use our safely computed
364  * value instead.
365  */
366  if (PREDICT_FALSE (!*checksum))
367  {
368  u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
369  csum = ip_incremental_checksum (0, udp, udp_len);
370  csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
371  csum =
372  ip_csum_with_carry (csum, clib_host_to_net_u16 (IP_PROTOCOL_UDP));
373  csum = ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
374  *checksum = ~ip_csum_fold (csum);
375  }
376  }
377  else
378  {
379  tcp_header_t *tcp = ip4_next_header (ip4);
380  if (mm->tcp_mss > 0)
381  {
382  csum = tcp->checksum;
383  map_mss_clamping (tcp, &csum, mm->tcp_mss);
384  tcp->checksum = ip_csum_fold (csum);
385  }
386  checksum = &tcp->checksum;
387  }
388 
389  old_src.as_u32 = ip4->src_address.as_u32;
390  old_dst.as_u32 = ip4->dst_address.as_u32;
391 
392  /* Deal with fragmented packets */
393  if (PREDICT_FALSE (ip4->flags_and_fragment_offset &
394  clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
395  {
396  ip6 =
397  (ip6_header_t *) u8_ptr_add (ip4,
398  sizeof (*ip4) - sizeof (*ip6) -
399  sizeof (*frag));
400  frag =
401  (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
402  frag_id = frag_id_4to6 (ip4->fragment_id);
403  vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
404  }
405  else
406  {
407  ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
408  vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
409  frag = NULL;
410  }
411 
412  ip6->ip_version_traffic_class_and_flow_label =
413  clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
414  ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
415  ip6->hop_limit = ip4->ttl;
416  ip6->protocol = ip4->protocol;
417  if (PREDICT_FALSE (frag != NULL))
418  {
419  frag->next_hdr = ip6->protocol;
420  frag->identification = frag_id;
421  frag->rsv = 0;
422  frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more (0, 1);
423  ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
424  ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
425  }
426 
427  ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
428  ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
429  ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
430  ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
431 
432  csum = ip_csum_sub_even (*checksum, old_src.as_u32);
433  csum = ip_csum_sub_even (csum, old_dst.as_u32);
434  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
435  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
436  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
437  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
438  *checksum = ip_csum_fold (csum);
439 
440  return 0;
441 }
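/* Only the pseudo-header addresses change in the TCP/UDP checksum, so it is
 * patched incrementally: the two IPv4 addresses are subtracted and the four
 * 64-bit halves of the new IPv6 addresses are added, avoiding a recompute
 * over the payload. A zero (absent) IPv4 UDP checksum is first materialized
 * above, since the checksum is mandatory with IPv6. */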
442 
443 static uword
444 ip4_map_t_tcp_udp (vlib_main_t * vm,
445  vlib_node_runtime_t * node, vlib_frame_t * frame)
446 {
447  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
448  from = vlib_frame_vector_args (frame);
449  n_left_from = frame->n_vectors;
450  next_index = node->cached_next_index;
451  vlib_node_runtime_t *error_node =
452  vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);
453 
454 
455  while (n_left_from > 0)
456  {
457  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
458 
459  while (n_left_from > 0 && n_left_to_next > 0)
460  {
461  u32 pi0;
462  vlib_buffer_t *p0;
463  ip4_mapt_pseudo_header_t *pheader0;
464  ip4_mapt_tcp_udp_next_t next0;
465 
466  pi0 = to_next[0] = from[0];
467  from += 1;
468  n_left_from -= 1;
469  to_next += 1;
470  n_left_to_next -= 1;
471 
472  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
473  p0 = vlib_get_buffer (vm, pi0);
474 
475  //Accessing pseudo header
476  pheader0 = vlib_buffer_get_current (p0);
477  vlib_buffer_advance (p0, sizeof (*pheader0));
478 
479  if (map_ip4_to_ip6_tcp_udp (p0, pheader0))
480  {
481  p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
482  next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
483  }
484  else
485  {
486  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
487  {
488  //Send to fragmentation node if necessary
489  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
490  vnet_buffer (p0)->ip_frag.next_index =
491  IP6_FRAG_NEXT_IP6_LOOKUP;
492  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
493  }
494  }
495  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
496  to_next, n_left_to_next, pi0,
497  next0);
498  }
499  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
500  }
501 
502  return frame->n_vectors;
503 }
504 
505 static_always_inline void
506 ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
507  ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
508  u8 * error0, ip4_mapt_next_t * next0)
509 {
510  if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
511  {
512  *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
513  if (d0->ea_bits_len == 0 && d0->rules)
514  {
515  *dst_port0 = 0;
516  }
517  else
518  {
519  *dst_port0 = ip4_map_fragment_get_port (ip40);
520  *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
521  }
522  }
523  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
524  {
525  vnet_buffer (p0)->map_t.checksum_offset = 36;
526  *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
527  *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
528  *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
529  }
530  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
531  {
532  vnet_buffer (p0)->map_t.checksum_offset = 26;
533  *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
534  *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
535  *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
536  }
537  else if (ip40->protocol == IP_PROTOCOL_ICMP)
538  {
539  *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
540  if (d0->ea_bits_len == 0 && d0->rules)
541  *dst_port0 = 0;
542  else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
543  == ICMP4_echo_reply
544  || ((icmp46_header_t *)
545  u8_ptr_add (ip40,
546  sizeof (*ip40)))->code == ICMP4_echo_request)
547  *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
548  }
549  else
550  {
551  *error0 = MAP_ERROR_BAD_PROTOCOL;
552  }
553 }
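/* checksum_offset is measured from the start of the IPv4 header: 36 is the
 * 20-byte header plus the TCP checksum at offset 16, 26 is the 20-byte
 * header plus the UDP checksum at offset 6. The destination port sits at
 * offset 2 of either L4 header, hence sizeof(*ip40) + 2; the length checks
 * (40 and 28 bytes) make sure those fields are actually present. */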
554 
555 static uword
556 ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
557 {
558  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
559  vlib_node_runtime_t *error_node =
560  vlib_node_get_runtime (vm, ip4_map_t_node.index);
561  from = vlib_frame_vector_args (frame);
562  n_left_from = frame->n_vectors;
563  next_index = node->cached_next_index;
564  vlib_combined_counter_main_t *cm = map_main.domain_counters;
565  u32 thread_index = vm->thread_index;
566 
567  while (n_left_from > 0)
568  {
569  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
570 
571  while (n_left_from > 0 && n_left_to_next > 0)
572  {
573  u32 pi0;
574  vlib_buffer_t *p0;
575  ip4_header_t *ip40;
576  map_domain_t *d0;
577  ip4_mapt_next_t next0 = 0;
578  u16 ip4_len0;
579  u8 error0;
580  i32 dst_port0;
581  ip4_mapt_pseudo_header_t *pheader0;
582 
583  pi0 = to_next[0] = from[0];
584  from += 1;
585  n_left_from -= 1;
586  to_next += 1;
587  n_left_to_next -= 1;
588  error0 = MAP_ERROR_NONE;
589 
590  p0 = vlib_get_buffer (vm, pi0);
591  ip40 = vlib_buffer_get_current (p0);
592  ip4_len0 = clib_host_to_net_u16 (ip40->length);
593  if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
594  ip40->ip_version_and_header_length != 0x45))
595  {
596  error0 = MAP_ERROR_UNKNOWN;
597  }
598 
599  d0 = ip4_map_get_domain (&ip40->dst_address,
600  &vnet_buffer (p0)->map_t.map_domain_index,
601  &error0);
602 
603  if (!d0)
604  { /* Guess it wasn't for us */
605  vnet_feature_next (&next0, p0);
606  goto exit;
607  }
608 
609  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
610 
611  dst_port0 = -1;
612  ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
613  &next0);
614 
615  /* Verify that port is not among the well-known ports */
616  if ((d0->psid_length > 0 && d0->psid_offset > 0)
617  && (clib_net_to_host_u16 (dst_port0) <
618  (0x1 << (16 - d0->psid_offset))))
619  {
620  error0 = MAP_ERROR_SEC_CHECK;
621  }
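 /* Example: with a non-zero psid_length and psid_offset = 6, the check
  * rejects any destination port below 2^(16 - 6) = 1024, i.e. the
  * well-known/system range that is never assigned to a MAP CE. */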
622 
623  //Add MAP-T pseudo header in front of the packet
624  vlib_buffer_advance (p0, -sizeof (*pheader0));
625  pheader0 = vlib_buffer_get_current (p0);
626 
627  //Save addresses within the packet
628  ip4_map_t_embedded_address (d0, &pheader0->saddr,
629  &ip40->src_address);
630  pheader0->daddr.as_u64[0] =
631  map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
632  pheader0->daddr.as_u64[1] =
633  map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
634 
635  // It is important to cache at this stage because the result
636  // might be necessary for packets within the same vector.
637  // Actually, this approach even provides some limited
638  // out-of-order fragment support.
639  if (PREDICT_FALSE
640  (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
641  && (d0->ea_bits_len != 0 || !d0->rules)
642  && ip4_map_fragment_cache (ip40, dst_port0)))
643  {
644  error0 = MAP_ERROR_UNKNOWN;
645  }
646 
647  if (PREDICT_TRUE
648  (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
649  {
650  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
651  thread_index,
652  vnet_buffer (p0)->
653  map_t.map_domain_index, 1,
654  clib_net_to_host_u16
655  (ip40->length));
656  }
657 
658  next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
659  p0->error = error_node->errors[error0];
660  exit:
661  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
662  to_next, n_left_to_next, pi0,
663  next0);
664  }
665  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
666  }
667  return frame->n_vectors;
668 }
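/* ip4-map-t is installed on the ip4-unicast feature arc (see
 * VNET_FEATURE_INIT below): it validates the IPv4 header, looks up the MAP
 * domain, classifies the packet, prepends the pseudo-header with the
 * translated addresses and hands the packet to the protocol-specific
 * translation node registered below (or drops it on error). */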
669 
670 static char *map_t_error_strings[] = {
671 #define _(sym,string) string,
672  foreach_map_error
673 #undef _
674 };
675 
676 VNET_FEATURE_INIT (ip4_map_t_feature, static) =
677 {
678 .arc_name = "ip4-unicast",.node_name = "ip4-map-t",.runs_before =
679  VNET_FEATURES ("ip4-flow-classify"),};
680 
681 /* *INDENT-OFF* */
682 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
683  .function = ip4_map_t_fragmented,
684  .name = "ip4-map-t-fragmented",
685  .vector_size = sizeof(u32),
686  .format_trace = format_map_trace,
687  .type = VLIB_NODE_TYPE_INTERNAL,
688 
689  .n_errors = MAP_N_ERROR,
690  .error_strings = map_t_error_strings,
691 
692  .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
693  .next_nodes = {
694  [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
695  [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
696  [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
697  },
698 };
699 /* *INDENT-ON* */
700 
701 /* *INDENT-OFF* */
702 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
703  .function = ip4_map_t_icmp,
704  .name = "ip4-map-t-icmp",
705  .vector_size = sizeof(u32),
706  .format_trace = format_map_trace,
707  .type = VLIB_NODE_TYPE_INTERNAL,
708 
709  .n_errors = MAP_N_ERROR,
710  .error_strings = map_t_error_strings,
711 
712  .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
713  .next_nodes = {
714  [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
715  [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
716  [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
717  },
718 };
719 /* *INDENT-ON* */
720 
721 /* *INDENT-OFF* */
722 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
723  .function = ip4_map_t_tcp_udp,
724  .name = "ip4-map-t-tcp-udp",
725  .vector_size = sizeof(u32),
726  .format_trace = format_map_trace,
727  .type = VLIB_NODE_TYPE_INTERNAL,
728 
729  .n_errors = MAP_N_ERROR,
730  .error_strings = map_t_error_strings,
731 
732  .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
733  .next_nodes = {
734  [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
735  [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
736  [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
737  },
738 };
739 /* *INDENT-ON* */
740 
741 /* *INDENT-OFF* */
742 VLIB_REGISTER_NODE(ip4_map_t_node) = {
743  .function = ip4_map_t,
744  .name = "ip4-map-t",
745  .vector_size = sizeof(u32),
746  .format_trace = format_map_trace,
747  .type = VLIB_NODE_TYPE_INTERNAL,
748 
749  .n_errors = MAP_N_ERROR,
750  .error_strings = map_t_error_strings,
751 
752  .n_next_nodes = IP4_MAPT_N_NEXT,
753  .next_nodes = {
754  [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
755  [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
756  [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
757  [IP4_MAPT_NEXT_DROP] = "error-drop",
758  },
759 };
760 /* *INDENT-ON* */
761 
762 /*
763  * fd.io coding-style-patch-verification: ON
764  *
765  * Local Variables:
766  * eval: (c-set-style "gnu")
767  * End:
768  */
Definition: ip6_packet.h:378