FD.io VPP  v18.01-8-g0eacf49
Vector Packet Processing
ip6_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include "../ip/ip_frag.h"
18 #include <vnet/ip/ip6_to_ip4.h>
19 #include <vnet/ip/ip4_to_ip6.h>
20 
21 #define IP6_MAP_T_DUAL_LOOP
22 
23 typedef enum
24 {
25   IP6_MAPT_NEXT_MAPT_TCP_UDP,
26   IP6_MAPT_NEXT_MAPT_ICMP,
27   IP6_MAPT_NEXT_MAPT_FRAGMENTED,
28   IP6_MAPT_NEXT_DROP,
29   IP6_MAPT_N_NEXT
30 } ip6_mapt_next_t;
31 
32 typedef enum
33 {
34   IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
35   IP6_MAPT_ICMP_NEXT_IP4_FRAG,
36   IP6_MAPT_ICMP_NEXT_DROP,
37   IP6_MAPT_ICMP_N_NEXT
38 } ip6_mapt_icmp_next_t;
39 
40 typedef enum
41 {
42   IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
43   IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
44   IP6_MAPT_TCP_UDP_NEXT_DROP,
45   IP6_MAPT_TCP_UDP_N_NEXT
46 } ip6_mapt_tcp_udp_next_t;
47 
48 typedef enum
49 {
50   IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
51   IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
52   IP6_MAPT_FRAGMENTED_NEXT_DROP,
53   IP6_MAPT_FRAGMENTED_N_NEXT
54 } ip6_mapt_fragmented_next_t;
55 
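/*
 * Fragment handling helpers: when the first fragment of a flow is translated,
 * ip6_map_fragment_cache() stores the source port in the MAP IPv4 reassembly
 * tracker (keyed on the translated addresses, fragment id and protocol) so
 * that later fragments, which carry no L4 header, can be matched by
 * ip6_map_fragment_get().
 */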
56 static_always_inline int
57 ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
58  map_domain_t * d, u16 port)
59 {
60  u32 *ignore = NULL;
61   map_ip4_reass_lock ();
62   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address),
63   ip6_map_t_embedded_address (d,
64   &ip6->
65  dst_address),
66  frag_id_6to4 (frag->identification),
67  (ip6->protocol ==
68  IP_PROTOCOL_ICMP6) ?
69  IP_PROTOCOL_ICMP : ip6->protocol,
70  &ignore);
71  if (r)
72  r->port = port;
73 
74   map_ip4_reass_unlock ();
75   return !r;
76 }
77 
78 /* Returns the associated port or -1 */
79 static_always_inline i32
80 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
81  map_domain_t * d)
82 {
83  u32 *ignore = NULL;
84   map_ip4_reass_lock ();
85   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address),
86   ip6_map_t_embedded_address (d,
87   &ip6->
88  dst_address),
89  frag_id_6to4 (frag->identification),
90  (ip6->protocol ==
91  IP_PROTOCOL_ICMP6) ?
92  IP_PROTOCOL_ICMP : ip6->protocol,
93  &ignore);
94  i32 ret = r ? r->port : -1;
95   map_ip4_reass_unlock ();
96   return ret;
97 }
98 
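/*
 * Context handed to the ICMP translation callbacks below: the MAP domain and
 * the sender's port, used to verify the embedded IPv6 addresses against the
 * domain mapping and to derive the translated IPv4 addresses.
 */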
99 typedef struct
100 {
101   map_domain_t *d;
102   u16 sender_port;
103 } icmp6_to_icmp_ctx_t;
104 
105 static int
106 ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
107 {
108  icmp6_to_icmp_ctx_t *ctx = arg;
109  u32 ip4_sadr;
110 
111  //Security check
112  //Note that this prevents an intermediate IPv6 router from answering the request
113  ip4_sadr = map_get_ip4 (&ip6->src_address);
114  if (ip6->src_address.as_u64[0] !=
115  map_get_pfx_net (ctx->d, ip4_sadr, ctx->sender_port)
116  || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
117  ctx->sender_port))
118  return -1;
119 
120  ip4->dst_address.as_u32 =
121   ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
122   ip4->src_address.as_u32 = ip4_sadr;
123 
124  return 0;
125 }
126 
127 static int
128 ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
129   void *arg)
130 {
131  icmp6_to_icmp_ctx_t *ctx = arg;
132  u32 inner_ip4_dadr;
133 
134  //Security check of inner packet
135  inner_ip4_dadr = map_get_ip4 (&ip6->dst_address);
136  if (ip6->dst_address.as_u64[0] !=
137  map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->sender_port)
138  || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
139  inner_ip4_dadr,
140  ctx->sender_port))
141  return -1;
142 
143  ip4->dst_address.as_u32 = inner_ip4_dadr;
144  ip4->src_address.as_u32 =
145   ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
146 
147  return 0;
148 }
149 
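/*
 * ip6-map-t-icmp node: translates ICMPv6 messages to ICMPv4 via
 * icmp6_to_icmp() and the callbacks above, counts accepted packets per
 * domain, and diverts packets larger than the domain MTU to the IPv4
 * fragmentation node.
 */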
150 static uword
151 ip6_map_t_icmp (vlib_main_t * vm,
152   vlib_node_runtime_t * node, vlib_frame_t * frame)
153 {
154  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
155  vlib_node_runtime_t *error_node =
156   vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
157   from = vlib_frame_vector_args (frame);
158  n_left_from = frame->n_vectors;
159  next_index = node->cached_next_index;
160   vlib_combined_counter_main_t *cm = map_main.domain_counters;
161   u32 thread_index = vlib_get_thread_index ();
162 
163  while (n_left_from > 0)
164  {
165  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
166 
167  while (n_left_from > 0 && n_left_to_next > 0)
168  {
169  u32 pi0;
170  vlib_buffer_t *p0;
171  u8 error0;
172  ip6_mapt_icmp_next_t next0;
173  map_domain_t *d0;
174  u16 len0;
175  icmp6_to_icmp_ctx_t ctx0;
176  ip6_header_t *ip60;
177 
178  pi0 = to_next[0] = from[0];
179  from += 1;
180  n_left_from -= 1;
181  to_next += 1;
182  n_left_to_next -= 1;
183  error0 = MAP_ERROR_NONE;
184   next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;
185 
186  p0 = vlib_get_buffer (vm, pi0);
187  ip60 = vlib_buffer_get_current (p0);
188  len0 = clib_net_to_host_u16 (ip60->payload_length);
189  d0 =
190   pool_elt_at_index (map_main.domains,
191   vnet_buffer (p0)->map_t.map_domain_index);
192  ctx0.sender_port = ip6_get_port (ip60, 0, p0->current_length);
193  ctx0.d = d0;
194  if (ctx0.sender_port == 0)
195  {
196  // In case of 1:1 mapping, we don't care about the port
197  if (!(d0->ea_bits_len == 0 && d0->rules))
198  {
199  error0 = MAP_ERROR_ICMP;
200  goto err0;
201  }
202  }
203 
204  if (icmp6_to_icmp
205  (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
206   ip6_to_ip4_set_inner_icmp_cb, &ctx0))
207   {
208  error0 = MAP_ERROR_ICMP;
209  goto err0;
210  }
211 
212  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
213  {
214  //Send to fragmentation node if necessary
215  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
216  vnet_buffer (p0)->ip_frag.header_offset = 0;
217  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
218   next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
219   }
220  err0:
221  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
222  {
223   vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
224   thread_index,
225  vnet_buffer (p0)->
226  map_t.map_domain_index, 1,
227  len0);
228  }
229  else
230  {
231  next0 = IP6_MAPT_ICMP_NEXT_DROP;
232  }
233 
234  p0->error = error_node->errors[error0];
235  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
236  to_next, n_left_to_next, pi0,
237  next0);
238  }
239  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
240  }
241  return frame->n_vectors;
242 }
243 
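/*
 * Address-set callback for the TCP/UDP and fragmented paths: the translated
 * IPv4 addresses were already computed by the ip6-map-t classifier and
 * stashed in the buffer metadata, so they are simply copied into the new
 * IPv4 header.
 */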
244 static int
245 ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
246 {
247  vlib_buffer_t *p = ctx;
248 
249  ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
250  ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;
251 
252  return 0;
253 }
254 
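/*
 * ip6-map-t-fragmented node: translates non-first IPv6 fragments to IPv4
 * using the addresses stored in the buffer metadata, dropping fragments that
 * cannot be translated and fragmenting further when the result exceeds the
 * domain MTU.
 */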
255 static uword
256 ip6_map_t_fragmented (vlib_main_t * vm,
257   vlib_node_runtime_t * node, vlib_frame_t * frame)
258 {
259  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
260  from = vlib_frame_vector_args (frame);
261  n_left_from = frame->n_vectors;
262  next_index = node->cached_next_index;
263  vlib_node_runtime_t *error_node =
264   vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);
265 
266  while (n_left_from > 0)
267  {
268  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
269 
270 #ifdef IP6_MAP_T_DUAL_LOOP
271  while (n_left_from >= 4 && n_left_to_next >= 2)
272  {
273  u32 pi0, pi1;
274  vlib_buffer_t *p0, *p1;
275  u32 next0, next1;
276 
277  pi0 = to_next[0] = from[0];
278  pi1 = to_next[1] = from[1];
279  from += 2;
280  n_left_from -= 2;
281  to_next += 2;
282  n_left_to_next -= 2;
283 
284   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
285   next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
286   p0 = vlib_get_buffer (vm, pi0);
287  p1 = vlib_get_buffer (vm, pi1);
288 
289   if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
290   {
291  p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
292   next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
293   }
294  else
295  {
296  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
297  {
298  //Send to fragmentation node if necessary
299  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
300  vnet_buffer (p0)->ip_frag.header_offset = 0;
301  vnet_buffer (p0)->ip_frag.next_index =
302   IP4_FRAG_NEXT_IP4_LOOKUP;
303   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
304   }
305  }
306 
307   if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
308   {
309  p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
310   next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
311   }
312  else
313  {
314  if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
315  {
316  //Send to fragmentation node if necessary
317  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
318  vnet_buffer (p1)->ip_frag.header_offset = 0;
319  vnet_buffer (p1)->ip_frag.next_index =
320   IP4_FRAG_NEXT_IP4_LOOKUP;
321   next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
322   }
323  }
324 
325  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
326  to_next, n_left_to_next, pi0, pi1,
327  next0, next1);
328  }
329 #endif
330 
331  while (n_left_from > 0 && n_left_to_next > 0)
332  {
333  u32 pi0;
334  vlib_buffer_t *p0;
335  u32 next0;
336 
337  pi0 = to_next[0] = from[0];
338  from += 1;
339  n_left_from -= 1;
340  to_next += 1;
341  n_left_to_next -= 1;
342 
343   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
344   p0 = vlib_get_buffer (vm, pi0);
345 
346   if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
347   {
348  p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
349   next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
350   }
351  else
352  {
353  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
354  {
355  //Send to fragmentation node if necessary
356  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
357  vnet_buffer (p0)->ip_frag.header_offset = 0;
358  vnet_buffer (p0)->ip_frag.next_index =
359   IP4_FRAG_NEXT_IP4_LOOKUP;
360   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
361   }
362  }
363 
364  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
365  to_next, n_left_to_next, pi0,
366  next0);
367  }
368  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
369  }
370  return frame->n_vectors;
371 }
372 
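/*
 * ip6-map-t-tcp-udp node: rewrites the IPv6 header of TCP and UDP packets to
 * IPv4 via ip6_to_ip4_tcp_udp() (with checksum update) and forwards to
 * ip4-lookup, or to the IPv4 fragmentation node when the packet exceeds the
 * domain MTU.
 */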
373 static uword
374 ip6_map_t_tcp_udp (vlib_main_t * vm,
375   vlib_node_runtime_t * node, vlib_frame_t * frame)
376 {
377  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
378  vlib_node_runtime_t *error_node =
379   vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);
380 
381  from = vlib_frame_vector_args (frame);
382  n_left_from = frame->n_vectors;
383  next_index = node->cached_next_index;
384  while (n_left_from > 0)
385  {
386  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
387 
388 #ifdef IP6_MAP_T_DUAL_LOOP
389  while (n_left_from >= 4 && n_left_to_next >= 2)
390  {
391  u32 pi0, pi1;
392  vlib_buffer_t *p0, *p1;
393  ip6_mapt_tcp_udp_next_t next0, next1;
394 
395  pi0 = to_next[0] = from[0];
396  pi1 = to_next[1] = from[1];
397  from += 2;
398  n_left_from -= 2;
399  to_next += 2;
400  n_left_to_next -= 2;
401   next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
402   next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
403 
404  p0 = vlib_get_buffer (vm, pi0);
405  p1 = vlib_get_buffer (vm, pi1);
406 
407  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
408  {
409  p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
410   next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
411   }
412  else
413  {
414  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
415  {
416  //Send to fragmentation node if necessary
417  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
418  vnet_buffer (p0)->ip_frag.header_offset = 0;
419  vnet_buffer (p0)->ip_frag.next_index =
420   IP4_FRAG_NEXT_IP4_LOOKUP;
421   next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
422   }
423  }
424 
425  if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
426  {
427  p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
428   next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
429   }
430  else
431  {
432  if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
433  {
434  //Send to fragmentation node if necessary
435  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
436  vnet_buffer (p1)->ip_frag.header_offset = 0;
437  vnet_buffer (p1)->ip_frag.next_index =
438   IP4_FRAG_NEXT_IP4_LOOKUP;
439   next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
440   }
441  }
442 
443  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
444  n_left_to_next, pi0, pi1, next0,
445  next1);
446  }
447 #endif
448 
449  while (n_left_from > 0 && n_left_to_next > 0)
450  {
451  u32 pi0;
452  vlib_buffer_t *p0;
453   ip6_mapt_tcp_udp_next_t next0;
454 
455  pi0 = to_next[0] = from[0];
456  from += 1;
457  n_left_from -= 1;
458  to_next += 1;
459  n_left_to_next -= 1;
460   next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
461 
462  p0 = vlib_get_buffer (vm, pi0);
463 
464  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
465  {
466  p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
467   next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
468   }
469  else
470  {
471  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
472  {
473  //Send to fragmentation node if necessary
474  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
475  vnet_buffer (p0)->ip_frag.header_offset = 0;
476  vnet_buffer (p0)->ip_frag.next_index =
477   IP4_FRAG_NEXT_IP4_LOOKUP;
478   next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
479   }
480  }
481 
482  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
483  to_next, n_left_to_next, pi0,
484  next0);
485  }
486  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
487  }
488  return frame->n_vectors;
489 }
490 
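/*
 * Classify an incoming IPv6 packet: extract the source port (or ICMP echo
 * id), select the next node (tcp-udp, icmp or fragmented translation) and
 * flag malformed packets or unsupported protocols.
 */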
491 static_always_inline void
492 ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
493   map_domain_t * d0, i32 * src_port0,
494  u8 * error0, ip6_mapt_next_t * next0,
495  u32 l4_len0, ip6_frag_hdr_t * frag0)
496 {
497  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
498  ip6_frag_hdr_offset (frag0)))
499  {
500   *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
501   if (d0->ea_bits_len == 0 && d0->rules)
502  {
503  *src_port0 = 0;
504  }
505  else
506  {
507  *src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
508  *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
509  }
510  }
511  else
512  if (PREDICT_TRUE
513  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
514  {
515  *error0 =
516  l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
517  vnet_buffer (p0)->map_t.checksum_offset =
518  vnet_buffer (p0)->map_t.v6.l4_offset + 16;
519   *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
520   *src_port0 =
521  (i32) *
522  ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
523  }
524  else
525  if (PREDICT_TRUE
526  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
527  {
528  *error0 =
529  l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
530  vnet_buffer (p0)->map_t.checksum_offset =
531  vnet_buffer (p0)->map_t.v6.l4_offset + 6;
532   *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
533   *src_port0 =
534  (i32) *
535  ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
536  }
537  else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
538  {
539  *error0 =
540  l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
541  *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
542  if (d0->ea_bits_len == 0 && d0->rules)
543  {
544  *src_port0 = 0;
545  }
546  else
547  if (((icmp46_header_t *)
548  u8_ptr_add (ip60,
549  vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
550  ICMP6_echo_reply
551  || ((icmp46_header_t *)
552  u8_ptr_add (ip60,
553  vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
554  ICMP6_echo_request)
555  {
556  *src_port0 =
557  (i32) *
558  ((u16 *)
559  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
560  }
561  }
562  else
563  {
564  //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
565  *error0 = MAP_ERROR_BAD_PROTOCOL;
566  }
567 }
568 
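/*
 * ip6-map-t node: entry point of the IPv6-to-IPv4 direction of the MAP-T
 * translator.  Looks up the MAP domain, computes the translated IPv4
 * addresses, enforces the source-address security check, caches the port of
 * first fragments and dispatches to the per-protocol translation nodes.
 */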
569 static uword
570 ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
571 {
572  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
573  vlib_node_runtime_t *error_node =
574   vlib_node_get_runtime (vm, ip6_map_t_node.index);
575   vlib_combined_counter_main_t *cm = map_main.domain_counters;
576   u32 thread_index = vlib_get_thread_index ();
577 
578  from = vlib_frame_vector_args (frame);
579  n_left_from = frame->n_vectors;
580  next_index = node->cached_next_index;
581  while (n_left_from > 0)
582  {
583  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
584 
585 #ifdef IP6_MAP_T_DUAL_LOOP
586  while (n_left_from >= 4 && n_left_to_next >= 2)
587  {
588  u32 pi0, pi1;
589  vlib_buffer_t *p0, *p1;
590  ip6_header_t *ip60, *ip61;
591  u8 error0, error1;
592  ip6_mapt_next_t next0, next1;
593  u32 l4_len0, l4_len1;
594  i32 src_port0, src_port1;
595  map_domain_t *d0, *d1;
596  ip6_frag_hdr_t *frag0, *frag1;
597  u32 saddr0, saddr1;
598  next0 = next1 = 0; //Because compiler whines
599 
600  pi0 = to_next[0] = from[0];
601  pi1 = to_next[1] = from[1];
602  from += 2;
603  n_left_from -= 2;
604  to_next += 2;
605  n_left_to_next -= 2;
606 
607  error0 = MAP_ERROR_NONE;
608  error1 = MAP_ERROR_NONE;
609 
610  p0 = vlib_get_buffer (vm, pi0);
611  p1 = vlib_get_buffer (vm, pi1);
612  ip60 = vlib_buffer_get_current (p0);
613  ip61 = vlib_buffer_get_current (p1);
614 
615  saddr0 = map_get_ip4 (&ip60->src_address);
616  saddr1 = map_get_ip4 (&ip61->src_address);
617  d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
618  (ip4_address_t *) & saddr0,
619  &vnet_buffer (p0)->map_t.map_domain_index,
620  &error0);
621  d1 =
622  ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
623  (ip4_address_t *) & saddr1,
624  &vnet_buffer (p1)->map_t.map_domain_index,
625  &error1);
626 
627  vnet_buffer (p0)->map_t.v6.saddr = saddr0;
628  vnet_buffer (p1)->map_t.v6.saddr = saddr1;
629  vnet_buffer (p0)->map_t.v6.daddr =
630   ip6_map_t_embedded_address (d0, &ip60->dst_address);
631   vnet_buffer (p1)->map_t.v6.daddr =
632   ip6_map_t_embedded_address (d1, &ip61->dst_address);
633   vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
634  vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
635 
636  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
637  &(vnet_buffer (p0)->map_t.
638  v6.l4_protocol),
639  &(vnet_buffer (p0)->map_t.
640  v6.l4_offset),
641  &(vnet_buffer (p0)->map_t.
642  v6.frag_offset))))
643  {
644  error0 = MAP_ERROR_MALFORMED;
645  next0 = IP6_MAPT_NEXT_DROP;
646  }
647 
648  if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
649  &(vnet_buffer (p1)->map_t.
650  v6.l4_protocol),
651  &(vnet_buffer (p1)->map_t.
652  v6.l4_offset),
653  &(vnet_buffer (p1)->map_t.
654  v6.frag_offset))))
655  {
656  error1 = MAP_ERROR_MALFORMED;
657  next1 = IP6_MAPT_NEXT_DROP;
658  }
659 
660  src_port0 = src_port1 = -1;
661  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
662  sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
663  l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
664  sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
665  frag0 =
666  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
667  vnet_buffer (p0)->map_t.
668  v6.frag_offset);
669  frag1 =
670  (ip6_frag_hdr_t *) u8_ptr_add (ip61,
671  vnet_buffer (p1)->map_t.
672  v6.frag_offset);
673 
674  ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0,
675  l4_len0, frag0);
676  ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1,
677  l4_len1, frag1);
678 
679  if (PREDICT_FALSE
680  ((src_port0 != -1)
681  && (ip60->src_address.as_u64[0] !=
682  map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
683  src_port0)
684  || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
685   vnet_buffer
686   (p0)->map_t.v6.saddr,
687  src_port0))))
688  {
689  error0 = MAP_ERROR_SEC_CHECK;
690  }
691 
692  if (PREDICT_FALSE
693  ((src_port1 != -1)
694  && (ip61->src_address.as_u64[0] !=
695  map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
696  src_port1)
697  || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
698   vnet_buffer
699   (p1)->map_t.v6.saddr,
700  src_port1))))
701  {
702  error1 = MAP_ERROR_SEC_CHECK;
703  }
704 
705  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
706  !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
707  u8_ptr_add (ip60,
708   vnet_buffer
709   (p0)->map_t.
710  v6.frag_offset)))
711  && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
712  && (error0 == MAP_ERROR_NONE))
713  {
714   ip6_map_fragment_cache (ip60,
715   (ip6_frag_hdr_t *) u8_ptr_add (ip60,
716   vnet_buffer
717  (p0)->map_t.
718  v6.frag_offset),
719  d0, src_port0);
720  }
721 
722  if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
723  !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
724  u8_ptr_add (ip61,
725   vnet_buffer
726   (p1)->map_t.
727  v6.frag_offset)))
728  && (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
729  && (error1 == MAP_ERROR_NONE))
730  {
731   ip6_map_fragment_cache (ip61,
732   (ip6_frag_hdr_t *) u8_ptr_add (ip61,
733   vnet_buffer
734  (p1)->map_t.
735  v6.frag_offset),
736  d1, src_port1);
737  }
738 
739  if (PREDICT_TRUE
740  (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
741  {
742   vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
743   thread_index,
744  vnet_buffer (p0)->
745  map_t.map_domain_index, 1,
746  clib_net_to_host_u16
747  (ip60->payload_length));
748  }
749 
750  if (PREDICT_TRUE
751  (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
752  {
753   vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
754   thread_index,
755  vnet_buffer (p1)->
756  map_t.map_domain_index, 1,
757  clib_net_to_host_u16
758  (ip61->payload_length));
759  }
760 
761  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
762  next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
763  p0->error = error_node->errors[error0];
764  p1->error = error_node->errors[error1];
765  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
766  n_left_to_next, pi0, pi1, next0,
767  next1);
768  }
769 #endif
770 
771  while (n_left_from > 0 && n_left_to_next > 0)
772  {
773  u32 pi0;
774  vlib_buffer_t *p0;
775  ip6_header_t *ip60;
776  u8 error0;
777  u32 l4_len0;
778  i32 src_port0;
779  map_domain_t *d0;
780  ip6_frag_hdr_t *frag0;
781  ip6_mapt_next_t next0 = 0;
782  u32 saddr;
783 
784  pi0 = to_next[0] = from[0];
785  from += 1;
786  n_left_from -= 1;
787  to_next += 1;
788  n_left_to_next -= 1;
789  error0 = MAP_ERROR_NONE;
790 
791  p0 = vlib_get_buffer (vm, pi0);
792  ip60 = vlib_buffer_get_current (p0);
793  //Save saddr in a different variable to not overwrite ip.adj_index
794  saddr = map_get_ip4 (&ip60->src_address);
795  d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
796  (ip4_address_t *) & saddr,
797  &vnet_buffer (p0)->map_t.map_domain_index,
798  &error0);
799 
800  //FIXME: What if d0 is null
801  vnet_buffer (p0)->map_t.v6.saddr = saddr;
802  vnet_buffer (p0)->map_t.v6.daddr =
803   ip6_map_t_embedded_address (d0, &ip60->dst_address);
804   vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
805 
806  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
807  &(vnet_buffer (p0)->map_t.
808  v6.l4_protocol),
809  &(vnet_buffer (p0)->map_t.
810  v6.l4_offset),
811  &(vnet_buffer (p0)->map_t.
812  v6.frag_offset))))
813  {
814  error0 = MAP_ERROR_MALFORMED;
815  next0 = IP6_MAPT_NEXT_DROP;
816  }
817 
818  src_port0 = -1;
819  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
820  sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
821  frag0 =
822  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
823  vnet_buffer (p0)->map_t.
824  v6.frag_offset);
825 
826 
827  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
828  ip6_frag_hdr_offset (frag0)))
829  {
830  src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
831  error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
832   next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
833   }
834  else
835  if (PREDICT_TRUE
836  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
837  {
838  error0 =
839  l4_len0 <
840  sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
841  vnet_buffer (p0)->map_t.checksum_offset =
842  vnet_buffer (p0)->map_t.v6.l4_offset + 16;
843   next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
844   src_port0 =
845  (i32) *
846  ((u16 *)
847  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
848  }
849  else
850  if (PREDICT_TRUE
851  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
852  {
853  error0 =
854  l4_len0 <
855  sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
856  vnet_buffer (p0)->map_t.checksum_offset =
857  vnet_buffer (p0)->map_t.v6.l4_offset + 6;
858   next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
859   src_port0 =
860  (i32) *
861  ((u16 *)
862  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
863  }
864  else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
865  IP_PROTOCOL_ICMP6)
866  {
867  error0 =
868  l4_len0 <
869  sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
870  next0 = IP6_MAPT_NEXT_MAPT_ICMP;
871  if (((icmp46_header_t *)
872  u8_ptr_add (ip60,
873  vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
874  ICMP6_echo_reply
875  || ((icmp46_header_t *)
876  u8_ptr_add (ip60,
877  vnet_buffer (p0)->map_t.v6.
878  l4_offset))->code == ICMP6_echo_request)
879  src_port0 =
880  (i32) *
881  ((u16 *)
882  u8_ptr_add (ip60,
883  vnet_buffer (p0)->map_t.v6.l4_offset + 6));
884  }
885  else
886  {
887  //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
888  error0 = MAP_ERROR_BAD_PROTOCOL;
889  }
890 
891  //Security check
892  if (PREDICT_FALSE
893  ((src_port0 != -1)
894  && (ip60->src_address.as_u64[0] !=
895  map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
896  src_port0)
897  || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
898   vnet_buffer
899   (p0)->map_t.v6.saddr,
900  src_port0))))
901  {
902  //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
903  error0 = MAP_ERROR_SEC_CHECK;
904  }
905 
906  //Fragmented first packet needs to be cached for following packets
907  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
908  !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
909  u8_ptr_add (ip60,
910   vnet_buffer
911   (p0)->map_t.
912  v6.frag_offset)))
913  && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
914  && (error0 == MAP_ERROR_NONE))
915  {
916   ip6_map_fragment_cache (ip60,
917   (ip6_frag_hdr_t *) u8_ptr_add (ip60,
918   vnet_buffer
919  (p0)->map_t.
920  v6.frag_offset),
921  d0, src_port0);
922  }
923 
924  if (PREDICT_TRUE
925  (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
926  {
927   vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
928   thread_index,
929  vnet_buffer (p0)->
930  map_t.map_domain_index, 1,
931  clib_net_to_host_u16
932  (ip60->payload_length));
933  }
934 
935  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
936  p0->error = error_node->errors[error0];
937  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
938  to_next, n_left_to_next, pi0,
939  next0);
940  }
941  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
942  }
943  return frame->n_vectors;
944 }
945 
946 static char *map_t_error_strings[] = {
947 #define _(sym,string) string,
948   foreach_map_error
949 #undef _
950 };
951 
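/*
 * Graph node registrations.  The three translation nodes share the MAP error
 * strings and can branch to ip4-lookup, the IPv4 fragmentation node or
 * error-drop; ip6-map-t itself dispatches to those translation nodes.
 */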
952 /* *INDENT-OFF* */
953 VLIB_REGISTER_NODE (ip6_map_t_fragmented_node) = {
954  .function = ip6_map_t_fragmented,
955  .name = "ip6-map-t-fragmented",
956  .vector_size = sizeof (u32),
957  .format_trace = format_map_trace,
958  .type = VLIB_NODE_TYPE_INTERNAL,
959 
960  .n_errors = MAP_N_ERROR,
961  .error_strings = map_t_error_strings,
962 
963  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
964  .next_nodes = {
965  [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
966  [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
967  [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
968  },
969 };
970 /* *INDENT-ON* */
971 
972 /* *INDENT-OFF* */
973 VLIB_REGISTER_NODE (ip6_map_t_icmp_node) = {
974  .function = ip6_map_t_icmp,
975  .name = "ip6-map-t-icmp",
976  .vector_size = sizeof (u32),
977  .format_trace = format_map_trace,
978  .type = VLIB_NODE_TYPE_INTERNAL,
979 
980  .n_errors = MAP_N_ERROR,
981  .error_strings = map_t_error_strings,
982 
983  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
984  .next_nodes = {
985  [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
986  [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
987  [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
988  },
989 };
990 /* *INDENT-ON* */
991 
992 /* *INDENT-OFF* */
993 VLIB_REGISTER_NODE (ip6_map_t_tcp_udp_node) = {
994  .function = ip6_map_t_tcp_udp,
995  .name = "ip6-map-t-tcp-udp",
996  .vector_size = sizeof (u32),
997  .format_trace = format_map_trace,
998  .type = VLIB_NODE_TYPE_INTERNAL,
999 
1000  .n_errors = MAP_N_ERROR,
1001  .error_strings = map_t_error_strings,
1002 
1003  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
1004  .next_nodes = {
1005  [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1006  [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1007  [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
1008  },
1009 };
1010 /* *INDENT-ON* */
1011 
1012 /* *INDENT-OFF* */
1013 VLIB_REGISTER_NODE (ip6_map_t_node) = {
1014  .function = ip6_map_t,
1015  .name = "ip6-map-t",
1016  .vector_size = sizeof(u32),
1017  .format_trace = format_map_trace,
1018  .type = VLIB_NODE_TYPE_INTERNAL,
1019 
1020  .n_errors = MAP_N_ERROR,
1021  .error_strings = map_t_error_strings,
1022 
1023  .n_next_nodes = IP6_MAPT_N_NEXT,
1024  .next_nodes = {
1025  [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
1026  [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
1027  [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
1028  [IP6_MAPT_NEXT_DROP] = "error-drop",
1029  },
1030 };
1031 /* *INDENT-ON* */
1032 
1033 /*
1034  * fd.io coding-style-patch-verification: ON
1035  *
1036  * Local Variables:
1037  * eval: (c-set-style "gnu")
1038  * End:
1039  */