FD.io VPP  v18.10-32-g1161dda
Vector Packet Processing
ip4_map_t.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "map.h"

#include <vnet/ip/ip_frag.h>
#include <vnet/ip/ip4_to_ip6.h>

#define IP4_MAP_T_DUAL_LOOP 1
typedef enum
{
  IP4_MAPT_NEXT_MAPT_TCP_UDP,
  IP4_MAPT_NEXT_MAPT_ICMP,
  IP4_MAPT_NEXT_MAPT_FRAGMENTED,
  IP4_MAPT_NEXT_DROP,
  IP4_MAPT_N_NEXT
} ip4_mapt_next_t;

typedef enum
{
  IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
  IP4_MAPT_ICMP_NEXT_IP6_FRAG,
  IP4_MAPT_ICMP_NEXT_DROP,
  IP4_MAPT_ICMP_N_NEXT
} ip4_mapt_icmp_next_t;

typedef enum
{
  IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
  IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
  IP4_MAPT_TCP_UDP_NEXT_DROP,
  IP4_MAPT_TCP_UDP_N_NEXT
} ip4_mapt_tcp_udp_next_t;

typedef enum
{
  IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
  IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
  IP4_MAPT_FRAGMENTED_NEXT_DROP,
  IP4_MAPT_FRAGMENTED_N_NEXT
} ip4_mapt_fragmented_next_t;
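
/* Note: each of the *_next_t enums above indexes the next_nodes table of
 * the corresponding VLIB_REGISTER_NODE block at the bottom of this file. */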
//This is used to pass information within the buffer data.
//The buffer structure is too small to contain big structures like this.
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip6_address_t daddr;
  ip6_address_t saddr;
  //IPv6 header + Fragmentation header will be here
  //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
  u8 unused[28];
}) ip4_mapt_pseudo_header_t;
/* *INDENT-ON* */
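
/* Size check on the layout above: the 28 unused bytes are
 * sizeof(ip6_header_t) (40) + sizeof(ip6_frag_hdr_t) (8) -
 * sizeof(ip4_header_t) (20), i.e. exactly the extra room needed to
 * rewrite the IPv4 header in place as an IPv6 header plus a
 * fragmentation header. */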

static_always_inline int
ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r =
    map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
                       ip4->fragment_id,
                       (ip4->protocol ==
                        IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
                       &ignore);
  if (r)
    r->port = port;

  map_ip4_reass_unlock ();
  return !r;
}
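
/* Only the first fragment of an IPv4 packet carries the L4 ports. The
 * port learned from it is cached in the reassembly structure (keyed on
 * source, destination, fragment_id and protocol) so that
 * ip4_map_fragment_get_port() below can recover it for the non-first
 * fragments. */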

static_always_inline i32
ip4_map_fragment_get_port (ip4_header_t * ip4)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r =
    map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
                       ip4->fragment_id,
                       (ip4->protocol ==
                        IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
                       &ignore);
  i32 ret = r ? r->port : -1;
  map_ip4_reass_unlock ();
  return ret;
}

typedef struct
{
  map_domain_t *d;
  u16 id;
} icmp_to_icmp6_ctx_t;

static int
ip4_to_ip6_set_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg)
{
  icmp_to_icmp6_ctx_t *ctx = arg;
  map_main_t *mm = &map_main;

  if (mm->is_ce)
    {
      ip6->src_address.as_u64[0] =
        map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
      ip6->src_address.as_u64[1] =
        map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
      ip4_map_t_embedded_address (ctx->d, &ip6->dst_address,
                                  &ip4->dst_address);
    }
  else
    {
      ip4_map_t_embedded_address (ctx->d, &ip6->src_address,
                                  &ip4->src_address);
      ip6->dst_address.as_u64[0] =
        map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
      ip6->dst_address.as_u64[1] =
        map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
    }

  return 0;
}
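
/* The inner callback below is the mirror image of the outer one: an ICMP
 * error embeds the offending packet, so the inner IPv4 source and
 * destination translate to the opposite IPv6 fields relative to the
 * outer header. */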

static int
ip4_to_ip6_set_inner_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6,
                              void *arg)
{
  icmp_to_icmp6_ctx_t *ctx = arg;
  map_main_t *mm = &map_main;

  if (mm->is_ce)
    {
      //Note that the destination address is within the domain
      //while the source address is the one outside the domain
      ip4_map_t_embedded_address (ctx->d, &ip6->src_address,
                                  &ip4->src_address);
      ip6->dst_address.as_u64[0] =
        map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
      ip6->dst_address.as_u64[1] =
        map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
    }
  else
    {
      //Note that the source address is within the domain
      //while the destination address is the one outside the domain
      ip4_map_t_embedded_address (ctx->d, &ip6->dst_address,
                                  &ip4->dst_address);
      ip6->src_address.as_u64[0] =
        map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
      ip6->src_address.as_u64[1] =
        map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
    }

  return 0;
}

static uword
ip4_map_t_icmp (vlib_main_t * vm,
                vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_icmp_next_t next0;
          u8 error0;
          map_domain_t *d0;
          u16 len0;
          icmp_to_icmp6_ctx_t ctx0;
          ip4_header_t *ip40;
          icmp46_header_t *icmp0;

          next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t));  //The pseudo-header is not used
          len0 =
            clib_net_to_host_u16 (((ip4_header_t *)
                                   vlib_buffer_get_current (p0))->length);
          d0 =
            pool_elt_at_index (map_main.domains,
                               vnet_buffer (p0)->map_t.map_domain_index);

          ip40 = vlib_buffer_get_current (p0);
          icmp0 = (icmp46_header_t *) (ip40 + 1);

          ctx0.id = ip4_get_port (ip40, icmp0->type == ICMP6_echo_request);
          ctx0.d = d0;
          if (ctx0.id == 0)
            {
              // In case of 1:1 mapping, we don't care about the port
              if (!(d0->ea_bits_len == 0 && d0->rules))
                {
                  error0 = MAP_ERROR_ICMP;
                  goto err0;
                }
            }

          if (icmp_to_icmp6
              (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
               ip4_to_ip6_set_inner_icmp_cb, &ctx0))
            {
              error0 = MAP_ERROR_ICMP;
              goto err0;
            }

          if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
            {
              vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
              vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
              next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
            }
        err0:
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               len0);
            }
          else
            {
              next0 = IP4_MAPT_ICMP_NEXT_DROP;
            }
          p0->error = error_node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
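
/* Unlike the TCP/UDP and fragmented paths, the ICMP path above does not
 * consume the pseudo-header: it skips it and recomputes the IPv6
 * addresses through the two callbacks, since the embedded (inner) packet
 * needs its own translation. */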

static int
ip4_to_ip6_set_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *ctx)
{
  ip4_mapt_pseudo_header_t *pheader = ctx;

  ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
  ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
  ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
  ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];

  return 0;
}
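
/* The addresses were computed once in ip4-map-t and stashed in the
 * pseudo-header, so this callback is a plain copy and the per-protocol
 * translation nodes need no further domain lookup. */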

static uword
ip4_map_t_fragmented (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_pseudo_header_t *pheader0;
          ip4_mapt_fragmented_next_t next0;

          next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          vlib_buffer_advance (p0, sizeof (*pheader0));

          if (ip4_to_ip6_fragmented (p0, ip4_to_ip6_set_cb, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
              next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
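
/* This node only receives non-first fragments (nonzero fragment offset).
 * Their MAP port is not present in the packet; ip4-map-t already resolved
 * it via ip4_map_fragment_get_port() and encoded the resulting IPv6
 * addresses in the pseudo-header consumed here. */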

static uword
ip4_map_t_tcp_udp (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP4_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 pi0, pi1;
          vlib_buffer_t *p0, *p1;
          ip4_mapt_pseudo_header_t *pheader0, *pheader1;
          ip4_mapt_tcp_udp_next_t next0, next1;

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          pheader1 = vlib_buffer_get_current (p1);
          vlib_buffer_advance (p0, sizeof (*pheader0));
          vlib_buffer_advance (p1, sizeof (*pheader1));

          if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
            }

          if (ip4_to_ip6_tcp_udp (p1, ip4_to_ip6_set_cb, pheader1))
            {
              p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next1 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
                  vnet_buffer (p1)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0, pi1,
                                           next0, next1);
        }
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_pseudo_header_t *pheader0;
          ip4_mapt_tcp_udp_next_t next0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          p0 = vlib_get_buffer (vm, pi0);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          vlib_buffer_advance (p0, sizeof (*pheader0));

          if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
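
/* IP4_MAP_T_DUAL_LOOP (defined at the top of this file) enables the
 * two-packets-per-iteration loop above; the scalar loop after it drains
 * whatever buffers remain in the frame. */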

static_always_inline void
ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
                    ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
                    u8 * error0, ip4_mapt_next_t * next0)
{
  map_main_t *mm = &map_main;
  u32 port_offset;

  if (mm->is_ce)
    port_offset = 0;
  else
    port_offset = 2;

  if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
    {
      *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
      if (d0->ea_bits_len == 0 && d0->rules)
        {
          *dst_port0 = 0;
        }
      else
        {
          *dst_port0 = ip4_map_fragment_get_port (ip40);
          *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
        }
    }
  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
    {
      vnet_buffer (p0)->map_t.checksum_offset = 36;
      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
      *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
      *dst_port0 =
        (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + port_offset));
    }
  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
    {
      vnet_buffer (p0)->map_t.checksum_offset = 26;
      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
      *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
      *dst_port0 =
        (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + port_offset));
    }
  else if (ip40->protocol == IP_PROTOCOL_ICMP)
    {
      *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
      if (d0->ea_bits_len == 0 && d0->rules)
        *dst_port0 = 0;
      else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->type
               == ICMP4_echo_reply
               || ((icmp46_header_t *)
                   u8_ptr_add (ip40,
                               sizeof (*ip40)))->type == ICMP4_echo_request)
        *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
    }
  else
    {
      *error0 = MAP_ERROR_BAD_PROTOCOL;
    }
}
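
/* Offset arithmetic used above: the L4 checksum field sits 16 bytes into
 * a TCP header and 6 bytes into a UDP header, hence checksum_offset
 * 20 + 16 = 36 (TCP) and 20 + 6 = 26 (UDP) from the start of the 20-byte
 * IPv4 header. port_offset picks the source port (offset 0) on the CE
 * and the destination port (offset 2) on the BR, i.e. whichever end of
 * the flow carries the MAP port. */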

static uword
ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  map_main_t *mm = &map_main;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP4_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 pi0, pi1;
          vlib_buffer_t *p0, *p1;
          ip4_header_t *ip40, *ip41;
          map_domain_t *d0, *d1;
          ip4_mapt_next_t next0 = 0, next1 = 0;
          u16 ip4_len0, ip4_len1;
          u8 error0, error1;
          i32 map_port0, map_port1;
          ip4_mapt_pseudo_header_t *pheader0, *pheader1;

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;
          error0 = MAP_ERROR_NONE;
          error1 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);
          ip40 = vlib_buffer_get_current (p0);
          ip41 = vlib_buffer_get_current (p1);
          ip4_len0 = clib_host_to_net_u16 (ip40->length);
          ip4_len1 = clib_host_to_net_u16 (ip41->length);

          if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
                             ip40->ip_version_and_header_length != 0x45))
            {
              error0 = MAP_ERROR_UNKNOWN;
              next0 = IP4_MAPT_NEXT_DROP;
            }

          if (PREDICT_FALSE (p1->current_length < ip4_len1 ||
                             ip41->ip_version_and_header_length != 0x45))
            {
              error1 = MAP_ERROR_UNKNOWN;
              next1 = IP4_MAPT_NEXT_DROP;
            }

          vnet_buffer (p0)->map_t.map_domain_index =
            vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);
          vnet_buffer (p1)->map_t.map_domain_index =
            vnet_buffer (p1)->ip.adj_index[VLIB_TX];
          d1 = ip4_map_get_domain (vnet_buffer (p1)->map_t.map_domain_index);

          vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
          vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;

          map_port0 = -1;
          map_port1 = -1;

          ip4_map_t_classify (p0, d0, ip40, ip4_len0, &map_port0, &error0,
                              &next0);
          ip4_map_t_classify (p1, d1, ip41, ip4_len1, &map_port1, &error1,
                              &next1);

          //Add MAP-T pseudo header in front of the packet
          vlib_buffer_advance (p0, -sizeof (*pheader0));
          vlib_buffer_advance (p1, -sizeof (*pheader1));
          pheader0 = vlib_buffer_get_current (p0);
          pheader1 = vlib_buffer_get_current (p1);

          //Save addresses within the packet
          if (mm->is_ce)
            {
              ip4_map_t_embedded_address (d0, &pheader0->daddr,
                                          &ip40->dst_address);
              ip4_map_t_embedded_address (d1, &pheader1->daddr,
                                          &ip41->dst_address);
              pheader0->saddr.as_u64[0] =
                map_get_pfx_net (d0, ip40->src_address.as_u32,
                                 (u16) map_port0);
              pheader0->saddr.as_u64[1] =
                map_get_sfx_net (d0, ip40->src_address.as_u32,
                                 (u16) map_port0);
              pheader1->saddr.as_u64[0] =
                map_get_pfx_net (d1, ip41->src_address.as_u32,
                                 (u16) map_port1);
              pheader1->saddr.as_u64[1] =
                map_get_sfx_net (d1, ip41->src_address.as_u32,
                                 (u16) map_port1);
            }
          else
            {
              ip4_map_t_embedded_address (d0, &pheader0->saddr,
                                          &ip40->src_address);
              ip4_map_t_embedded_address (d1, &pheader1->saddr,
                                          &ip41->src_address);
              pheader0->daddr.as_u64[0] =
                map_get_pfx_net (d0, ip40->dst_address.as_u32,
                                 (u16) map_port0);
              pheader0->daddr.as_u64[1] =
                map_get_sfx_net (d0, ip40->dst_address.as_u32,
                                 (u16) map_port0);
              pheader1->daddr.as_u64[0] =
                map_get_pfx_net (d1, ip41->dst_address.as_u32,
                                 (u16) map_port1);
              pheader1->daddr.as_u64[1] =
                map_get_sfx_net (d1, ip41->dst_address.as_u32,
                                 (u16) map_port1);
            }

          if (PREDICT_FALSE
              (ip4_is_first_fragment (ip40) && (map_port0 != -1)
               && (d0->ea_bits_len != 0 || !d0->rules)
               && ip4_map_fragment_cache (ip40, map_port0)))
            {
              error0 = MAP_ERROR_FRAGMENT_MEMORY;
            }

          if (PREDICT_FALSE
              (ip4_is_first_fragment (ip41) && (map_port1 != -1)
               && (d1->ea_bits_len != 0 || !d1->rules)
               && ip4_map_fragment_cache (ip41, map_port1)))
            {
              error1 = MAP_ERROR_FRAGMENT_MEMORY;
            }

          if (PREDICT_TRUE
              (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               clib_net_to_host_u16
                                               (ip40->length));
            }

          if (PREDICT_TRUE
              (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p1)->
                                               map_t.map_domain_index, 1,
                                               clib_net_to_host_u16
                                               (ip41->length));
            }

          next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
          next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
          p0->error = error_node->errors[error0];
          p1->error = error_node->errors[error1];
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, pi0, pi1, next0,
                                           next1);
        }
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_header_t *ip40;
          map_domain_t *d0;
          ip4_mapt_next_t next0;
          u16 ip4_len0;
          u8 error0;
          i32 map_port0;
          ip4_mapt_pseudo_header_t *pheader0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          ip40 = vlib_buffer_get_current (p0);
          ip4_len0 = clib_host_to_net_u16 (ip40->length);
          if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
                             ip40->ip_version_and_header_length != 0x45))
            {
              error0 = MAP_ERROR_UNKNOWN;
              next0 = IP4_MAPT_NEXT_DROP;
            }

          vnet_buffer (p0)->map_t.map_domain_index =
            vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);

          vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;

          map_port0 = -1;
          ip4_map_t_classify (p0, d0, ip40, ip4_len0, &map_port0, &error0,
                              &next0);

          /* Verify that the port is not among the well-known ports */
          if ((d0->psid_length > 0 && d0->psid_offset > 0)
              && (clib_net_to_host_u16 (map_port0) <
                  (0x1 << (16 - d0->psid_offset))))
            {
              error0 = MAP_ERROR_SEC_CHECK;
            }
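
          /* With PSID offset a, a MAP CE may not use ports whose top a
           * bits are all zero, i.e. ports below 2^(16-a) (RFC 7597).
           * With the common a = 6 this excludes ports 0-1023, the
           * system port range. */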

          //Add MAP-T pseudo header in front of the packet
          vlib_buffer_advance (p0, -sizeof (*pheader0));
          pheader0 = vlib_buffer_get_current (p0);

          //Save addresses within the packet
          if (mm->is_ce)
            {
              ip4_map_t_embedded_address (d0, &pheader0->daddr,
                                          &ip40->dst_address);
              pheader0->saddr.as_u64[0] =
                map_get_pfx_net (d0, ip40->src_address.as_u32,
                                 (u16) map_port0);
              pheader0->saddr.as_u64[1] =
                map_get_sfx_net (d0, ip40->src_address.as_u32,
                                 (u16) map_port0);
            }
          else
            {
              ip4_map_t_embedded_address (d0, &pheader0->saddr,
                                          &ip40->src_address);
              pheader0->daddr.as_u64[0] =
                map_get_pfx_net (d0, ip40->dst_address.as_u32,
                                 (u16) map_port0);
              pheader0->daddr.as_u64[1] =
                map_get_sfx_net (d0, ip40->dst_address.as_u32,
                                 (u16) map_port0);
            }

          //It is important to cache at this stage because the result might
          //be necessary for packets within the same vector.
          //Actually, this approach even provides some limited out-of-order
          //fragment support.
          if (PREDICT_FALSE
              (ip4_is_first_fragment (ip40) && (map_port0 != -1)
               && (d0->ea_bits_len != 0 || !d0->rules)
               && ip4_map_fragment_cache (ip40, map_port0)))
            {
              error0 = MAP_ERROR_FRAGMENT_MEMORY;
            }

          if (PREDICT_TRUE
              (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               clib_net_to_host_u16
                                               (ip40->length));
            }

          next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
          p0->error = error_node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
  .function = ip4_map_t_fragmented,
  .name = "ip4-map-t-fragmented",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
    [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
    [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
  .function = ip4_map_t_icmp,
  .name = "ip4-map-t-icmp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
  .next_nodes = {
    [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
    [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
  .function = ip4_map_t_tcp_udp,
  .name = "ip4-map-t-tcp-udp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
    [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
    [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_node) = {
  .function = ip4_map_t,
  .name = "ip4-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_N_NEXT,
  .next_nodes = {
    [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
    [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
    [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
    [IP4_MAPT_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
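
/* Graph wiring: ip4-map-t classifies each IPv4 packet and prepends the
 * pseudo-header, then hands it to ip4-map-t-tcp-udp, ip4-map-t-icmp or
 * ip4-map-t-fragmented; each of those forwards to ip6-lookup, possibly
 * via the IPv6 fragmentation node, or drops on error. */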

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */