FD.io VPP  v18.07-34-g55fbdb9
Vector Packet Processing
ip4_map_t.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
19 
20 #define IP4_MAP_T_DUAL_LOOP 1
21 
/* NOTE(review): four next-node enums follow.  Their enumerator lists
 * (original lines 24-29, 33-37, 41-45, 49-53) were dropped by the
 * documentation extraction.  The node registrations at the bottom of
 * this file reference the members: IP4_MAPT_NEXT_MAPT_TCP_UDP,
 * IP4_MAPT_NEXT_MAPT_ICMP, IP4_MAPT_NEXT_MAPT_FRAGMENTED,
 * IP4_MAPT_NEXT_DROP, and the *_NEXT_IP6_LOOKUP / *_NEXT_DROP members
 * of the per-node enums.  Recover the full lists from ip4_map_t.c
 * before editing. */
22 typedef enum
23 {
30 
/* ip4_mapt_icmp_next_t — members lost in extraction (see note above). */
31 typedef enum
32 {
38 
/* ip4_mapt_tcp_udp_next_t — members lost in extraction (see note above). */
39 typedef enum
40 {
46 
/* ip4_mapt_fragmented_next_t — members lost in extraction (see note above). */
47 typedef enum
48 {
54 
55 //This is used to pass information within the buffer data.
56 //Buffer structure being too small to contain big structures like this.
/* Pre-computed IPv6 source/destination for the translated packet,
 * written by the ip4-map-t classify node and consumed by the
 * per-protocol translation nodes (tcp-udp / icmp / fragmented), which
 * pop it with vlib_buffer_advance before translating. */
57 /* *INDENT-OFF* */
58 typedef CLIB_PACKED (struct {
59  ip6_address_t daddr;
60  ip6_address_t saddr;
61  //IPv6 header + Fragmentation header will be here
62  //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
/* 40 + 8 - 20 = 28 bytes of headroom reserved so the IPv6 (+frag)
 * header can be written in place over the pseudo header + IPv4 header. */
63  u8 unused[28];
64 }) ip4_mapt_pseudo_header_t;
65 /* *INDENT-ON* */
66 
67 
/* Remember the mapped port for a fragmented IPv4 packet so that
 * non-first fragments (which carry no L4 header) can be translated
 * with the same port.  Returns 0 on success, non-zero when no
 * reassembly tracker could be obtained (reassembly memory exhausted).
 * NOTE(review): the return-type/signature line (orig. 68) and the
 * map_ip4_reass_lock()/map_ip4_reass_get(...)/map_ip4_reass_unlock()
 * lines (orig. 72, 74, 82) were dropped by the extraction. */
69 ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
70 {
71  u32 *ignore = NULL;
73  map_ip4_reass_t *r =
75  ip4->fragment_id,
/* ICMP fragments are tracked under the ICMP6 protocol number so the
 * lookup matches the translated packet. */
76  (ip4->protocol ==
77  IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
78  &ignore);
79  if (r)
80  r->port = port;
81 
83  return !r;
84 }
85 
/* Look up the port previously cached by ip4_map_fragment_cache() for
 * this fragment flow; returns the port, or -1 if no tracker exists.
 * NOTE(review): the signature lines (orig. 86-87:
 * static_always_inline i32 ip4_map_fragment_get_port (ip4_header_t *ip4))
 * and the lock/get/unlock lines (orig. 90, 92, 98) were dropped by the
 * extraction. */
88 {
89  u32 *ignore = NULL;
91  map_ip4_reass_t *r =
93  ip4->fragment_id,
94  (ip4->protocol ==
95  IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
96  &ignore);
97  i32 ret = r ? r->port : -1;
99  return ret;
100 }
101 
/* Context handed to the icmp_to_icmp6() address-rewrite callbacks.
 * NOTE(review): the members (orig. lines 104-106) were dropped by the
 * extraction; from the callbacks below they include at least
 * `map_domain_t *d` and a `u16 id` (the ICMP id / port used for the
 * MAP address computation) — confirm against ip4_map_t.c. */
102 typedef struct
103 {
107 
/* icmp_to_icmp6() callback: fills the outer IPv6 header addresses for
 * a translated ICMP packet.  In CE mode the MAP-computed address is
 * the source (packet leaves the customer domain); in BR mode it is the
 * destination.  Always returns 0.
 * NOTE(review): part of the signature (orig. 109) and the
 * ip4_map_t_embedded_address(...) call lines (orig. 120, 126) were
 * dropped by the extraction — only their continuation lines remain. */
108 static int
110 {
111  icmp_to_icmp6_ctx_t *ctx = arg;
112  map_main_t *mm = &map_main;
113 
114  if (mm->is_ce)
115  {
/* Source comes from the MAP rule (prefix + embedded port id)... */
116  ip6->src_address.as_u64[0] =
117  map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
118  ip6->src_address.as_u64[1] =
119  map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
/* ...destination is the RFC 6052-style embedded IPv4 address. */
121  &ip4->dst_address);
122  }
123  else
124  {
126  &ip4->src_address);
127  ip6->dst_address.as_u64[0] =
128  map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
129  ip6->dst_address.as_u64[1] =
130  map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
131  }
132 
133  return 0;
134 }
135 
/* icmp_to_icmp6() callback for the *inner* (quoted) IP header of an
 * ICMP error.  The inner packet travels in the opposite direction to
 * the outer one, so the in-domain/out-of-domain roles of src and dst
 * are swapped relative to ip4_to_ip6_set_icmp_cb.  Always returns 0.
 * NOTE(review): part of the signature (orig. 137) and the
 * ip4_map_t_embedded_address(...) call lines (orig. 147-148, 158-159)
 * were dropped by the extraction. */
136 static int
138  void *arg)
139 {
140  icmp_to_icmp6_ctx_t *ctx = arg;
141  map_main_t *mm = &map_main;
142 
143  if (mm->is_ce)
144  {
145  //Note that the destination address is within the domain
146  //while the source address is the one outside the domain
148  &ip4->src_address);
149  ip6->dst_address.as_u64[0] =
150  map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
151  ip6->dst_address.as_u64[1] =
152  map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
153  }
154  else
155  {
156  //Note that the source address is within the domain
157  //while the destination address is the one outside the domain
159  &ip4->dst_address);
160  ip6->src_address.as_u64[0] =
161  map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
162  ip6->src_address.as_u64[1] =
163  map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
164  }
165 
166  return 0;
167 }
168 
/* ip4-map-t-icmp node function: translates ICMPv4 packets to ICMPv6
 * via icmp_to_icmp6(), accounting translated bytes per MAP domain and
 * diverting oversize results to IPv6 fragmentation.
 * NOTE(review): several lines were dropped by the extraction —
 * the signature start (orig. 170), next0 initialization (orig. 179),
 * a prefetch/trace line (orig. 198), the ip4_map_get_domain call start
 * (orig. 212), the inner-callback arguments of icmp_to_icmp6
 * (orig. 232), the IP6_FRAG next assignment (orig. 243) and the
 * vlib_increment_combined_counter call start (orig. 247-248).
 * Recover them from ip4_map_t.c before modifying this function. */
169 static uword
171  vlib_node_runtime_t * node, vlib_frame_t * frame)
172 {
173  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
174  vlib_node_runtime_t *error_node =
175  vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
176  from = vlib_frame_vector_args (frame);
177  n_left_from = frame->n_vectors;
178  next_index = node->cached_next_index;
180  u32 thread_index = vm->thread_index;
181 
182  while (n_left_from > 0)
183  {
184  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
185 
186  while (n_left_from > 0 && n_left_to_next > 0)
187  {
188  u32 pi0;
189  vlib_buffer_t *p0;
190  ip4_mapt_icmp_next_t next0;
191  u8 error0;
192  map_domain_t *d0;
193  u16 len0;
194  icmp_to_icmp6_ctx_t ctx0;
195  ip4_header_t *ip40;
196  icmp46_header_t *icmp0;
197 
199  pi0 = to_next[0] = from[0];
200  from += 1;
201  n_left_from -= 1;
202  to_next += 1;
203  n_left_to_next -= 1;
204  error0 = MAP_ERROR_NONE;
205 
206  p0 = vlib_get_buffer (vm, pi0);
207  vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
/* Length captured before translation so the counter reflects the
 * IPv4 byte count. */
208  len0 =
209  clib_net_to_host_u16 (((ip4_header_t *)
210  vlib_buffer_get_current (p0))->length);
211  d0 =
213  vnet_buffer (p0)->map_t.map_domain_index);
214 
215  ip40 = vlib_buffer_get_current (p0);
216  icmp0 = (icmp46_header_t *) (ip40 + 1);
217 
218  ctx0.id = ip4_get_port (ip40, icmp0->type == ICMP6_echo_request);
219  ctx0.d = d0;
220  if (ctx0.id == 0)
221  {
222  // In case of 1:1 mapping, we don't care about the port
223  if (!(d0->ea_bits_len == 0 && d0->rules))
224  {
225  error0 = MAP_ERROR_ICMP;
226  goto err0;
227  }
228  }
229 
230  if (icmp_to_icmp6
231  (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
233  {
234  error0 = MAP_ERROR_ICMP;
235  goto err0;
236  }
237 
/* Translated packet exceeds the domain MTU: hand off to ip6-frag. */
238  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
239  {
240  vnet_buffer (p0)->ip_frag.header_offset = 0;
241  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
242  vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
244  }
245  err0:
246  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
247  {
249  thread_index,
250  vnet_buffer (p0)->
251  map_t.map_domain_index, 1,
252  len0);
253  }
254  else
255  {
256  next0 = IP4_MAPT_ICMP_NEXT_DROP;
257  }
258  p0->error = error_node->errors[error0];
259  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
260  to_next, n_left_to_next, pi0,
261  next0);
262  }
263  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
264  }
265  return frame->n_vectors;
266 }
267 
/* Generic address-rewrite callback for ip4_to_ip6_tcp_udp()/
 * ip4_to_ip6_fragmented(): copies the IPv6 src/dst pre-computed by the
 * classify node out of the pseudo header.  Always returns 0.
 * NOTE(review): the signature line with the parameter list (orig. 269)
 * was dropped by the extraction. */
268 static int
270 {
271  ip4_mapt_pseudo_header_t *pheader = ctx;
272 
273  ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
274  ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
275  ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
276  ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
277 
278  return 0;
279 }
280 
/* ip4-map-t-fragmented node function: translates non-first IPv4
 * fragments to IPv6 using the pseudo-header addresses, sending
 * oversize results to IPv6 fragmentation and failed translations to
 * drop.
 * NOTE(review): dropped lines — signature start (orig. 282), next0
 * initialization (orig. 303), the drop next-index assignment after the
 * error (orig. 319), and the IP6_FRAG next-index value plus else-branch
 * next assignment (orig. 328-329).  next0 therefore appears unset in
 * this extract; the full source assigns it on every path. */
281 static uword
283  vlib_node_runtime_t * node, vlib_frame_t * frame)
284 {
285  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
286  from = vlib_frame_vector_args (frame);
287  n_left_from = frame->n_vectors;
288  next_index = node->cached_next_index;
289  vlib_node_runtime_t *error_node =
290  vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);
291 
292  while (n_left_from > 0)
293  {
294  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
295 
296  while (n_left_from > 0 && n_left_to_next > 0)
297  {
298  u32 pi0;
299  vlib_buffer_t *p0;
300  ip4_mapt_pseudo_header_t *pheader0;
301  ip4_mapt_fragmented_next_t next0;
302 
304  pi0 = to_next[0] = from[0];
305  from += 1;
306  n_left_from -= 1;
307  to_next += 1;
308  n_left_to_next -= 1;
309 
310  p0 = vlib_get_buffer (vm, pi0);
311 
312  //Accessing pseudo header
313  pheader0 = vlib_buffer_get_current (p0);
314  vlib_buffer_advance (p0, sizeof (*pheader0));
315 
316  if (ip4_to_ip6_fragmented (p0, ip4_to_ip6_set_cb, pheader0))
317  {
318  p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
320  }
321  else
322  {
/* Translated fragment still exceeds domain MTU: re-fragment in v6. */
323  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
324  {
325  vnet_buffer (p0)->ip_frag.header_offset = 0;
326  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
327  vnet_buffer (p0)->ip_frag.next_index =
330  }
331  }
332 
333  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
334  to_next, n_left_to_next, pi0,
335  next0);
336  }
337  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
338  }
339  return frame->n_vectors;
340 }
341 
/* ip4-map-t-tcp-udp node function: translates TCP/UDP IPv4 packets to
 * IPv6 using the pseudo-header addresses; dual-loop (two packets per
 * iteration) plus a single-packet tail loop.  Oversize results go to
 * IPv6 fragmentation, translation failures to drop.
 * NOTE(review): dropped lines — signature start (orig. 343), prefetch
 * lines (orig. 373-374, 439), next-index assignments after errors and
 * in the MTU branches (orig. 387, 397-398, 405, 415-416, 449, 459-460).
 * next0/next1 therefore appear unset in this extract; the full source
 * assigns them on every path. */
342 static uword
344  vlib_node_runtime_t * node, vlib_frame_t * frame)
345 {
346  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
347  from = vlib_frame_vector_args (frame);
348  n_left_from = frame->n_vectors;
349  next_index = node->cached_next_index;
350  vlib_node_runtime_t *error_node =
351  vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);
352 
353 
354  while (n_left_from > 0)
355  {
356  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
357 
358 #ifdef IP4_MAP_T_DUAL_LOOP
/* Dual loop: requires 4 remaining so the next pair can be prefetched. */
359  while (n_left_from >= 4 && n_left_to_next >= 2)
360  {
361  u32 pi0, pi1;
362  vlib_buffer_t *p0, *p1;
363  ip4_mapt_pseudo_header_t *pheader0, *pheader1;
364  ip4_mapt_tcp_udp_next_t next0, next1;
365 
366  pi0 = to_next[0] = from[0];
367  pi1 = to_next[1] = from[1];
368  from += 2;
369  n_left_from -= 2;
370  to_next += 2;
371  n_left_to_next -= 2;
372 
375  p0 = vlib_get_buffer (vm, pi0);
376  p1 = vlib_get_buffer (vm, pi1);
377 
378  //Accessing pseudo header
379  pheader0 = vlib_buffer_get_current (p0);
380  pheader1 = vlib_buffer_get_current (p1);
381  vlib_buffer_advance (p0, sizeof (*pheader0));
382  vlib_buffer_advance (p1, sizeof (*pheader1));
383 
384  if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
385  {
386  p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
388  }
389  else
390  {
391  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
392  {
393  //Send to fragmentation node if necessary
394  vnet_buffer (p0)->ip_frag.header_offset = 0;
395  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
396  vnet_buffer (p0)->ip_frag.next_index =
399  }
400  }
401 
402  if (ip4_to_ip6_tcp_udp (p1, ip4_to_ip6_set_cb, pheader1))
403  {
404  p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
406  }
407  else
408  {
409  if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
410  {
411  //Send to fragmentation node if necessary
412  vnet_buffer (p1)->ip_frag.header_offset = 0;
413  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
414  vnet_buffer (p1)->ip_frag.next_index =
417  }
418  }
419 
420  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
421  to_next, n_left_to_next, pi0, pi1,
422  next0, next1);
423  }
424 #endif
425 
/* Single-packet tail loop; same logic as the dual loop above. */
426  while (n_left_from > 0 && n_left_to_next > 0)
427  {
428  u32 pi0;
429  vlib_buffer_t *p0;
430  ip4_mapt_pseudo_header_t *pheader0;
431  ip4_mapt_tcp_udp_next_t next0;
432 
433  pi0 = to_next[0] = from[0];
434  from += 1;
435  n_left_from -= 1;
436  to_next += 1;
437  n_left_to_next -= 1;
438 
440  p0 = vlib_get_buffer (vm, pi0);
441 
442  //Accessing pseudo header
443  pheader0 = vlib_buffer_get_current (p0);
444  vlib_buffer_advance (p0, sizeof (*pheader0));
445 
446  if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
447  {
448  p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
450  }
451  else
452  {
453  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
454  {
455  //Send to fragmentation node if necessary
456  vnet_buffer (p0)->ip_frag.header_offset = 0;
457  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
458  vnet_buffer (p0)->ip_frag.next_index =
461  }
462  }
463  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
464  to_next, n_left_to_next, pi0,
465  next0);
466  }
467  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
468  }
469 
470  return frame->n_vectors;
471 }
472 
/* Classify an IPv4 packet for MAP-T: pick the next node (tcp-udp /
 * icmp / fragmented), extract the port used for the MAP address
 * computation into *dst_port0, and flag malformed / unsupported
 * packets via *error0.  port_offset selects src port (CE, offset 0)
 * vs dst port (BR, offset 2) within the L4 header.
 * NOTE(review): dropped lines — the signature start (orig. 473-474:
 * static_always_inline void ip4_map_t_classify (vlib_buffer_t * p0,
 * map_domain_t * d0, ...), the fragment-offset test opening the first
 * branch (orig. 486), a *next0 assignment for the fragmented case
 * (orig. 488), and *next0 assignments in the TCP/UDP branches
 * (orig. 502, 510). */
475  ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
476  u8 * error0, ip4_mapt_next_t * next0)
477 {
478  map_main_t *mm = &map_main;
479  u32 port_offset;
480 
481  if (mm->is_ce)
482  port_offset = 0;
483  else
484  port_offset = 2;
485 
487  {
489  if (d0->ea_bits_len == 0 && d0->rules)
490  {
/* 1:1 mapping: the port is irrelevant. */
491  *dst_port0 = 0;
492  }
493  else
494  {
/* Non-first fragment: reuse the port cached from the first one. */
495  *dst_port0 = ip4_map_fragment_get_port (ip40);
496  *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
497  }
498  }
499  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
500  {
/* 36 = 20 (IPv4 hdr) + 16 (offset of TCP checksum). */
501  vnet_buffer (p0)->map_t.checksum_offset = 36;
503  *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
504  *dst_port0 =
505  (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + port_offset));
506  }
507  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
508  {
/* 26 = 20 (IPv4 hdr) + 6 (offset of UDP checksum). */
509  vnet_buffer (p0)->map_t.checksum_offset = 26;
511  *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
512  *dst_port0 =
513  (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + port_offset));
514  }
515  else if (ip40->protocol == IP_PROTOCOL_ICMP)
516  {
517  *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
518  if (d0->ea_bits_len == 0 && d0->rules)
519  *dst_port0 = 0;
/* NOTE(review): comparing ->code against ICMP4_echo_reply /
 * ICMP4_echo_request looks suspicious — echo type lives in ->type,
 * not ->code (both constants are type values).  Verify against
 * upstream before relying on this branch. */
520  else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
521  == ICMP4_echo_reply
522  || ((icmp46_header_t *)
523  u8_ptr_add (ip40,
524  sizeof (*ip40)))->code == ICMP4_echo_request)
525  *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
526  }
527  else
528  {
529  *error0 = MAP_ERROR_BAD_PROTOCOL;
530  }
531 }
532 
/* ip4-map-t entry node: validates the IPv4 header, resolves the MAP
 * domain (stashed in ip.adj_index[VLIB_TX] by the lookup), classifies
 * the packet, pre-computes the IPv6 src/dst into a pseudo header
 * pushed in front of the packet, caches ports for first fragments,
 * and accounts per-domain counters.  Dual loop plus single tail loop.
 * NOTE(review): dropped lines — signature start (orig. 534), node
 * declaration line (orig. 543), the map_get_pfx_net/map_get_sfx_net
 * call-start lines in the address computations (orig. 625, 628, 631,
 * 634, 644, 647, 650, 653, 754, 757, 765, 768), and the
 * vlib_increment_combined_counter call starts (orig. 676, 687, 786).
 * Also note the single-packet loop reports MAP_ERROR_UNKNOWN where the
 * dual loop reports MAP_ERROR_FRAGMENT_MEMORY for the same failure —
 * flagged inline below. */
533 static uword
535 {
536  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
537  vlib_node_runtime_t *error_node =
538  vlib_node_get_runtime (vm, ip4_map_t_node.index);
539  from = vlib_frame_vector_args (frame);
540  n_left_from = frame->n_vectors;
541  next_index = node->cached_next_index;
542  map_main_t *mm = &map_main;
544  u32 thread_index = vm->thread_index;
545 
546  while (n_left_from > 0)
547  {
548  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
549 
550 #ifdef IP4_MAP_T_DUAL_LOOP
551  while (n_left_from >= 4 && n_left_to_next >= 2)
552  {
553  u32 pi0, pi1;
554  vlib_buffer_t *p0, *p1;
555  ip4_header_t *ip40, *ip41;
556  map_domain_t *d0, *d1;
557  ip4_mapt_next_t next0 = 0, next1 = 0;
558  u16 ip4_len0, ip4_len1;
559  u8 error0, error1;
560  i32 map_port0, map_port1;
561  ip4_mapt_pseudo_header_t *pheader0, *pheader1;
562 
563  pi0 = to_next[0] = from[0];
564  pi1 = to_next[1] = from[1];
565  from += 2;
566  n_left_from -= 2;
567  to_next += 2;
568  n_left_to_next -= 2;
569  error0 = MAP_ERROR_NONE;
570  error1 = MAP_ERROR_NONE;
571 
572  p0 = vlib_get_buffer (vm, pi0);
573  p1 = vlib_get_buffer (vm, pi1);
574  ip40 = vlib_buffer_get_current (p0);
575  ip41 = vlib_buffer_get_current (p1);
576  ip4_len0 = clib_host_to_net_u16 (ip40->length);
577  ip4_len1 = clib_host_to_net_u16 (ip41->length);
578 
/* Sanity check: truncated packets and packets with IP options
 * (ihl != 5) are dropped. */
579  if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
580  ip40->ip_version_and_header_length != 0x45))
581  {
582  error0 = MAP_ERROR_UNKNOWN;
583  next0 = IP4_MAPT_NEXT_DROP;
584  }
585 
586  if (PREDICT_FALSE (p1->current_length < ip4_len1 ||
587  ip41->ip_version_and_header_length != 0x45))
588  {
589  error1 = MAP_ERROR_UNKNOWN;
590  next1 = IP4_MAPT_NEXT_DROP;
591  }
592 
/* The ip4-lookup adjacency carries the MAP domain index. */
593  vnet_buffer (p0)->map_t.map_domain_index =
594  vnet_buffer (p0)->ip.adj_index[VLIB_TX];
595  d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);
596  vnet_buffer (p1)->map_t.map_domain_index =
597  vnet_buffer (p1)->ip.adj_index[VLIB_TX];
598  d1 = ip4_map_get_domain (vnet_buffer (p1)->map_t.map_domain_index);
599 
600  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
601  vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
602 
603  map_port0 = -1;
604  map_port1 = -1;
605 
606  ip4_map_t_classify (p0, d0, ip40, ip4_len0, &map_port0, &error0,
607  &next0);
608  ip4_map_t_classify (p1, d1, ip41, ip4_len1, &map_port1, &error1,
609  &next1);
610 
611  //Add MAP-T pseudo header in front of the packet
612  vlib_buffer_advance (p0, -sizeof (*pheader0));
613  vlib_buffer_advance (p1, -sizeof (*pheader1));
614  pheader0 = vlib_buffer_get_current (p0);
615  pheader1 = vlib_buffer_get_current (p1);
616 
617  //Save addresses within the packet
618  if (mm->is_ce)
619  {
620  ip4_map_t_embedded_address (d0, &pheader0->daddr,
621  &ip40->dst_address);
622  ip4_map_t_embedded_address (d1, &pheader1->daddr,
623  &ip41->dst_address);
624  pheader0->saddr.as_u64[0] =
626  (u16) map_port0);
627  pheader0->saddr.as_u64[1] =
629  (u16) map_port0);
630  pheader1->saddr.as_u64[0] =
632  (u16) map_port1);
633  pheader1->saddr.as_u64[1] =
635  (u16) map_port1);
636  }
637  else
638  {
639  ip4_map_t_embedded_address (d0, &pheader0->saddr,
640  &ip40->src_address);
641  ip4_map_t_embedded_address (d1, &pheader1->saddr,
642  &ip41->src_address);
643  pheader0->daddr.as_u64[0] =
645  (u16) map_port0);
646  pheader0->daddr.as_u64[1] =
648  (u16) map_port0);
649  pheader1->daddr.as_u64[0] =
651  (u16) map_port1);
652  pheader1->daddr.as_u64[1] =
654  (u16) map_port1);
655  }
656 
/* First fragment with a known port in a shared-address domain:
 * cache the port so later fragments can be translated. */
657  if (PREDICT_FALSE
658  (ip4_is_first_fragment (ip40) && (map_port0 != -1)
659  && (d0->ea_bits_len != 0 || !d0->rules)
660  && ip4_map_fragment_cache (ip40, map_port0)))
661  {
662  error0 = MAP_ERROR_FRAGMENT_MEMORY;
663  }
664 
665  if (PREDICT_FALSE
666  (ip4_is_first_fragment (ip41) && (map_port1 != -1)
667  && (d1->ea_bits_len != 0 || !d1->rules)
668  && ip4_map_fragment_cache (ip41, map_port1)))
669  {
670  error1 = MAP_ERROR_FRAGMENT_MEMORY;
671  }
672 
/* ICMP packets are counted in the icmp node instead, to avoid
 * double counting. */
673  if (PREDICT_TRUE
674  (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
675  {
677  thread_index,
678  vnet_buffer (p0)->
679  map_t.map_domain_index, 1,
680  clib_net_to_host_u16
681  (ip40->length));
682  }
683 
684  if (PREDICT_TRUE
685  (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP))
686  {
688  thread_index,
689  vnet_buffer (p1)->
690  map_t.map_domain_index, 1,
691  clib_net_to_host_u16
692  (ip41->length));
693  }
694 
695  next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
696  next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
697  p0->error = error_node->errors[error0];
698  p1->error = error_node->errors[error1];
699  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
700  n_left_to_next, pi0, pi1, next0,
701  next1);
702  }
703 #endif
704 
/* Single-packet tail loop; mirrors the dual loop above. */
705  while (n_left_from > 0 && n_left_to_next > 0)
706  {
707  u32 pi0;
708  vlib_buffer_t *p0;
709  ip4_header_t *ip40;
710  map_domain_t *d0;
711  ip4_mapt_next_t next0;
712  u16 ip4_len0;
713  u8 error0;
714  i32 map_port0;
715  ip4_mapt_pseudo_header_t *pheader0;
716 
717  pi0 = to_next[0] = from[0];
718  from += 1;
719  n_left_from -= 1;
720  to_next += 1;
721  n_left_to_next -= 1;
722  error0 = MAP_ERROR_NONE;
723 
724  p0 = vlib_get_buffer (vm, pi0);
725  ip40 = vlib_buffer_get_current (p0);
726  ip4_len0 = clib_host_to_net_u16 (ip40->length);
727  if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
728  ip40->ip_version_and_header_length != 0x45))
729  {
730  error0 = MAP_ERROR_UNKNOWN;
731  next0 = IP4_MAPT_NEXT_DROP;
732  }
733 
734  vnet_buffer (p0)->map_t.map_domain_index =
735  vnet_buffer (p0)->ip.adj_index[VLIB_TX];
736  d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);
737 
738  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
739 
740  map_port0 = -1;
741  ip4_map_t_classify (p0, d0, ip40, ip4_len0, &map_port0, &error0,
742  &next0);
743 
744  //Add MAP-T pseudo header in front of the packet
745  vlib_buffer_advance (p0, -sizeof (*pheader0));
746  pheader0 = vlib_buffer_get_current (p0);
747 
748  //Save addresses within the packet
749  if (mm->is_ce)
750  {
751  ip4_map_t_embedded_address (d0, &pheader0->daddr,
752  &ip40->dst_address);
753  pheader0->saddr.as_u64[0] =
755  (u16) map_port0);
756  pheader0->saddr.as_u64[1] =
758  (u16) map_port0);
759  }
760  else
761  {
762  ip4_map_t_embedded_address (d0, &pheader0->saddr,
763  &ip40->src_address);
764  pheader0->daddr.as_u64[0] =
766  (u16) map_port0);
767  pheader0->daddr.as_u64[1] =
769  (u16) map_port0);
770  }
771 
772  //It is important to cache at this stage because the result might be necessary
773  //for packets within the same vector.
774  //Actually, this approach even provides some limited out-of-order fragments support
775  if (PREDICT_FALSE
776  (ip4_is_first_fragment (ip40) && (map_port0 != -1)
777  && (d0->ea_bits_len != 0 || !d0->rules)
778  && ip4_map_fragment_cache (ip40, map_port0)))
779  {
/* NOTE(review): inconsistent with the dual loop, which reports
 * MAP_ERROR_FRAGMENT_MEMORY for this same failure — likely should
 * match; verify against upstream before changing. */
780  error0 = MAP_ERROR_UNKNOWN;
781  }
782 
783  if (PREDICT_TRUE
784  (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
785  {
787  thread_index,
788  vnet_buffer (p0)->
789  map_t.map_domain_index, 1,
790  clib_net_to_host_u16
791  (ip40->length));
792  }
793 
794  next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
795  p0->error = error_node->errors[error0];
796  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
797  to_next, n_left_to_next, pi0,
798  next0);
799  }
800  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
801  }
802  return frame->n_vectors;
803 }
804 
/* Error strings for the MAP-T nodes, generated from foreach_map_error.
 * NOTE(review): the macro invocation line between the #define and the
 * #undef (orig. 807: foreach_map_error) was dropped by the extraction. */
805 static char *map_t_error_strings[] = {
806 #define _(sym,string) string,
808 #undef _
809 };
810 
/* Registration of the ip4-map-t-fragmented node (non-first fragments).
 * NOTE(review): the extraction dropped one next-node entry (orig. 825:
 * [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME, per the
 * cross-reference index) — confirm against upstream. */
811 /* *INDENT-OFF* */
812 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
813  .function = ip4_map_t_fragmented,
814  .name = "ip4-map-t-fragmented",
815  .vector_size = sizeof(u32),
816  .format_trace = format_map_trace,
817  .type = VLIB_NODE_TYPE_INTERNAL,
818 
819  .n_errors = MAP_N_ERROR,
820  .error_strings = map_t_error_strings,
821 
822  .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
823  .next_nodes = {
824  [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
826  [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
827  },
828 };
829 /* *INDENT-ON* */
830 
/* Registration of the ip4-map-t-icmp node (ICMP translation).
 * NOTE(review): the extraction dropped one next-node entry (orig. 845,
 * presumably the IP6_FRAG next) — confirm against upstream. */
831 /* *INDENT-OFF* */
832 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
833  .function = ip4_map_t_icmp,
834  .name = "ip4-map-t-icmp",
835  .vector_size = sizeof(u32),
836  .format_trace = format_map_trace,
837  .type = VLIB_NODE_TYPE_INTERNAL,
838 
839  .n_errors = MAP_N_ERROR,
840  .error_strings = map_t_error_strings,
841 
842  .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
843  .next_nodes = {
844  [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
846  [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
847  },
848 };
849 /* *INDENT-ON* */
850 
/* Registration of the ip4-map-t-tcp-udp node (TCP/UDP translation).
 * NOTE(review): the extraction dropped one next-node entry (orig. 865,
 * presumably the IP6_FRAG next) — confirm against upstream. */
851 /* *INDENT-OFF* */
852 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
853  .function = ip4_map_t_tcp_udp,
854  .name = "ip4-map-t-tcp-udp",
855  .vector_size = sizeof(u32),
856  .format_trace = format_map_trace,
857  .type = VLIB_NODE_TYPE_INTERNAL,
858 
859  .n_errors = MAP_N_ERROR,
860  .error_strings = map_t_error_strings,
861 
862  .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
863  .next_nodes = {
864  [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
866  [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
867  },
868 };
869 /* *INDENT-ON* */
870 
/* Registration of the ip4-map-t entry node: classifies IPv4 packets
 * and dispatches them to the per-protocol MAP-T translation nodes. */
871 /* *INDENT-OFF* */
872 VLIB_REGISTER_NODE(ip4_map_t_node) = {
873  .function = ip4_map_t,
874  .name = "ip4-map-t",
875  .vector_size = sizeof(u32),
876  .format_trace = format_map_trace,
877  .type = VLIB_NODE_TYPE_INTERNAL,
878 
879  .n_errors = MAP_N_ERROR,
880  .error_strings = map_t_error_strings,
881 
882  .n_next_nodes = IP4_MAPT_N_NEXT,
883  .next_nodes = {
884  [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
885  [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
886  [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
887  [IP4_MAPT_NEXT_DROP] = "error-drop",
888  },
889 };
890 /* *INDENT-ON* */
891 
892 /*
893  * fd.io coding-style-patch-verification: ON
894  *
895  * Local Variables:
896  * eval: (c-set-style "gnu")
897  * End:
898  */
ip4_mapt_tcp_udp_next_t
Definition: ip4_map_t.c:39
#define map_ip4_reass_lock()
Definition: map.h:477
map_main_t map_main
Definition: map.c:27
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:213
ip4_address_t src_address
Definition: ip4_packet.h:169
#define IP6_FRAG_NODE_NAME
Definition: ip_frag.h:44
#define PREDICT_TRUE(x)
Definition: clib.h:106
u64 as_u64[2]
Definition: ip6_packet.h:51
#define NULL
Definition: clib.h:55
static_always_inline void ip4_map_t_classify(vlib_buffer_t *p0, map_domain_t *d0, ip4_header_t *ip40, u16 ip4_len0, i32 *dst_port0, u8 *error0, ip4_mapt_next_t *next0)
Definition: ip4_map_t.c:474
u32 thread_index
Definition: main.h:176
ip4_mapt_next_t
Definition: ip4_map_t.c:22
static_always_inline i32 ip4_map_fragment_get_port(ip4_header_t *ip4)
Definition: ip4_map_t.c:87
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:451
ip6_address_t src_address
Definition: ip6_packet.h:347
unsigned char u8
Definition: types.h:56
IPv4 to IPv6 translation.
static uword ip4_map_t_icmp(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map_t.c:170
static int ip4_to_ip6_set_cb(ip4_header_t *ip4, ip6_header_t *ip6, void *ctx)
Definition: ip4_map_t.c:269
static uword ip4_map_t(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map_t.c:534
static_always_inline void ip4_map_t_embedded_address(map_domain_t *d, ip6_address_t *ip6, const ip4_address_t *ip4)
Definition: map.h:534
#define static_always_inline
Definition: clib.h:93
ip4_address_t dst_address
Definition: ip4_packet.h:169
vlib_combined_counter_main_t * domain_counters
Definition: map.h:236
ip4_mapt_fragmented_next_t
Definition: ip4_map_t.c:47
static int ip4_get_fragment_offset(ip4_header_t *i)
Definition: ip4_packet.h:199
ip6_address_t * rules
Definition: map.h:87
u8 ea_bits_len
Definition: map.h:95
unsigned int u32
Definition: types.h:88
static u16 ip4_get_port(ip4_header_t *ip, u8 sender)
Get TCP/UDP port number or ICMP id from IPv4 packet.
Definition: ip4_to_ip6.h:51
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:464
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:108
bool is_ce
Definition: map.h:250
static_always_inline u64 map_get_pfx_net(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:374
unsigned short u16
Definition: types.h:57
map_domain_t * domains
Definition: map.h:232
static_always_inline u64 map_get_sfx_net(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:406
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:202
map_ip4_reass_t * map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id, u8 protocol, u32 **pi_to_drop)
Definition: map.c:1516
#define PREDICT_FALSE(x)
Definition: clib.h:105
static_always_inline map_domain_t * ip4_map_get_domain(u32 mdi)
Definition: map.h:425
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:364
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:135
map_domain_t * d
Definition: ip4_map_t.c:104
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:153
u16 n_vectors
Definition: node.h:380
vlib_main_t * vm
Definition: buffer.c:294
ip4_mapt_icmp_next_t
Definition: ip4_map_t.c:31
static int ip4_to_ip6_tcp_udp(vlib_buffer_t *p, ip4_to_ip6_set_fn_t fn, void *ctx)
Translate IPv4 UDP/TCP packet to IPv6.
Definition: ip4_to_ip6.h:501
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:89
static int ip4_is_first_fragment(ip4_header_t *i)
Definition: ip4_packet.h:219
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:454
#define foreach_map_error
Definition: map.h:308
signed int i32
Definition: types.h:81
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:492
static int ip4_to_ip6_set_inner_icmp_cb(ip4_header_t *ip4, ip6_header_t *ip6, void *arg)
Definition: ip4_map_t.c:137
long ctx[MAX_CONNS]
Definition: main.c:126
static int icmp_to_icmp6(vlib_buffer_t *p, ip4_to_ip6_set_fn_t fn, void *ctx, ip4_to_ip6_set_fn_t inner_fn, void *inner_ctx)
Translate ICMP4 packet to ICMP6.
Definition: ip4_to_ip6.h:220
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:215
#define map_ip4_reass_unlock()
Definition: map.h:478
u8 * format_map_trace(u8 *s, va_list *args)
Definition: map.c:1436
#define u8_ptr_add(ptr, index)
Definition: ip.h:68
Definition: defs.h:47
u16 mtu
Definition: map.h:91
static int ip4_to_ip6_set_icmp_cb(ip4_header_t *ip4, ip6_header_t *ip6, void *arg)
Definition: ip4_map_t.c:109
static uword ip4_map_t_tcp_udp(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map_t.c:343
i32 port
Definition: map.h:137
static uword ip4_map_t_fragmented(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map_t.c:282
u64 uword
Definition: types.h:112
static int ip4_to_ip6_fragmented(vlib_buffer_t *p, ip4_to_ip6_set_fn_t fn, void *ctx)
Translate IPv4 fragmented packet to IPv6.
Definition: ip4_to_ip6.h:450
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:267
A collection of combined counters.
Definition: counter.h:181
#define vnet_buffer(b)
Definition: buffer.h:360
typedef CLIB_PACKED(struct{ip6_address_t daddr;ip6_address_t saddr;u8 unused[28];})
Definition: ip4_map_t.c:58
u8 ip_version_and_header_length
Definition: ip4_packet.h:137
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
ip6_address_t dst_address
Definition: ip6_packet.h:347