FD.io VPP v18.11-rc0-18-g2a3fb1a
Vector Packet Processing
ip6_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip6_to_ip4.h>
19 #include <vnet/ip/ip4_to_ip6.h>
20 
21 #define IP6_MAP_T_DUAL_LOOP
22 
23 typedef enum
24 {
25   IP6_MAPT_NEXT_MAPT_TCP_UDP,
26   IP6_MAPT_NEXT_MAPT_ICMP,
27   IP6_MAPT_NEXT_MAPT_FRAGMENTED,
28   IP6_MAPT_NEXT_DROP,
29   IP6_MAPT_N_NEXT
30 } ip6_mapt_next_t;
31 
32 typedef enum
33 {
34   IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
35   IP6_MAPT_ICMP_NEXT_IP4_FRAG,
36   IP6_MAPT_ICMP_NEXT_DROP,
37   IP6_MAPT_ICMP_N_NEXT
38 } ip6_mapt_icmp_next_t;
39 
40 typedef enum
41 {
42   IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
43   IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
44   IP6_MAPT_TCP_UDP_NEXT_DROP,
45   IP6_MAPT_TCP_UDP_N_NEXT
46 } ip6_mapt_tcp_udp_next_t;
47 
48 typedef enum
49 {
50   IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
51   IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
52   IP6_MAPT_FRAGMENTED_NEXT_DROP,
53   IP6_MAPT_FRAGMENTED_N_NEXT
54 } ip6_mapt_fragmented_next_t;
55 
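/*
 * Added note: MAP-T needs the L4 port to derive the IPv4 source address,
 * but only the first IPv6 fragment carries an L4 header.  The first
 * fragment therefore caches its port in the shared IPv4 reassembly
 * structure (keyed on the translated addresses and fragment id), and
 * non-first fragments retrieve it with ip6_map_fragment_get() below.
 */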
56 static_always_inline int
57 ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
58                         map_domain_t * d, u16 port)
59 {
60   u32 *ignore = NULL;
61   map_ip4_reass_lock ();
62   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
63                                                        d->flags),
64                                           ip6_map_t_embedded_address (d,
65                                                                       &ip6->
66                                                                       dst_address),
67                                           frag_id_6to4 (frag->identification),
68                                           (ip6->protocol ==
69                                            IP_PROTOCOL_ICMP6) ?
70                                           IP_PROTOCOL_ICMP : ip6->protocol,
71                                           &ignore);
72   if (r)
73     r->port = port;
74 
75   map_ip4_reass_unlock ();
76   return !r;
77 }
78 
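/* Added note: non-first fragments consult the cache populated above; a
   lookup miss yields -1 and the packet is later dropped with a
   MAP_ERROR_FRAGMENT_* error. */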
79 /* Returns the associated port or -1 */
80 static_always_inline i32
81 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
82                       map_domain_t * d)
83 {
84   u32 *ignore = NULL;
85   map_ip4_reass_lock ();
86   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
87                                                        d->flags),
88                                           ip6_map_t_embedded_address (d,
89                                                                       &ip6->
90                                                                       dst_address),
91                                           frag_id_6to4 (frag->identification),
92                                           (ip6->protocol ==
93                                            IP_PROTOCOL_ICMP6) ?
94                                           IP_PROTOCOL_ICMP : ip6->protocol,
95                                           &ignore);
96   i32 ret = r ? r->port : -1;
97   map_ip4_reass_unlock ();
98   return ret;
99 }
100 
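/* Added note: context passed to the ICMP translation callbacks below; it
   carries the MAP domain and the port/ID extracted from the payload, used
   to validate and rewrite the outer and inner IPv4 addresses. */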
101 typedef struct
102 {
103   map_domain_t *d;
104   u16 id;
105 } icmp6_to_icmp_ctx_t;
106 
107 static int
108 ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
109 {
110  icmp6_to_icmp_ctx_t *ctx = arg;
111  map_main_t *mm = &map_main;
112 
113  if (mm->is_ce)
114  {
115  u32 ip4_dadr;
116 
117  //Security check
118  //Note that this prevents an intermediate IPv6 router from answering the request
119  ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
120  if (ip6->dst_address.as_u64[0] !=
121  map_get_pfx_net (ctx->d, ip4_dadr, ctx->id)
122  || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_dadr,
123  ctx->id))
124  return -1;
125 
126   ip4->src_address.as_u32 =
127     ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
128   ip4->dst_address.as_u32 = ip4_dadr;
129  }
130  else
131  {
132  u32 ip4_sadr;
133 
134  //Security check
135  //Note that this prevents an intermediate IPv6 router from answering the request
136  ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
137  if (ip6->src_address.as_u64[0] !=
138  map_get_pfx_net (ctx->d, ip4_sadr, ctx->id)
139  || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
140  ctx->id))
141  return -1;
142 
143   ip4->dst_address.as_u32 =
144     ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
145   ip4->src_address.as_u32 = ip4_sadr;
146  }
147 
148  return 0;
149 }
150 
151 static int
152 ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
153                               void *arg)
154 {
155  icmp6_to_icmp_ctx_t *ctx = arg;
156  map_main_t *mm = &map_main;
157 
158  if (mm->is_ce)
159  {
160  u32 inner_ip4_sadr;
161 
162  //Security check of inner packet
163  inner_ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
164  if (ip6->src_address.as_u64[0] !=
165  map_get_pfx_net (ctx->d, inner_ip4_sadr, ctx->id)
166  || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d,
167  inner_ip4_sadr,
168  ctx->id))
169  return -1;
170 
171  ip4->src_address.as_u32 = inner_ip4_sadr;
172   ip4->dst_address.as_u32 =
173     ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
174   }
175  else
176  {
177  u32 inner_ip4_dadr;
178 
179  //Security check of inner packet
180  inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
181  if (ip6->dst_address.as_u64[0] !=
182  map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->id)
183  || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
184  inner_ip4_dadr,
185  ctx->id))
186  return -1;
187 
188  ip4->dst_address.as_u32 = inner_ip4_dadr;
189   ip4->src_address.as_u32 =
190     ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
191   }
192 
193  return 0;
194 }
195 
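/*
 * Added note: ip6-map-t-icmp translates ICMPv6 to ICMPv4 using the
 * callbacks above, updates the per-domain RX counters, and hands packets
 * that exceed the domain MTU to the IPv4 fragmentation node.
 */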
196 static uword
197 ip6_map_t_icmp (vlib_main_t * vm,
198                 vlib_node_runtime_t * node, vlib_frame_t * frame)
199 {
200  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
201   vlib_node_runtime_t *error_node =
202     vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
203   from = vlib_frame_vector_args (frame);
204   n_left_from = frame->n_vectors;
205   next_index = node->cached_next_index;
206   vlib_combined_counter_main_t *cm = map_main.domain_counters;
207   u32 thread_index = vm->thread_index;
208 
209  while (n_left_from > 0)
210  {
211  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
212 
213  while (n_left_from > 0 && n_left_to_next > 0)
214  {
215  u32 pi0;
216  vlib_buffer_t *p0;
217  u8 error0;
218  ip6_mapt_icmp_next_t next0;
219  map_domain_t *d0;
220  u16 len0;
221  icmp6_to_icmp_ctx_t ctx0;
222  ip6_header_t *ip60;
223  icmp46_header_t *icmp0;
224 
225  pi0 = to_next[0] = from[0];
226  from += 1;
227  n_left_from -= 1;
228  to_next += 1;
229  n_left_to_next -= 1;
230   error0 = MAP_ERROR_NONE;
231   next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;
232 
233  p0 = vlib_get_buffer (vm, pi0);
234  ip60 = vlib_buffer_get_current (p0);
235  len0 = clib_net_to_host_u16 (ip60->payload_length);
236  icmp0 = (icmp46_header_t *) (ip60 + 1);
237   d0 =
238     pool_elt_at_index (map_main.domains,
239                        vnet_buffer (p0)->map_t.map_domain_index);
240 
241  ctx0.id =
242  ip6_get_port (ip60, icmp0->type == ICMP6_echo_request,
243  p0->current_length);
244  ctx0.d = d0;
245  if (ctx0.id == 0)
246  {
247  // In case of 1:1 mapping, we don't care about the port
248  if (!(d0->ea_bits_len == 0 && d0->rules))
249  {
250  error0 = MAP_ERROR_ICMP;
251  goto err0;
252  }
253  }
254 
255  if (icmp6_to_icmp
256   (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
257    ip6_to_ip4_set_inner_icmp_cb, &ctx0))
258   {
259  error0 = MAP_ERROR_ICMP;
260  goto err0;
261  }
262 
263  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
264  {
265  //Send to fragmentation node if necessary
266  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
267  vnet_buffer (p0)->ip_frag.header_offset = 0;
268   vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
269   next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
270   }
271  err0:
272  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
273   {
274   vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
275                                    thread_index,
276  vnet_buffer (p0)->
277  map_t.map_domain_index, 1,
278  len0);
279  }
280  else
281  {
282  next0 = IP6_MAPT_ICMP_NEXT_DROP;
283  }
284 
285  p0->error = error_node->errors[error0];
286  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
287  to_next, n_left_to_next, pi0,
288  next0);
289  }
290  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
291  }
292  return frame->n_vectors;
293 }
294 
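/* Added note: generic address-rewrite callback for the TCP/UDP and
   fragment paths; the translated IPv4 addresses were already computed
   and stashed in the buffer metadata (map_t.v6.saddr/daddr) by the
   ip6-map-t classifier node. */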
295 static int
296 ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
297 {
298  vlib_buffer_t *p = ctx;
299 
300  ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
301  ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;
302 
303  return 0;
304 }
305 
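/* Added note: ip6-map-t-fragmented translates non-first fragments, which
   carry no L4 header; addresses come from the buffer metadata set upstream. */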
306 static uword
307 ip6_map_t_fragmented (vlib_main_t * vm,
308                       vlib_node_runtime_t * node, vlib_frame_t * frame)
309 {
310  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
311  from = vlib_frame_vector_args (frame);
312  n_left_from = frame->n_vectors;
313  next_index = node->cached_next_index;
314   vlib_node_runtime_t *error_node =
315     vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);
316 
317  while (n_left_from > 0)
318  {
319  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
320 
321 #ifdef IP6_MAP_T_DUAL_LOOP
322  while (n_left_from >= 4 && n_left_to_next >= 2)
323  {
324  u32 pi0, pi1;
325  vlib_buffer_t *p0, *p1;
326  u32 next0, next1;
327 
328  pi0 = to_next[0] = from[0];
329  pi1 = to_next[1] = from[1];
330  from += 2;
331  n_left_from -= 2;
332  to_next += 2;
333  n_left_to_next -= 2;
334 
335   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
336   next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
337   p0 = vlib_get_buffer (vm, pi0);
338   p1 = vlib_get_buffer (vm, pi1);
339 
340   if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
341   {
342   p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
343   next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
344   }
345  else
346  {
347  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
348  {
349  //Send to fragmentation node if necessary
350  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
351  vnet_buffer (p0)->ip_frag.header_offset = 0;
352   vnet_buffer (p0)->ip_frag.next_index =
353     IP4_FRAG_NEXT_IP4_LOOKUP;
354   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
355   }
356  }
357 
358   if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
359   {
360   p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
361   next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
362   }
363  else
364  {
365  if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
366  {
367  //Send to fragmentation node if necessary
368  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
369  vnet_buffer (p1)->ip_frag.header_offset = 0;
370   vnet_buffer (p1)->ip_frag.next_index =
371     IP4_FRAG_NEXT_IP4_LOOKUP;
372   next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
373   }
374  }
375 
376  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
377  to_next, n_left_to_next, pi0, pi1,
378  next0, next1);
379  }
380 #endif
381 
382  while (n_left_from > 0 && n_left_to_next > 0)
383  {
384  u32 pi0;
385  vlib_buffer_t *p0;
386  u32 next0;
387 
388  pi0 = to_next[0] = from[0];
389  from += 1;
390  n_left_from -= 1;
391  to_next += 1;
392  n_left_to_next -= 1;
393 
394   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
395   p0 = vlib_get_buffer (vm, pi0);
396 
397   if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
398   {
399   p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
400   next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
401   }
402  else
403  {
404  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
405  {
406  //Send to fragmentation node if necessary
407  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
408  vnet_buffer (p0)->ip_frag.header_offset = 0;
409   vnet_buffer (p0)->ip_frag.next_index =
410     IP4_FRAG_NEXT_IP4_LOOKUP;
411   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
412   }
413  }
414 
415  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
416  to_next, n_left_to_next, pi0,
417  next0);
418  }
419  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
420  }
421  return frame->n_vectors;
422 }
423 
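/* Added note: ip6-map-t-tcp-udp performs the stateless TCP/UDP header
   translation, with a detour through the IPv4 fragmentation node when the
   translated packet exceeds the domain MTU. */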
424 static uword
425 ip6_map_t_tcp_udp (vlib_main_t * vm,
426                    vlib_node_runtime_t * node, vlib_frame_t * frame)
427 {
428  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
429   vlib_node_runtime_t *error_node =
430     vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);
431 
432  from = vlib_frame_vector_args (frame);
433  n_left_from = frame->n_vectors;
434  next_index = node->cached_next_index;
435  while (n_left_from > 0)
436  {
437  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
438 
439 #ifdef IP6_MAP_T_DUAL_LOOP
440  while (n_left_from >= 4 && n_left_to_next >= 2)
441  {
442  u32 pi0, pi1;
443  vlib_buffer_t *p0, *p1;
444  ip6_mapt_tcp_udp_next_t next0, next1;
445 
446  pi0 = to_next[0] = from[0];
447  pi1 = to_next[1] = from[1];
448  from += 2;
449  n_left_from -= 2;
450  to_next += 2;
451  n_left_to_next -= 2;
452   next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
453   next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
454 
455  p0 = vlib_get_buffer (vm, pi0);
456  p1 = vlib_get_buffer (vm, pi1);
457 
458  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
459  {
460  p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
461   next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
462   }
463  else
464  {
465  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
466  {
467  //Send to fragmentation node if necessary
468  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
469  vnet_buffer (p0)->ip_frag.header_offset = 0;
470  vnet_buffer (p0)->ip_frag.next_index =
471     IP4_FRAG_NEXT_IP4_LOOKUP;
472   next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
473   }
474  }
475 
476  if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
477  {
478  p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
479   next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
480   }
481  else
482  {
483  if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
484  {
485  //Send to fragmentation node if necessary
486  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
487  vnet_buffer (p1)->ip_frag.header_offset = 0;
488  vnet_buffer (p1)->ip_frag.next_index =
489     IP4_FRAG_NEXT_IP4_LOOKUP;
490   next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
491   }
492  }
493 
494  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
495  n_left_to_next, pi0, pi1, next0,
496  next1);
497  }
498 #endif
499 
500  while (n_left_from > 0 && n_left_to_next > 0)
501  {
502  u32 pi0;
503  vlib_buffer_t *p0;
504   ip6_mapt_tcp_udp_next_t next0;
505 
506  pi0 = to_next[0] = from[0];
507  from += 1;
508  n_left_from -= 1;
509  to_next += 1;
510  n_left_to_next -= 1;
511   next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
512 
513  p0 = vlib_get_buffer (vm, pi0);
514 
515  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
516  {
517  p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
518   next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
519   }
520  else
521  {
522  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
523  {
524  //Send to fragmentation node if necessary
525  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
526  vnet_buffer (p0)->ip_frag.header_offset = 0;
527  vnet_buffer (p0)->ip_frag.next_index =
528     IP4_FRAG_NEXT_IP4_LOOKUP;
529   next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
530   }
531  }
532 
533  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
534  to_next, n_left_to_next, pi0,
535  next0);
536  }
537  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
538  }
539  return frame->n_vectors;
540 }
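/*
 * Added note: per-packet classification.  Determines the next node
 * (TCP/UDP, ICMP or fragmented), extracts the MAP port used for the
 * security check, and records the L4 checksum offset.  On the CE side
 * (mm->is_ce) the destination port is read instead of the source port
 * (port_offset = 2).
 */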
541 
542 static_always_inline void
543 ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
544                     map_domain_t * d0, i32 * map_port0,
545  u8 * error0, ip6_mapt_next_t * next0,
546  u32 l4_len0, ip6_frag_hdr_t * frag0)
547 {
548  map_main_t *mm = &map_main;
549  u32 port_offset;
550 
551  if (mm->is_ce)
552  port_offset = 2;
553  else
554  port_offset = 0;
555 
556  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
557  ip6_frag_hdr_offset (frag0)))
558  {
559   *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
560   if (d0->ea_bits_len == 0 && d0->rules)
561  {
562  *map_port0 = 0;
563  }
564  else
565  {
566  *map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
567  *error0 = (*map_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
568  }
569  }
570  else
571  if (PREDICT_TRUE
572  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
573  {
574  *error0 =
575  l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
576  vnet_buffer (p0)->map_t.checksum_offset =
577  vnet_buffer (p0)->map_t.v6.l4_offset + 16;
578   *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
579   *map_port0 =
580  (i32) *
581  ((u16 *)
582  u8_ptr_add (ip60,
583  vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
584  }
585  else
586  if (PREDICT_TRUE
587  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
588  {
589  *error0 =
590  l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
591  vnet_buffer (p0)->map_t.checksum_offset =
592  vnet_buffer (p0)->map_t.v6.l4_offset + 6;
593   *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
594   *map_port0 =
595  (i32) *
596  ((u16 *)
597  u8_ptr_add (ip60,
598  vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
599  }
600  else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
601  {
602  *error0 =
603  l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
604  *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
605  if (d0->ea_bits_len == 0 && d0->rules)
606  {
607  *map_port0 = 0;
608  }
609  else
610  if (((icmp46_header_t *)
611  u8_ptr_add (ip60,
612  vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
613  ICMP6_echo_reply
614  || ((icmp46_header_t *)
615  u8_ptr_add (ip60,
616  vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
617  ICMP6_echo_request)
618  {
619  *map_port0 =
620  (i32) *
621  ((u16 *)
622  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
623  }
624  }
625  else
626  {
627  //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
628  *error0 = MAP_ERROR_BAD_PROTOCOL;
629  }
630 }
631 
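/*
 * Added note: ip6-map-t is the entry node.  It resolves the MAP domain,
 * computes the translated IPv4 addresses into the buffer metadata,
 * performs the BR/CE security check and first-fragment port caching, and
 * then dispatches to the protocol-specific translation nodes registered
 * at the end of this file.
 */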
632 static uword
633 ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
634 {
635  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
636   vlib_node_runtime_t *error_node =
637     vlib_node_get_runtime (vm, ip6_map_t_node.index);
638   map_main_t *mm = &map_main;
639   vlib_combined_counter_main_t *cm = map_main.domain_counters;
640   u32 thread_index = vm->thread_index;
641 
642  from = vlib_frame_vector_args (frame);
643  n_left_from = frame->n_vectors;
644  next_index = node->cached_next_index;
645  while (n_left_from > 0)
646  {
647  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
648 
649 #ifdef IP6_MAP_T_DUAL_LOOP
650  while (n_left_from >= 4 && n_left_to_next >= 2)
651  {
652  u32 pi0, pi1;
653  vlib_buffer_t *p0, *p1;
654  ip6_header_t *ip60, *ip61;
655  u8 error0, error1;
656  ip6_mapt_next_t next0, next1;
657  u32 l4_len0, l4_len1;
658  i32 map_port0, map_port1;
659  map_domain_t *d0, *d1;
660  ip6_frag_hdr_t *frag0, *frag1;
661  next0 = next1 = 0; //Because compiler whines
662 
663  pi0 = to_next[0] = from[0];
664  pi1 = to_next[1] = from[1];
665  from += 2;
666  n_left_from -= 2;
667  to_next += 2;
668  n_left_to_next -= 2;
669 
670  error0 = MAP_ERROR_NONE;
671  error1 = MAP_ERROR_NONE;
672 
673  p0 = vlib_get_buffer (vm, pi0);
674  p1 = vlib_get_buffer (vm, pi1);
675  ip60 = vlib_buffer_get_current (p0);
676  ip61 = vlib_buffer_get_current (p1);
677 
678  if (mm->is_ce)
679  {
680  u32 daddr0, daddr1;
681  daddr0 = 0; /* TODO */
682  daddr1 = 0; /* TODO */
683  /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
684 
685  daddr0 = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
686  daddr1 = map_get_ip4 (&ip61->dst_address, 0 /*TODO*/);
687  d0 =
688  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
689  (ip4_address_t *) & daddr0,
690  &vnet_buffer (p0)->map_t.map_domain_index,
691  &error0);
692  d1 =
693  ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
694  (ip4_address_t *) & daddr1,
695  &vnet_buffer (p1)->map_t.map_domain_index,
696  &error1);
697 
698  daddr0 = map_get_ip4 (&ip60->dst_address, d0->flags);
699  daddr1 = map_get_ip4 (&ip61->dst_address, d1->flags);
700 
701  vnet_buffer (p0)->map_t.v6.daddr = daddr0;
702  vnet_buffer (p1)->map_t.v6.daddr = daddr1;
703   vnet_buffer (p0)->map_t.v6.saddr =
704     ip6_map_t_embedded_address (d0, &ip60->src_address);
705   vnet_buffer (p1)->map_t.v6.saddr =
706     ip6_map_t_embedded_address (d1, &ip61->src_address);
707   }
708  else
709  {
710  u32 saddr0, saddr1;
711  saddr0 = 0; /* TODO */
712  saddr1 = 0; /* TODO */
713  /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
714 
715  saddr0 = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
716  saddr1 = map_get_ip4 (&ip61->src_address, 0 /*TODO*/);
717  d0 =
718  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
719  (ip4_address_t *) & saddr0,
720  &vnet_buffer (p0)->map_t.map_domain_index,
721  &error0);
722  d1 =
723  ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
724  (ip4_address_t *) & saddr1,
725  &vnet_buffer (p1)->map_t.map_domain_index,
726  &error1);
727 
728  saddr0 = map_get_ip4 (&ip60->src_address, d0->flags);
729  saddr1 = map_get_ip4 (&ip61->src_address, d1->flags);
730 
731  vnet_buffer (p0)->map_t.v6.saddr = saddr0;
732  vnet_buffer (p1)->map_t.v6.saddr = saddr1;
733   vnet_buffer (p0)->map_t.v6.daddr =
734     ip6_map_t_embedded_address (d0, &ip60->dst_address);
735   vnet_buffer (p1)->map_t.v6.daddr =
736     ip6_map_t_embedded_address (d1, &ip61->dst_address);
737   }
738 
739  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
740  vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
741 
742  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
743  &(vnet_buffer (p0)->map_t.
744  v6.l4_protocol),
745  &(vnet_buffer (p0)->map_t.
746  v6.l4_offset),
747  &(vnet_buffer (p0)->map_t.
748  v6.frag_offset))))
749  {
750  error0 = MAP_ERROR_MALFORMED;
751  next0 = IP6_MAPT_NEXT_DROP;
752  }
753 
754  if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
755  &(vnet_buffer (p1)->map_t.
756  v6.l4_protocol),
757  &(vnet_buffer (p1)->map_t.
758  v6.l4_offset),
759  &(vnet_buffer (p1)->map_t.
760  v6.frag_offset))))
761  {
762  error1 = MAP_ERROR_MALFORMED;
763  next1 = IP6_MAPT_NEXT_DROP;
764  }
765 
766  map_port0 = map_port1 = -1;
767  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
768  sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
769  l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
770  sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
771  frag0 =
772  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
773  vnet_buffer (p0)->map_t.
774  v6.frag_offset);
775  frag1 =
776  (ip6_frag_hdr_t *) u8_ptr_add (ip61,
777  vnet_buffer (p1)->map_t.
778  v6.frag_offset);
779 
780  ip6_map_t_classify (p0, ip60, d0, &map_port0, &error0, &next0,
781  l4_len0, frag0);
782  ip6_map_t_classify (p1, ip61, d1, &map_port1, &error1, &next1,
783  l4_len1, frag1);
784 
785  if (PREDICT_FALSE
786  ((map_port0 != -1)
787  && (ip60->src_address.as_u64[0] !=
788  map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
789  map_port0)
790  || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
791   vnet_buffer
792   (p0)->map_t.v6.saddr,
793  map_port0))))
794  {
795  error0 = MAP_ERROR_SEC_CHECK;
796  }
797 
798  if (PREDICT_FALSE
799  ((map_port1 != -1)
800  && (ip61->src_address.as_u64[0] !=
801  map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
802  map_port1)
803  || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
804   vnet_buffer
805   (p1)->map_t.v6.saddr,
806  map_port1))))
807  {
808  error1 = MAP_ERROR_SEC_CHECK;
809  }
810 
811  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
812  !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
813  u8_ptr_add (ip60,
814   vnet_buffer
815   (p0)->map_t.
816  v6.frag_offset)))
817  && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
818  && (error0 == MAP_ERROR_NONE))
819  {
820   ip6_map_fragment_cache (ip60,
821   (ip6_frag_hdr_t *) u8_ptr_add (ip60,
822   vnet_buffer
823   (p0)->map_t.
824  v6.frag_offset),
825  d0, map_port0);
826  }
827 
828  if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
829  !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
830  u8_ptr_add (ip61,
831   vnet_buffer
832   (p1)->map_t.
833  v6.frag_offset)))
834  && (map_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
835  && (error1 == MAP_ERROR_NONE))
836  {
837   ip6_map_fragment_cache (ip61,
838   (ip6_frag_hdr_t *) u8_ptr_add (ip61,
839   vnet_buffer
840   (p1)->map_t.
841  v6.frag_offset),
842  d1, map_port1);
843  }
844 
845  if (PREDICT_TRUE
846  (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
847  {
848   vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
849                                    thread_index,
850  vnet_buffer (p0)->
851  map_t.map_domain_index, 1,
852  clib_net_to_host_u16
853  (ip60->payload_length));
854  }
855 
856  if (PREDICT_TRUE
857  (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
858  {
859   vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
860                                    thread_index,
861  vnet_buffer (p1)->
862  map_t.map_domain_index, 1,
863  clib_net_to_host_u16
864  (ip61->payload_length));
865  }
866 
867  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
868  next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
869  p0->error = error_node->errors[error0];
870  p1->error = error_node->errors[error1];
871  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
872  n_left_to_next, pi0, pi1, next0,
873  next1);
874  }
875 #endif
876 
877  while (n_left_from > 0 && n_left_to_next > 0)
878  {
879  u32 pi0;
880  vlib_buffer_t *p0;
881  ip6_header_t *ip60;
882  u8 error0;
883  u32 l4_len0;
884  i32 map_port0;
885  map_domain_t *d0;
886  ip6_frag_hdr_t *frag0;
887  u32 port_offset;
888  ip6_mapt_next_t next0 = 0;
889 
890  pi0 = to_next[0] = from[0];
891  from += 1;
892  n_left_from -= 1;
893  to_next += 1;
894  n_left_to_next -= 1;
895  error0 = MAP_ERROR_NONE;
896 
897  p0 = vlib_get_buffer (vm, pi0);
898  ip60 = vlib_buffer_get_current (p0);
899 
900  if (mm->is_ce)
901  {
902  u32 daddr;
903  //Save daddr in a different variable to not overwrite ip.adj_index
904  daddr = 0; /* TODO */
905  /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
906 
907  daddr = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
908  d0 =
909  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
910  (ip4_address_t *) & daddr,
911  &vnet_buffer (p0)->map_t.map_domain_index,
912  &error0);
913 
914  daddr = map_get_ip4 (&ip60->dst_address, d0->flags);
915 
916  //FIXME: What if d0 is null
917  vnet_buffer (p0)->map_t.v6.daddr = daddr;
918  vnet_buffer (p0)->map_t.v6.saddr =
919     ip6_map_t_embedded_address (d0, &ip60->src_address);
920 
921  port_offset = 2;
922  }
923  else
924  {
925  u32 saddr;
926  //Save saddr in a different variable to not overwrite ip.adj_index
927  saddr = 0; /* TODO */
928  /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
929 
930  saddr = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
931  d0 =
932  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
933  (ip4_address_t *) & saddr,
934  &vnet_buffer (p0)->map_t.map_domain_index,
935  &error0);
936 
937  saddr = map_get_ip4 (&ip60->src_address, d0->flags);
938 
939  //FIXME: What if d0 is null
940  vnet_buffer (p0)->map_t.v6.saddr = saddr;
941  vnet_buffer (p0)->map_t.v6.daddr =
942     ip6_map_t_embedded_address (d0, &ip60->dst_address);
943 
944  port_offset = 0;
945  }
946 
947  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
948 
949  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
950  &(vnet_buffer (p0)->map_t.
951  v6.l4_protocol),
952  &(vnet_buffer (p0)->map_t.
953  v6.l4_offset),
954  &(vnet_buffer (p0)->map_t.
955  v6.frag_offset))))
956  {
957  error0 = MAP_ERROR_MALFORMED;
958  next0 = IP6_MAPT_NEXT_DROP;
959  }
960 
961  map_port0 = -1;
962  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
963  sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
964  frag0 =
965  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
966  vnet_buffer (p0)->map_t.
967  v6.frag_offset);
968 
969 
970  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
971  ip6_frag_hdr_offset (frag0)))
972  {
973  map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
974  error0 = (map_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
975   next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
976   }
977  else
978  if (PREDICT_TRUE
979  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
980  {
981  error0 =
982  l4_len0 <
983  sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
984  vnet_buffer (p0)->map_t.checksum_offset =
985  vnet_buffer (p0)->map_t.v6.l4_offset + 16;
986   next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
987   map_port0 =
988  (i32) *
989  ((u16 *)
990  u8_ptr_add (ip60,
991  vnet_buffer (p0)->map_t.v6.l4_offset +
992  port_offset));
993  }
994  else
995  if (PREDICT_TRUE
996  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
997  {
998  error0 =
999  l4_len0 <
1000  sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
1001  vnet_buffer (p0)->map_t.checksum_offset =
1002  vnet_buffer (p0)->map_t.v6.l4_offset + 6;
1003   next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
1004   map_port0 =
1005  (i32) *
1006  ((u16 *)
1007  u8_ptr_add (ip60,
1008  vnet_buffer (p0)->map_t.v6.l4_offset +
1009  port_offset));
1010  }
1011  else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
1012  IP_PROTOCOL_ICMP6)
1013  {
1014  error0 =
1015  l4_len0 <
1016  sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
1017  next0 = IP6_MAPT_NEXT_MAPT_ICMP;
1018  if (((icmp46_header_t *)
1019  u8_ptr_add (ip60,
1020  vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
1021  ICMP6_echo_reply
1022  || ((icmp46_header_t *)
1023  u8_ptr_add (ip60,
1024  vnet_buffer (p0)->map_t.v6.
1025  l4_offset))->code == ICMP6_echo_request)
1026  map_port0 =
1027  (i32) *
1028  ((u16 *)
1029  u8_ptr_add (ip60,
1030  vnet_buffer (p0)->map_t.v6.l4_offset + 6));
1031  }
1032  else
1033  {
1034  //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
1035  error0 = MAP_ERROR_BAD_PROTOCOL;
1036  }
1037 
1038  //Security check
1039  if (PREDICT_FALSE
1040  ((!mm->is_ce) && (map_port0 != -1)
1041  && (ip60->src_address.as_u64[0] !=
1042  map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
1043  map_port0)
1044  || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
1045  vnet_buffer
1046  (p0)->map_t.v6.saddr,
1047  map_port0))))
1048  {
1049  //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
1050  error0 = MAP_ERROR_SEC_CHECK;
1051  }
1052 
1053  //Fragmented first packet needs to be cached for following packets
1054  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
1055  !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
1056  u8_ptr_add (ip60,
1057  vnet_buffer
1058  (p0)->map_t.
1059  v6.frag_offset)))
1060  && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
1061  && (error0 == MAP_ERROR_NONE))
1062  {
1063  ip6_map_fragment_cache (ip60,
1064  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
1065  vnet_buffer
1066  (p0)->map_t.
1067  v6.frag_offset),
1068  d0, map_port0);
1069  }
1070 
1071  if (PREDICT_TRUE
1072  (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
1073  {
1074   vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
1075                                    thread_index,
1076  vnet_buffer (p0)->
1077  map_t.map_domain_index, 1,
1078  clib_net_to_host_u16
1079  (ip60->payload_length));
1080  }
1081 
1082  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
1083  p0->error = error_node->errors[error0];
1084  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1085  to_next, n_left_to_next, pi0,
1086  next0);
1087  }
1088  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1089  }
1090  return frame->n_vectors;
1091 }
1092 
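/* Added note: the error strings are generated from the foreach_map_error
   list in map.h; the node registrations below wire each translation stage
   into the VPP packet-processing graph. */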
1093 static char *map_t_error_strings[] = {
1094 #define _(sym,string) string,
1095  foreach_map_error
1096 #undef _
1097 };
1098 
1099 /* *INDENT-OFF* */
1100 VLIB_REGISTER_NODE (ip6_map_t_fragmented_node) = {
1101  .function = ip6_map_t_fragmented,
1102  .name = "ip6-map-t-fragmented",
1103  .vector_size = sizeof (u32),
1104  .format_trace = format_map_trace,
1105  .type = VLIB_NODE_TYPE_INTERNAL,
1106 
1107  .n_errors = MAP_N_ERROR,
1108  .error_strings = map_t_error_strings,
1109 
1110  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
1111  .next_nodes = {
1112  [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
1113  [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1114  [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
1115  },
1116 };
1117 /* *INDENT-ON* */
1118 
1119 /* *INDENT-OFF* */
1120 VLIB_REGISTER_NODE (ip6_map_t_icmp_node) = {
1121  .function = ip6_map_t_icmp,
1122  .name = "ip6-map-t-icmp",
1123  .vector_size = sizeof (u32),
1124  .format_trace = format_map_trace,
1125  .type = VLIB_NODE_TYPE_INTERNAL,
1126 
1127  .n_errors = MAP_N_ERROR,
1128  .error_strings = map_t_error_strings,
1129 
1130  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
1131  .next_nodes = {
1132  [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1133  [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1134  [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
1135  },
1136 };
1137 /* *INDENT-ON* */
1138 
1139 /* *INDENT-OFF* */
1140 VLIB_REGISTER_NODE (ip6_map_t_tcp_udp_node) = {
1141  .function = ip6_map_t_tcp_udp,
1142  .name = "ip6-map-t-tcp-udp",
1143  .vector_size = sizeof (u32),
1144  .format_trace = format_map_trace,
1145  .type = VLIB_NODE_TYPE_INTERNAL,
1146 
1147  .n_errors = MAP_N_ERROR,
1148  .error_strings = map_t_error_strings,
1149 
1150  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
1151  .next_nodes = {
1152  [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1153  [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1154  [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
1155  },
1156 };
1157 /* *INDENT-ON* */
1158 
1159 /* *INDENT-OFF* */
1160 VLIB_REGISTER_NODE (ip6_map_t_node) = {
1161  .function = ip6_map_t,
1162  .name = "ip6-map-t",
1163  .vector_size = sizeof(u32),
1164  .format_trace = format_map_trace,
1165  .type = VLIB_NODE_TYPE_INTERNAL,
1166 
1167  .n_errors = MAP_N_ERROR,
1168  .error_strings = map_t_error_strings,
1169 
1170  .n_next_nodes = IP6_MAPT_N_NEXT,
1171  .next_nodes = {
1172  [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
1173  [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
1174  [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
1175  [IP6_MAPT_NEXT_DROP] = "error-drop",
1176  },
1177 };
1178 /* *INDENT-ON* */
1179 
1180 /*
1181  * fd.io coding-style-patch-verification: ON
1182  *
1183  * Local Variables:
1184  * eval: (c-set-style "gnu")
1185  * End:
1186  */