FD.io VPP  v17.01.1-3-gc6833f8
Vector Packet Processing
ip6_map.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include "../ip/ip_frag.h"
18 
19 enum ip6_map_next_e
20 {
21  IP6_MAP_NEXT_IP4_LOOKUP,
22 #ifdef MAP_SKIP_IP6_LOOKUP
23  IP6_MAP_NEXT_IP4_REWRITE,
24 #endif
25  IP6_MAP_NEXT_IP6_REASS,
26  IP6_MAP_NEXT_IP4_REASS,
27  IP6_MAP_NEXT_IP4_FRAGMENT,
28  IP6_MAP_NEXT_IP6_ICMP_RELAY,
29  IP6_MAP_NEXT_IP6_LOCAL,
30  IP6_MAP_NEXT_DROP,
31  IP6_MAP_NEXT_ICMP,
32  IP6_MAP_N_NEXT,
33 };
34 
35 enum ip6_map_ip6_reass_next_e
36 {
37  IP6_MAP_IP6_REASS_NEXT_IP6_MAP,
38  IP6_MAP_IP6_REASS_NEXT_DROP,
39  IP6_MAP_IP6_REASS_N_NEXT,
40 };
41 
42 enum ip6_map_ip4_reass_next_e
43 {
44  IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP,
45  IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT,
46  IP6_MAP_IP4_REASS_NEXT_DROP,
47  IP6_MAP_IP4_REASS_N_NEXT,
48 };
49 
50 enum ip6_icmp_relay_next_e
51 {
52  IP6_ICMP_RELAY_NEXT_IP4_LOOKUP,
53  IP6_ICMP_RELAY_NEXT_DROP,
54  IP6_ICMP_RELAY_N_NEXT,
55 };
56 
57 vlib_node_registration_t ip6_map_ip4_reass_node;
58 vlib_node_registration_t ip6_map_ip6_reass_node;
59 static vlib_node_registration_t ip6_map_icmp_relay_node;
60 
61 typedef struct
62 {
63  u32 map_domain_index;
64  u16 port;
65  u8 cached;
66 } map_ip6_map_ip4_reass_trace_t;
67 
68 u8 *
69 format_ip6_map_ip4_reass_trace (u8 * s, va_list * args)
70 {
71  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
72  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
73  map_ip6_map_ip4_reass_trace_t *t =
74  va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
75  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
76  t->map_domain_index, t->port,
77  t->cached ? "cached" : "forwarded");
78 }
79 
80 typedef struct
81 {
82  u16 offset;
83  u16 frag_len;
84  u8 out;
85 } map_ip6_map_ip6_reass_trace_t;
86 
87 u8 *
88 format_ip6_map_ip6_reass_trace (u8 * s, va_list * args)
89 {
90  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
91  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
92  map_ip6_map_ip6_reass_trace_t *t =
93  va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
94  return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset,
95  t->frag_len, t->out ? "out" : "in");
96 }
97 
98 /*
99  * ip6_map_sec_check
100  */
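/*
 * Anti-spoof check on decapsulation: the expected IPv6 source is recomputed
 * from the inner IPv4 source address and port via map_get_pfx()/map_get_sfx()
 * and compared against both 64-bit halves of the actual IPv6 source address.
 */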
101 static_always_inline bool
102 ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4,
103  ip6_header_t * ip6)
104 {
105  u16 sp4 = clib_net_to_host_u16 (port);
106  u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32);
107  u64 sal6 = map_get_pfx (d, sa4, sp4);
108  u64 sar6 = map_get_sfx (d, sa4, sp4);
109 
110  if (PREDICT_FALSE
111  (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0])
112  || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1])))
113  return (false);
114  return (true);
115 }
116 
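/*
 * Inbound security policy: only meaningful for domains with embedded address
 * bits or explicit rules and a non-zero PSID length. For non-fragmented
 * packets the source port is extracted and, if sec_check is enabled,
 * validated with ip6_map_sec_check(); a missing port is a protocol error.
 * Fragments are diverted to ip6-map-ip4-reass when sec_check_frag is set.
 */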
117 static_always_inline void
118 ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4,
119  ip6_header_t * ip6, u32 * next, u8 * error)
120 {
121  map_main_t *mm = &map_main;
122  if (d->ea_bits_len || d->rules)
123  {
124  if (d->psid_length > 0)
125  {
126  if (!ip4_is_fragment (ip4))
127  {
128  u16 port = ip4_map_get_port (ip4, MAP_SENDER);
129  if (port)
130  {
131  if (mm->sec_check)
132  *error =
133  ip6_map_sec_check (d, port, ip4,
134  ip6) ? MAP_ERROR_NONE :
135  MAP_ERROR_DECAP_SEC_CHECK;
136  }
137  else
138  {
139  *error = MAP_ERROR_BAD_PROTOCOL;
140  }
141  }
142  else
143  {
144  *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
145  }
146  }
147  }
148 }
149 
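/*
 * Optional fast path (MAP_SKIP_IP6_LOOKUP): if a pre-resolved IPv4 adjacency
 * is configured in map_main, pick one of its sub-adjacencies by flow hash,
 * store it in the buffer's TX adjacency and let the packet skip ip4-lookup
 * (the caller then selects the ip4-rewrite next node).
 */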
150 static_always_inline bool
151 ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
152 {
153 #ifdef MAP_SKIP_IP6_LOOKUP
154  map_main_t *mm = &map_main;
155  u32 adj_index0 = mm->adj4_index;
156  if (adj_index0 > 0)
157  {
158  ip_lookup_main_t *lm4 = &ip4_main.lookup_main;
159  ip_adjacency_t *adj = ip_get_adjacency (lm4, mm->adj4_index);
160  if (adj->n_adj > 1)
161  {
162  u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT);
163  adj_index0 += (hash_c0 & (adj->n_adj - 1));
164  }
165  vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0;
166  return (true);
167  }
168 #endif
169  return (false);
170 }
171 
172 /*
173  * ip6_map
174  */
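/*
 * Inbound node: strips the outer IPv6 header from IPv4-in-IPv6 packets,
 * looks up the MAP domain, runs the per-domain security check and hands the
 * inner IPv4 packet to ip4-lookup, the ip4-rewrite bypass or ip4-frag.
 * IPv6 fragments go to ip6-map-ip6-reass, ICMPv6 echo traffic to ip6-local
 * and other ICMPv6 messages to the ICMP relay node.
 */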
175 static uword
176 ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
177 {
178  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
179  vlib_node_runtime_t *error_node =
180  vlib_node_get_runtime (vm, ip6_map_node.index);
181  map_main_t *mm = &map_main;
182  vlib_combined_counter_main_t *cm = mm->domain_counters;
183  u32 cpu_index = os_get_cpu_number ();
184 
185  from = vlib_frame_vector_args (frame);
186  n_left_from = frame->n_vectors;
187  next_index = node->cached_next_index;
188  while (n_left_from > 0)
189  {
190  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
191 
192  /* Dual loop */
193  while (n_left_from >= 4 && n_left_to_next >= 2)
194  {
195  u32 pi0, pi1;
196  vlib_buffer_t *p0, *p1;
197  u8 error0 = MAP_ERROR_NONE;
198  u8 error1 = MAP_ERROR_NONE;
199  map_domain_t *d0 = 0, *d1 = 0;
200  ip4_header_t *ip40, *ip41;
201  ip6_header_t *ip60, *ip61;
202  u16 port0 = 0, port1 = 0;
203  u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
204  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
205  u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;
206 
207  /* Prefetch next iteration. */
208  {
209  vlib_buffer_t *p2, *p3;
210 
211  p2 = vlib_get_buffer (vm, from[2]);
212  p3 = vlib_get_buffer (vm, from[3]);
213 
214  vlib_prefetch_buffer_header (p2, LOAD);
215  vlib_prefetch_buffer_header (p3, LOAD);
216 
217  /* IPv6 + IPv4 header + 8 bytes of ULP */
218  CLIB_PREFETCH (p2->data, 68, LOAD);
219  CLIB_PREFETCH (p3->data, 68, LOAD);
220  }
221 
222  pi0 = to_next[0] = from[0];
223  pi1 = to_next[1] = from[1];
224  from += 2;
225  n_left_from -= 2;
226  to_next += 2;
227  n_left_to_next -= 2;
228 
229  p0 = vlib_get_buffer (vm, pi0);
230  p1 = vlib_get_buffer (vm, pi1);
231  ip60 = vlib_buffer_get_current (p0);
232  ip61 = vlib_buffer_get_current (p1);
233  vlib_buffer_advance (p0, sizeof (ip6_header_t));
234  vlib_buffer_advance (p1, sizeof (ip6_header_t));
235  ip40 = vlib_buffer_get_current (p0);
236  ip41 = vlib_buffer_get_current (p1);
237 
238  /*
239  * Encapsulated IPv4 packet
240  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
241  * - Lookup/Rewrite or Fragment node in case of packet > MTU
242  * Fragmented IPv6 packet
243  * ICMP IPv6 packet
244  * - Error -> Pass to ICMPv6/ICMPv4 relay
245  * - Info -> Pass to IPv6 local
246  * Anything else -> drop
247  */
248  if (PREDICT_TRUE
249  (ip60->protocol == IP_PROTOCOL_IP_IN_IP
250  && clib_net_to_host_u16 (ip60->payload_length) > 20))
251  {
252  d0 =
253  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
254  (ip4_address_t *) & ip40->src_address.
255  as_u32, &map_domain_index0, &error0);
256  }
257  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
258  clib_net_to_host_u16 (ip60->payload_length) >
259  sizeof (icmp46_header_t))
260  {
261  icmp46_header_t *icmp = (void *) (ip60 + 1);
262  next0 = (icmp->type == ICMP6_echo_request
263  || icmp->type ==
264  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
265  IP6_MAP_NEXT_IP6_ICMP_RELAY;
266  }
267  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
268  {
269  next0 = IP6_MAP_NEXT_IP6_REASS;
270  }
271  else
272  {
273  error0 = MAP_ERROR_BAD_PROTOCOL;
274  }
275  if (PREDICT_TRUE
276  (ip61->protocol == IP_PROTOCOL_IP_IN_IP
277  && clib_net_to_host_u16 (ip61->payload_length) > 20))
278  {
279  d1 =
280  ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
281  (ip4_address_t *) & ip41->src_address.
282  as_u32, &map_domain_index1, &error1);
283  }
284  else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
285  clib_net_to_host_u16 (ip61->payload_length) >
286  sizeof (icmp46_header_t))
287  {
288  icmp46_header_t *icmp = (void *) (ip61 + 1);
289  next1 = (icmp->type == ICMP6_echo_request
290  || icmp->type ==
291  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
292  IP6_MAP_NEXT_IP6_ICMP_RELAY;
293  }
294  else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
295  {
296  next1 = IP6_MAP_NEXT_IP6_REASS;
297  }
298  else
299  {
300  error1 = MAP_ERROR_BAD_PROTOCOL;
301  }
302 
303  if (d0)
304  {
305  /* MAP inbound security check */
306  ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
307 
308  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
309  next0 == IP6_MAP_NEXT_IP4_LOOKUP))
310  {
311  if (PREDICT_FALSE
312  (d0->mtu
313  && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
314  {
315  vnet_buffer (p0)->ip_frag.header_offset = 0;
316  vnet_buffer (p0)->ip_frag.flags = 0;
317  vnet_buffer (p0)->ip_frag.next_index =
318  IP4_FRAG_NEXT_IP4_LOOKUP;
319  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
320  next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
321  }
322  else
323  {
324  next0 =
325  ip6_map_ip4_lookup_bypass (p0,
326  ip40) ?
327  IP6_MAP_NEXT_IP4_REWRITE : next0;
328  }
329  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
330  cpu_index,
331  map_domain_index0, 1,
332  clib_net_to_host_u16
333  (ip40->length));
334  }
335  }
336  if (d1)
337  {
338  /* MAP inbound security check */
339  ip6_map_security_check (d1, ip41, ip61, &next1, &error1);
340 
341  if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
342  next1 == IP6_MAP_NEXT_IP4_LOOKUP))
343  {
344  if (PREDICT_FALSE
345  (d1->mtu
346  && (clib_host_to_net_u16 (ip41->length) > d1->mtu)))
347  {
348  vnet_buffer (p1)->ip_frag.header_offset = 0;
349  vnet_buffer (p1)->ip_frag.flags = 0;
350  vnet_buffer (p1)->ip_frag.next_index =
351  IP4_FRAG_NEXT_IP4_LOOKUP;
352  vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
353  next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
354  }
355  else
356  {
357  next1 =
358  ip6_map_ip4_lookup_bypass (p1,
359  ip41) ?
360  IP6_MAP_NEXT_IP4_REWRITE : next1;
361  }
362  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
363  cpu_index,
364  map_domain_index1, 1,
365  clib_net_to_host_u16
366  (ip41->length));
367  }
368  }
369 
370  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
371  {
372  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
373  tr->map_domain_index = map_domain_index0;
374  tr->port = port0;
375  }
376 
377  if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
378  {
379  map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
380  tr->map_domain_index = map_domain_index1;
381  tr->port = port1;
382  }
383 
384  if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
385  {
386  /* Set ICMP parameters */
387  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
388  icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
389  ICMP6_destination_unreachable_source_address_failed_policy,
390  0);
391  next0 = IP6_MAP_NEXT_ICMP;
392  }
393  else
394  {
395  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
396  }
397 
398  if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
399  {
400  /* Set ICMP parameters */
401  vlib_buffer_advance (p1, -sizeof (ip6_header_t));
402  icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable,
403  ICMP6_destination_unreachable_source_address_failed_policy,
404  0);
405  next1 = IP6_MAP_NEXT_ICMP;
406  }
407  else
408  {
409  next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
410  }
411 
412  /* Reset packet */
413  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
414  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
415  if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
416  vlib_buffer_advance (p1, -sizeof (ip6_header_t));
417 
418  p0->error = error_node->errors[error0];
419  p1->error = error_node->errors[error1];
420  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
421  n_left_to_next, pi0, pi1, next0,
422  next1);
423  }
424 
425  /* Single loop */
426  while (n_left_from > 0 && n_left_to_next > 0)
427  {
428  u32 pi0;
429  vlib_buffer_t *p0;
430  u8 error0 = MAP_ERROR_NONE;
431  map_domain_t *d0 = 0;
432  ip4_header_t *ip40;
433  ip6_header_t *ip60;
434  i32 port0 = 0;
435  u32 map_domain_index0 = ~0;
436  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
437 
438  pi0 = to_next[0] = from[0];
439  from += 1;
440  n_left_from -= 1;
441  to_next += 1;
442  n_left_to_next -= 1;
443 
444  p0 = vlib_get_buffer (vm, pi0);
445  ip60 = vlib_buffer_get_current (p0);
446  vlib_buffer_advance (p0, sizeof (ip6_header_t));
447  ip40 = vlib_buffer_get_current (p0);
448 
449  /*
450  * Encapsulated IPv4 packet
451  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
452  * - Lookup/Rewrite or Fragment node in case of packet > MTU
453  * Fragmented IPv6 packet
454  * ICMP IPv6 packet
455  * - Error -> Pass to ICMPv6/ICMPv4 relay
456  * - Info -> Pass to IPv6 local
457  * Anything else -> drop
458  */
459  if (PREDICT_TRUE
460  (ip60->protocol == IP_PROTOCOL_IP_IN_IP
461  && clib_net_to_host_u16 (ip60->payload_length) > 20))
462  {
463  d0 =
464  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
465  (ip4_address_t *) & ip40->src_address.
466  as_u32, &map_domain_index0, &error0);
467  }
468  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
469  clib_net_to_host_u16 (ip60->payload_length) >
470  sizeof (icmp46_header_t))
471  {
472  icmp46_header_t *icmp = (void *) (ip60 + 1);
473  next0 = (icmp->type == ICMP6_echo_request
474  || icmp->type ==
475  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
476  IP6_MAP_NEXT_IP6_ICMP_RELAY;
477  }
478  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
479  (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
480  IP_PROTOCOL_IP_IN_IP))
481  {
482  next0 = IP6_MAP_NEXT_IP6_REASS;
483  }
484  else
485  {
486  error0 = MAP_ERROR_BAD_PROTOCOL;
487  }
488 
489  if (d0)
490  {
491  /* MAP inbound security check */
492  ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
493 
494  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
495  next0 == IP6_MAP_NEXT_IP4_LOOKUP))
496  {
497  if (PREDICT_FALSE
498  (d0->mtu
499  && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
500  {
501  vnet_buffer (p0)->ip_frag.header_offset = 0;
502  vnet_buffer (p0)->ip_frag.flags = 0;
503  vnet_buffer (p0)->ip_frag.next_index =
504  IP4_FRAG_NEXT_IP4_LOOKUP;
505  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
506  next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
507  }
508  else
509  {
510  next0 =
511  ip6_map_ip4_lookup_bypass (p0,
512  ip40) ?
513  IP6_MAP_NEXT_IP4_REWRITE : next0;
514  }
515  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
516  cpu_index,
517  map_domain_index0, 1,
518  clib_net_to_host_u16
519  (ip40->length));
520  }
521  }
522 
523  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
524  {
525  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
526  tr->map_domain_index = map_domain_index0;
527  tr->port = (u16) port0;
528  }
529 
530  if (mm->icmp6_enabled &&
531  (error0 == MAP_ERROR_DECAP_SEC_CHECK
532  || error0 == MAP_ERROR_NO_DOMAIN))
533  {
534  /* Set ICMP parameters */
535  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
536  icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
537  ICMP6_destination_unreachable_source_address_failed_policy,
538  0);
539  next0 = IP6_MAP_NEXT_ICMP;
540  }
541  else
542  {
543  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
544  }
545 
546  /* Reset packet */
547  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
548  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
549 
550  p0->error = error_node->errors[error0];
551  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
552  n_left_to_next, pi0, next0);
553  }
554  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
555  }
556 
557  return frame->n_vectors;
558 }
559 
560 
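/*
 * Walk the reassembly context and emit every fragment that can already be
 * translated: non-first fragments get the cached IPv4 header prepended,
 * non-last fragments additionally get the first 20 bytes of the following
 * fragment's data appended, then the IPv4 fragment offset, MF flag, total
 * length and checksum are rewritten from the IPv6 fragment header before
 * the buffer is queued on fragments_ready.
 */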
561 static_always_inline void
562 ip6_map_ip6_reass_prepare (vlib_main_t * vm, vlib_node_runtime_t * node,
563  map_ip6_reass_t * r, u32 ** fragments_ready,
564  u32 ** fragments_to_drop)
565 {
566  ip4_header_t *ip40;
567  ip6_header_t *ip60;
568  ip6_frag_hdr_t *frag0;
569  vlib_buffer_t *p0;
570 
571  if (!r->ip4_header.ip_version_and_header_length)
572  return;
573 
574  //The IP header is here, we need to check for packets
575  //that can be forwarded
576  int i;
577  for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
578  {
579  if (r->fragments[i].pi == ~0 ||
580  ((!r->fragments[i].next_data_len)
581  && (r->fragments[i].next_data_offset != (0xffff))))
582  continue;
583 
584  p0 = vlib_get_buffer (vm, r->fragments[i].pi);
585  ip60 = vlib_buffer_get_current (p0);
586  frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
587  ip40 = (ip4_header_t *) (frag0 + 1);
588 
589  if (ip6_frag_hdr_offset (frag0))
590  {
591  //Not first fragment, add the IPv4 header
592  clib_memcpy (ip40, &r->ip4_header, 20);
593  }
594 
595 #ifdef MAP_IP6_REASS_COUNT_BYTES
596  r->forwarded +=
597  clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
598 #endif
599 
600  if (ip6_frag_hdr_more (frag0))
601  {
602  //Not last fragment, we copy end of next
603  clib_memcpy (u8_ptr_add (ip60, p0->current_length),
604  r->fragments[i].next_data, 20);
605  p0->current_length += 20;
606  ip60->payload_length = u16_net_add (ip60->payload_length, 20);
607  }
608 
609  if (!ip4_is_fragment (ip40))
610  {
611  ip40->fragment_id = frag_id_6to4 (frag0->identification);
612  ip40->flags_and_fragment_offset =
613  clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0));
614  }
615  else
616  {
617  ip40->flags_and_fragment_offset =
618  clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) +
619  ip6_frag_hdr_offset (frag0));
620  }
621 
622  if (ip6_frag_hdr_more (frag0))
623  ip40->flags_and_fragment_offset |=
624  clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
625 
626  ip40->length =
627  clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) -
628  sizeof (*frag0));
629  ip40->checksum = ip4_header_checksum (ip40);
630 
631  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
632  {
633  map_ip6_map_ip6_reass_trace_t *tr =
634  vlib_add_trace (vm, node, p0, sizeof (*tr));
635  tr->offset = ip4_get_fragment_offset (ip40);
636  tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40);
637  tr->out = 1;
638  }
639 
640  vec_add1 (*fragments_ready, r->fragments[i].pi);
641  r->fragments[i].pi = ~0;
642  r->fragments[i].next_data_len = 0;
643  r->fragments[i].next_data_offset = 0;
644  map_main.ip6_reass_buffered_counter--;
645 
646  //TODO: Best solution would be that ip6_map handles extension headers
647  // and ignores atomic fragment. But in the meantime, let's just copy the header.
648 
649  u8 protocol = frag0->next_hdr;
650  memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60));
651  ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol =
652  protocol;
653  vlib_buffer_advance (p0, sizeof (*frag0));
654  }
655 }
656 
657 void
658 map_ip6_drop_pi (u32 pi)
659 {
660  vlib_main_t *vm = vlib_get_main ();
661  vlib_node_runtime_t *n =
662  vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
663  vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
664 }
665 
666 void
667 map_ip4_drop_pi (u32 pi)
668 {
669  vlib_main_t *vm = vlib_get_main ();
670  vlib_node_runtime_t *n =
671  vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
672  vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi);
673 }
674 
675 /*
676  * ip6_reass
677  * TODO: We should count the number of successfully
678  * transmitted fragment bytes and compare that to the last fragment
679  * offset such that we can free the reassembly structure when all fragments
680  * have been forwarded.
681  */
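/*
 * Per-packet flow: parse the IPv6 fragment header, look up (or create) the
 * reassembly context keyed on addresses, identification and next header,
 * add the fragment data, then let ip6_map_ip6_reass_prepare() release
 * whatever can already be forwarded. Buffers whose payload was fully copied
 * into the context are dropped as MAP_ERROR_NONE; the rest stay buffered
 * until more fragments arrive.
 */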
682 static uword
683 ip6_map_ip6_reass (vlib_main_t * vm,
684  vlib_node_runtime_t * node, vlib_frame_t * frame)
685 {
686  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
687  vlib_node_runtime_t *error_node =
688  vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
689  u32 *fragments_to_drop = NULL;
690  u32 *fragments_ready = NULL;
691 
692  from = vlib_frame_vector_args (frame);
693  n_left_from = frame->n_vectors;
694  next_index = node->cached_next_index;
695  while (n_left_from > 0)
696  {
697  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
698 
699  /* Single loop */
700  while (n_left_from > 0 && n_left_to_next > 0)
701  {
702  u32 pi0;
703  vlib_buffer_t *p0;
704  u8 error0 = MAP_ERROR_NONE;
705  ip6_header_t *ip60;
706  ip6_frag_hdr_t *frag0;
707  u16 offset;
708  u16 next_offset;
709  u16 frag_len;
710 
711  pi0 = to_next[0] = from[0];
712  from += 1;
713  n_left_from -= 1;
714  to_next += 1;
715  n_left_to_next -= 1;
716 
717  p0 = vlib_get_buffer (vm, pi0);
718  ip60 = vlib_buffer_get_current (p0);
719  frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
720  offset =
721  clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7);
722  frag_len =
723  clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
724  next_offset =
725  ip6_frag_hdr_more (frag0) ? (offset + frag_len) : (0xffff);
726 
727  //FIXME: Support other extension headers, maybe
728 
729  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
730  {
731  map_ip6_map_ip6_reass_trace_t *tr =
732  vlib_add_trace (vm, node, p0, sizeof (*tr));
733  tr->offset = offset;
734  tr->frag_len = frag_len;
735  tr->out = 0;
736  }
737 
738  map_ip6_reass_lock ();
739  map_ip6_reass_t *r =
740  map_ip6_reass_get (&ip60->src_address, &ip60->dst_address,
741  frag0->identification, frag0->next_hdr,
742  &fragments_to_drop);
743  //FIXME: Use better error codes
744  if (PREDICT_FALSE (!r))
745  {
746  // Could not create a caching entry
747  error0 = MAP_ERROR_FRAGMENT_MEMORY;
748  }
749  else if (PREDICT_FALSE ((frag_len <= 20 &&
750  (ip6_frag_hdr_more (frag0) || (!offset)))))
751  {
752  //Very small fragments are restricted to the last one and
753  //can't be the first one
754  error0 = MAP_ERROR_FRAGMENT_MALFORMED;
755  }
756  else
757  if (map_ip6_reass_add_fragment
758  (r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len))
759  {
760  map_ip6_reass_free (r, &fragments_to_drop);
761  error0 = MAP_ERROR_FRAGMENT_MEMORY;
762  }
763  else
764  {
765 #ifdef MAP_IP6_REASS_COUNT_BYTES
766  if (!ip6_frag_hdr_more (frag0))
767  r->expected_total = offset + frag_len;
768 #endif
769  ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready,
770  &fragments_to_drop);
771 #ifdef MAP_IP6_REASS_COUNT_BYTES
772  if (r->forwarded >= r->expected_total)
773  map_ip6_reass_free (r, &fragments_to_drop);
774 #endif
775  }
776  map_ip6_reass_unlock ();
777 
778  if (error0 == MAP_ERROR_NONE)
779  {
780  if (frag_len > 20)
781  {
782  //Dequeue the packet
783  n_left_to_next++;
784  to_next--;
785  }
786  else
787  {
788  //All data from that packet was copied; no need to keep it, but this is not an error
789  p0->error = error_node->errors[MAP_ERROR_NONE];
790  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
791  to_next, n_left_to_next,
792  pi0,
793  IP6_MAP_IP6_REASS_NEXT_DROP);
794  }
795  }
796  else
797  {
798  p0->error = error_node->errors[error0];
799  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
800  n_left_to_next, pi0,
801  IP6_MAP_IP6_REASS_NEXT_DROP);
802  }
803  }
804  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
805  }
806 
807  map_send_all_to_node (vm, fragments_ready, node,
808  &error_node->errors[MAP_ERROR_NONE],
809  IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
810  map_send_all_to_node (vm, fragments_to_drop, node,
811  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
812  IP6_MAP_IP6_REASS_NEXT_DROP);
813 
814  vec_free (fragments_to_drop);
815  vec_free (fragments_ready);
816  return frame->n_vectors;
817 }
818 
819 /*
820  * ip6_ip4_virt_reass
821  */
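/*
 * Virtual reassembly for fragmented inner IPv4 packets: only the first
 * fragment carries the L4 ports, so its port is learned and cached per
 * reassembly context. Non-first fragments arriving before the port is known
 * are held back and looped through this node again once the first fragment
 * shows up; everything else is security-checked and forwarded as in ip6_map.
 */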
822 static uword
823 ip6_map_ip4_reass (vlib_main_t * vm,
824  vlib_node_runtime_t * node, vlib_frame_t * frame)
825 {
826  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
827  vlib_node_runtime_t *error_node =
828  vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
829  map_main_t *mm = &map_main;
830  vlib_combined_counter_main_t *cm = mm->domain_counters;
831  u32 cpu_index = os_get_cpu_number ();
832  u32 *fragments_to_drop = NULL;
833  u32 *fragments_to_loopback = NULL;
834 
835  from = vlib_frame_vector_args (frame);
836  n_left_from = frame->n_vectors;
837  next_index = node->cached_next_index;
838  while (n_left_from > 0)
839  {
840  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
841 
842  /* Single loop */
843  while (n_left_from > 0 && n_left_to_next > 0)
844  {
845  u32 pi0;
846  vlib_buffer_t *p0;
847  u8 error0 = MAP_ERROR_NONE;
848  map_domain_t *d0;
849  ip4_header_t *ip40;
850  ip6_header_t *ip60;
851  i32 port0 = 0;
852  u32 map_domain_index0 = ~0;
853  u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
854  u8 cached = 0;
855 
856  pi0 = to_next[0] = from[0];
857  from += 1;
858  n_left_from -= 1;
859  to_next += 1;
860  n_left_to_next -= 1;
861 
862  p0 = vlib_get_buffer (vm, pi0);
863  ip40 = vlib_buffer_get_current (p0);
864  ip60 = ((ip6_header_t *) ip40) - 1;
865 
866  d0 =
867  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
868  (ip4_address_t *) & ip40->src_address.as_u32,
869  &map_domain_index0, &error0);
870 
871  map_ip4_reass_lock ();
872  //This node only deals with fragmented ip4
873  map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
874  ip40->dst_address.as_u32,
875  ip40->fragment_id,
876  ip40->protocol,
877  &fragments_to_drop);
878  if (PREDICT_FALSE (!r))
879  {
880  // Could not create a caching entry
881  error0 = MAP_ERROR_FRAGMENT_MEMORY;
882  }
883  else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
884  {
885  // This is a fragment
886  if (r->port >= 0)
887  {
888  // We know the port already
889  port0 = r->port;
890  }
891  else if (map_ip4_reass_add_fragment (r, pi0))
892  {
893  // Not enough space for caching
894  error0 = MAP_ERROR_FRAGMENT_MEMORY;
895  map_ip4_reass_free (r, &fragments_to_drop);
896  }
897  else
898  {
899  cached = 1;
900  }
901  }
902  else
903  if ((port0 =
904  ip4_get_port (ip40, MAP_SENDER, p0->current_length)) < 0)
905  {
906  // Could not find port from first fragment. Stop reassembling.
907  error0 = MAP_ERROR_BAD_PROTOCOL;
908  port0 = 0;
909  map_ip4_reass_free (r, &fragments_to_drop);
910  }
911  else
912  {
913  // Found port. Remember it and loopback saved fragments
914  r->port = port0;
915  map_ip4_reass_get_fragments (r, &fragments_to_loopback);
916  }
917 
918 #ifdef MAP_IP4_REASS_COUNT_BYTES
919  if (!cached && r)
920  {
921  r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
922  if (!ip4_get_fragment_more (ip40))
923  r->expected_total =
924  ip4_get_fragment_offset (ip40) * 8 +
925  clib_host_to_net_u16 (ip40->length) - 20;
926  if (r->forwarded >= r->expected_total)
927  map_ip4_reass_free (r, &fragments_to_drop);
928  }
929 #endif
930 
931  map_ip4_reass_unlock ();
932 
933  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
934  error0 =
935  ip6_map_sec_check (d0, port0, ip40,
936  ip60) ? MAP_ERROR_NONE :
937  MAP_ERROR_DECAP_SEC_CHECK;
938 
939  if (PREDICT_FALSE
940  (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)
941  && error0 == MAP_ERROR_NONE && !cached))
942  {
943  vnet_buffer (p0)->ip_frag.header_offset = 0;
944  vnet_buffer (p0)->ip_frag.flags = 0;
945  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
946  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
947  next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
948  }
949 
950  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
951  {
952  map_ip6_map_ip4_reass_trace_t *tr =
953  vlib_add_trace (vm, node, p0, sizeof (*tr));
954  tr->map_domain_index = map_domain_index0;
955  tr->port = port0;
956  tr->cached = cached;
957  }
958 
959  if (cached)
960  {
961  //Dequeue the packet
962  n_left_to_next++;
963  to_next--;
964  }
965  else
966  {
967  if (error0 == MAP_ERROR_NONE)
968  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
969  cpu_index, map_domain_index0,
970  1,
971  clib_net_to_host_u16
972  (ip40->length));
973  next0 =
974  (error0 ==
975  MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
976  p0->error = error_node->errors[error0];
977  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
978  n_left_to_next, pi0, next0);
979  }
980 
981  //Loopback when we reach the end of the input vector
982  if (n_left_from == 0 && vec_len (fragments_to_loopback))
983  {
984  from = vlib_frame_vector_args (frame);
985  u32 len = vec_len (fragments_to_loopback);
986  if (len <= VLIB_FRAME_SIZE)
987  {
988  clib_memcpy (from, fragments_to_loopback,
989  sizeof (u32) * len);
990  n_left_from = len;
991  vec_reset_length (fragments_to_loopback);
992  }
993  else
994  {
995  clib_memcpy (from,
996  fragments_to_loopback + (len -
997  VLIB_FRAME_SIZE),
998  sizeof (u32) * VLIB_FRAME_SIZE);
999  n_left_from = VLIB_FRAME_SIZE;
1000  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
1001  }
1002  }
1003  }
1004  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1005  }
1006  map_send_all_to_node (vm, fragments_to_drop, node,
1007  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
1008  IP6_MAP_IP4_REASS_NEXT_DROP);
1009 
1010  vec_free (fragments_to_drop);
1011  vec_free (fragments_to_loopback);
1012  return frame->n_vectors;
1013 }
1014 
1015 /*
1016  * ip6_icmp_relay
1017  */
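/*
 * Relays ICMPv6 errors generated against encapsulated traffic back to the
 * IPv4 sender: the outer IPv6 + ICMPv6 + inner IPv6 headers are replaced by
 * an ICMPv4 error (destination unreachable, or fragmentation-needed for
 * packet-too-big) built in front of the original inner IPv4 packet, using
 * the configured ICMPv4 relay source address.
 */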
1018 static uword
1019 ip6_map_icmp_relay (vlib_main_t * vm,
1020  vlib_node_runtime_t * node, vlib_frame_t * frame)
1021 {
1022  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
1023  vlib_node_runtime_t *error_node =
1024  vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
1025  map_main_t *mm = &map_main;
1026  u32 cpu_index = os_get_cpu_number ();
1027  u16 *fragment_ids, *fid;
1028 
1029  from = vlib_frame_vector_args (frame);
1030  n_left_from = frame->n_vectors;
1031  next_index = node->cached_next_index;
1032 
1033  /* Get random fragment IDs for replies. */
1034  fid = fragment_ids =
1035  clib_random_buffer_get_data (&vm->random_buffer,
1036  n_left_from * sizeof (fragment_ids[0]));
1037 
1038  while (n_left_from > 0)
1039  {
1040  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1041 
1042  /* Single loop */
1043  while (n_left_from > 0 && n_left_to_next > 0)
1044  {
1045  u32 pi0;
1046  vlib_buffer_t *p0;
1047  u8 error0 = MAP_ERROR_NONE;
1048  ip6_header_t *ip60;
1049  u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
1050  u32 mtu;
1051 
1052  pi0 = to_next[0] = from[0];
1053  from += 1;
1054  n_left_from -= 1;
1055  to_next += 1;
1056  n_left_to_next -= 1;
1057 
1058  p0 = vlib_get_buffer (vm, pi0);
1059  ip60 = vlib_buffer_get_current (p0);
1060  u16 tlen = clib_net_to_host_u16 (ip60->payload_length);
1061 
1062  /*
1063  * In:
1064  * IPv6 header (40)
1065  * ICMPv6 header (8)
1066  * IPv6 header (40)
1067  * Original IPv4 header / packet
1068  * Out:
1069  * New IPv4 header
1070  * New ICMP header
1071  * Original IPv4 header / packet
1072  */
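/*
 * The 60-byte vlib_buffer_advance() below follows from this layout:
 * outer IPv6 (40) + ICMPv6 (8) + inner IPv6 (40) = 88 bytes consumed,
 * minus the new IPv4 (20) + ICMPv4 (8) = 28 bytes written in place,
 * i.e. the rewrite starts 60 bytes into the original packet.
 */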
1073 
1074  /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
1075  if (tlen < 76)
1076  {
1077  error0 = MAP_ERROR_ICMP_RELAY;
1078  goto error;
1079  }
1080 
1081  icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1);
1082  ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2);
1083 
1084  if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP)
1085  {
1086  error0 = MAP_ERROR_ICMP_RELAY;
1087  goto error;
1088  }
1089 
1090  ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1);
1091  vlib_buffer_advance (p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
1092  ip4_header_t *new_ip40 = vlib_buffer_get_current (p0);
1093  icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1);
1094 
1095  /*
1096  * Relay according to RFC2473, section 8.3
1097  */
1098  switch (icmp60->type)
1099  {
1100  case ICMP6_destination_unreachable:
1101  case ICMP6_time_exceeded:
1102  case ICMP6_parameter_problem:
1103  /* Type 3 - destination unreachable, Code 1 - host unreachable */
1104  new_icmp40->type = ICMP4_destination_unreachable;
1105  new_icmp40->code =
1106  ICMP4_destination_unreachable_destination_unreachable_host;
1107  break;
1108 
1109  case ICMP6_packet_too_big:
1110  /* Type 3 - destination unreachable, Code 4 - packet too big */
1111  /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
1112  mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1)));
1113 
1114  /* Check DF flag */
1115  if (!
1116  (inner_ip40->flags_and_fragment_offset &
1117  clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT)))
1118  {
1119  error0 = MAP_ERROR_ICMP_RELAY;
1120  goto error;
1121  }
1122 
1123  new_icmp40->type = ICMP4_destination_unreachable;
1124  new_icmp40->code =
1125  ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
1126  *((u32 *) (new_icmp40 + 1)) =
1127  clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu);
1128  break;
1129 
1130  default:
1131  error0 = MAP_ERROR_ICMP_RELAY;
1132  break;
1133  }
1134 
1135  /*
1136  * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
1137  */
1138  new_ip40->ip_version_and_header_length = 0x45;
1139  new_ip40->tos = 0;
1140  u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
1141  new_ip40->length = clib_host_to_net_u16 (nlen);
1142  new_ip40->fragment_id = fid[0];
1143  fid++;
1144  new_ip40->ttl = 64;
1145  new_ip40->protocol = IP_PROTOCOL_ICMP;
1146  new_ip40->src_address = mm->icmp4_src_address;
1147  new_ip40->dst_address = inner_ip40->src_address;
1148  new_ip40->checksum = ip4_header_checksum (new_ip40);
1149 
1150  new_icmp40->checksum = 0;
1151  ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20);
1152  new_icmp40->checksum = ~ip_csum_fold (sum);
1153 
1154  vlib_increment_simple_counter (&mm->icmp_relayed, cpu_index, 0, 1);
1155 
1156  error:
1157  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
1158  {
1159  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
1160  tr->map_domain_index = 0;
1161  tr->port = 0;
1162  }
1163 
1164  next0 =
1165  (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
1166  p0->error = error_node->errors[error0];
1167  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1168  n_left_to_next, pi0, next0);
1169  }
1170  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1171  }
1172 
1173  return frame->n_vectors;
1174 
1175 }
1176 
1177 static char *map_error_strings[] = {
1178 #define _(sym,string) string,
1179  foreach_map_error
1180 #undef _
1181 };
1182 
1183 /* *INDENT-OFF* */
1184 VLIB_REGISTER_NODE(ip6_map_node) = {
1185  .function = ip6_map,
1186  .name = "ip6-map",
1187  .vector_size = sizeof(u32),
1188  .format_trace = format_map_trace,
1189  .type = VLIB_NODE_TYPE_INTERNAL,
1190 
1191  .n_errors = MAP_N_ERROR,
1192  .error_strings = map_error_strings,
1193 
1194  .n_next_nodes = IP6_MAP_N_NEXT,
1195  .next_nodes = {
1196  [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1197 #ifdef MAP_SKIP_IP6_LOOKUP
1198  [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-rewrite",
1199 #endif
1200  [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass",
1201  [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass",
1202  [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
1203  [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
1204  [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
1205  [IP6_MAP_NEXT_DROP] = "error-drop",
1206  [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
1207  },
1208 };
1209 /* *INDENT-ON* */
1210 
1211 /* *INDENT-OFF* */
1212 VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
1213  .function = ip6_map_ip6_reass,
1214  .name = "ip6-map-ip6-reass",
1215  .vector_size = sizeof(u32),
1216  .format_trace = format_ip6_map_ip6_reass_trace,
1217  .type = VLIB_NODE_TYPE_INTERNAL,
1218  .n_errors = MAP_N_ERROR,
1219  .error_strings = map_error_strings,
1220  .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT,
1221  .next_nodes = {
1222  [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map",
1223  [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
1224  },
1225 };
1226 /* *INDENT-ON* */
1227 
1228 /* *INDENT-OFF* */
1229 VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
1230  .function = ip6_map_ip4_reass,
1231  .name = "ip6-map-ip4-reass",
1232  .vector_size = sizeof(u32),
1233  .format_trace = format_ip6_map_ip4_reass_trace,
1234  .type = VLIB_NODE_TYPE_INTERNAL,
1235  .n_errors = MAP_N_ERROR,
1236  .error_strings = map_error_strings,
1237  .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT,
1238  .next_nodes = {
1239  [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
1240  [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
1241  [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
1242  },
1243 };
1244 /* *INDENT-ON* */
1245 
1246 /* *INDENT-OFF* */
1247 VLIB_REGISTER_NODE(ip6_map_icmp_relay_node, static) = {
1248  .function = ip6_map_icmp_relay,
1249  .name = "ip6-map-icmp-relay",
1250  .vector_size = sizeof(u32),
1251  .format_trace = format_map_trace, //FIXME
1252  .type = VLIB_NODE_TYPE_INTERNAL,
1253  .n_errors = MAP_N_ERROR,
1254  .error_strings = map_error_strings,
1255  .n_next_nodes = IP6_ICMP_RELAY_N_NEXT,
1256  .next_nodes = {
1257  [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup",
1258  [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
1259  },
1260 };
1261 /* *INDENT-ON* */
1262 
1263 /*
1264  * fd.io coding-style-patch-verification: ON
1265  *
1266  * Local Variables:
1267  * eval: (c-set-style "gnu")
1268  * End:
1269  */