FD.io VPP  v18.10-32-g1161dda
Vector Packet Processing
ip4_map.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Defines used for testing various optimisation schemes
 */
#define MAP_ENCAP_DUAL 0

#include "map.h"
#include <vnet/ip/ip_frag.h>
#include <vnet/ip/ip4_to_ip6.h>

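/*
 * ip4-map: encapsulate IPv4 packets into IPv6 (MAP-E).  The MAP domain is
 * selected upstream and carried in vnet_buffer()->ip.adj_index[VLIB_TX];
 * this node derives the IPv6 source and destination from the domain rules
 * and the destination address/port, then hands the packet to the IPv6 graph.
 */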
vlib_node_registration_t ip4_map_reass_node;

enum ip4_map_next_e
{
  IP4_MAP_NEXT_IP6_LOOKUP,
#ifdef MAP_SKIP_IP6_LOOKUP
  IP4_MAP_NEXT_IP6_REWRITE,
#endif
  IP4_MAP_NEXT_IP4_FRAGMENT,
  IP4_MAP_NEXT_IP6_FRAGMENT,
  IP4_MAP_NEXT_REASS,
  IP4_MAP_NEXT_ICMP_ERROR,
  IP4_MAP_NEXT_DROP,
  IP4_MAP_N_NEXT,
};

enum ip4_map_reass_next_t
{
  IP4_MAP_REASS_NEXT_IP6_LOOKUP,
  IP4_MAP_REASS_NEXT_IP4_FRAGMENT,
  IP4_MAP_REASS_NEXT_DROP,
  IP4_MAP_REASS_N_NEXT,
};

typedef struct
{
  u32 map_domain_index;
  u16 port;
  u8 cached;
} map_ip4_map_reass_trace_t;

u8 *
format_ip4_map_reass_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  map_ip4_map_reass_trace_t *t = va_arg (*args, map_ip4_map_reass_trace_t *);
  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
                 t->map_domain_index, t->port,
                 t->cached ? "cached" : "forwarded");
}

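/*
 * Shared-address handling: extract the destination L4 port and check it
 * against the domain's PSID configuration.  First fragments that have more
 * fragments to come, and all non-first fragments, are steered to the
 * ip4-map-reass node so the port can be recovered from the first fragment.
 */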
static_always_inline u16
ip4_map_port_and_security_check (map_domain_t * d, ip4_header_t * ip,
                                 u32 * next, u8 * error)
{
  u16 port = 0;

  if (d->psid_length > 0)
    {
      if (ip4_get_fragment_offset (ip) == 0)
        {
          if (PREDICT_FALSE
              ((ip->ip_version_and_header_length != 0x45)
               || clib_host_to_net_u16 (ip->length) < 28))
            {
              return 0;
            }
          port = ip4_get_port (ip, 0);
          if (port)
            {
              /* Verify that port is not among the well-known ports */
              if ((d->psid_offset > 0)
                  && (clib_net_to_host_u16 (port) <
                      (0x1 << (16 - d->psid_offset))))
                {
                  *error = MAP_ERROR_ENCAP_SEC_CHECK;
                }
              else
                {
                  if (ip4_get_fragment_more (ip))
                    *next = IP4_MAP_NEXT_REASS;
                  return (port);
                }
            }
          else
            {
              *error = MAP_ERROR_BAD_PROTOCOL;
            }
        }
      else
        {
          *next = IP4_MAP_NEXT_REASS;
        }
    }
  return (0);
}

/*
 * ip4_map_vtcfl
 *
 * Build the IPv6 version/traffic-class/flow-label word: version 6, the
 * traffic class copied from the IPv4 TOS (or taken from configuration),
 * and the buffer's flow hash as the flow label.
 */
static_always_inline u32
ip4_map_vtcfl (ip4_header_t * ip4, vlib_buffer_t * p)
{
  map_main_t *mm = &map_main;
  u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
  u32 vtcfl = 0x6 << 28;
  vtcfl |= tc << 20;
  vtcfl |= vnet_buffer (p)->ip.flow_hash & 0x000fffff;

  return (clib_host_to_net_u32 (vtcfl));
}

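/*
 * When MAP_SKIP_IP6_LOOKUP is enabled and a pre-resolved IPv6 next hop has
 * been configured, store its load-balance DPO index in the buffer and let
 * the packet bypass ip6-lookup (it is sent to ip6-load-balance instead).
 */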
static_always_inline bool
ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
{
#ifdef MAP_SKIP_IP6_LOOKUP
  if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP6].fei)
    {
      vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
        pre_resolved[FIB_PROTOCOL_IP6].dpo.dpoi_index;
      return (true);
    }
#endif
  return (false);
}

/*
 * ip4_map_ttl
 */
static inline void
ip4_map_decrement_ttl (ip4_header_t * ip, u8 * error)
{
  i32 ttl = ip->ttl;

  /* The input node should already have rejected packets with ttl 0. */
  ASSERT (ip->ttl > 0);

  /* Incremental checksum update for the TTL decrement; fold the carry. */
  u32 checksum = ip->checksum + clib_host_to_net_u16 (0x0100);
  checksum += checksum >= 0xffff;
  ip->checksum = checksum;
  ttl -= 1;
  ip->ttl = ttl;
  *error = ttl <= 0 ? IP4_ERROR_TIME_EXPIRED : *error;

  /* Verify checksum. */
  ASSERT (ip->checksum == ip4_header_checksum (ip));
}

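/*
 * Oversize packets: depending on configuration, either fragment the inner
 * IPv4 packet before encapsulation (frag_inner), fragment the resulting
 * IPv6 packet, or, when DF is set and not ignored, send an ICMP
 * "fragmentation needed" error back to the sender.
 */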
static u32
ip4_map_fragment (vlib_buffer_t * b, u16 mtu, bool df, u8 * error)
{
  map_main_t *mm = &map_main;

  if (mm->frag_inner)
    {
      // TODO: Fix inner fragmentation after inner support was removed from ip-frag.
      ip_frag_set_vnet_buffer (b, /*sizeof (ip6_header_t), */ mtu,
                               IP4_FRAG_NEXT_IP6_LOOKUP,
                               IP_FRAG_FLAG_IP6_HEADER);
      return (IP4_MAP_NEXT_IP4_FRAGMENT);
    }
  else
    {
      if (df && !mm->frag_ignore_df)
        {
          icmp4_error_set_vnet_buffer (b, ICMP4_destination_unreachable,
                                       ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
                                       mtu);
          vlib_buffer_advance (b, sizeof (ip6_header_t));
          *error = MAP_ERROR_DF_SET;
          return (IP4_MAP_NEXT_ICMP_ERROR);
        }
      ip_frag_set_vnet_buffer (b, mtu, IP6_FRAG_NEXT_IP6_LOOKUP,
                               IP_FRAG_FLAG_IP6_HEADER);
      return (IP4_MAP_NEXT_IP6_FRAGMENT);
    }
}

/*
 * ip4_map
 */
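/*
 * Main encapsulation node.  Packets are processed two at a time, with a
 * single-buffer loop for the remainder: check the port against the PSID,
 * decrement the IPv4 TTL, compute the IPv6 destination from the MAP rules,
 * prepend an IPv6 header and pick the next node (ip6-lookup,
 * ip6-load-balance, a fragmentation node, ip4-map-reass, ip4-icmp-error
 * or error-drop).
 */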
static uword
ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  map_main_t *mm = &map_main;
  vlib_combined_counter_main_t *cm = mm->domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual loop */
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 pi0, pi1;
          vlib_buffer_t *p0, *p1;
          map_domain_t *d0, *d1;
          u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
          ip4_header_t *ip40, *ip41;
          u16 port0 = 0, port1 = 0;
          ip6_header_t *ip6h0, *ip6h1;
          u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
          u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 =
            IP4_MAP_NEXT_IP6_LOOKUP;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);
            /* Prefetch the IPv4 header plus 8 bytes of L4 (28 bytes) and
             * the 40 bytes in front where the IPv6 header will be written. */
            CLIB_PREFETCH (p2->data - 40, 68, STORE);
            CLIB_PREFETCH (p3->data - 40, 68, STORE);
          }

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);
          ip40 = vlib_buffer_get_current (p0);
          ip41 = vlib_buffer_get_current (p1);
          map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          d0 = ip4_map_get_domain (map_domain_index0);
          map_domain_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
          d1 = ip4_map_get_domain (map_domain_index1);
          ASSERT (d0);
          ASSERT (d1);

          /*
           * Shared IPv4 address
           */
          port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
          port1 = ip4_map_port_and_security_check (d1, ip41, &next1, &error1);

          /* Decrement IPv4 TTL */
          ip4_map_decrement_ttl (ip40, &error0);
          ip4_map_decrement_ttl (ip41, &error1);
          bool df0 =
            ip40->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
          bool df1 =
            ip41->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);

          /* MAP calc */
          u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
          u32 da41 = clib_net_to_host_u32 (ip41->dst_address.as_u32);
          u16 dp40 = clib_net_to_host_u16 (port0);
          u16 dp41 = clib_net_to_host_u16 (port1);
          u64 dal60 = map_get_pfx (d0, da40, dp40);
          u64 dal61 = map_get_pfx (d1, da41, dp41);
          u64 dar60 = map_get_sfx (d0, da40, dp40);
          u64 dar61 = map_get_sfx (d1, da41, dp41);
          if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
              && next0 != IP4_MAP_NEXT_REASS)
            error0 = MAP_ERROR_NO_BINDING;
          if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE
              && next1 != IP4_MAP_NEXT_REASS)
            error1 = MAP_ERROR_NO_BINDING;

          /* construct ipv6 header */
          vlib_buffer_advance (p0, -sizeof (ip6_header_t));
          vlib_buffer_advance (p1, -sizeof (ip6_header_t));
          ip6h0 = vlib_buffer_get_current (p0);
          ip6h1 = vlib_buffer_get_current (p1);
          vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
          vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          ip6h0->ip_version_traffic_class_and_flow_label =
            ip4_map_vtcfl (ip40, p0);
          ip6h1->ip_version_traffic_class_and_flow_label =
            ip4_map_vtcfl (ip41, p1);
          ip6h0->payload_length = ip40->length;
          ip6h1->payload_length = ip41->length;
          ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
          ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
          ip6h0->hop_limit = 0x40;
          ip6h1->hop_limit = 0x40;
          ip6h0->src_address = d0->ip6_src;
          ip6h1->src_address = d1->ip6_src;
          ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
          ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
          ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64 (dal61);
          ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64 (dar61);

          /*
           * Determine next node. Can be one of:
           * ip6-lookup, ip6-load-balance, ip4-frag, ip6-frag,
           * ip4-map-reass, ip4-icmp-error, error-drop
           */
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              if (PREDICT_FALSE
                  (d0->mtu
                   && (clib_net_to_host_u16 (ip6h0->payload_length) +
                       sizeof (*ip6h0) > d0->mtu)))
                {
                  next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
                }
              else
                {
                  next0 =
                    ip4_map_ip6_lookup_bypass (p0,
                                               ip40) ?
                    IP4_MAP_NEXT_IP6_REWRITE : next0;
                  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                                   thread_index,
                                                   map_domain_index0, 1,
                                                   clib_net_to_host_u16
                                                   (ip6h0->payload_length) +
                                                   40);
                }
            }
          else
            {
              next0 = IP4_MAP_NEXT_DROP;
            }

          /*
           * Determine next node. Can be one of:
           * ip6-lookup, ip6-load-balance, ip4-frag, ip6-frag,
           * ip4-map-reass, ip4-icmp-error, error-drop
           */
          if (PREDICT_TRUE (error1 == MAP_ERROR_NONE))
            {
              if (PREDICT_FALSE
                  (d1->mtu
                   && (clib_net_to_host_u16 (ip6h1->payload_length) +
                       sizeof (*ip6h1) > d1->mtu)))
                {
                  next1 = ip4_map_fragment (p1, d1->mtu, df1, &error1);
                }
              else
                {
                  next1 =
                    ip4_map_ip6_lookup_bypass (p1,
                                               ip41) ?
                    IP4_MAP_NEXT_IP6_REWRITE : next1;
                  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                                   thread_index,
                                                   map_domain_index1, 1,
                                                   clib_net_to_host_u16
                                                   (ip6h1->payload_length) +
                                                   40);
                }
            }
          else
            {
              next1 = IP4_MAP_NEXT_DROP;
            }

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->map_domain_index = map_domain_index0;
              tr->port = port0;
            }
          if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
              tr->map_domain_index = map_domain_index1;
              tr->port = port1;
            }

          p0->error = error_node->errors[error0];
          p1->error = error_node->errors[error1];

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, pi0, pi1, next0,
                                           next1);
        }

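      /* Single loop: same processing as above, one buffer at a time. */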
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          map_domain_t *d0;
          u8 error0 = MAP_ERROR_NONE;
          ip4_header_t *ip40;
          u16 port0 = 0;
          ip6_header_t *ip6h0;
          u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
          u32 map_domain_index0 = ~0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);
          ip40 = vlib_buffer_get_current (p0);
          map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          d0 = ip4_map_get_domain (map_domain_index0);
          ASSERT (d0);

          /*
           * Shared IPv4 address
           */
          port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);

          /* Decrement IPv4 TTL */
          ip4_map_decrement_ttl (ip40, &error0);
          bool df0 =
            ip40->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);

          /* MAP calc */
          u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
          u16 dp40 = clib_net_to_host_u16 (port0);
          u64 dal60 = map_get_pfx (d0, da40, dp40);
          u64 dar60 = map_get_sfx (d0, da40, dp40);
          if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
              && next0 != IP4_MAP_NEXT_REASS)
            error0 = MAP_ERROR_NO_BINDING;

          /* construct ipv6 header */
          vlib_buffer_advance (p0, -(sizeof (ip6_header_t)));
          ip6h0 = vlib_buffer_get_current (p0);
          vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          ip6h0->ip_version_traffic_class_and_flow_label =
            ip4_map_vtcfl (ip40, p0);
          ip6h0->payload_length = ip40->length;
          ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
          ip6h0->hop_limit = 0x40;
          ip6h0->src_address = d0->ip6_src;
          ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
          ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);

          /*
           * Determine next node. Can be one of:
           * ip6-lookup, ip6-load-balance, ip4-frag, ip6-frag,
           * ip4-map-reass, ip4-icmp-error, error-drop
           */
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              if (PREDICT_FALSE
                  (d0->mtu
                   && (clib_net_to_host_u16 (ip6h0->payload_length) +
                       sizeof (*ip6h0) > d0->mtu)))
                {
                  next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
                }
              else
                {
                  next0 =
                    ip4_map_ip6_lookup_bypass (p0,
                                               ip40) ?
                    IP4_MAP_NEXT_IP6_REWRITE : next0;
                  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                                   thread_index,
                                                   map_domain_index0, 1,
                                                   clib_net_to_host_u16
                                                   (ip6h0->payload_length) +
                                                   40);
                }
            }
          else
            {
              next0 = IP4_MAP_NEXT_DROP;
            }

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->map_domain_index = map_domain_index0;
              tr->port = port0;
            }

          p0->error = error_node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, pi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

/*
 * ip4_map_reass
 */
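/*
 * Virtual reassembly: fragments of a shared-address packet are tracked per
 * (src, dst, fragment id, protocol).  Until the first fragment reveals the
 * destination L4 port, subsequent fragments are cached; once the port is
 * known, cached fragments are looped back through this node and forwarded.
 */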
static uword
ip4_map_reass (vlib_main_t * vm,
               vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_reass_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  map_main_t *mm = &map_main;
  vlib_combined_counter_main_t *cm = mm->domain_counters;
  u32 thread_index = vm->thread_index;
  u32 *fragments_to_drop = NULL;
  u32 *fragments_to_loopback = NULL;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          map_domain_t *d0;
          u8 error0 = MAP_ERROR_NONE;
          ip4_header_t *ip40;
          i32 port0 = 0;
          ip6_header_t *ip60;
          u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
          u32 map_domain_index0;
          u8 cached = 0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);
          ip60 = vlib_buffer_get_current (p0);
          ip40 = (ip4_header_t *) (ip60 + 1);
          map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          d0 = ip4_map_get_domain (map_domain_index0);

          map_ip4_reass_lock ();
          map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
                                                  ip40->dst_address.as_u32,
                                                  ip40->fragment_id,
                                                  ip40->protocol,
                                                  &fragments_to_drop);
          if (PREDICT_FALSE (!r))
            {
              // Could not create a caching entry
              error0 = MAP_ERROR_FRAGMENT_MEMORY;
            }
          else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
            {
              if (r->port >= 0)
                {
                  // We know the port already
                  port0 = r->port;
                }
              else if (map_ip4_reass_add_fragment (r, pi0))
                {
                  // Not enough space for caching
                  error0 = MAP_ERROR_FRAGMENT_MEMORY;
                  map_ip4_reass_free (r, &fragments_to_drop);
                }
              else
                {
                  cached = 1;
                }
            }
          else if ((port0 = ip4_get_port (ip40, 0)) == 0)
            {
              // Could not find port. We'll free the reassembly.
              error0 = MAP_ERROR_BAD_PROTOCOL;
              port0 = 0;
              map_ip4_reass_free (r, &fragments_to_drop);
            }
          else
            {
              r->port = port0;
              map_ip4_reass_get_fragments (r, &fragments_to_loopback);
            }

#ifdef MAP_IP4_REASS_COUNT_BYTES
          if (!cached && r)
            {
              r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
              if (!ip4_get_fragment_more (ip40))
                r->expected_total =
                  ip4_get_fragment_offset (ip40) * 8 +
                  clib_host_to_net_u16 (ip40->length) - 20;
              if (r->forwarded >= r->expected_total)
                map_ip4_reass_free (r, &fragments_to_drop);
            }
#endif

          map_ip4_reass_unlock ();

          // NOTE: Most operations have already been performed by ip4_map
          // All we need is the right destination address
          ip60->dst_address.as_u64[0] =
            map_get_pfx_net (d0, ip40->dst_address.as_u32, port0);
          ip60->dst_address.as_u64[1] =
            map_get_sfx_net (d0, ip40->dst_address.as_u32, port0);

          if (PREDICT_FALSE
              (d0->mtu
               && (clib_net_to_host_u16 (ip60->payload_length) +
                   sizeof (*ip60) > d0->mtu)))
            {
              // TODO: vnet_buffer (p0)->ip_frag.header_offset = sizeof (*ip60);
              vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
              vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
              vnet_buffer (p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
              next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
            }

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_ip4_map_reass_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->map_domain_index = map_domain_index0;
              tr->port = port0;
              tr->cached = cached;
            }

          if (cached)
            {
              // Dequeue the packet: it stays in the reassembly cache.
              n_left_to_next++;
              to_next--;
            }
          else
            {
              if (error0 == MAP_ERROR_NONE)
                vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                                 thread_index,
                                                 map_domain_index0, 1,
                                                 clib_net_to_host_u16
                                                 (ip60->payload_length) + 40);
              next0 =
                (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
              p0->error = error_node->errors[error0];
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                               n_left_to_next, pi0, next0);
            }

          // Loopback when we reach the end of the input vector
          if (n_left_from == 0 && vec_len (fragments_to_loopback))
            {
              from = vlib_frame_vector_args (frame);
              u32 len = vec_len (fragments_to_loopback);
              if (len <= VLIB_FRAME_SIZE)
                {
                  clib_memcpy (from, fragments_to_loopback,
                               sizeof (u32) * len);
                  n_left_from = len;
                  vec_reset_length (fragments_to_loopback);
                }
              else
                {
                  clib_memcpy (from,
                               fragments_to_loopback + (len -
                                                        VLIB_FRAME_SIZE),
                               sizeof (u32) * VLIB_FRAME_SIZE);
                  n_left_from = VLIB_FRAME_SIZE;
                  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
                }
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  map_send_all_to_node (vm, fragments_to_drop, node,
                        &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
                        IP4_MAP_REASS_NEXT_DROP);

  vec_free (fragments_to_drop);
  vec_free (fragments_to_loopback);
  return frame->n_vectors;
}

static char *map_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_node) = {
  .function = ip4_map,
  .name = "ip4-map",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_error_strings,

  .n_next_nodes = IP4_MAP_N_NEXT,
  .next_nodes = {
    [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
#ifdef MAP_SKIP_IP6_LOOKUP
    [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-load-balance",
#endif
    [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
    [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag",
    [IP4_MAP_NEXT_REASS] = "ip4-map-reass",
    [IP4_MAP_NEXT_ICMP_ERROR] = "ip4-icmp-error",
    [IP4_MAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_reass_node) = {
  .function = ip4_map_reass,
  .name = "ip4-map-reass",
  .vector_size = sizeof(u32),
  .format_trace = format_ip4_map_reass_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_error_strings,

  .n_next_nodes = IP4_MAP_REASS_N_NEXT,
  .next_nodes = {
    [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
    [IP4_MAP_REASS_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
Definition: ip6_packet.h:378