FD.io VPP  v19.08.1-401-g8e4ed521a
Vector Packet Processing
ip6_full_reass.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
17  * @file
18  * @brief IPv6 Full Reassembly.
19  *
20  * This file contains the source code for IPv6 full reassembly.
21  */
22 
23 #include <vppinfra/vec.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ip/ip.h>
26 #include <vppinfra/bihash_48_8.h>
28 
29 #define MSEC_PER_SEC 1000
30 #define IP6_FULL_REASS_TIMEOUT_DEFAULT_MS 100
31 #define IP6_FULL_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
32 #define IP6_FULL_REASS_MAX_REASSEMBLIES_DEFAULT 1024
33 #define IP6_FULL_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
34 #define IP6_FULL_REASS_HT_LOAD_FACTOR (0.75)
35 
36 typedef enum
37 {
44 
45 typedef struct
46 {
47  union
48  {
49  struct
50  {
55  u8 unused[7];
57  };
58  u64 as_u64[6];
59  };
61 
62 typedef union
63 {
64  struct
65  {
68  };
71 
72 typedef union
73 {
74  struct
75  {
78  };
81 
82 
85 {
87  return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
88 }
89 
92 {
94  return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
95  (vnb->ip.reass.fragment_first +
97 }
98 
99 typedef struct
100 {
101  // hash table key
103  // time when last packet was received
105  // internal id of this reassembly
107  // buffer index of first buffer in this reassembly context
109  // last octet of packet, ~0 until fragment without more_fragments arrives
111  // length of data collected so far
113  // trace operation counter
115  // next index - used by custom apps (~0 if not set)
117  // error next index - used by custom apps (~0 if not set)
119  // minimum fragment length for this reassembly - used to estimate MTU
121  // number of fragments for this reassembly
123  // thread owning memory for this context (whose pool contains this ctx)
125  // thread which received fragment with offset 0 and which sends out the
126  // completed reassembly
129 
130 typedef struct
131 {
137 
138 typedef struct
139 {
140  // IPv6 config
144  // maximum number of fragments in one reassembly
146  // maximum number of reassemblies
148 
149  // IPv6 runtime
150  clib_bihash_48_8_t hash;
151 
152  // per-thread data
154 
155  // convenience
157 
158  // node index of ip6-drop node
162 
163  /** Worker handoff */
166 
168 
170 
171 #ifndef CLIB_MARCH_VARIANT
173 #endif /* CLIB_MARCH_VARIANT */
174 
175 typedef enum
176 {
183 
184 typedef enum
185 {
194 
195 typedef struct
196 {
204 
205 typedef struct
206 {
207  ip6_full_reass_trace_operation_e action;
217 
218 static void
/* Fill one range-trace record from a buffer's reassembly opaque data.
   NOTE(review): extraction gap — doxygen lines 219-220 (function name and
   parameter list: vm, bi, trace) are missing from this view. */
221 {
222  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
/* NOTE(review): doxygen line 223 (declaration of 'vnb', presumably
   vnet_buffer (b)) is missing — confirm against upstream source. */
224  trace->range_first = vnb->ip.reass.range_first;
225  trace->range_last = vnb->ip.reass.range_last;
/* NOTE(review): doxygen lines 226-227 are missing; given that
   format_ip6_full_reass_range_trace prints data_offset and data_len,
   those assignments likely lived here — verify upstream. */
228  trace->range_bi = bi;
229 }
230 
231 static u8 *
/* vppinfra-style format helper: renders a single fragment range
   (first/last offsets, data offset/len, buffer index) into vector 's'.
   NOTE(review): extraction gap — doxygen line 232 (function name and
   first parameters) and line 234 (va_arg for 's'/vm) are missing. */
233 {
235  va_arg (*args, ip6_full_reass_range_trace_t *);
236  s =
237  format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
238  trace->range_last, trace->data_offset, trace->data_len,
239  trace->range_bi);
240  return s;
241 }
242 
243 static u8 *
244 format_ip6_full_reass_trace (u8 * s, va_list * args)
245 {
246  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
247  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
248  ip6_full_reass_trace_t *t = va_arg (*args, ip6_full_reass_trace_t *);
249  u32 indent = 0;
250  if (~0 != t->reass_id)
251  {
252  s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
253  indent = format_get_indent (s);
254  s = format (s, "first bi: %u, data len: %u, ip/fragment[%u, %u]",
257  }
258  switch (t->action)
259  {
260  case RANGE_NEW:
261  s = format (s, "\n%Unew %U", format_white_space, indent,
263  break;
264  case RANGE_OVERLAP:
265  s = format (s, "\n%Uoverlap %U", format_white_space, indent,
267  break;
269  s = format (s, "\n%Uicmp-error - frag_len > 65535 %U",
270  format_white_space, indent,
272  break;
274  s = format (s, "\n%Uicmp-error - frag_len mod 8 != 0 %U",
275  format_white_space, indent,
277  break;
279  s = format (s, "\n%Uicmp-error - reassembly time exceeded",
280  format_white_space, indent);
281  break;
282  case FINALIZE:
283  s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
284  break;
285  case HANDOFF:
286  s =
287  format (s, "handoff from thread #%u to thread #%u", t->thread_id,
288  t->thread_id_to);
289  break;
290  }
291  return s;
292 }
293 
294 static void
/* Record one reassembly trace entry on buffer 'bi' via vlib_add_trace.
   When a reassembly context is supplied, its id, per-context operation
   counter, first buffer index and accumulated data length are captured
   (and the op counter is bumped); otherwise reass_id is set to ~0.
   NOTE(review): extraction gap — doxygen lines 295-296 (function name and
   leading parameters vm/node/rm) are missing from this view. */
297  ip6_full_reass_t * reass, u32 bi,
298  ip6_full_reass_trace_operation_e action,
299  u32 thread_id_to)
300 {
301  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
/* NOTE(review): doxygen line 302 (declaration of 'vnb', presumably
   vnet_buffer (b)) is missing — it is read below for fragment_first/last. */
303  ip6_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
304  if (reass)
305  {
306  t->reass_id = reass->id;
307  t->op_id = reass->trace_op_counter;
308  t->trace_range.first_bi = reass->first_bi;
309  t->total_data_len = reass->data_len;
310  ++reass->trace_op_counter;
311  }
312  else
313  {
314  t->reass_id = ~0;
315  }
316  t->action = action;
317  t->thread_id = vm->thread_index;
318  t->thread_id_to = thread_id_to;
320  t->fragment_first = vnb->ip.reass.fragment_first;
321  t->fragment_last = vnb->ip.reass.fragment_last;
/* Disabled debug aid: immediately prints the freshly added trace entry. */
322 #if 0
323  static u8 *s = NULL;
324  s = format (s, "%U", format_ip6_full_reass_trace, NULL, NULL, t);
325  printf ("%.*s\n", vec_len (s), s);
326  fflush (stdout);
327  vec_reset_length (s);
328 #endif
329 }
330 
331 always_inline void
/* Return a reassembly context to the per-thread pool and decrement the
   thread's active-reassembly counter. Does NOT touch the hash table —
   callers that need that use ip6_full_reass_free instead.
   NOTE(review): extraction gap — doxygen line 332 (function name
   ip6_full_reass_free_ctx, per the call at doxygen line 352, and the
   per-thread runtime parameter 'rt') is missing from this view. */
333  ip6_full_reass_t * reass)
334 {
335  pool_put (rt->pool, reass);
336  --rt->reass_n;
337 }
338 
339 always_inline void
/* Fully dispose of a reassembly: remove its 48-byte (6 x u64) key from the
   bihash (is_add = 0) and return the context to the per-thread pool.
   NOTE(review): extraction gap — doxygen lines 340-341 (function name
   ip6_full_reass_free, per the call at doxygen line 487, and parameters
   rm/rt) plus line 344 (the kv declaration, presumably
   clib_bihash_kv_48_8_t) are missing from this view. */
342  ip6_full_reass_t * reass)
343 {
345  kv.key[0] = reass->key.as_u64[0];
346  kv.key[1] = reass->key.as_u64[1];
347  kv.key[2] = reass->key.as_u64[2];
348  kv.key[3] = reass->key.as_u64[3];
349  kv.key[4] = reass->key.as_u64[4];
350  kv.key[5] = reass->key.as_u64[5];
351  clib_bihash_add_del_48_8 (&rm->hash, &kv, 0);
352  ip6_full_reass_free_ctx (rt, reass);
353 }
354 
355 always_inline void
/* Drop every buffer held by a reassembly. Walks the range chain
   (ip.reass.next_range_bi) and, within each range, the vlib buffer chain
   (next_buffer), collecting all buffer indices into 'to_free' while
   clearing VLIB_BUFFER_NEXT_PRESENT so the chains are unlinked. If a
   custom app registered an error_next_index, the buffers are enqueued to
   that node instead of being freed outright.
   NOTE(review): extraction gap — doxygen lines 356-357 (function name
   ip6_full_reass_drop_all, per the call at doxygen line 453, and
   parameters vm/node/rm/reass) are missing from this view. */
358 {
359  u32 range_bi = reass->first_bi;
360  vlib_buffer_t *range_b;
361  vnet_buffer_opaque_t *range_vnb;
362  u32 *to_free = NULL;
363  while (~0 != range_bi)
364  {
365  range_b = vlib_get_buffer (vm, range_bi);
366  range_vnb = vnet_buffer (range_b);
367  u32 bi = range_bi;
/* inner loop: collect every buffer in this range's vlib chain */
368  while (~0 != bi)
369  {
370  vec_add1 (to_free, bi);
371  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
372  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
373  {
374  bi = b->next_buffer;
375  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
376  }
377  else
378  {
379  bi = ~0;
380  }
381  }
382  range_bi = range_vnb->ip.reass.next_range_bi;
383  }
384  /* send to next_error_index */
385  if (~0 != reass->error_next_index)
386  {
387  u32 n_left_to_next, *to_next, next_index;
388 
389  next_index = reass->error_next_index;
390  u32 bi = ~0;
391 
/* frame-at-a-time enqueue: refill the next frame until 'to_free' drains */
392  while (vec_len (to_free) > 0)
393  {
394  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
395 
396  while (vec_len (to_free) > 0 && n_left_to_next > 0)
397  {
398  bi = vec_pop (to_free);
399 
400  if (~0 != bi)
401  {
402  to_next[0] = bi;
403  to_next += 1;
404  n_left_to_next -= 1;
405  }
406  }
407  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
408  }
409  }
410  else
411  {
412  vlib_buffer_free (vm, to_free, vec_len (to_free));
413  }
414  vec_free (to_free);
415 }
416 
417 always_inline void
/* Handle reassembly timeout. If the fragment with offset 0 was received
   (and no custom app next_index is set), detach the first buffer, prime it
   as an ICMPv6 "time exceeded / fragment reassembly time exceeded" error
   and report its index via *icmp_bi; then drop all remaining buffers.
   NOTE(review): extraction gap — doxygen lines 418-419 (function name
   ip6_full_reass_on_timeout, per the call at doxygen line 486, and
   parameters vm/node/rm) are missing from this view. */
420  ip6_full_reass_t * reass, u32 * icmp_bi)
421 {
422  if (~0 == reass->first_bi)
423  {
424  return;
425  }
426  if (~0 == reass->next_index) // custom apps don't want icmp
427  {
428  vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
429  if (0 == vnet_buffer (b)->ip.reass.fragment_first)
430  {
431  *icmp_bi = reass->first_bi;
432  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
433  {
/* NOTE(review): doxygen line 435 (trailing trace arguments, presumably the
   ICMP_ERROR_RT_EXCEEDED action and ~0 thread id) is missing here. */
434  ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
436  }
437  // fragment with offset zero received - send icmp message back
438  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
439  {
440  // separate first buffer from chain and steer it towards icmp node
441  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
442  reass->first_bi = b->next_buffer;
443  }
444  else
445  {
446  reass->first_bi = vnet_buffer (b)->ip.reass.next_range_bi;
447  }
448  icmp6_error_set_vnet_buffer (b, ICMP6_time_exceeded,
449  ICMP6_time_exceeded_fragment_reassembly_time_exceeded,
450  0);
451  }
452  }
453  ip6_full_reass_drop_all (vm, node, rm, reass);
454 }
455 
/* Look up an existing reassembly by its 6 x u64 key, or create a new one.
   - Hash hit owned by another thread: set *do_handoff and return.
   - Hash hit that timed out: run timeout handling, free it, fall through
     to create a fresh context.
   - At capacity (rt->reass_n >= rm->max_reass_n): return NULL.
   Newly created contexts are zeroed, given a globally unique id
   (thread_index * 1e9 + per-thread counter) and inserted into the bihash
   with is_add = 2; on insert race (-2) the lookup is retried via 'again'.
   NOTE(review): extraction gap — doxygen lines 456-459 (return type,
   function name ip6_full_reass_find_or_create per the call at doxygen
   line 1091, and parameters vm/node/rm/rt) are missing from this view. */
460  ip6_full_reass_kv_t * kv, u32 * icmp_bi,
461  u8 * do_handoff)
462 {
463  ip6_full_reass_t *reass;
464  f64 now;
465 
466 again:
467 
468  reass = NULL;
469  now = vlib_time_now (vm);
470 
471  if (!clib_bihash_search_48_8
472  (&rm->hash, (clib_bihash_kv_48_8_t *) kv, (clib_bihash_kv_48_8_t *) kv))
473  {
474  reass =
/* NOTE(review): doxygen line 475 (start of the pool element lookup,
   presumably pool_elt_at_index over per_thread_data) is missing here. */
476  [kv->v.memory_owner_thread_index].pool,
477  kv->v.reass_index);
478  if (vm->thread_index != kv->v.memory_owner_thread_index)
479  {
480  *do_handoff = 1;
481  return reass;
482  }
483 
484  if (now > reass->last_heard + rm->timeout)
485  {
486  ip6_full_reass_on_timeout (vm, node, rm, reass, icmp_bi);
487  ip6_full_reass_free (rm, rt, reass);
488  reass = NULL;
489  }
490  }
491 
492  if (reass)
493  {
494  reass->last_heard = now;
495  return reass;
496  }
497 
498  if (rt->reass_n >= rm->max_reass_n)
499  {
500  reass = NULL;
501  return reass;
502  }
503  else
504  {
505  pool_get (rt->pool, reass);
506  clib_memset (reass, 0, sizeof (*reass));
507  reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
508  ++rt->id_counter;
509  reass->first_bi = ~0;
510  reass->last_packet_octet = ~0;
511  reass->data_len = 0;
512  reass->next_index = ~0;
513  reass->error_next_index = ~0;
514  ++rt->reass_n;
515  }
516 
517  reass->key.as_u64[0] = ((clib_bihash_kv_48_8_t *) kv)->key[0];
518  reass->key.as_u64[1] = ((clib_bihash_kv_48_8_t *) kv)->key[1];
519  reass->key.as_u64[2] = ((clib_bihash_kv_48_8_t *) kv)->key[2];
520  reass->key.as_u64[3] = ((clib_bihash_kv_48_8_t *) kv)->key[3];
521  reass->key.as_u64[4] = ((clib_bihash_kv_48_8_t *) kv)->key[4];
522  reass->key.as_u64[5] = ((clib_bihash_kv_48_8_t *) kv)->key[5];
523  kv->v.reass_index = (reass - rt->pool);
524  kv->v.memory_owner_thread_index = vm->thread_index;
525  reass->last_heard = now;
526 
527  int rv =
528  clib_bihash_add_del_48_8 (&rm->hash, (clib_bihash_kv_48_8_t *) kv, 2);
529  if (rv)
530  {
531  ip6_full_reass_free (rm, rt, reass);
532  reass = NULL;
533  // if other worker created a context already work with the other copy
534  if (-2 == rv)
535  goto again;
536  }
537 
538  return reass;
539 }
540 
541 always_inline ip6_full_reass_rc_t
545  ip6_full_reass_t * reass, u32 * bi0, u32 * next0,
546  u32 * error0, bool is_custom_app)
547 {
548  *bi0 = reass->first_bi;
549  *error0 = IP6_ERROR_NONE;
550  ip6_frag_hdr_t *frag_hdr;
551  vlib_buffer_t *last_b = NULL;
552  u32 sub_chain_bi = reass->first_bi;
553  u32 total_length = 0;
554  u32 buf_cnt = 0;
555  u32 dropped_cnt = 0;
556  u32 *vec_drop_compress = NULL;
557  ip6_full_reass_rc_t rv = IP6_FULL_REASS_RC_OK;
558  do
559  {
560  u32 tmp_bi = sub_chain_bi;
561  vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
562  vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
563  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
564  !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
565  {
567  goto free_buffers_and_return;
568  }
569 
570  u32 data_len = ip6_full_reass_buffer_get_data_len (tmp);
571  u32 trim_front = vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
572  sizeof (*frag_hdr) + ip6_full_reass_buffer_get_data_offset (tmp);
573  u32 trim_end =
574  vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
575  if (tmp_bi == reass->first_bi)
576  {
577  /* first buffer - keep ip6 header */
578  if (0 != ip6_full_reass_buffer_get_data_offset (tmp))
579  {
581  goto free_buffers_and_return;
582  }
583  trim_front = 0;
584  trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
585  (vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
586  sizeof (*frag_hdr));
587  if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
588  {
590  goto free_buffers_and_return;
591  }
592  }
593  u32 keep_data =
594  vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
595  while (1)
596  {
597  ++buf_cnt;
598  if (trim_front)
599  {
600  if (trim_front > tmp->current_length)
601  {
602  /* drop whole buffer */
603  vec_add1 (vec_drop_compress, tmp_bi);
604  trim_front -= tmp->current_length;
605  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
606  {
608  goto free_buffers_and_return;
609  }
610  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
611  tmp_bi = tmp->next_buffer;
612  tmp = vlib_get_buffer (vm, tmp_bi);
613  continue;
614  }
615  else
616  {
617  vlib_buffer_advance (tmp, trim_front);
618  trim_front = 0;
619  }
620  }
621  if (keep_data)
622  {
623  if (last_b)
624  {
625  last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
626  last_b->next_buffer = tmp_bi;
627  }
628  last_b = tmp;
629  if (keep_data <= tmp->current_length)
630  {
631  tmp->current_length = keep_data;
632  keep_data = 0;
633  }
634  else
635  {
636  keep_data -= tmp->current_length;
637  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
638  {
640  goto free_buffers_and_return;
641  }
642  }
643  total_length += tmp->current_length;
644  }
645  else
646  {
647  vec_add1 (vec_drop_compress, tmp_bi);
648  if (reass->first_bi == tmp_bi)
649  {
651  goto free_buffers_and_return;
652  }
653  ++dropped_cnt;
654  }
655  if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
656  {
657  tmp_bi = tmp->next_buffer;
658  tmp = vlib_get_buffer (vm, tmp->next_buffer);
659  }
660  else
661  {
662  break;
663  }
664  }
665  sub_chain_bi =
666  vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
667  reass.next_range_bi;
668  }
669  while (~0 != sub_chain_bi);
670 
671  if (!last_b)
672  {
674  goto free_buffers_and_return;
675  }
676  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
677  vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
678  if (total_length < first_b->current_length)
679  {
681  goto free_buffers_and_return;
682  }
683  total_length -= first_b->current_length;
684  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
685  first_b->total_length_not_including_first_buffer = total_length;
686  // drop fragment header
687  vnet_buffer_opaque_t *first_b_vnb = vnet_buffer (first_b);
689  u16 ip6_frag_hdr_offset = first_b_vnb->ip.reass.ip6_frag_hdr_offset;
690  ip6_ext_header_t *prev_hdr;
691  frag_hdr =
692  ip6_ext_header_find (vm, first_b, ip, IP_PROTOCOL_IPV6_FRAGMENTATION,
693  &prev_hdr);
694  if (prev_hdr)
695  {
696  prev_hdr->next_hdr = frag_hdr->next_hdr;
697  }
698  else
699  {
700  ip->protocol = frag_hdr->next_hdr;
701  }
702  if (!((u8 *) frag_hdr - (u8 *) ip == ip6_frag_hdr_offset))
703  {
705  goto free_buffers_and_return;
706  }
707  memmove (frag_hdr, (u8 *) frag_hdr + sizeof (*frag_hdr),
708  first_b->current_length - ip6_frag_hdr_offset -
709  sizeof (ip6_frag_hdr_t));
710  first_b->current_length -= sizeof (*frag_hdr);
711  ip->payload_length =
712  clib_host_to_net_u16 (total_length + first_b->current_length -
713  sizeof (*ip));
714  if (!vlib_buffer_chain_linearize (vm, first_b))
715  {
717  goto free_buffers_and_return;
718  }
719  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
720  if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
721  {
722  ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
723  FINALIZE, ~0);
724 #if 0
725  // following code does a hexdump of packet fragments to stdout ...
726  do
727  {
728  u32 bi = reass->first_bi;
729  u8 *s = NULL;
730  while (~0 != bi)
731  {
732  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
733  s = format (s, "%u: %U\n", bi, format_hexdump,
735  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
736  {
737  bi = b->next_buffer;
738  }
739  else
740  {
741  break;
742  }
743  }
744  printf ("%.*s\n", vec_len (s), s);
745  fflush (stdout);
746  vec_free (s);
747  }
748  while (0);
749 #endif
750  }
751  if (!is_custom_app)
752  {
754  }
755  else
756  {
757  *next0 = reass->next_index;
758  }
759  vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
760  ip6_full_reass_free (rm, rt, reass);
761  reass = NULL;
762 free_buffers_and_return:
763  vlib_buffer_free (vm, vec_drop_compress, vec_len (vec_drop_compress));
764  vec_free (vec_drop_compress);
765  return rv;
766 }
767 
768 always_inline void
/* Link buffer 'new_next_bi' into the reassembly's sorted range chain,
   either after 'prev_range_bi' or — when prev is ~0 — at the head
   (before the current first_bi, if any). Also accounts the new range's
   payload bytes into reass->data_len.
   NOTE(review): extraction gap — doxygen lines 769-771 (function name
   ip6_full_reass_insert_range_in_chain, per the call at doxygen line 843,
   and leading parameters vm/rm/rt) are missing from this view. */
772  ip6_full_reass_t * reass,
773  u32 prev_range_bi, u32 new_next_bi)
774 {
775 
776  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
777  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
778  if (~0 != prev_range_bi)
779  {
/* splice between prev and prev's old successor */
780  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
781  vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
782  new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
783  prev_vnb->ip.reass.next_range_bi = new_next_bi;
784  }
785  else
786  {
787  if (~0 != reass->first_bi)
788  {
789  new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
790  }
791  reass->first_bi = new_next_bi;
792  }
793  reass->data_len += ip6_full_reass_buffer_get_data_len (new_next_b);
794 }
795 
796 always_inline ip6_full_reass_rc_t
800  ip6_full_reass_t * reass, u32 * bi0, u32 * next0,
801  u32 * error0, ip6_frag_hdr_t * frag_hdr,
802  bool is_custom_app, u32 * handoff_thread_idx)
803 {
804  int consumed = 0;
805  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
806  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
807  if (is_custom_app)
808  {
809  reass->next_index = fvnb->ip.reass.next_index; // store next_index before it's overwritten
810  reass->error_next_index = fvnb->ip.reass.error_next_index; // store error_next_index before it is overwritten
811  }
812 
813  fvnb->ip.reass.ip6_frag_hdr_offset =
814  (u8 *) frag_hdr - (u8 *) vlib_buffer_get_current (fb);
816  if (fb->current_length < sizeof (*fip) ||
817  fvnb->ip.reass.ip6_frag_hdr_offset == 0 ||
818  fvnb->ip.reass.ip6_frag_hdr_offset >= fb->current_length)
819  {
821  }
822 
823  u32 fragment_first = fvnb->ip.reass.fragment_first =
824  ip6_frag_hdr_offset_bytes (frag_hdr);
825  u32 fragment_length =
826  vlib_buffer_length_in_chain (vm, fb) -
827  (fvnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
828  u32 fragment_last = fvnb->ip.reass.fragment_last =
829  fragment_first + fragment_length - 1;
830  int more_fragments = ip6_frag_hdr_more (frag_hdr);
831  u32 candidate_range_bi = reass->first_bi;
832  u32 prev_range_bi = ~0;
833  fvnb->ip.reass.range_first = fragment_first;
834  fvnb->ip.reass.range_last = fragment_last;
835  fvnb->ip.reass.next_range_bi = ~0;
836  if (!more_fragments)
837  {
838  reass->last_packet_octet = fragment_last;
839  }
840  if (~0 == reass->first_bi)
841  {
842  // starting a new reassembly
843  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
844  *bi0);
845  reass->min_fragment_length = clib_net_to_host_u16 (fip->payload_length);
846  consumed = 1;
847  reass->fragments_n = 1;
848  goto check_if_done_maybe;
849  }
850  reass->min_fragment_length =
851  clib_min (clib_net_to_host_u16 (fip->payload_length),
852  fvnb->ip.reass.estimated_mtu);
853  while (~0 != candidate_range_bi)
854  {
855  vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
856  vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
857  if (fragment_first > candidate_vnb->ip.reass.range_last)
858  {
859  // this fragments starts after candidate range
860  prev_range_bi = candidate_range_bi;
861  candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
862  if (candidate_vnb->ip.reass.range_last < fragment_last &&
863  ~0 == candidate_range_bi)
864  {
865  // special case - this fragment falls beyond all known ranges
866  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
867  prev_range_bi, *bi0);
868  consumed = 1;
869  break;
870  }
871  continue;
872  }
873  if (fragment_last < candidate_vnb->ip.reass.range_first)
874  {
875  // this fragment ends before candidate range without any overlap
876  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
877  prev_range_bi, *bi0);
878  consumed = 1;
879  }
880  else if (fragment_first == candidate_vnb->ip.reass.range_first &&
881  fragment_last == candidate_vnb->ip.reass.range_last)
882  {
883  // duplicate fragment - ignore
884  }
885  else
886  {
887  // overlapping fragment - not allowed by RFC 8200
888  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
889  {
890  ip6_full_reass_add_trace (vm, node, rm, reass, *bi0,
891  RANGE_OVERLAP, ~0);
892  }
893  ip6_full_reass_drop_all (vm, node, rm, reass);
894  ip6_full_reass_free (rm, rt, reass);
896  *error0 = IP6_ERROR_REASS_OVERLAPPING_FRAGMENT;
897  return IP6_FULL_REASS_RC_OK;
898  }
899  break;
900  }
901  ++reass->fragments_n;
902 check_if_done_maybe:
903  if (consumed)
904  {
905  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
906  {
907  ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, ~0);
908  }
909  }
910  if (~0 != reass->last_packet_octet &&
911  reass->data_len == reass->last_packet_octet + 1)
912  {
913  *handoff_thread_idx = reass->sendout_thread_index;
914  int handoff =
916  ip6_full_reass_rc_t rc =
917  ip6_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
918  is_custom_app);
919  if (IP6_FULL_REASS_RC_OK == rc && handoff)
920  {
922  }
923  return rc;
924  }
925  else
926  {
927  if (consumed)
928  {
929  *bi0 = ~0;
930  if (reass->fragments_n > rm->max_reass_len)
931  {
933  }
934  }
935  else
936  {
938  *error0 = IP6_ERROR_REASS_DUPLICATE_FRAGMENT;
939  }
940  }
941  return IP6_FULL_REASS_RC_OK;
942 }
943 
944 always_inline bool
/* First-fragment sanity check: walk the extension-header chain starting at
   the fragment header; if it ends in IP_PROTOCOL_IP6_NONXT (no upper-layer
   header present), prime the buffer as an ICMPv6 parameter-problem
   "incomplete header chain" error, set the REASS_MISSING_UPPER counter
   error on it, and return false. Returns true when an upper layer exists.
   NOTE(review): extraction gap — doxygen line 945 (function name and the
   'node' parameter, read below via node->errors) is missing from this
   view. */
946  vlib_buffer_t * b,
947  ip6_frag_hdr_t * frag_hdr)
948 {
949  ip6_ext_header_t *tmp = (ip6_ext_header_t *) frag_hdr;
950  while (ip6_ext_hdr (tmp->next_hdr))
951  {
952  tmp = ip6_ext_next_header (tmp);
953  }
954  if (IP_PROTOCOL_IP6_NONXT == tmp->next_hdr)
955  {
956  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
957  ICMP6_parameter_problem_first_fragment_has_incomplete_header_chain,
958  0);
959  b->error = node->errors[IP6_ERROR_REASS_MISSING_UPPER];
960 
961  return false;
962  }
963  return true;
964 }
965 
966 always_inline bool
/* Check that a non-final fragment's payload length is a multiple of 8
   octets; if not, prime the buffer as an ICMPv6 parameter-problem error
   pointing at the payload-length field and return false.
   NOTE(review): extraction gap — doxygen line 967 (function name and 'vm'
   parameter), line 972-973 (declarations of 'ip'/'vnb' used below) and
   line 976 (start of the fragment_length expression, presumably
   vlib_buffer_length_in_chain minus the subtrahend on the next line) are
   missing from this view. */
968  vlib_node_runtime_t * node,
969  vlib_buffer_t * b,
970  ip6_frag_hdr_t * frag_hdr)
971 {
974  int more_fragments = ip6_frag_hdr_more (frag_hdr);
975  u32 fragment_length =
977  (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
978  if (more_fragments && 0 != fragment_length % 8)
979  {
980  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
981  ICMP6_parameter_problem_erroneous_header_field,
982  (u8 *) & ip->payload_length - (u8 *) ip);
983  return false;
984  }
985  return true;
986 }
987 
988 always_inline bool
/* Check that fragment offset + fragment length does not exceed 65535 (the
   maximum IPv6 payload expressible in the 16-bit length field); on
   violation prime the buffer as an ICMPv6 parameter-problem error pointing
   at the fragment header's offset/more field and return false.
   NOTE(review): extraction gap — doxygen line 989 (function name
   ip6_full_reass_verify_packet_size_lt_64k, per the call at doxygen line
   1068, and 'vm'), line 994 (declarations of 'ip0'/'vnb' used below),
   line 997 (start of the fragment_length expression) and line 1001 are
   missing from this view. */
990  vlib_node_runtime_t * node,
991  vlib_buffer_t * b,
992  ip6_frag_hdr_t * frag_hdr)
993 {
995  u32 fragment_first = ip6_frag_hdr_offset_bytes (frag_hdr);
996  u32 fragment_length =
998  (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
999  if (fragment_first + fragment_length > 65535)
1000  {
1002  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
1003  ICMP6_parameter_problem_erroneous_header_field,
1004  (u8 *) & frag_hdr->fragment_offset_and_more
1005  - (u8 *) ip0);
1006  return false;
1007  }
1008  return true;
1009 }
1010 
1013  vlib_node_runtime_t * node,
1014  vlib_frame_t * frame, bool is_feature,
1015  bool is_custom_app)
1016 {
1017  u32 *from = vlib_frame_vector_args (frame);
1018  u32 n_left_from, n_left_to_next, *to_next, next_index;
1021  clib_spinlock_lock (&rt->lock);
1022 
1023  n_left_from = frame->n_vectors;
1024  next_index = node->cached_next_index;
1025  while (n_left_from > 0)
1026  {
1027  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1028 
1029  while (n_left_from > 0 && n_left_to_next > 0)
1030  {
1031  u32 bi0;
1032  vlib_buffer_t *b0;
1034  u32 error0 = IP6_ERROR_NONE;
1035  u32 icmp_bi = ~0;
1036 
1037  bi0 = from[0];
1038  b0 = vlib_get_buffer (vm, bi0);
1039 
1041  ip6_frag_hdr_t *frag_hdr = NULL;
1042  ip6_ext_header_t *prev_hdr;
1043  if (ip6_ext_hdr (ip0->protocol))
1044  {
1045  frag_hdr =
1046  ip6_ext_header_find (vm, b0, ip0,
1047  IP_PROTOCOL_IPV6_FRAGMENTATION,
1048  &prev_hdr);
1049  }
1050  if (!frag_hdr)
1051  {
1052  // this is a regular packet - no fragmentation
1054  goto skip_reass;
1055  }
1056  if (0 == ip6_frag_hdr_offset (frag_hdr))
1057  {
1058  // first fragment - verify upper-layer is present
1060  (node, b0, frag_hdr))
1061  {
1063  goto skip_reass;
1064  }
1065  }
1067  (vm, node, b0, frag_hdr)
1068  || !ip6_full_reass_verify_packet_size_lt_64k (vm, node, b0,
1069  frag_hdr))
1070  {
1072  goto skip_reass;
1073  }
1074  vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset =
1075  (u8 *) frag_hdr - (u8 *) ip0;
1076 
1078  u8 do_handoff = 0;
1079 
1080  kv.k.as_u64[0] = ip0->src_address.as_u64[0];
1081  kv.k.as_u64[1] = ip0->src_address.as_u64[1];
1082  kv.k.as_u64[2] = ip0->dst_address.as_u64[0];
1083  kv.k.as_u64[3] = ip0->dst_address.as_u64[1];
1084  kv.k.as_u64[4] =
1086  vnet_buffer (b0)->sw_if_index[VLIB_RX])) << 32 |
1087  (u64) frag_hdr->identification;
1088  kv.k.as_u64[5] = ip0->protocol;
1089 
1090  ip6_full_reass_t *reass =
1091  ip6_full_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
1092  &do_handoff);
1093 
1094  if (reass)
1095  {
1096  const u32 fragment_first = ip6_frag_hdr_offset (frag_hdr);
1097  if (0 == fragment_first)
1098  {
1099  reass->sendout_thread_index = vm->thread_index;
1100  }
1101  }
1102  if (PREDICT_FALSE (do_handoff))
1103  {
1105  if (is_feature)
1106  vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
1108  else
1109  vnet_buffer (b0)->ip.reass.owner_thread_index =
1111  }
1112  else if (reass)
1113  {
1114  u32 handoff_thread_idx;
1115  switch (ip6_full_reass_update
1116  (vm, node, rm, rt, reass, &bi0, &next0, &error0,
1117  frag_hdr, is_custom_app, &handoff_thread_idx))
1118  {
1119  case IP6_FULL_REASS_RC_OK:
1120  /* nothing to do here */
1121  break;
1124  b0 = vlib_get_buffer (vm, bi0);
1125  if (is_feature)
1126  vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
1127  handoff_thread_idx;
1128  else
1129  vnet_buffer (b0)->ip.reass.owner_thread_index =
1130  handoff_thread_idx;
1131  break;
1134  IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1135  1);
1136  ip6_full_reass_drop_all (vm, node, rm, reass);
1137  ip6_full_reass_free (rm, rt, reass);
1138  goto next_packet;
1139  break;
1142  IP6_ERROR_REASS_NO_BUF, 1);
1143  ip6_full_reass_drop_all (vm, node, rm, reass);
1144  ip6_full_reass_free (rm, rt, reass);
1145  goto next_packet;
1146  break;
1149  IP6_ERROR_REASS_INTERNAL_ERROR,
1150  1);
1151  ip6_full_reass_drop_all (vm, node, rm, reass);
1152  ip6_full_reass_free (rm, rt, reass);
1153  goto next_packet;
1154  break;
1155  }
1156  }
1157  else
1158  {
1159  if (is_feature)
1160  {
1162  }
1163  else
1164  {
1165  vnet_buffer_opaque_t *fvnb = vnet_buffer (b0);
1166  next0 = fvnb->ip.reass.error_next_index;
1167  }
1168  error0 = IP6_ERROR_REASS_LIMIT_REACHED;
1169  }
1170 
1171  if (~0 != bi0)
1172  {
1173  skip_reass:
1174  to_next[0] = bi0;
1175  to_next += 1;
1176  n_left_to_next -= 1;
1177 
1178  /* bi0 might have been updated by reass_finalize, reload */
1179  b0 = vlib_get_buffer (vm, bi0);
1180  b0->error = node->errors[error0];
1181 
1182  if (next0 == IP6_FULL_REASSEMBLY_NEXT_HANDOFF)
1183  {
1184  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1185  {
1186  if (is_feature)
1187  ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
1188  HANDOFF,
1189  vnet_buffer (b0)->ip.
1190  reass.owner_feature_thread_index);
1191  else
1192  ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
1193  HANDOFF,
1194  vnet_buffer (b0)->ip.
1195  reass.owner_thread_index);
1196  }
1197  }
1198  else if (is_feature && IP6_ERROR_NONE == error0)
1199  {
1200  vnet_feature_next (&next0, b0);
1201  }
1202  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1203  n_left_to_next, bi0, next0);
1204  }
1205 
1206  if (~0 != icmp_bi)
1207  {
1209  to_next[0] = icmp_bi;
1210  to_next += 1;
1211  n_left_to_next -= 1;
1212  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1213  n_left_to_next, icmp_bi,
1214  next0);
1215  }
1216  next_packet:
1217  from += 1;
1218  n_left_from -= 1;
1219  }
1220 
1221  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1222  }
1223 
1224  clib_spinlock_unlock (&rt->lock);
1225  return frame->n_vectors;
1226 }
1227 
1229 #define _(sym, string) string,
1231 #undef _
1232 };
1233 
1235  vlib_node_runtime_t * node,
1236  vlib_frame_t * frame)
1237 {
1238  return ip6_full_reassembly_inline (vm, node, frame, false /* is_feature */ ,
1239  false /* is_custom_app */ );
1240 }
1241 
1242 /* *INDENT-OFF* */
1244  .name = "ip6-full-reassembly",
1245  .vector_size = sizeof (u32),
1246  .format_trace = format_ip6_full_reass_trace,
1247  .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
1248  .error_strings = ip6_full_reassembly_error_strings,
1249  .n_next_nodes = IP6_FULL_REASSEMBLY_N_NEXT,
1250  .next_nodes =
1251  {
1252  [IP6_FULL_REASSEMBLY_NEXT_INPUT] = "ip6-input",
1253  [IP6_FULL_REASSEMBLY_NEXT_DROP] = "ip6-drop",
1254  [IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
1255  [IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reassembly-handoff",
1256  },
1257 };
1258 /* *INDENT-ON* */
1259 
1261  vlib_node_runtime_t * node,
1262  vlib_frame_t * frame)
1263 {
1264  return ip6_full_reassembly_inline (vm, node, frame, true /* is_feature */ ,
1265  false /* is_custom_app */ );
1266 }
1267 
1268 /* *INDENT-OFF* */
1270  .name = "ip6-full-reassembly-feature",
1271  .vector_size = sizeof (u32),
1272  .format_trace = format_ip6_full_reass_trace,
1273  .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
1274  .error_strings = ip6_full_reassembly_error_strings,
1275  .n_next_nodes = IP6_FULL_REASSEMBLY_N_NEXT,
1276  .next_nodes =
1277  {
1278  [IP6_FULL_REASSEMBLY_NEXT_INPUT] = "ip6-input",
1279  [IP6_FULL_REASSEMBLY_NEXT_DROP] = "ip6-drop",
1280  [IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
1281  [IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reass-feature-hoff",
1282  },
1283 };
1284 /* *INDENT-ON* */
1285 
1286 /* *INDENT-OFF* */
1287 VNET_FEATURE_INIT (ip6_full_reassembly_feature, static) = {
1288  .arc_name = "ip6-unicast",
1289  .node_name = "ip6-full-reassembly-feature",
1290  .runs_before = VNET_FEATURES ("ip6-lookup",
1291  "ipsec6-input-feature"),
1292  .runs_after = 0,
1293 };
1294 /* *INDENT-ON* */
1295 
1296 #ifndef CLIB_MARCH_VARIANT
1297 static u32
/* Size the bihash: divide the configured max reassembly count by the load
   factor (0.75) and round up to the next power of two.
   NOTE(review): extraction gap — doxygen line 1298 (function name
   ip6_full_reass_get_nbuckets, per the call at doxygen line 1353) and
   line 1300 (declaration of 'rm', presumably &ip6_full_reass_main) are
   missing from this view. */
1299 {
1301  u32 nbuckets;
1302  u8 i;
1303 
1304  nbuckets = (u32) (rm->max_reass_n / IP6_FULL_REASS_HT_LOAD_FACTOR);
1305 
/* find smallest power of two >= nbuckets */
1306  for (i = 0; i < 31; i++)
1307  if ((1 << i) >= nbuckets)
1308  break;
1309  nbuckets = 1 << i;
1310 
1311  return nbuckets;
1312 }
1313 #endif /* CLIB_MARCH_VARIANT */
1314 
1315 typedef enum
1316 {
1319 
1320 #ifndef CLIB_MARCH_VARIANT
1321 typedef struct
1322 {
1323  int failure;
1324  clib_bihash_48_8_t *new_hash;
1326 
1327 static void
/* Rehash callback: copy one key/value pair into the new hash table
   (is_add = 1); a failed insert sets ctx->failure so the caller can roll
   back the whole rehash.
   NOTE(review): extraction gap — doxygen line 1328 (the signature line
   naming ip6_rehash_cb and its kv/_ctx parameters) is missing from this
   view. */
1329 {
1330  ip6_rehash_cb_ctx *ctx = _ctx;
1331  if (clib_bihash_add_del_48_8 (ctx->new_hash, kv, 1))
1332  {
1333  ctx->failure = 1;
1334  }
1335 }
1336 
1337 static void
1338 ip6_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1339  u32 max_reassembly_length,
1340  u32 expire_walk_interval_ms)
1341 {
1342  ip6_full_reass_main.timeout_ms = timeout_ms;
1343  ip6_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1344  ip6_full_reass_main.max_reass_n = max_reassemblies;
1345  ip6_full_reass_main.max_reass_len = max_reassembly_length;
1346  ip6_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
1347 }
1348 
/* Apply a new reassembly configuration: store the parameters, signal the
 * expire-walk process, and if the required bucket count grew, rehash all
 * entries into a bigger bihash.  Returns 0 on success, -1 if rehashing
 * failed (the old table is then left in place).
 * NOTE(review): extraction dropped the 'vnet_api_error_t' return-type
 * line, the trailing arguments of vlib_process_signal_event () and the
 * 'ip6_rehash_cb_ctx ctx;' declaration. */
1350 ip6_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
1351  u32 max_reassembly_length, u32 expire_walk_interval_ms)
1352 {
1353  u32 old_nbuckets = ip6_full_reass_get_nbuckets ();
1354  ip6_full_reass_set_params (timeout_ms, max_reassemblies,
1355  max_reassembly_length, expire_walk_interval_ms);
1356  vlib_process_signal_event (ip6_full_reass_main.vlib_main,
1357  ip6_full_reass_main.ip6_full_reass_expire_node_idx,
1359  u32 new_nbuckets = ip6_full_reass_get_nbuckets ();
/* Only grow; max_reass_n == 0 disables the table resize entirely. */
1360  if (ip6_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
1361  {
1362  clib_bihash_48_8_t new_hash;
1363  clib_memset (&new_hash, 0, sizeof (new_hash));
1365  ctx.failure = 0;
1366  ctx.new_hash = &new_hash;
1367  clib_bihash_init_48_8 (&new_hash, "ip6-full-reass", new_nbuckets,
1368  new_nbuckets * 1024);
/* Copy every existing entry into the freshly sized table. */
1369  clib_bihash_foreach_key_value_pair_48_8 (&ip6_full_reass_main.hash,
1370  ip6_rehash_cb, &ctx);
1371  if (ctx.failure)
1372  {
1373  clib_bihash_free_48_8 (&new_hash);
1374  return -1;
1375  }
1376  else
1377  {
/* Success: free the old table and adopt the new one in place. */
1378  clib_bihash_free_48_8 (&ip6_full_reass_main.hash);
1379  clib_memcpy_fast (&ip6_full_reass_main.hash, &new_hash,
1380  sizeof (ip6_full_reass_main.hash));
1381  clib_bihash_copied (&ip6_full_reass_main.hash, &new_hash);
1382  }
1383  }
1384  return 0;
1385 }
1386 
/* Read back the current reassembly configuration into the caller-supplied
 * out-parameters; always returns 0.  NOTE(review): the
 * 'vnet_api_error_t' return-type line was lost in extraction. */
1388 ip6_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1389  u32 * max_reassembly_length,
1390  u32 * expire_walk_interval_ms)
1391 {
1392  *timeout_ms = ip6_full_reass_main.timeout_ms;
1393  *max_reassemblies = ip6_full_reass_main.max_reass_n;
1394  *max_reassembly_length = ip6_full_reass_main.max_reass_len;
1395  *expire_walk_interval_ms = ip6_full_reass_main.expire_walk_interval_ms;
1396  return 0;
1397 }
1398 
/* One-time plugin/module init: allocate per-thread reassembly pools and
 * their spinlocks, resolve helper node indices (expire-walk, ip6-drop,
 * ip6-icmp-error), create the bihash, register this node for the IPv6
 * fragmentation header protocol, and create the worker handoff frame
 * queues.  NOTE(review): several lines were lost in extraction — the
 * function-name line, the 'rm'/'rt' declarations, the expire-node index
 * assignment, the default-parameter setup around line 1421-1424, and the
 * fq_index initialization around line 1442/1444. */
1399 static clib_error_t *
1401 {
1403  clib_error_t *error = 0;
1404  u32 nbuckets;
1405  vlib_node_t *node;
1406 
1407  rm->vlib_main = vm;
1408 
/* One pool + lock per worker thread (plus main thread). */
1411  vec_foreach (rt, rm->per_thread_data)
1412  {
1413  clib_spinlock_init (&rt->lock);
1414  pool_alloc (rt->pool, rm->max_reass_n);
1415  }
1416 
1417  node = vlib_get_node_by_name (vm, (u8 *) "ip6-full-reassembly-expire-walk");
1418  ASSERT (node);
1420 
1425 
1426  nbuckets = ip6_full_reass_get_nbuckets ();
1427  clib_bihash_init_48_8 (&rm->hash, "ip6-full-reass", nbuckets,
1428  nbuckets * 1024);
1429 
/* Cache node indices used when dropping / ICMP-erroring fragments. */
1430  node = vlib_get_node_by_name (vm, (u8 *) "ip6-drop");
1431  ASSERT (node);
1432  rm->ip6_drop_idx = node->index;
1433  node = vlib_get_node_by_name (vm, (u8 *) "ip6-icmp-error");
1434  ASSERT (node);
1435  rm->ip6_icmp_error_idx = node->index;
1436 
1437  if ((error = vlib_call_init_function (vm, ip_main_init)))
1438  return error;
/* Receive all packets carrying an IPv6 fragmentation header. */
1439  ip6_register_protocol (IP_PROTOCOL_IPV6_FRAGMENTATION,
1440  ip6_full_reass_node.index);
1441 
1443  rm->fq_feature_index =
1445 
1446  return error;
1447 }
1448 
1450 #endif /* CLIB_MARCH_VARIANT */
1451 
/* Process node: wake up periodically (or on a config-changed event),
 * walk every per-thread reassembly pool under its spinlock, free
 * contexts idle for longer than rm->timeout, and batch the resulting
 * ICMP time-exceeded buffers to the ip6-icmp-error node.
 * NOTE(review): extraction dropped several lines — the function-name
 * line, the 'rm' declaration, the vlib_process_wait_for_event_or_clock
 * call (only its trailing "/ (f64) MSEC_PER_SEC);" survives at 1463),
 * one switch case label, the per-thread 'rt' declaration (1488), and
 * the vlib_get_frame_to_node / vlib_put_frame_to_node calls
 * (1522 / 1539). */
1452 static uword
1454  vlib_node_runtime_t * node, vlib_frame_t * f)
1455 {
1457  uword event_type, *event_data = 0;
1458 
1459  while (true)
1460  {
1463  / (f64) MSEC_PER_SEC);
1464  event_type = vlib_process_get_events (vm, &event_data);
1465 
1466  switch (event_type)
1467  {
1468  case ~0: /* no events => timeout */
1469  /* nothing to do here */
1470  break;
1472  break;
1473  default:
1474  clib_warning ("BUG: event type 0x%wx", event_type);
1475  break;
1476  }
1477  f64 now = vlib_time_now (vm);
1478 
1479  ip6_full_reass_t *reass;
1480  int *pool_indexes_to_free = NULL;
1481 
1482  uword thread_index = 0;
1483  int index;
1484  const uword nthreads = vlib_num_workers () + 1;
1485  u32 *vec_icmp_bi = NULL;
1486  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1487  {
1489  &rm->per_thread_data[thread_index];
1490  clib_spinlock_lock (&rt->lock);
1491 
/* First collect expired indices, then free — pool_foreach_index must
 * not see elements being removed under it. */
1492  vec_reset_length (pool_indexes_to_free);
1493  /* *INDENT-OFF* */
1494  pool_foreach_index (index, rt->pool, ({
1495  reass = pool_elt_at_index (rt->pool, index);
1496  if (now > reass->last_heard + rm->timeout)
1497  {
1498  vec_add1 (pool_indexes_to_free, index);
1499  }
1500  }));
1501  /* *INDENT-ON* */
1502  int *i;
1503  /* *INDENT-OFF* */
1504  vec_foreach (i, pool_indexes_to_free)
1505  {
1506  ip6_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1507  u32 icmp_bi = ~0;
/* The timeout handler may hand back a buffer index carrying an
 * ICMP time-exceeded message to be sent. */
1508  ip6_full_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
1509  if (~0 != icmp_bi)
1510  vec_add1 (vec_icmp_bi, icmp_bi);
1511 
1512  ip6_full_reass_free (rm, rt, reass);
1513  }
1514  /* *INDENT-ON* */
1515 
1516  clib_spinlock_unlock (&rt->lock);
1517  }
1518 
/* Flush the collected ICMP buffers, one frame at a time. */
1519  while (vec_len (vec_icmp_bi) > 0)
1520  {
1521  vlib_frame_t *f =
1523  u32 *to_next = vlib_frame_vector_args (f);
1524  u32 n_left_to_next = VLIB_FRAME_SIZE - f->n_vectors;
1525  int trace_frame = 0;
1526  while (vec_len (vec_icmp_bi) > 0 && n_left_to_next > 0)
1527  {
1528  u32 bi = vec_pop (vec_icmp_bi);
1529  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1530  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
1531  trace_frame = 1;
1532  b->error = node->errors[IP6_ERROR_REASS_TIMEOUT];
1533  to_next[0] = bi;
1534  ++f->n_vectors;
1535  to_next += 1;
1536  n_left_to_next -= 1;
1537  }
/* Mark the frame traced if any buffer in it was traced. */
1538  f->frame_flags |= (trace_frame * VLIB_FRAME_TRACE);
1540  }
1541 
1542  vec_free (pool_indexes_to_free);
1543  vec_free (vec_icmp_bi);
1544  if (event_data)
1545  {
/* Keep the allocation, just truncate for the next iteration. */
1546  _vec_len (event_data) = 0;
1547  }
1548  }
1549 
1550  return 0;
1551 }
1552 
1553 /* *INDENT-OFF* */
/* Registration of the expire-walk process node.
 * NOTE(review): the VLIB_REGISTER_NODE (ip6_full_reass_expire_node)
 * header line and the .n_errors line were lost in extraction. */
1555  .function = ip6_full_reass_walk_expired,
1556  .format_trace = format_ip6_full_reass_trace,
1557  .type = VLIB_NODE_TYPE_PROCESS,
1558  .name = "ip6-full-reassembly-expire-walk",
1559 
1561  .error_strings = ip6_full_reassembly_error_strings,
1562 
1563 };
1564 /* *INDENT-ON* */
1565 
/* Format helper for one reassembly hash key (context id, addresses,
 * fragment id, protocol).  NOTE(review): the format-argument line
 * between 1570 and 1572 (the xx_id and source-address arguments) was
 * lost in extraction. */
1566 static u8 *
1567 format_ip6_full_reass_key (u8 * s, va_list * args)
1568 {
1569  ip6_full_reass_key_t *key = va_arg (*args, ip6_full_reass_key_t *);
1570  s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1572  &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
1573  return s;
1574 }
1575 
/* Dump one reassembly context: header line with id/key/first-buffer/
 * length counters, then one line per buffer in the fragment chain.
 * NOTE(review): the format-argument lines 1597-1598 (data offset and
 * data length arguments of the per-fragment line) were lost in
 * extraction. */
1576 static u8 *
1577 format_ip6_full_reass (u8 * s, va_list * args)
1578 {
1579  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
1580  ip6_full_reass_t *reass = va_arg (*args, ip6_full_reass_t *);
1581 
1582  s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
1583  "last_packet_octet: %u, trace_op_counter: %u\n",
1584  reass->id, format_ip6_full_reass_key, &reass->key,
1585  reass->first_bi, reass->data_len, reass->last_packet_octet,
1586  reass->trace_op_counter);
1587  u32 bi = reass->first_bi;
1588  u32 counter = 0;
/* Walk the buffer chain; ~0 marks the end. */
1589  while (~0 != bi)
1590  {
1591  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1592  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
1593  s = format (s, " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1594  "fragment[%u, %u]\n",
1595  counter, vnb->ip.reass.range_first,
1596  vnb->ip.reass.range_last, bi,
1599  vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
1600  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1601  {
1602  bi = b->next_buffer;
1603  }
1604  else
1605  {
1606  bi = ~0;
1607  }
1608  }
1609  return s;
1610 }
1611 
/* CLI handler for "show ip6-full-reassembly [details]": prints the
 * per-thread reassembly counts (and each context when "details" is
 * given).  NOTE(review): sum_buffers_n is declared and printed but
 * never incremented anywhere in this function, so "Buffers in use"
 * always reports 0.  NOTE(review): the function signature and the
 * 'rm' declaration lines were lost in extraction. */
1612 static clib_error_t *
1615 {
1617 
1618  vlib_cli_output (vm, "---------------------");
1619  vlib_cli_output (vm, "IP6 reassembly status");
1620  vlib_cli_output (vm, "---------------------");
1621  bool details = false;
1622  if (unformat (input, "details"))
1623  {
1624  details = true;
1625  }
1626 
1627  u32 sum_reass_n = 0;
1628  u64 sum_buffers_n = 0;
1629  ip6_full_reass_t *reass;
1630  uword thread_index;
1631  const uword nthreads = vlib_num_workers () + 1;
1632  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1633  {
1634  ip6_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
/* Take the per-thread lock while walking its pool. */
1635  clib_spinlock_lock (&rt->lock);
1636  if (details)
1637  {
1638  /* *INDENT-OFF* */
1639  pool_foreach (reass, rt->pool, {
1640  vlib_cli_output (vm, "%U", format_ip6_full_reass, vm, reass);
1641  });
1642  /* *INDENT-ON* */
1643  }
1644  sum_reass_n += rt->reass_n;
1645  clib_spinlock_unlock (&rt->lock);
1646  }
1647  vlib_cli_output (vm, "---------------------");
1648  vlib_cli_output (vm, "Current IP6 reassemblies count: %lu\n",
1649  (long unsigned) sum_reass_n);
1650  vlib_cli_output (vm, "Maximum configured concurrent IP6 reassemblies per "
1651  "worker-thread: %lu\n", (long unsigned) rm->max_reass_n);
1652  vlib_cli_output (vm, "Buffers in use: %lu\n",
1653  (long unsigned) sum_buffers_n);
1654  return 0;
1655 }
1656 
1657 /* *INDENT-OFF* */
/* CLI command registration.  NOTE(review): the VLIB_CLI_COMMAND
 * (show_ip6_full_reassembly_cmd, static) header line was lost in
 * extraction. */
1659  .path = "show ip6-full-reassembly",
1660  .short_help = "show ip6-full-reassembly [details]",
1661  .function = show_ip6_full_reass,
1662 };
1663 /* *INDENT-ON* */
1664 
1665 #ifndef CLIB_MARCH_VARIANT
/* Enable or disable the full-reassembly feature on the "ip6-unicast"
 * arc for one interface.  NOTE(review): the return-type and signature
 * lines (vnet_api_error_t ip6_full_reass_enable_disable (u32
 * sw_if_index, u8 enable_disable), per the forward declaration in the
 * index) were lost in extraction. */
1668 {
1669  return vnet_feature_enable_disable ("ip6-unicast",
1670  "ip6-full-reassembly-feature",
1671  sw_if_index, enable_disable, 0, 0);
1672 }
1673 #endif /* CLIB_MARCH_VARIANT */
1674 
/* Error table for the handoff nodes: the foreach macro expands once per
 * error into both the enum below and the string table.
 * NOTE(review): the macro-expansion invocations, the enum count /
 * typedef-name lines and the string-array declaration line were lost
 * in extraction. */
1675 #define foreach_ip6_full_reassembly_handoff_error \
1676 _(CONGESTION_DROP, "congestion drop")
1677 
1678 
1679 typedef enum
1680 {
1681 #define _(sym,str) IP6_FULL_REASSEMBLY_HANDOFF_ERROR_##sym,
1683 #undef _
1686 
1688 #define _(sym,string) string,
1690 #undef _
1691 };
1692 
/* Per-packet trace record for the handoff nodes plus its formatter.
 * NOTE(review): the struct member line (next_worker_index, used at
 * 1708 below), the typedef name, the formatter's signature line and
 * the declaration of 't' (line 1703) were lost in extraction. */
1693 typedef struct
1694 {
1697 
1698 static u8 *
1700 {
1701  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1702  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1704  va_arg (*args, ip6_full_reassembly_handoff_trace_t *);
1705 
1706  s =
1707  format (s, "ip6-full-reassembly-handoff: next-worker %d",
1708  t->next_worker_index);
1709 
1710  return s;
1711 }
1712 
/* Common worker-handoff logic: for every buffer pick the owning thread
 * recorded in the buffer opaque (feature vs. non-feature field),
 * optionally add a trace record, then enqueue the whole frame to the
 * chosen per-thread frame queue; short enqueues are counted as
 * congestion drops.  NOTE(review): the 'static uword' + name lines,
 * the 'rm' declaration (1718), the trace 't' declaration (1745) and
 * the vlib_node_increment_counter () call line (1759) were lost in
 * extraction. */
1715  vlib_node_runtime_t * node,
1716  vlib_frame_t * frame, bool is_feature)
1717 {
1719 
1720  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1721  u32 n_enq, n_left_from, *from;
1722  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1723  u32 fq_index;
1724 
1725  from = vlib_frame_vector_args (frame);
1726  n_left_from = frame->n_vectors;
1727  vlib_get_buffers (vm, from, bufs, n_left_from);
1728 
1729  b = bufs;
1730  ti = thread_indices;
1731 
/* Feature and non-feature paths use separate frame queues. */
1732  fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
1733 
1734  while (n_left_from > 0)
1735  {
/* Owner thread was stashed in the buffer opaque by the reassembly
 * lookup; the field differs per variant. */
1736  ti[0] =
1737  (is_feature) ? vnet_buffer (b[0])->ip.
1738  reass.owner_feature_thread_index : vnet_buffer (b[0])->ip.
1739  reass.owner_thread_index;
1740 
1741  if (PREDICT_FALSE
1742  ((node->flags & VLIB_NODE_FLAG_TRACE)
1743  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1744  {
1746  vlib_add_trace (vm, node, b[0], sizeof (*t));
1747  t->next_worker_index = ti[0];
1748  }
1749 
1750  n_left_from -= 1;
1751  ti += 1;
1752  b += 1;
1753  }
/* drop_on_congestion = 1: buffers that do not fit are freed. */
1754  n_enq =
1755  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1756  frame->n_vectors, 1);
1757 
1758  if (n_enq < frame->n_vectors)
1760  IP6_FULL_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
1761  frame->n_vectors - n_enq);
1762  return frame->n_vectors;
1763 }
1764 
/* Non-feature handoff node entry point; delegates to the inline with
 * is_feature = false.  NOTE(review): the VLIB_NODE_FN
 * (ip6_full_reassembly_handoff_node) line was lost in extraction. */
1766  vlib_node_runtime_t * node,
1767  vlib_frame_t * frame)
1768 {
1769  return ip6_full_reassembly_handoff_inline (vm, node, frame,
1770  false /* is_feature */ );
1771 }
1772 
1773 /* *INDENT-OFF* */
/* Registration of the non-feature handoff node.  NOTE(review): the
 * VLIB_REGISTER_NODE header line, the .error_strings / .format_trace
 * lines (1778-1779) were lost in extraction. */
1775  .name = "ip6-full-reassembly-handoff",
1776  .vector_size = sizeof (u32),
1777  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),
1780 
1781  .n_next_nodes = 1,
1782 
1783  .next_nodes = {
1784  [0] = "error-drop",
1785  },
1786 };
1787 
1788 
/* Feature-arc handoff node entry point; delegates to the inline with
 * is_feature = true.  NOTE(review): the VLIB_NODE_FN
 * (ip6_full_reassembly_feature_handoff_node) line was lost in
 * extraction. */
1790  vlib_node_runtime_t * node, vlib_frame_t * frame)
1791 {
1792  return ip6_full_reassembly_handoff_inline (vm, node, frame, true /* is_feature */ );
1793 }
1794 
1795 
1796 /* *INDENT-OFF* */
/* Registration of the feature-arc handoff node ("ip6-full-reass-
 * feature-hoff", referenced by the feature node's next_nodes).
 * NOTE(review): the VLIB_REGISTER_NODE header line and the
 * .error_strings / .format_trace lines (1801-1802) were lost in
 * extraction. */
1798  .name = "ip6-full-reass-feature-hoff",
1799  .vector_size = sizeof (u32),
1800  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),
1803 
1804  .n_next_nodes = 1,
1805 
1806  .next_nodes = {
1807  [0] = "error-drop",
1808  },
1809 };
1810 /* *INDENT-ON* */
1811 
1812 /*
1813  * fd.io coding-style-patch-verification: ON
1814  *
1815  * Local Variables:
1816  * eval: (c-set-style "gnu")
1817  * End:
1818  */
#define IP6_FULL_REASS_MAX_REASSEMBLIES_DEFAULT
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:439
vnet_api_error_t ip6_full_reass_get(u32 *timeout_ms, u32 *max_reassemblies, u32 *max_reassembly_length, u32 *expire_walk_interval_ms)
get ip6 reassembly configuration
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
clib_bihash_48_8_t hash
static bool ip6_full_reass_verify_fragment_multiple_8(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b, ip6_frag_hdr_t *frag_hdr)
vnet_api_error_t
Definition: api_errno.h:154
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
Definition: vlib_api_cli.c:889
#define clib_min(x, y)
Definition: clib.h:295
vlib_node_registration_t ip6_full_reassembly_handoff_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reassembly_handoff_node)
static_always_inline void clib_spinlock_unlock(clib_spinlock_t *p)
Definition: lock.h:102
static_always_inline void clib_spinlock_lock(clib_spinlock_t *p)
Definition: lock.h:80
#define CLIB_UNUSED(x)
Definition: clib.h:82
static u32 ip6_full_reass_buffer_get_data_offset(vlib_buffer_t *b)
ip6_full_reass_trace_operation_e action
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread Waits for an event, or for the indicated number of seconds...
Definition: node_funcs.h:673
void ip6_register_protocol(u32 protocol, u32 node_index)
Definition: ip6_forward.c:1508
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:865
ip6_full_reass_main_t ip6_full_reass_main
static void ip6_full_reass_insert_range_in_chain(vlib_main_t *vm, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass, u32 prev_range_bi, u32 new_next_bi)
#define pool_alloc(P, N)
Allocate N more free elements to pool (unspecified alignment).
Definition: pool.h:341
u64 as_u64
Definition: bihash_doc.h:63
static ip6_full_reass_rc_t ip6_full_reass_update(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass, u32 *bi0, u32 *next0, u32 *error0, ip6_frag_hdr_t *frag_hdr, bool is_custom_app, u32 *handoff_thread_idx)
u64 as_u64[2]
Definition: ip6_packet.h:51
unsigned long u64
Definition: types.h:89
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
#define NULL
Definition: clib.h:58
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
u32 index
Definition: node.h:280
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:279
u32 vlib_frame_queue_main_init(u32 node_index, u32 frame_queue_nelts)
Definition: threads.c:1763
u32 thread_index
Definition: main.h:218
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:522
static ip6_full_reass_t * ip6_full_reass_find_or_create(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_kv_t *kv, u32 *icmp_bi, u8 *do_handoff)
static void * ip6_ext_next_header(ip6_ext_header_t *ext_hdr)
Definition: ip6_packet.h:552
ip6_full_reass_rc_t
int i
static uword ip6_full_reassembly_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, bool is_feature, bool is_custom_app)
#define MSEC_PER_SEC
static u32 format_get_indent(u8 *s)
Definition: format.h:72
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
static void ip6_full_reass_free_ctx(ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass)
#define VLIB_NODE_FN(node)
Definition: node.h:202
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:470
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:366
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:236
ip6_address_t src_address
Definition: ip6_packet.h:383
ip6_full_reass_next_t
unsigned char u8
Definition: types.h:56
#define vec_pop(V)
Returns last element of a vector and decrements its length.
Definition: vec.h:615
vlib_node_registration_t ip6_full_reass_expire_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reass_expire_node)
static bool ip6_full_reass_verify_upper_layer_present(vlib_node_runtime_t *node, vlib_buffer_t *b, ip6_frag_hdr_t *frag_hdr)
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
double f64
Definition: types.h:142
static u8 * format_ip6_full_reass_trace(u8 *s, va_list *args)
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
Definition: pool.h:493
vl_api_interface_index_t sw_if_index
Definition: gre.api:50
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:173
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
Definition: node_funcs.h:516
#define always_inline
Definition: clib.h:98
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
struct vnet_buffer_opaque_t::@60::@62 ip
static char * ip6_full_reassembly_error_strings[]
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
Definition: main.c:185
vlib_node_registration_t ip6_full_reass_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reass_node)
unsigned int u32
Definition: types.h:88
static void ip6_full_reass_free(ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass)
#define vlib_call_init_function(vm, x)
Definition: init.h:270
#define VLIB_FRAME_SIZE
Definition: node.h:378
static void * ip6_ext_header_find(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6_header, u8 header_type, ip6_ext_header_t **prev_ext_header)
Definition: ip6_packet.h:578
static u32 ip6_full_reass_get_nbuckets()
void icmp6_error_set_vnet_buffer(vlib_buffer_t *b, u8 type, u8 code, u32 data)
Definition: icmp6.c:446
static void clib_spinlock_init(clib_spinlock_t *p)
Definition: lock.h:63
#define ip6_frag_hdr_more(hdr)
Definition: ip6_packet.h:648
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
static vlib_cli_command_t show_ip6_full_reassembly_cmd
(constructor) VLIB_CLI_COMMAND (show_ip6_full_reassembly_cmd)
#define IP6_FULL_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT
static u32 vlib_buffer_chain_linearize(vlib_main_t *vm, vlib_buffer_t *b)
static ip6_full_reass_rc_t ip6_full_reass_finalize(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass, u32 *bi0, u32 *next0, u32 *error0, bool is_custom_app)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:514
static clib_error_t * show_ip6_full_reass(vlib_main_t *vm, unformat_input_t *input, CLIB_UNUSED(vlib_cli_command_t *lmd))
ip6_full_reassembly_handoff_error_t
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
Definition: node_funcs.h:934
static u8 * format_ip6_full_reassembly_handoff_trace(u8 *s, va_list *args)
u16 frame_flags
Definition: node.h:385
long ctx[MAX_CONNS]
Definition: main.c:144
struct _unformat_input_t unformat_input_t
unsigned short u16
Definition: types.h:57
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
Definition: main.c:194
ip6_full_reass_trace_operation_e
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:286
#define PREDICT_FALSE(x)
Definition: clib.h:111
static void ip6_full_reass_on_timeout(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_t *reass, u32 *icmp_bi)
static char * ip6_full_reassembly_handoff_error_strings[]
u32 node_index
Node index.
Definition: node.h:496
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:338
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1150
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
clib_error_t * ip_main_init(vlib_main_t *vm)
Definition: ip_init.c:45
VNET_FEATURE_INIT(ip6_full_reassembly_feature, static)
u16 n_vectors
Definition: node.h:397
format_function_t format_ip6_address
Definition: format.h:93
vlib_main_t * vm
Definition: buffer.c:323
#define IP6_FULL_REASS_HT_LOAD_FACTOR
ip6_full_reass_range_trace_t trace_range
#define vec_free(V)
Free vector's memory (no header).
Definition: vec.h:341
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:302
#define clib_warning(format, args...)
Definition: error.h:59
ip6_full_reass_per_thread_t * per_thread_data
u8 * format_hexdump(u8 *s, va_list *va)
Definition: std-formats.c:297
static void ip6_full_reass_add_trace(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_t *reass, u32 bi, ip6_full_reass_trace_operation_e action, u32 thread_id_to)
#define ip6_frag_hdr_offset_bytes(hdr)
Definition: ip6_packet.h:645
vlib_main_t * vlib_main
#define ARRAY_LEN(x)
Definition: clib.h:62
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:456
vlib_node_t * vlib_get_node_by_name(vlib_main_t *vm, u8 *name)
Definition: node.c:45
static u8 ip6_ext_hdr(u8 nexthdr)
Definition: ip6_packet.h:527
vlib_node_registration_t ip6_full_reass_node_feature
(constructor) VLIB_REGISTER_NODE (ip6_full_reass_node_feature)
clib_bihash_kv_48_8_t kv
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:161
u32 fq_index
Worker handoff.
signed int i32
Definition: types.h:77
#define ip6_frag_hdr_offset(hdr)
Definition: ip6_packet.h:642
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:515
#define IP6_FULL_REASS_TIMEOUT_DEFAULT_MS
#define ASSERT(truth)
ip6_main_t ip6_main
Definition: ip6_forward.c:2732
ip6_full_reass_key_t k
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
static u16 ip6_full_reass_buffer_get_data_len(vlib_buffer_t *b)
static uword ip6_full_reassembly_handoff_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, bool is_feature)
#define VNET_FEATURES(...)
Definition: feature.h:442
static u8 * format_ip6_full_reass(u8 *s, va_list *args)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
#define vec_elt(v, i)
Get vector value at index i.
void clib_bihash_copied(void *dst, void *src)
static void ip6_full_reass_drop_all(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_t *reass)
static void ip6_full_reass_trace_details(vlib_main_t *vm, u32 bi, ip6_full_reass_range_trace_t *trace)
u16 payload_length
Definition: ip6_packet.h:374
vl_api_address_t ip
Definition: l2.api:489
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
#define foreach_ip6_full_reassembly_handoff_error
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:244
typedef key
Definition: ipsec.api:247
#define VLIB_FRAME_TRACE
Definition: node.h:435
vnet_api_error_t ip6_full_reass_set(u32 timeout_ms, u32 max_reassemblies, u32 max_reassembly_length, u32 expire_walk_interval_ms)
set ip6 reassembly configuration
#define IP6_FULL_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS
#define foreach_ip6_error
Definition: ip6_error.h:43
vlib_node_registration_t ip6_full_reassembly_feature_handoff_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reassembly_feature_handoff_node)
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
Definition: buffer_node.h:487
static void ip6_full_reass_set_params(u32 timeout_ms, u32 max_reassemblies, u32 max_reassembly_length, u32 expire_walk_interval_ms)
#define vnet_buffer(b)
Definition: buffer.h:365
static bool ip6_full_reass_verify_packet_size_lt_64k(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b, ip6_frag_hdr_t *frag_hdr)
clib_bihash_48_8_t * new_hash
static u32 vlib_num_workers()
Definition: threads.h:367
ip6_full_reass_key_t key
#define vec_foreach(var, vec)
Vector iterator.
vnet_api_error_t ip6_full_reass_enable_disable(u32 sw_if_index, u8 enable_disable)
ip6_full_reass_event_t
u16 flags
Copy of main node flags.
Definition: node.h:509
static u8 * format_ip6_full_reass_key(u8 *s, va_list *args)
#define pool_foreach_index(i, v, body)
Iterate pool by index.
Definition: pool.h:538
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:244
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:302
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:167
static clib_error_t * ip6_full_reass_init_function(vlib_main_t *vm)
static uword ip6_full_reass_walk_expired(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
ip6_full_reass_val_t v
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
Definition: cli.c:772
u32 * fib_index_by_sw_if_index
Definition: ip6.h:194
static void ip6_rehash_cb(clib_bihash_kv_48_8_t *kv, void *_ctx)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:978
Definition: defs.h:46
int vnet_feature_enable_disable(const char *arc_name, const char *node_name, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes)
Definition: feature.c:275
CLIB vectors are ubiquitous dynamically resized arrays with user-defined "headers".
ip6_address_t dst_address
Definition: ip6_packet.h:383
static u8 * format_ip6_full_reass_range_trace(u8 *s, va_list *args)
IPv6 Reassembly.