FD.io VPP  v20.01-48-g3e0dafb74
Vector Packet Processing
ip4_sv_reass.c
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
17  * @file
18  * @brief IPv4 Shallow Virtual Reassembly.
19  *
20  * This file contains the source code for IPv4 Shallow Virtual reassembly.
21  */
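/*
 * A minimal usage sketch, assuming a consumer feature that wants L4 ports
 * even on fragmented IPv4 traffic: it enables shallow virtual reassembly
 * per interface and then reads the metadata that the ip4-sv-reassembly
 * nodes below store in the buffer. The my_feature_* helpers are
 * hypothetical; the called function and the vnet_buffer fields are the
 * ones defined and filled in this file.
 *
 *   static int
 *   my_feature_enable_sv_reass (u32 sw_if_index)
 *   {
 *     // reference-counted enable of "ip4-sv-reassembly-feature" on the
 *     // ip4-unicast arc of the given interface
 *     return ip4_sv_reass_enable_disable_with_refcnt (sw_if_index, 1);
 *   }
 *
 *   static void
 *   my_feature_read_sv_reass_metadata (vlib_buffer_t * b)
 *   {
 *     // valid once the buffer passed through ip4-sv-reassembly-feature:
 *     // protocol and L4 ports of the first fragment (network byte order)
 *     u8 proto = vnet_buffer (b)->ip.reass.ip_proto;
 *     u16 sport = vnet_buffer (b)->ip.reass.l4_src_port;
 *     u16 dport = vnet_buffer (b)->ip.reass.l4_dst_port;
 *     (void) proto; (void) sport; (void) dport;
 *   }
 */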
22 
23 #include <vppinfra/vec.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ip/ip.h>
26 #include <vnet/ip/ip4_to_ip6.h>
27 #include <vppinfra/fifo.h>
28 #include <vppinfra/bihash_16_8.h>
29 #include <vnet/ip/reass/ip4_sv_reass.h>
30 
31 #define MSEC_PER_SEC 1000
32 #define IP4_SV_REASS_TIMEOUT_DEFAULT_MS 100
33 #define IP4_SV_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
34 #define IP4_SV_REASS_MAX_REASSEMBLIES_DEFAULT 1024
35 #define IP4_SV_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
36 #define IP4_SV_REASS_HT_LOAD_FACTOR (0.75)
37 
38 typedef enum
39 {
40  IP4_SV_REASS_RC_OK,
41  IP4_SV_REASS_RC_TOO_MANY_FRAGMENTS,
42  IP4_SV_REASS_RC_UNSUPP_IP_PROTO,
43 } ip4_sv_reass_rc_t;
44 
45 typedef struct
46 {
47  union
48  {
49  struct
50  {
51  u32 xx_id;
52  ip4_address_t src;
53  ip4_address_t dst;
54  u16 frag_id;
55  u8 proto;
56  u8 unused;
57  };
58  u64 as_u64[2];
59  };
60 } ip4_sv_reass_key_t;
61 
62 typedef union
63 {
64  struct
65  {
66  u32 reass_index;
67  u32 thread_index;
68  };
69  u64 as_u64;
70 } ip4_sv_reass_val_t;
71 
72 typedef union
73 {
74  struct
75  {
76  ip4_sv_reass_key_t k;
77  ip4_sv_reass_val_t v;
78  };
79  clib_bihash_kv_16_8_t kv;
80 } ip4_sv_reass_kv_t;
81 
82 typedef struct
83 {
84  // hash table key
85  ip4_sv_reass_key_t key;
86  // time when last packet was received
87  f64 last_heard;
88  // internal id of this reassembly
89  u64 id;
90  // trace operation counter
91  u32 trace_op_counter;
92  // minimum fragment length for this reassembly - used to estimate MTU
93  u16 min_fragment_length;
94  // buffer indexes of buffers in this reassembly in chronological order -
95  // including overlaps and duplicate fragments
96  u32 *cached_buffers;
97  // set to true when this reassembly is completed
98  bool is_complete;
99  // ip protocol
100  u8 ip_proto;
101  u8 icmp_type_or_tcp_flags;
102  u32 tcp_ack_number;
103  u32 tcp_seq_number;
104  // l4 src port
105  u16 l4_src_port;
106  // l4 dst port
107  u16 l4_dst_port;
108 
109  // lru indexes
110  u32 lru_prev;
111  u32 lru_next;
112 } ip4_sv_reass_t;
113 
114 typedef struct
115 {
116  ip4_sv_reass_t *pool;
117  u32 reass_n;
118  u32 id_counter;
119  clib_spinlock_t lock;
120  // lru indexes
121  u32 lru_first;
122  u32 lru_last;
123 
124 } ip4_sv_reass_per_thread_t;
125 
126 typedef struct
127 {
128  // IPv4 config
129  u32 timeout_ms;
130  f64 timeout;
131  u32 expire_walk_interval_ms;
132  // maximum number of fragments in one reassembly
133  u32 max_reass_len;
134  // maximum number of reassemblies
135  u32 max_reass_n;
136 
137  // IPv4 runtime
138  clib_bihash_16_8_t hash;
139  // per-thread data
140  ip4_sv_reass_per_thread_t *per_thread_data;
141 
142  // convenience
143  vlib_main_t *vlib_main;
144  vnet_main_t *vnet_main;
145 
146  // node index of ip4-drop node
147  u32 ip4_drop_idx;
148  u32 ip4_sv_reass_expire_node_idx;
149 
150  /** Worker handoff */
151  u32 fq_index;
152  u32 fq_feature_index;
153 
154  // reference count for enabling/disabling feature - per interface
155  u32 *feature_use_refcount_per_intf;
156 
157  // reference count for enabling/disabling feature - per interface
158  u32 *output_feature_use_refcount_per_intf;
159 
160 } ip4_sv_reass_main_t;
161 
162 extern ip4_sv_reass_main_t ip4_sv_reass_main;
163 
164 #ifndef CLIB_MARCH_VARIANT
165 ip4_sv_reass_main_t ip4_sv_reass_main;
166 #endif /* CLIB_MARCH_VARIANT */
167 
168 typedef enum
169 {
170  IP4_SV_REASSEMBLY_NEXT_INPUT,
171  IP4_SV_REASSEMBLY_NEXT_DROP,
172  IP4_SV_REASSEMBLY_NEXT_HANDOFF,
173  IP4_SV_REASSEMBLY_N_NEXT,
174 } ip4_sv_reass_next_t;
175 
176 typedef enum
177 {
178  REASS_FRAGMENT_CACHE,
179  REASS_FINISH,
180  REASS_FRAGMENT_FORWARD,
181  REASS_PASSTHROUGH,
182 } ip4_sv_reass_trace_operation_e;
183 
184 typedef struct
185 {
186  ip4_sv_reass_trace_operation_e action;
187  u32 reass_id;
188  u32 op_id;
189  u8 ip_proto;
190  u16 l4_src_port;
191  u16 l4_dst_port;
192 } ip4_sv_reass_trace_t;
193 
194 extern vlib_node_registration_t ip4_sv_reass_node;
195 extern vlib_node_registration_t ip4_sv_reass_node_feature;
196 
197 static u8 *
198 format_ip4_sv_reass_trace (u8 * s, va_list * args)
199 {
200  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
201  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
202  ip4_sv_reass_trace_t *t = va_arg (*args, ip4_sv_reass_trace_t *);
203  if (REASS_PASSTHROUGH != t->action)
204  {
205  s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
206  }
207  switch (t->action)
208  {
210  s = format (s, "[cached]");
211  break;
212  case REASS_FINISH:
213  s =
214  format (s, "[finish, ip proto=%u, src_port=%u, dst_port=%u]",
215  t->ip_proto, clib_net_to_host_u16 (t->l4_src_port),
216  clib_net_to_host_u16 (t->l4_dst_port));
217  break;
218  case REASS_FRAGMENT_FORWARD:
219  s =
220  format (s, "[forward, ip proto=%u, src_port=%u, dst_port=%u]",
221  t->ip_proto, clib_net_to_host_u16 (t->l4_src_port),
222  clib_net_to_host_u16 (t->l4_dst_port));
223  break;
224  case REASS_PASSTHROUGH:
225  s = format (s, "[not-fragmented]");
226  break;
227  }
228  return s;
229 }
230 
231 static void
232 ip4_sv_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
233  ip4_sv_reass_main_t * rm, ip4_sv_reass_t * reass,
234  u32 bi, ip4_sv_reass_trace_operation_e action,
235  u32 ip_proto, u16 l4_src_port, u16 l4_dst_port)
236 {
237  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
238  ip4_sv_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
239  if (reass)
240  {
241  t->reass_id = reass->id;
242  t->op_id = reass->trace_op_counter;
243  ++reass->trace_op_counter;
244  }
245  t->action = action;
246  t->ip_proto = ip_proto;
247  t->l4_src_port = l4_src_port;
248  t->l4_dst_port = l4_dst_port;
249 #if 0
250  static u8 *s = NULL;
251  s = format (s, "%U", format_ip4_sv_reass_trace, NULL, NULL, t);
252  printf ("%.*s\n", vec_len (s), s);
253  fflush (stdout);
254  vec_reset_length (s);
255 #endif
256 }
257 
258 
259 always_inline void
260 ip4_sv_reass_free (vlib_main_t * vm, ip4_sv_reass_main_t * rm,
261  ip4_sv_reass_per_thread_t * rt, ip4_sv_reass_t * reass)
262 {
263  clib_bihash_kv_16_8_t kv;
264  kv.key[0] = reass->key.as_u64[0];
265  kv.key[1] = reass->key.as_u64[1];
266  clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
267  vlib_buffer_free (vm, reass->cached_buffers,
268  vec_len (reass->cached_buffers));
269  vec_free (reass->cached_buffers);
270  reass->cached_buffers = NULL;
271  if (~0 != reass->lru_prev)
272  {
273  ip4_sv_reass_t *lru_prev =
274  pool_elt_at_index (rt->pool, reass->lru_prev);
275  lru_prev->lru_next = reass->lru_next;
276  }
277  if (~0 != reass->lru_next)
278  {
279  ip4_sv_reass_t *lru_next =
280  pool_elt_at_index (rt->pool, reass->lru_next);
281  lru_next->lru_prev = reass->lru_prev;
282  }
283  if (rt->lru_first == reass - rt->pool)
284  {
285  rt->lru_first = reass->lru_next;
286  }
287  if (rt->lru_last == reass - rt->pool)
288  {
289  rt->lru_last = reass->lru_prev;
290  }
291  pool_put (rt->pool, reass);
292  --rt->reass_n;
293 }
294 
295 always_inline void
296 ip4_sv_reass_init (ip4_sv_reass_t * reass)
297 {
298  reass->cached_buffers = NULL;
299  reass->is_complete = false;
300 }
301 
301 
302 always_inline ip4_sv_reass_t *
303 ip4_sv_reass_find_or_create (vlib_main_t * vm, ip4_sv_reass_main_t * rm,
304  ip4_sv_reass_per_thread_t * rt,
305  ip4_sv_reass_kv_t * kv, u8 * do_handoff)
306 {
307  ip4_sv_reass_t *reass = NULL;
308  f64 now = vlib_time_now (rm->vlib_main);
309 
310  if (!clib_bihash_search_16_8
311  (&rm->hash, (clib_bihash_kv_16_8_t *) kv, (clib_bihash_kv_16_8_t *) kv))
312  {
313  if (vm->thread_index != kv->v.thread_index)
314  {
315  *do_handoff = 1;
316  return NULL;
317  }
318  reass = pool_elt_at_index (rt->pool, kv->v.reass_index);
319 
320  if (now > reass->last_heard + rm->timeout)
321  {
322  ip4_sv_reass_free (vm, rm, rt, reass);
323  reass = NULL;
324  }
325  }
326 
327  if (reass)
328  {
329  reass->last_heard = now;
330  return reass;
331  }
332 
333  if (rt->reass_n >= rm->max_reass_n && rm->max_reass_n)
334  {
335  reass = pool_elt_at_index (rt->pool, rt->lru_last);
336  ip4_sv_reass_free (vm, rm, rt, reass);
337  }
338 
339  pool_get (rt->pool, reass);
340  clib_memset (reass, 0, sizeof (*reass));
341  reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
342  ++rt->id_counter;
343  ip4_sv_reass_init (reass);
344  ++rt->reass_n;
345  reass->lru_prev = reass->lru_next = ~0;
346 
347  if (~0 != rt->lru_last)
348  {
349  ip4_sv_reass_t *lru_last = pool_elt_at_index (rt->pool, rt->lru_last);
350  reass->lru_prev = rt->lru_last;
351  lru_last->lru_next = rt->lru_last = reass - rt->pool;
352  }
353 
354  if (~0 == rt->lru_first)
355  {
356  rt->lru_first = rt->lru_last = reass - rt->pool;
357  }
358 
359  reass->key.as_u64[0] = ((clib_bihash_kv_16_8_t *) kv)->key[0];
360  reass->key.as_u64[1] = ((clib_bihash_kv_16_8_t *) kv)->key[1];
361  kv->v.reass_index = (reass - rt->pool);
362  kv->v.thread_index = vm->thread_index;
363  reass->last_heard = now;
364 
365  if (clib_bihash_add_del_16_8 (&rm->hash, (clib_bihash_kv_16_8_t *) kv, 1))
366  {
367  ip4_sv_reass_free (vm, rm, rt, reass);
368  reass = NULL;
369  }
370 
371  return reass;
372 }
373 
374 always_inline ip4_sv_reass_rc_t
375 ip4_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
376  ip4_sv_reass_main_t * rm, ip4_sv_reass_per_thread_t * rt,
377  ip4_header_t * ip0, ip4_sv_reass_t * reass, u32 bi0)
378 {
379  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
380  ip4_sv_reass_rc_t rc = IP4_SV_REASS_RC_OK;
381  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
382  if (0 == fragment_first)
383  {
384  reass->ip_proto = ip0->protocol;
385  reass->l4_src_port = ip4_get_port (ip0, 1);
386  reass->l4_dst_port = ip4_get_port (ip0, 0);
387  if (!reass->l4_src_port || !reass->l4_dst_port)
388  return IP4_SV_REASS_RC_UNSUPP_IP_PROTO;
389  if (IP_PROTOCOL_TCP == reass->ip_proto)
390  {
391  reass->icmp_type_or_tcp_flags = ((tcp_header_t *) (ip0 + 1))->flags;
392  reass->tcp_ack_number = ((tcp_header_t *) (ip0 + 1))->ack_number;
393  reass->tcp_seq_number = ((tcp_header_t *) (ip0 + 1))->seq_number;
394  }
395  else if (IP_PROTOCOL_ICMP == reass->ip_proto)
396  {
397  reass->icmp_type_or_tcp_flags =
398  ((icmp46_header_t *) (ip0 + 1))->type;
399  }
400  reass->is_complete = true;
401  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
402  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
403  {
404  ip4_sv_reass_add_trace (vm, node, rm, reass, bi0, REASS_FINISH,
405  reass->ip_proto, reass->l4_src_port,
406  reass->l4_dst_port);
407  }
408  }
409  vec_add1 (reass->cached_buffers, bi0);
410  if (!reass->is_complete)
411  {
412  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
413  {
414  ip4_sv_reass_add_trace (vm, node, rm, reass, bi0,
415  REASS_FRAGMENT_CACHE, ~0, ~0, ~0);
416  }
417  if (vec_len (reass->cached_buffers) > rm->max_reass_len)
418  {
419  rc = IP4_SV_REASS_RC_TOO_MANY_FRAGMENTS;
420  }
421  }
422  return rc;
423 }
424 
425 always_inline uword
426 ip4_sv_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
427  vlib_frame_t * frame, bool is_feature,
428  bool is_output_feature, bool is_custom)
429 {
430  u32 *from = vlib_frame_vector_args (frame);
431  u32 n_left_from, n_left_to_next, *to_next, next_index;
432  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
433  ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
434  clib_spinlock_lock (&rt->lock);
435 
436  n_left_from = frame->n_vectors;
437  next_index = node->cached_next_index;
438 
439  while (n_left_from > 0)
440  {
441  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
442 
443  while (n_left_from > 0 && n_left_to_next > 0)
444  {
445  u32 bi0;
446  vlib_buffer_t *b0;
447  u32 next0;
448  u32 error0 = IP4_ERROR_NONE;
449 
450  bi0 = from[0];
451  b0 = vlib_get_buffer (vm, bi0);
452 
453  ip4_header_t *ip0 =
454  (ip4_header_t *) u8_ptr_add (vlib_buffer_get_current (b0),
455  is_output_feature *
456  vnet_buffer (b0)->
457  ip.save_rewrite_length);
458  if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
459  {
460  // this is a regular packet - no fragmentation
461  if (is_custom)
462  {
463  next0 = vnet_buffer (b0)->ip.reass.next_index;
464  }
465  else
466  {
467  next0 = IP4_SV_REASSEMBLY_NEXT_INPUT;
468  }
469  vnet_buffer (b0)->ip.reass.save_rewrite_length =
470  vnet_buffer (b0)->ip.save_rewrite_length;
471  vnet_buffer (b0)->ip.reass.is_non_first_fragment = 0;
472  vnet_buffer (b0)->ip.reass.ip_proto = ip0->protocol;
473  if (IP_PROTOCOL_TCP == ip0->protocol)
474  {
475  vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
476  ((tcp_header_t *) (ip0 + 1))->flags;
477  vnet_buffer (b0)->ip.reass.tcp_ack_number =
478  ((tcp_header_t *) (ip0 + 1))->ack_number;
479  vnet_buffer (b0)->ip.reass.tcp_seq_number =
480  ((tcp_header_t *) (ip0 + 1))->seq_number;
481  }
482  else if (IP_PROTOCOL_ICMP == ip0->protocol)
483  {
484  vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
485  ((icmp46_header_t *) (ip0 + 1))->type;
486  }
487  vnet_buffer (b0)->ip.reass.l4_src_port = ip4_get_port (ip0, 1);
488  vnet_buffer (b0)->ip.reass.l4_dst_port = ip4_get_port (ip0, 0);
489  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
490  {
491  ip4_sv_reass_add_trace (vm, node, rm, NULL, bi0,
492  REASS_PASSTHROUGH,
493  vnet_buffer (b0)->ip.reass.ip_proto,
494  vnet_buffer (b0)->ip.
495  reass.l4_src_port,
496  vnet_buffer (b0)->ip.
497  reass.l4_dst_port);
498  }
499  goto packet_enqueue;
500  }
501  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
502  const u32 fragment_length =
503  clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
504  const u32 fragment_last = fragment_first + fragment_length - 1;
505  if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0))) // 8 is minimum frag length per RFC 791
506  {
507  next0 = IP4_SV_REASSEMBLY_NEXT_DROP;
508  error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
509  goto packet_enqueue;
510  }
511  ip4_sv_reass_kv_t kv;
512  u8 do_handoff = 0;
513 
514  kv.k.as_u64[0] =
515  (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
516  vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
517  (u64) ip0->src_address.as_u32 << 32;
518  kv.k.as_u64[1] =
519  (u64) ip0->dst_address.
520  as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
521 
522  ip4_sv_reass_t *reass =
523  ip4_sv_reass_find_or_create (vm, rm, rt, &kv, &do_handoff);
524 
525  if (PREDICT_FALSE (do_handoff))
526  {
527  next0 = IP4_SV_REASSEMBLY_NEXT_HANDOFF;
528  vnet_buffer (b0)->ip.reass.owner_thread_index =
529  kv.v.thread_index;
530  goto packet_enqueue;
531  }
532 
533  if (!reass)
534  {
535  next0 = IP4_SV_REASSEMBLY_NEXT_DROP;
536  error0 = IP4_ERROR_REASS_LIMIT_REACHED;
537  goto packet_enqueue;
538  }
539 
540  if (reass->is_complete)
541  {
542  if (is_custom)
543  {
544  next0 = vnet_buffer (b0)->ip.reass.next_index;
545  }
546  else
547  {
548  next0 = IP4_SV_REASSEMBLY_NEXT_INPUT;
549  }
550  vnet_buffer (b0)->ip.reass.save_rewrite_length =
551  vnet_buffer (b0)->ip.save_rewrite_length;
552  vnet_buffer (b0)->ip.reass.is_non_first_fragment =
553  ! !fragment_first;
554  vnet_buffer (b0)->ip.reass.ip_proto = reass->ip_proto;
555  vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
556  reass->icmp_type_or_tcp_flags;
557  vnet_buffer (b0)->ip.reass.tcp_ack_number =
558  reass->tcp_ack_number;
559  vnet_buffer (b0)->ip.reass.tcp_seq_number =
560  reass->tcp_seq_number;
561  vnet_buffer (b0)->ip.reass.l4_src_port = reass->l4_src_port;
562  vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
563  error0 = IP4_ERROR_NONE;
564  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
565  {
566  ip4_sv_reass_add_trace (vm, node, rm, reass, bi0,
567  REASS_FRAGMENT_FORWARD,
568  reass->ip_proto,
569  reass->l4_src_port,
570  reass->l4_dst_port);
571  }
572  goto packet_enqueue;
573  }
574 
575  ip4_sv_reass_rc_t rc =
576  ip4_sv_reass_update (vm, node, rm, rt, ip0, reass, bi0);
577  switch (rc)
578  {
579  case IP4_SV_REASS_RC_OK:
580  /* nothing to do here */
581  break;
582  case IP4_SV_REASS_RC_TOO_MANY_FRAGMENTS:
583  vlib_node_increment_counter (vm, node->node_index,
584  IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
585  1);
586  ip4_sv_reass_free (vm, rm, rt, reass);
587  goto next_packet;
588  break;
589  case IP4_SV_REASS_RC_UNSUPP_IP_PROTO:
590  vlib_node_increment_counter (vm, node->node_index,
591  IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
592  1);
593  ip4_sv_reass_free (vm, rm, rt, reass);
594  goto next_packet;
595  break;
596  }
597  if (reass->is_complete)
598  {
599  u32 idx;
600  vec_foreach_index (idx, reass->cached_buffers)
601  {
602  u32 bi0 = vec_elt (reass->cached_buffers, idx);
603  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
604  u32 next0 = IP4_SV_REASSEMBLY_NEXT_INPUT;
605  if (is_feature)
606  {
607  vnet_feature_next (&next0, b0);
608  }
609  if (is_custom)
610  {
611  next0 = vnet_buffer (b0)->ip.reass.next_index;
612  }
613  if (0 == n_left_to_next)
614  {
615  vlib_put_next_frame (vm, node, next_index,
616  n_left_to_next);
617  vlib_get_next_frame (vm, node, next_index, to_next,
618  n_left_to_next);
619  }
620  to_next[0] = bi0;
621  to_next += 1;
622  n_left_to_next -= 1;
623  ASSERT (vnet_buffer (b0)->ip.save_rewrite_length < (2 << 14));
624  vnet_buffer (b0)->ip.reass.save_rewrite_length =
625  vnet_buffer (b0)->ip.save_rewrite_length;
626  vnet_buffer (b0)->ip.reass.is_non_first_fragment =
628  vnet_buffer (b0)->ip.reass.ip_proto = reass->ip_proto;
629  vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
630  reass->icmp_type_or_tcp_flags;
631  vnet_buffer (b0)->ip.reass.tcp_ack_number =
632  reass->tcp_ack_number;
633  vnet_buffer (b0)->ip.reass.tcp_seq_number =
634  reass->tcp_seq_number;
635  vnet_buffer (b0)->ip.reass.l4_src_port = reass->l4_src_port;
636  vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
637  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
638  {
639  ip4_sv_reass_add_trace (vm, node, rm, reass, bi0,
640  REASS_FRAGMENT_FORWARD,
641  reass->ip_proto,
642  reass->l4_src_port,
643  reass->l4_dst_port);
644  }
645  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
646  to_next, n_left_to_next, bi0,
647  next0);
648  }
649  _vec_len (reass->cached_buffers) = 0; // buffers are owned by frame now
650  }
651  goto next_packet;
652 
653  packet_enqueue:
654  b0->error = node->errors[error0];
655 
656  to_next[0] = bi0;
657  to_next += 1;
658  n_left_to_next -= 1;
659  if (is_feature && IP4_ERROR_NONE == error0)
660  {
661  b0 = vlib_get_buffer (vm, bi0);
662  vnet_feature_next (&next0, b0);
663  }
664  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
665  to_next, n_left_to_next,
666  bi0, next0);
667 
668  next_packet:
669  from += 1;
670  n_left_from -= 1;
671  }
672 
673  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
674  }
675 
676  clib_spinlock_unlock (&rt->lock);
677  return frame->n_vectors;
678 }
679 
680 static char *ip4_sv_reass_error_strings[] = {
681 #define _(sym, string) string,
682  foreach_ip4_error
683 #undef _
684 };
685 
686 VLIB_NODE_FN (ip4_sv_reass_node) (vlib_main_t * vm,
687  vlib_node_runtime_t * node,
688  vlib_frame_t * frame)
689 {
690  return ip4_sv_reass_inline (vm, node, frame, false /* is_feature */ ,
691  false /* is_output_feature */ ,
692  false /* is_custom */ );
693 }
694 
695 /* *INDENT-OFF* */
696 VLIB_REGISTER_NODE (ip4_sv_reass_node) = {
697  .name = "ip4-sv-reassembly",
698  .vector_size = sizeof (u32),
699  .format_trace = format_ip4_sv_reass_trace,
700  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
701  .error_strings = ip4_sv_reass_error_strings,
702  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
703  .next_nodes =
704  {
705  [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
706  [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
707  [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reassembly-handoff",
708 
709  },
710 };
711 /* *INDENT-ON* */
712 
713 VLIB_NODE_FN (ip4_sv_reass_node_feature) (vlib_main_t * vm,
714  vlib_node_runtime_t * node,
715  vlib_frame_t * frame)
716 {
717  return ip4_sv_reass_inline (vm, node, frame, true /* is_feature */ ,
718  false /* is_output_feature */ ,
719  false /* is_custom */ );
720 }
721 
722 /* *INDENT-OFF* */
723 VLIB_REGISTER_NODE (ip4_sv_reass_node_feature) = {
724  .name = "ip4-sv-reassembly-feature",
725  .vector_size = sizeof (u32),
726  .format_trace = format_ip4_sv_reass_trace,
727  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
728  .error_strings = ip4_sv_reass_error_strings,
729  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
730  .next_nodes =
731  {
732  [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
733  [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
734  [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reass-feature-hoff",
735  },
736 };
737 /* *INDENT-ON* */
738 
739 /* *INDENT-OFF* */
740 VNET_FEATURE_INIT (ip4_sv_reass_feature) = {
741  .arc_name = "ip4-unicast",
742  .node_name = "ip4-sv-reassembly-feature",
743  .runs_before = VNET_FEATURES ("ip4-lookup"),
744  .runs_after = 0,
745 };
746 /* *INDENT-ON* */
747 
748 VLIB_NODE_FN (ip4_sv_reass_node_output_feature) (vlib_main_t * vm,
749  vlib_node_runtime_t * node,
750  vlib_frame_t * frame)
751 {
752  return ip4_sv_reass_inline (vm, node, frame, true /* is_feature */ ,
753  true /* is_output_feature */ ,
754  false /* is_custom */ );
755 }
756 
757 
758 /* *INDENT-OFF* */
760  .name = "ip4-sv-reassembly-output-feature",
761  .vector_size = sizeof (u32),
762  .format_trace = format_ip4_sv_reass_trace,
763  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
764  .error_strings = ip4_sv_reass_error_strings,
765  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
766  .next_nodes =
767  {
768  [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
769  [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
770  [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reass-feature-hoff",
771  },
772 };
773 /* *INDENT-ON* */
774 
775 /* *INDENT-OFF* */
776 VNET_FEATURE_INIT (ip4_sv_reass_output_feature) = {
777  .arc_name = "ip4-output",
778  .node_name = "ip4-sv-reassembly-output-feature",
779  .runs_before = 0,
780  .runs_after = 0,
781 };
782 /* *INDENT-ON* */
783 
784 /* *INDENT-OFF* */
786  .name = "ip4-sv-reassembly-custom-next",
787  .vector_size = sizeof (u32),
788  .format_trace = format_ip4_sv_reass_trace,
789  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
790  .error_strings = ip4_sv_reass_error_strings,
791  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
792  .next_nodes =
793  {
794  [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
795  [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
796  [IP4_SV_REASSEMBLY_NEXT_HANDOFF] = "ip4-sv-reassembly-handoff",
797 
798  },
799 };
800 /* *INDENT-ON* */
801 
802 VLIB_NODE_FN (ip4_sv_reass_custom_node) (vlib_main_t * vm,
803  vlib_node_runtime_t * node,
804  vlib_frame_t * frame)
805 {
806  return ip4_sv_reass_inline (vm, node, frame, false /* is_feature */ ,
807  false /* is_output_feature */ ,
808  true /* is_custom */ );
809 }
810 
811 #ifndef CLIB_MARCH_VARIANT
812 always_inline u32
813 ip4_sv_reass_get_nbuckets ()
814 {
815  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
816  u32 nbuckets;
817  u8 i;
818 
819  nbuckets = (u32) (rm->max_reass_n / IP4_SV_REASS_HT_LOAD_FACTOR);
820 
821  for (i = 0; i < 31; i++)
822  if ((1 << i) >= nbuckets)
823  break;
824  nbuckets = 1 << i;
825 
826  return nbuckets;
827 }
828 #endif /* CLIB_MARCH_VARIANT */
829 
830 typedef enum
831 {
832  IP4_EVENT_CONFIG_CHANGED = 1,
833 } ip4_sv_reass_event_t;
834 
835 typedef struct
836 {
837  int failure;
838  clib_bihash_16_8_t *new_hash;
839 } ip4_rehash_cb_ctx;
840 
841 #ifndef CLIB_MARCH_VARIANT
842 static int
843 ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
844 {
845  ip4_rehash_cb_ctx *ctx = _ctx;
846  if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
847  {
848  ctx->failure = 1;
849  }
850  return (BIHASH_WALK_CONTINUE);
851 }
852 
853 static void
854 ip4_sv_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
855  u32 max_reassembly_length,
856  u32 expire_walk_interval_ms)
857 {
858  ip4_sv_reass_main.timeout_ms = timeout_ms;
859  ip4_sv_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
860  ip4_sv_reass_main.max_reass_n = max_reassemblies;
861  ip4_sv_reass_main.max_reass_len = max_reassembly_length;
862  ip4_sv_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
863 }
864 
865 vnet_api_error_t
866 ip4_sv_reass_set (u32 timeout_ms, u32 max_reassemblies,
867  u32 max_reassembly_length, u32 expire_walk_interval_ms)
868 {
869  u32 old_nbuckets = ip4_sv_reass_get_nbuckets ();
870  ip4_sv_reass_set_params (timeout_ms, max_reassemblies,
871  max_reassembly_length, expire_walk_interval_ms);
872  vlib_process_signal_event (ip4_sv_reass_main.vlib_main,
873  ip4_sv_reass_main.ip4_sv_reass_expire_node_idx,
874  IP4_EVENT_CONFIG_CHANGED, 0);
875  u32 new_nbuckets = ip4_sv_reass_get_nbuckets ();
876  if (ip4_sv_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
877  {
878  clib_bihash_16_8_t new_hash;
879  clib_memset (&new_hash, 0, sizeof (new_hash));
880  ip4_rehash_cb_ctx ctx;
881  ctx.failure = 0;
882  ctx.new_hash = &new_hash;
883  clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
884  new_nbuckets * 1024);
885  clib_bihash_foreach_key_value_pair_16_8 (&ip4_sv_reass_main.hash,
886  ip4_rehash_cb, &ctx);
887  if (ctx.failure)
888  {
889  clib_bihash_free_16_8 (&new_hash);
890  return -1;
891  }
892  else
893  {
894  clib_bihash_free_16_8 (&ip4_sv_reass_main.hash);
895  clib_memcpy_fast (&ip4_sv_reass_main.hash, &new_hash,
896  sizeof (ip4_sv_reass_main.hash));
897  clib_bihash_copied (&ip4_sv_reass_main.hash, &new_hash);
898  }
899  }
900  return 0;
901 }
902 
903 vnet_api_error_t
904 ip4_sv_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
905  u32 * max_reassembly_length, u32 * expire_walk_interval_ms)
906 {
907  *timeout_ms = ip4_sv_reass_main.timeout_ms;
908  *max_reassemblies = ip4_sv_reass_main.max_reass_n;
909  *max_reassembly_length = ip4_sv_reass_main.max_reass_len;
910  *expire_walk_interval_ms = ip4_sv_reass_main.expire_walk_interval_ms;
911  return 0;
912 }
913 
914 static clib_error_t *
915 ip4_sv_reass_init_function (vlib_main_t * vm)
916 {
917  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
918  clib_error_t *error = 0;
919  u32 nbuckets;
920  vlib_node_t *node;
921 
922  rm->vlib_main = vm;
923  rm->vnet_main = vnet_get_main ();
924 
925  vec_validate (rm->per_thread_data, vlib_num_workers ());
926  ip4_sv_reass_per_thread_t *rt;
927  vec_foreach (rt, rm->per_thread_data)
928  {
929  clib_spinlock_init (&rt->lock);
930  pool_alloc (rt->pool, rm->max_reass_n);
931  rt->lru_first = rt->lru_last = ~0;
932  }
933 
934  node = vlib_get_node_by_name (vm, (u8 *) "ip4-sv-reassembly-expire-walk");
935  ASSERT (node);
936  rm->ip4_sv_reass_expire_node_idx = node->index;
937 
938  ip4_sv_reass_set_params (IP4_SV_REASS_TIMEOUT_DEFAULT_MS,
939  IP4_SV_REASS_MAX_REASSEMBLIES_DEFAULT,
940  IP4_SV_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
941  IP4_SV_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
942 
943  nbuckets = ip4_sv_reass_get_nbuckets ();
944  clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);
945 
946  node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
947  ASSERT (node);
948  rm->ip4_drop_idx = node->index;
949 
950  rm->fq_index = vlib_frame_queue_main_init (ip4_sv_reass_node.index, 0);
951  rm->fq_feature_index =
952  vlib_frame_queue_main_init (ip4_sv_reass_node_feature.index, 0);
953 
954  rm->feature_use_refcount_per_intf = NULL;
955  rm->output_feature_use_refcount_per_intf = NULL;
956 
957  return error;
958 }
959 
960 VLIB_INIT_FUNCTION (ip4_sv_reass_init_function);
961 #endif /* CLIB_MARCH_VARIANT */
962 
963 static uword
964 ip4_sv_reass_walk_expired (vlib_main_t * vm,
965  vlib_node_runtime_t * node, vlib_frame_t * f)
966 {
967  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
968  uword event_type, *event_data = 0;
969 
970  while (true)
971  {
972  vlib_process_wait_for_event_or_clock (vm,
973  (f64)
974  rm->expire_walk_interval_ms /
975  (f64) MSEC_PER_SEC);
976  event_type = vlib_process_get_events (vm, &event_data);
977 
978  switch (event_type)
979  {
980  case ~0: /* no events => timeout */
981  /* nothing to do here */
982  break;
983  case IP4_EVENT_CONFIG_CHANGED:
984  break;
985  default:
986  clib_warning ("BUG: event type 0x%wx", event_type);
987  break;
988  }
989  f64 now = vlib_time_now (vm);
990 
991  ip4_sv_reass_t *reass;
992  int *pool_indexes_to_free = NULL;
993 
994  uword thread_index = 0;
995  int index;
996  const uword nthreads = vlib_num_workers () + 1;
997  for (thread_index = 0; thread_index < nthreads; ++thread_index)
998  {
999  ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
1000  clib_spinlock_lock (&rt->lock);
1001 
1002  vec_reset_length (pool_indexes_to_free);
1003  /* *INDENT-OFF* */
1004  pool_foreach_index (index, rt->pool, ({
1005  reass = pool_elt_at_index (rt->pool, index);
1006  if (now > reass->last_heard + rm->timeout)
1007  {
1008  vec_add1 (pool_indexes_to_free, index);
1009  }
1010  }));
1011  /* *INDENT-ON* */
1012  int *i;
1013  /* *INDENT-OFF* */
1014  vec_foreach (i, pool_indexes_to_free)
1015  {
1016  ip4_sv_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1017  ip4_sv_reass_free (vm, rm, rt, reass);
1018  }
1019  /* *INDENT-ON* */
1020 
1021  clib_spinlock_unlock (&rt->lock);
1022  }
1023 
1024  vec_free (pool_indexes_to_free);
1025  if (event_data)
1026  {
1027  _vec_len (event_data) = 0;
1028  }
1029  }
1030 
1031  return 0;
1032 }
1033 
1034 /* *INDENT-OFF* */
1035 VLIB_REGISTER_NODE (ip4_sv_reass_expire_node) = {
1036  .function = ip4_sv_reass_walk_expired,
1037  .type = VLIB_NODE_TYPE_PROCESS,
1038  .name = "ip4-sv-reassembly-expire-walk",
1039  .format_trace = format_ip4_sv_reass_trace,
1040  .n_errors = ARRAY_LEN (ip4_sv_reass_error_strings),
1041  .error_strings = ip4_sv_reass_error_strings,
1042 
1043 };
1044 /* *INDENT-ON* */
1045 
1046 static u8 *
1047 format_ip4_sv_reass_key (u8 * s, va_list * args)
1048 {
1049  ip4_sv_reass_key_t *key = va_arg (*args, ip4_sv_reass_key_t *);
1050  s =
1051  format (s,
1052  "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1053  key->xx_id, format_ip4_address, &key->src, format_ip4_address,
1054  &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
1055  return s;
1056 }
1057 
1058 static u8 *
1059 format_ip4_sv_reass (u8 * s, va_list * args)
1060 {
1061  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
1062  ip4_sv_reass_t *reass = va_arg (*args, ip4_sv_reass_t *);
1063 
1064  s = format (s, "ID: %lu, key: %U trace_op_counter: %u\n",
1065  reass->id, format_ip4_sv_reass_key, &reass->key,
1066  reass->trace_op_counter);
1067 
1068  vlib_buffer_t *b;
1069  u32 *bip;
1070  u32 counter = 0;
1071  vec_foreach (bip, reass->cached_buffers)
1072  {
1073  u32 bi = *bip;
1074  do
1075  {
1076  b = vlib_get_buffer (vm, bi);
1077  s = format (s, " #%03u: bi: %u, ", counter, bi);
1078  ++counter;
1079  bi = b->next_buffer;
1080  }
1081  while (b->flags & VLIB_BUFFER_NEXT_PRESENT);
1082  }
1083  return s;
1084 }
1085 
1086 static clib_error_t *
1087 show_ip4_reass (vlib_main_t * vm,
1088  unformat_input_t * input,
1089  CLIB_UNUSED (vlib_cli_command_t * lmd))
1090 {
1091  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
1092 
1093  vlib_cli_output (vm, "---------------------");
1094  vlib_cli_output (vm, "IP4 reassembly status");
1095  vlib_cli_output (vm, "---------------------");
1096  bool details = false;
1097  if (unformat (input, "details"))
1098  {
1099  details = true;
1100  }
1101 
1102  u32 sum_reass_n = 0;
1103  ip4_sv_reass_t *reass;
1104  uword thread_index;
1105  const uword nthreads = vlib_num_workers () + 1;
1106  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1107  {
1108  ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
1109  clib_spinlock_lock (&rt->lock);
1110  if (details)
1111  {
1112  /* *INDENT-OFF* */
1113  pool_foreach (reass, rt->pool, {
1114  vlib_cli_output (vm, "%U", format_ip4_sv_reass, vm, reass);
1115  });
1116  /* *INDENT-ON* */
1117  }
1118  sum_reass_n += rt->reass_n;
1119  clib_spinlock_unlock (&rt->lock);
1120  }
1121  vlib_cli_output (vm, "---------------------");
1122  vlib_cli_output (vm, "Current IP4 reassemblies count: %lu\n",
1123  (long unsigned) sum_reass_n);
1124  vlib_cli_output (vm,
1125  "Maximum configured concurrent IP4 reassemblies per worker-thread: %lu\n",
1126  (long unsigned) rm->max_reass_n);
1127  return 0;
1128 }
1129 
1130 /* *INDENT-OFF* */
1132  .path = "show ip4-sv-reassembly",
1133  .short_help = "show ip4-sv-reassembly [details]",
1134  .function = show_ip4_reass,
1135 };
1136 /* *INDENT-ON* */
1137 
1138 #ifndef CLIB_MARCH_VARIANT
1139 vnet_api_error_t
1140 ip4_sv_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
1141 {
1142  return ip4_sv_reass_enable_disable_with_refcnt (sw_if_index,
1143  enable_disable);
1144 }
1145 #endif /* CLIB_MARCH_VARIANT */
1146 
1147 
1148 #define foreach_ip4_sv_reass_handoff_error \
1149 _(CONGESTION_DROP, "congestion drop")
1150 
1151 
1152 typedef enum
1153 {
1154 #define _(sym,str) IP4_SV_REASSEMBLY_HANDOFF_ERROR_##sym,
1155  foreach_ip4_sv_reass_handoff_error
1156 #undef _
1157  IP4_SV_REASSEMBLY_HANDOFF_N_ERROR,
1158 } ip4_sv_reass_handoff_error_t;
1159 
1160 static char *ip4_sv_reass_handoff_error_strings[] = {
1161 #define _(sym,string) string,
1162  foreach_ip4_sv_reass_handoff_error
1163 #undef _
1164 };
1165 
1166 typedef struct
1167 {
1168  u32 next_worker_index;
1169 } ip4_sv_reass_handoff_trace_t;
1170 
1171 static u8 *
1172 format_ip4_sv_reass_handoff_trace (u8 * s, va_list * args)
1173 {
1174  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1175  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1176  ip4_sv_reass_handoff_trace_t *t =
1177  va_arg (*args, ip4_sv_reass_handoff_trace_t *);
1178 
1179  s =
1180  format (s, "ip4-sv-reassembly-handoff: next-worker %d",
1181  t->next_worker_index);
1182 
1183  return s;
1184 }
1185 
1186 always_inline uword
1187 ip4_sv_reass_handoff_node_inline (vlib_main_t * vm,
1188  vlib_node_runtime_t * node,
1189  vlib_frame_t * frame, bool is_feature)
1190 {
1191  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
1192 
1193  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1194  u32 n_enq, n_left_from, *from;
1195  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1196  u32 fq_index;
1197 
1198  from = vlib_frame_vector_args (frame);
1199  n_left_from = frame->n_vectors;
1200  vlib_get_buffers (vm, from, bufs, n_left_from);
1201 
1202  b = bufs;
1203  ti = thread_indices;
1204 
1205  fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
1206 
1207  while (n_left_from > 0)
1208  {
1209  ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
1210 
1211  if (PREDICT_FALSE
1212  ((node->flags & VLIB_NODE_FLAG_TRACE)
1213  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1214  {
1215  ip4_sv_reass_handoff_trace_t *t =
1216  vlib_add_trace (vm, node, b[0], sizeof (*t));
1217  t->next_worker_index = ti[0];
1218  }
1219 
1220  n_left_from -= 1;
1221  ti += 1;
1222  b += 1;
1223  }
1224  n_enq =
1225  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1226  frame->n_vectors, 1);
1227 
1228  if (n_enq < frame->n_vectors)
1229  vlib_node_increment_counter (vm, node->node_index,
1230  IP4_SV_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
1231  frame->n_vectors - n_enq);
1232  return frame->n_vectors;
1233 }
1234 
1235 VLIB_NODE_FN (ip4_sv_reass_handoff_node) (vlib_main_t * vm,
1236  vlib_node_runtime_t * node,
1237  vlib_frame_t * frame)
1238 {
1239  return ip4_sv_reass_handoff_node_inline (vm, node, frame,
1240  false /* is_feature */ );
1241 }
1242 
1243 
1244 /* *INDENT-OFF* */
1246  .name = "ip4-sv-reassembly-handoff",
1247  .vector_size = sizeof (u32),
1248  .n_errors = ARRAY_LEN(ip4_sv_reass_handoff_error_strings),
1249  .error_strings = ip4_sv_reass_handoff_error_strings,
1250  .format_trace = format_ip4_sv_reass_handoff_trace,
1251 
1252  .n_next_nodes = 1,
1253 
1254  .next_nodes = {
1255  [0] = "error-drop",
1256  },
1257 };
1258 /* *INDENT-ON* */
1259 
1260 
1261 /* *INDENT-OFF* */
1262 VLIB_NODE_FN (ip4_sv_reass_feature_handoff_node) (vlib_main_t * vm,
1263  vlib_node_runtime_t *
1264  node,
1265  vlib_frame_t * frame)
1266 {
1267  return ip4_sv_reass_handoff_node_inline (vm, node, frame,
1268  true /* is_feature */ );
1269 }
1270 /* *INDENT-ON* */
1271 
1272 
1273 /* *INDENT-OFF* */
1275  .name = "ip4-sv-reass-feature-hoff",
1276  .vector_size = sizeof (u32),
1277  .n_errors = ARRAY_LEN(ip4_sv_reass_handoff_error_strings),
1278  .error_strings = ip4_sv_reass_handoff_error_strings,
1279  .format_trace = format_ip4_sv_reass_handoff_trace,
1280 
1281  .n_next_nodes = 1,
1282 
1283  .next_nodes = {
1284  [0] = "error-drop",
1285  },
1286 };
1287 /* *INDENT-ON* */
1288 
1289 #ifndef CLIB_MARCH_VARIANT
1290 int
1291 ip4_sv_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
1292 {
1293  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
1294  vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
1295  if (is_enable)
1296  {
1297  if (!rm->feature_use_refcount_per_intf[sw_if_index])
1298  {
1300  return vnet_feature_enable_disable ("ip4-unicast",
1301  "ip4-sv-reassembly-feature",
1302  sw_if_index, 1, 0, 0);
1303  }
1304  ++rm->feature_use_refcount_per_intf[sw_if_index];
1305  }
1306  else
1307  {
1308  if (rm->feature_use_refcount_per_intf[sw_if_index])
1309  --rm->feature_use_refcount_per_intf[sw_if_index];
1310  if (!rm->feature_use_refcount_per_intf[sw_if_index])
1311  return vnet_feature_enable_disable ("ip4-unicast",
1312  "ip4-sv-reassembly-feature",
1313  sw_if_index, 0, 0, 0);
1314  }
1315  return 0;
1316 }
1317 
1318 uword
1319 ip4_sv_reass_custom_register_next_node (uword node_index)
1320 {
1321  return vlib_node_add_next (vlib_get_main (), ip4_sv_reass_custom_node.index,
1322  node_index);
1323 }
1324 
1325 int
1326 ip4_sv_reass_output_enable_disable_with_refcnt (u32 sw_if_index,
1327  int is_enable)
1328 {
1329  ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
1330  vec_validate (rm->output_feature_use_refcount_per_intf, sw_if_index);
1331  if (is_enable)
1332  {
1333  if (!rm->output_feature_use_refcount_per_intf[sw_if_index])
1334  {
1336  return vnet_feature_enable_disable ("ip4-output",
1337  "ip4-sv-reassembly-output-feature",
1338  sw_if_index, 1, 0, 0);
1339  }
1340  ++rm->output_feature_use_refcount_per_intf[sw_if_index];
1341  }
1342  else
1343  {
1344  if (rm->output_feature_use_refcount_per_intf[sw_if_index])
1345  --rm->output_feature_use_refcount_per_intf[sw_if_index];
1346  if (!rm->output_feature_use_refcount_per_intf[sw_if_index])
1347  return vnet_feature_enable_disable ("ip4-output",
1348  "ip4-sv-reassembly-output-feature",
1349  sw_if_index, 0, 0, 0);
1350  }
1351  return 0;
1352 }
1353 #endif
1354 
1355 /*
1356  * fd.io coding-style-patch-verification: ON
1357  *
1358  * Local Variables:
1359  * eval: (c-set-style "gnu")
1360  * End:
1361  */