FD.io VPP v18.04-17-g3a0d853 (Vector Packet Processing)
load_balance.c

/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/lookup.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/drop_dpo.h>
#include <vppinfra/math.h> /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_urpf_list.h>
#include <vnet/bier/bier_fwd.h>

/*
 * distribution error tolerance for load-balancing
 */
const f64 multipath_next_hop_error_tolerance = 0.01;

#undef LB_DEBUG

#ifdef LB_DEBUG
#define LB_DBG(_lb, _fmt, _args...) \
{ \
    u8* _tmp = NULL; \
    clib_warning("lb:[%s]:" _fmt, \
                 load_balance_format(load_balance_get_index((_lb)), \
                                     0, _tmp), \
                 ##_args); \
    vec_free(_tmp); \
}
#else
#define LB_DBG(_p, _fmt, _args...)
#endif


/**
 * Pool of all DPOs. It's not static so the DP can have fast access
 */
load_balance_t *load_balance_pool;

/**
 * The one instance of load-balance main
 */
load_balance_main_t load_balance_main;

f64
load_balance_get_multipath_tolerance (void)
{
    return (multipath_next_hop_error_tolerance);
}

static inline index_t
load_balance_get_index (const load_balance_t *lb)
{
    return (lb - load_balance_pool);
}

static inline dpo_id_t*
load_balance_get_buckets (load_balance_t *lb)
{
    if (LB_HAS_INLINE_BUCKETS(lb))
    {
        return (lb->lb_buckets_inline);
    }
    else
    {
        return (lb->lb_buckets);
    }
}

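/*
 * Explanatory note: a load-balance whose bucket count is at most
 * LB_NUM_INLINE_BUCKETS keeps its bucket array inline in the object's
 * cache lines (lb_buckets_inline); larger bucket counts spill to the
 * heap-allocated lb_buckets vector. LB_HAS_INLINE_BUCKETS() picks the
 * storage, and the grow/shrink logic in load_balance_multipath_update()
 * below handles crossing this threshold in either direction.
 */
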
static load_balance_t *
load_balance_alloc_i (void)
{
    load_balance_t *lb;

    pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
    memset(lb, 0, sizeof(*lb));

    lb->lb_map = INDEX_INVALID;
    lb->lb_urpf = INDEX_INVALID;
    vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                   load_balance_get_index(lb));
    vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                                   load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                               load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));

    return (lb);
}

static u8*
load_balance_format (index_t lbi,
                     load_balance_format_flags_t flags,
                     u32 indent,
                     u8 *s)
{
    vlib_counter_t to, via;
    load_balance_t *lb;
    dpo_id_t *buckets;
    u32 i;

    lb = load_balance_get(lbi);
    vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
    vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
    buckets = load_balance_get_buckets(lb);

    s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
    s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto);
    s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets);
    s = format(s, "uRPF:%d ", lb->lb_urpf);
    s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
    if (0 != via.packets)
    {
        s = format(s, " via:[%Ld:%Ld]",
                   via.packets, via.bytes);
    }
    s = format(s, "]");

    if (INDEX_INVALID != lb->lb_map)
    {
        s = format(s, "\n%U%U",
                   format_white_space, indent+4,
                   format_load_balance_map, lb->lb_map, indent+4);
    }
    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        s = format(s, "\n%U[%d] %U",
                   format_white_space, indent+2,
                   i,
                   format_dpo_id,
                   &buckets[i], indent+6);
    }
    return (s);
}

u8*
format_load_balance (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);

    return (load_balance_format(lbi, flags, 0, s));
}
static u8*
format_load_balance_dpo (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
}


static load_balance_t *
load_balance_create_i (u32 num_buckets,
                       dpo_proto_t lb_proto,
                       flow_hash_config_t fhc)
{
    load_balance_t *lb;

    lb = load_balance_alloc_i();
    lb->lb_hash_config = fhc;
    lb->lb_n_buckets = num_buckets;
    lb->lb_n_buckets_minus_1 = num_buckets-1;
    lb->lb_proto = lb_proto;

    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_validate_aligned(lb->lb_buckets,
                             lb->lb_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    LB_DBG(lb, "create");

    return (lb);
}

index_t
load_balance_create (u32 n_buckets,
                     dpo_proto_t lb_proto,
                     flow_hash_config_t fhc)
{
    return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
}

static inline void
load_balance_set_bucket_i (load_balance_t *lb,
                           u32 bucket,
                           dpo_id_t *buckets,
                           const dpo_id_t *next)
{
    dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
}

void
load_balance_set_bucket (index_t lbi,
                         u32 bucket,
                         const dpo_id_t *next)
{
    load_balance_t *lb;
    dpo_id_t *buckets;

    lb = load_balance_get(lbi);
    buckets = load_balance_get_buckets(lb);

    ASSERT(bucket < lb->lb_n_buckets);

    load_balance_set_bucket_i(lb, bucket, buckets, next);
}

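/*
 * Illustrative usage (mirrors what load_balance_module_init() does below
 * for the reserved index-0 LB): create a single-bucket load-balance and
 * point its one bucket at the drop DPO:
 *
 *   index_t lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
 *   load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));
 */
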
int
load_balance_is_drop (const dpo_id_t *dpo)
{
    load_balance_t *lb;

    if (DPO_LOAD_BALANCE != dpo->dpoi_type)
        return (0);

    lb = load_balance_get(dpo->dpoi_index);

    if (1 == lb->lb_n_buckets)
    {
        return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
    }
    return (0);
}

u16
load_balance_n_buckets (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_n_buckets);
}

void
load_balance_set_fib_entry_flags (index_t lbi,
                                  fib_entry_flag_t flags)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);
    lb->lb_fib_entry_flags = flags;
}


void
load_balance_set_urpf (index_t lbi,
                       index_t urpf)
{
    load_balance_t *lb;
    index_t old;

    lb = load_balance_get(lbi);

    /*
     * packets in flight may see this change, but the write is atomic, so :P
     */
    old = lb->lb_urpf;
    lb->lb_urpf = urpf;

    fib_urpf_list_unlock(old);
    fib_urpf_list_lock(urpf);
}

index_t
load_balance_get_urpf (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_urpf);
}

const dpo_id_t *
load_balance_get_bucket (index_t lbi,
                         u32 bucket)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (load_balance_get_bucket_i(lb, bucket));
}

static int
next_hop_sort_by_weight (const load_balance_path_t * n1,
                         const load_balance_path_t * n2)
{
    return ((int) n1->path_weight - (int) n2->path_weight);
}

/* The given next-hop vector is over-written with a normalized one: the
   entries are sorted by weight and the weights are scaled so that each
   corresponds to the number of adjacencies assigned to that next hop.
   Returns the number of adjacencies in the block. */
u32
ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
                                  load_balance_path_t ** normalized_next_hops,
                                  u32 *sum_weight_in,
                                  f64 multipath_next_hop_error_tolerance)
{
    load_balance_path_t * nhs;
    uword n_nhs, n_adj, n_adj_left, i, sum_weight;
    f64 norm, error;

    n_nhs = vec_len (raw_next_hops);
    ASSERT (n_nhs > 0);
    if (n_nhs == 0)
        return 0;

    /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
    nhs = *normalized_next_hops;
    vec_validate (nhs, 2*n_nhs - 1);

    /* Fast path: 1 next hop in block. */
    n_adj = n_nhs;
    if (n_nhs == 1)
    {
        nhs[0] = raw_next_hops[0];
        nhs[0].path_weight = 1;
        _vec_len (nhs) = 1;
        sum_weight = 1;
        goto done;
    }

    else if (n_nhs == 2)
    {
        int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

        /* Fast sort. */
        nhs[0] = raw_next_hops[cmp];
        nhs[1] = raw_next_hops[cmp ^ 1];

        /* Fast path: equal cost multipath with 2 next hops. */
        if (nhs[0].path_weight == nhs[1].path_weight)
        {
            nhs[0].path_weight = nhs[1].path_weight = 1;
            _vec_len (nhs) = 2;
            sum_weight = 2;
            goto done;
        }
    }
    else
    {
        clib_memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
        qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

    /* Find total weight to normalize weights. */
    sum_weight = 0;
    for (i = 0; i < n_nhs; i++)
        sum_weight += nhs[i].path_weight;

    /* In the unlikely case that all weights are given as 0, set them all to 1. */
    if (sum_weight == 0)
    {
        for (i = 0; i < n_nhs; i++)
            nhs[i].path_weight = 1;
        sum_weight = n_nhs;
    }

    /* Save copies of all next hop weights to avoid being overwritten in loop below. */
    for (i = 0; i < n_nhs; i++)
        nhs[n_nhs + i].path_weight = nhs[i].path_weight;

    /* Try larger and larger power of 2 sized adjacency blocks until we
       find one where traffic flows to within 1% of specified weights. */
    for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
    {
        error = 0;

        norm = n_adj / ((f64) sum_weight);
        n_adj_left = n_adj;
        for (i = 0; i < n_nhs; i++)
        {
            f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
            word n = flt_round_nearest (nf);

            n = n > n_adj_left ? n_adj_left : n;
            n_adj_left -= n;
            error += fabs (nf - n);
            nhs[i].path_weight = n;

            if (0 == nhs[i].path_weight)
            {
                /*
                 * This happens when the weight skew is high (norm is small)
                 * and n == nf. Without this correction a path with a low
                 * weight would have no representation in the load-balance,
                 * and we don't want that. If the weight skew is high then
                 * the load-balance needs many buckets to accommodate it.
                 * Pays ya money, takes ya choice.
                 */
                error = n_adj;
                break;
            }
        }

        nhs[0].path_weight += n_adj_left;

        /* Is the average error per adjacency within the specified tolerance
           for this size of adjacency block? */
        if (error <= multipath_next_hop_error_tolerance*n_adj)
        {
            /* Truncate any next hops with zero weight. */
            _vec_len (nhs) = i;
            break;
        }
    }

done:
    /* Save vector for next call. */
    *normalized_next_hops = nhs;
    *sum_weight_in = sum_weight;
    return n_adj;
}

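/*
 * Worked example (illustrative, assuming the default 1% tolerance): raw
 * weights {1, 2, 5} give sum_weight = 8 and a first candidate block of
 * max_pow2(3) = 4 adjacencies. With norm = 4/8 = 0.5 the scaled weights
 * round to {1, 1, 2}, so error = |0.5-1| + |1-1| + |2.5-2| = 1.0, which
 * exceeds 0.01 * 4. Doubling to n_adj = 8 gives norm = 1.0 and the exact
 * weights {1, 2, 5} with error 0, so 8 is returned and the buckets are
 * split 1:2:5.
 */
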
static load_balance_path_t *
load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
                                       dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *new_nhs = NULL, *nh;

        /*
         * we need something for the load-balance. so use the drop
         */
        vec_add2(new_nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));

        return (new_nhs);
    }

    return (NULL);
}

/*
 * Fill in adjacencies in block based on corresponding
 * next hop adjacencies.
 */
static void
load_balance_fill_buckets (load_balance_t *lb,
                           load_balance_path_t *nhs,
                           dpo_id_t *buckets,
                           u32 n_buckets)
{
    load_balance_path_t * nh;
    u16 ii, bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
        }
    }
}

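/*
 * Illustrative example: normalized paths {A: weight 1, B: weight 3} over a
 * 4-bucket block are written out as [A, B, B, B]; the data-path later
 * selects one with flow_hash & lb_n_buckets_minus_1.
 */
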
static inline void
load_balance_set_n_buckets (load_balance_t *lb,
                            u32 n_buckets)
{
    lb->lb_n_buckets = n_buckets;
    lb->lb_n_buckets_minus_1 = n_buckets-1;
}

void
load_balance_multipath_update (const dpo_id_t *dpo,
                               const load_balance_path_t * raw_nhs,
                               load_balance_flags_t flags)
{
    load_balance_path_t *nh, *nhs, *fixed_nhs;
    u32 sum_of_weights, n_buckets, ii;
    index_t lbmi, old_lbmi;
    load_balance_t *lb;
    dpo_id_t *tmp_dpo;

    nhs = NULL;

    ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
    lb = load_balance_get(dpo->dpoi_index);
    fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
    n_buckets =
        ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
                                          raw_nhs :
                                          fixed_nhs),
                                         &nhs,
                                         &sum_of_weights,
                                         multipath_next_hop_error_tolerance);

    ASSERT (n_buckets >= vec_len (raw_nhs));

    /*
     * Save the old load-balance map used, and get a new one if required.
     */
    old_lbmi = lb->lb_map;
    if (flags & LOAD_BALANCE_FLAG_USES_MAP)
    {
        lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
    }
    else
    {
        lbmi = INDEX_INVALID;
    }

    if (0 == lb->lb_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        load_balance_set_n_buckets(lb, n_buckets);

        if (!LB_HAS_INLINE_BUCKETS(lb))
            vec_validate_aligned(lb->lb_buckets,
                                 lb->lb_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        load_balance_fill_buckets(lb, nhs,
                                  load_balance_get_buckets(lb),
                                  n_buckets);
        lb->lb_map = lbmi;
    }
    else
    {
        /*
         * This is a modification of an existing load-balance.
         * We need to ensure that packets in flight see a consistent state;
         * that is, the number of reported buckets the LB has (read from
         * lb_n_buckets_minus_1) is never more than it actually has. So if the
         * number of buckets is increasing, we must update the bucket array
         * first, then the reported number; vice-versa if the number of
         * buckets goes down.
         */
        if (n_buckets == lb->lb_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            load_balance_fill_buckets(lb, nhs,
                                      load_balance_get_buckets(lb),
                                      n_buckets);
            lb->lb_map = lbmi;
        }
        else if (n_buckets > lb->lb_n_buckets)
        {
            /*
             * we have more buckets. the old load-balance map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from the inline storage to out-line. Alloc the outline buckets
                 * first, then fixup the number. then reset the inlines.
                 */
                ASSERT(NULL == lb->lb_buckets);
                vec_validate_aligned(lb->lb_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&lb->lb_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= LB_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still inline buckets.
                     * we can write the new on the old.
                     */
                    load_balance_fill_buckets(lb, nhs,
                                              load_balance_get_buckets(lb),
                                              n_buckets);
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket array to
                     * hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = load_balance_get_buckets(lb);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    load_balance_fill_buckets(lb, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    lb->lb_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }

            /*
             * buckets fixed. ready for the MAP update.
             */
            lb->lb_map = lbmi;
        }
        else
        {
            /*
             * bucket size shrinkage.
             * Any map we have will be based on the old
             * larger number of buckets, so will be translating to indices
             * out of range. So the new MAP must be installed first.
             */
            lb->lb_map = lbmi;
            CLIB_MEMORY_BARRIER();


            if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (and at this point the inline buckets are
                 *       used).
                 *   3 - free the outline buckets
                 */
                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets_inline,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, lb->lb_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(lb->lb_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = lb->lb_n_buckets;
                buckets = load_balance_get_buckets(lb);

                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                load_balance_fill_buckets(lb, nhs,
                                          buckets,
                                          n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
    vec_free(fixed_nhs);

    load_balance_map_unlock(old_lbmi);
}

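/*
 * Illustrative usage sketch (hypothetical caller; via_a and via_b stand for
 * already-resolved dpo_id_t values): update an existing load-balance with
 * two unequal-cost paths. Normalization and bucket sizing happen inside:
 *
 *   load_balance_path_t *paths = NULL, *path;
 *   vec_add2(paths, path, 1);
 *   path->path_weight = 1;
 *   dpo_copy(&path->path_dpo, &via_a);
 *   vec_add2(paths, path, 1);
 *   path->path_weight = 2;
 *   dpo_copy(&path->path_dpo, &via_b);
 *   load_balance_multipath_update(&dpo, paths, LOAD_BALANCE_FLAG_NONE);
 */
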
static void
load_balance_lock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks++;
}

static void
load_balance_destroy (load_balance_t *lb)
{
    dpo_id_t *buckets;
    int i;

    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    LB_DBG(lb, "destroy");
    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_free(lb->lb_buckets);
    }

    fib_urpf_list_unlock(lb->lb_urpf);
    load_balance_map_unlock(lb->lb_map);

    pool_put(load_balance_pool, lb);
}

static void
load_balance_unlock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks--;

    if (0 == lb->lb_locks)
    {
        load_balance_destroy(lb);
    }
}

static void
load_balance_mem_show (void)
{
    fib_show_memory_usage("load-balance",
                          pool_elts(load_balance_pool),
                          pool_len(load_balance_pool),
                          sizeof(load_balance_t));
    load_balance_map_show_mem();
}

const static dpo_vft_t lb_vft = {
    .dv_lock = load_balance_lock,
    .dv_unlock = load_balance_unlock,
    .dv_format = format_load_balance_dpo,
    .dv_mem_show = load_balance_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
 *        object.
 *
 * This means that these graph nodes are ones from which a load-balance is the
 * parent object in the DPO-graph.
 *
 * We do not list all the load-balance nodes, such as the *-lookup nodes;
 * instead we rely on the correct use of the .sibling_of field when setting
 * up these sibling nodes.
 */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
const static char* const load_balance_l2_nodes[] =
{
    "l2-load-balance",
    NULL,
};
const static char* const load_balance_nsh_nodes[] =
{
    "nsh-load-balance",
    NULL
};
const static char* const load_balance_bier_nodes[] =
{
    "bier-load-balance",
    NULL,
};
const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = load_balance_ip4_nodes,
    [DPO_PROTO_IP6]  = load_balance_ip6_nodes,
    [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
    [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
    [DPO_PROTO_NSH] = load_balance_nsh_nodes,
    [DPO_PROTO_BIER] = load_balance_bier_nodes,
};

void
load_balance_module_init (void)
{
    index_t lbi;

    dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);

    /*
     * Special LB with index zero. we need to define this since the v4 mtrie
     * assumes an index of 0 implies the ply is empty. therefore all 'real'
     * adjs need a non-zero index.
     * This should never be used, but just in case, stack it on a drop.
     */
    lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
    load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));

    load_balance_map_module_init();
}

static clib_error_t *
load_balance_show (vlib_main_t * vm,
                   unformat_input_t * input,
                   vlib_cli_command_t * cmd)
{
    index_t lbi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &lbi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != lbi)
    {
        vlib_cli_output (vm, "%U", format_load_balance, lbi,
                         LOAD_BALANCE_FORMAT_DETAIL);
    }
    else
    {
        load_balance_t *lb;

        pool_foreach(lb, load_balance_pool,
        ({
            vlib_cli_output (vm, "%U", format_load_balance,
                             load_balance_get_index(lb),
                             LOAD_BALANCE_FORMAT_NONE);
        }));
    }

    return 0;
}

VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};
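
/*
 * Example CLI session (output shape follows load_balance_format() above;
 * the index and counter values are illustrative):
 *
 *   vpp# show load-balance 1
 *   dpo-load-balance: [proto:ip4 index:1 buckets:1 uRPF:0 to:[0:0]]
 *     [0] [@0]: dpo-drop ip4
 */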

always_inline u32
ip_flow_hash (void *data)
{
    ip4_header_t *iph = (ip4_header_t *) data;

    if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
        return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
    else
        return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
}

always_inline u64
mac_to_u64 (u8 * m)
{
    return (*((u64 *) m) & 0xffffffffffff);
}

always_inline u32
l2_flow_hash (vlib_buffer_t * b0)
{
    ethernet_header_t *eh;
    u64 a, b, c;
    uword is_ip, eh_size;
    u16 eh_type;

    eh = vlib_buffer_get_current (b0);
    eh_type = clib_net_to_host_u16 (eh->type);
    eh_size = ethernet_buffer_header_size (b0);

    is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);

    /* since we have 2 cache lines, use them */
    if (is_ip)
        a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
    else
        a = eh->type;

    b = mac_to_u64 ((u8 *) eh->dst_address);
    c = mac_to_u64 ((u8 *) eh->src_address);
    hash_mix64 (a, b, c);

    return (u32) c;
}

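/*
 * Note on how these hashes are consumed: load_balance_inline() below picks
 * a bucket with flow_hash & lb_n_buckets_minus_1. This mask-based selection
 * is why bucket counts are always powers of two (see max_pow2() in the
 * normalization above); e.g. a hash of 0x2f over 4 buckets selects
 * 0x2f & 0x3 = bucket 3.
 */
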
typedef struct load_balance_trace_t_
{
    index_t lb_index;
} load_balance_trace_t;

static uword
load_balance_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * frame,
                     int is_l2)
{
    u32 n_left_from, next_index, *from, *to_next;

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            vlib_buffer_t *b0;
            u32 bi0, lbi0, next0;
            const dpo_id_t *dpo0;
            const load_balance_t *lb0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            /* lookup dst + src mac */
            lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            lb0 = load_balance_get(lbi0);

            if (is_l2)
            {
                vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
            }
            else
            {
                /* it's BIER */
                const bier_hdr_t *bh0 = vlib_buffer_get_current(b0);
                vnet_buffer(b0)->ip.flow_hash = bier_hdr_get_entropy(bh0);
            }

            dpo0 = load_balance_get_bucket_i(lb0,
                                             vnet_buffer(b0)->ip.flow_hash &
                                             (lb0->lb_n_buckets_minus_1));

            next0 = dpo0->dpoi_next_node;
            vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

            if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                           sizeof (*tr));
                tr->lb_index = lbi0;
            }
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, next0);
        }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}

static uword
l2_load_balance (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
    return (load_balance_inline(vm, node, frame, 1));
}

static u8 *
format_l2_load_balance_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

    s = format (s, "L2-load-balance: index %d", t->lb_index);
    return s;
}

/**
 * @brief The L2 load-balance node
 */
VLIB_REGISTER_NODE (l2_load_balance_node) = {
    .function = l2_load_balance,
    .name = "l2-load-balance",
    .vector_size = sizeof (u32),

    .format_trace = format_l2_load_balance_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    },
};

static uword
nsh_load_balance (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
    u32 n_left_from, next_index, *from, *to_next;

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            vlib_buffer_t *b0;
            u32 bi0, lbi0, next0, *nsh0;
            const dpo_id_t *dpo0;
            const load_balance_t *lb0;

            bi0 = from[0];
            to_next[0] = bi0;
            from += 1;
            to_next += 1;
            n_left_from -= 1;
            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            lb0 = load_balance_get(lbi0);

            /* SPI + SI are the second word of the NSH header */
            nsh0 = vlib_buffer_get_current (b0);
            vnet_buffer(b0)->ip.flow_hash = nsh0[1] % lb0->lb_n_buckets;

            dpo0 = load_balance_get_bucket_i(lb0,
                                             vnet_buffer(b0)->ip.flow_hash &
                                             (lb0->lb_n_buckets_minus_1));

            next0 = dpo0->dpoi_next_node;
            vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

            if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
                load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                           sizeof (*tr));
                tr->lb_index = lbi0;
            }
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, next0);
        }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}

static u8 *
format_nsh_load_balance_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

    s = format (s, "NSH-load-balance: index %d", t->lb_index);
    return s;
}

/**
 * @brief The NSH load-balance node
 */
VLIB_REGISTER_NODE (nsh_load_balance_node) = {
    .function = nsh_load_balance,
    .name = "nsh-load-balance",
    .vector_size = sizeof (u32),

    .format_trace = format_nsh_load_balance_trace,
    .n_next_nodes = 1,
    .next_nodes = {
        [0] = "error-drop",
    },
};

static u8 *
format_bier_load_balance_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

    s = format (s, "BIER-load-balance: index %d", t->lb_index);
    return s;
}

static uword
bier_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
    return (load_balance_inline(vm, node, frame, 0));
}

/**
 * @brief The BIER load-balance node
 */
VLIB_REGISTER_NODE (bier_load_balance_node) = {
    .function = bier_load_balance,
    .name = "bier-load-balance",
    .vector_size = sizeof (u32),

    .format_trace = format_bier_load_balance_trace,
    .sibling_of = "mpls-load-balance",
};