FD.io VPP  v18.10-32-g1161dda
Vector Packet Processing
stats_to_be_deprecated.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "stats_to_be_deprecated.h"
16 #include <signal.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 #include <vnet/udp/udp_encap.h>
22 #include <vnet/bier/bier_fmask.h>
23 #include <vnet/bier/bier_table.h>
24 #include <vnet/fib/fib_api.h>
25 
26 #define STATS_DEBUG 0
27 
29 
30 #include <vnet/ip/ip.h>
31 
32 #include <vpp/api/vpe_msg_enum.h>
33 
34 #define f64_endian(a)
35 #define f64_print(a,b)
36 
37 #define vl_typedefs /* define message structures */
38 #include <vpp/api/vpe_all_api_h.h>
39 #undef vl_typedefs
40 
41 #define vl_endianfun /* define message structures */
42 #include <vpp/api/vpe_all_api_h.h>
43 #undef vl_endianfun
44 
45 /* instantiate all the print functions we know about */
46 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
47 #define vl_printfun
48 #include <vpp/api/vpe_all_api_h.h>
49 #undef vl_printfun
50 
/* X-macro list of every stats API message this module handles:
   _(MESSAGE_ID_SUFFIX, handler_name_suffix).  Expanded elsewhere to
   register handlers and (presumably) to size dispatch tables. */
51 #define foreach_stats_msg \
52 _(WANT_STATS, want_stats) \
53 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
54 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
55 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
56 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
57 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
58 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
59 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
60 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
61 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
62 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
63 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
64 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
65 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
66 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
67 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
68 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
69 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
70 _(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
71 _(WANT_UDP_ENCAP_STATS, want_udp_encap_stats) \
72 _(WANT_BIER_NEIGHBOR_STATS, want_bier_neighbor_stats)
73 
74 #define vl_msg_name_crc_list
75 #include <vpp/stats/stats.api.h>
76 #undef vl_msg_name_crc_list
77 
/* Register "name_crc" -> message-id mappings for every stats API message,
   so clients can resolve message ids by name+CRC.
   NOTE(review): the function-name/parameter line (presumably
   "setup_message_id_table (api_main_t * am)") is missing from this
   extract -- confirm against the upstream source. */
78 static void
80 {
81 #define _(id,n,crc) \
82  vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
83  foreach_vl_msg_name_crc_stats;
84 #undef _
85 }
86 
87 /* These constants ensure msg sizes <= 1024, aka ring allocation */
88 #define SIMPLE_COUNTER_BATCH_SIZE 126
89 #define COMBINED_COUNTER_BATCH_SIZE 63
90 #define IP4_FIB_COUNTER_BATCH_SIZE 48
91 #define IP6_FIB_COUNTER_BATCH_SIZE 30
92 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
93 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
94 #define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
95 #define BIER_NEIGHBOR_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_bier_neighbor_counter_t))
96 
97 /* 5ms */
98 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
99 /* ns/us us/ms */
100 
/* format() callback: render a combined-counters message as one
   "<ifname>.<rx|tx>.packets N" / ".bytes N" line pair per interface,
   starting at first_sw_if_index.  Values arrive in network byte order
   and may be unaligned, hence clib_mem_unaligned + clib_net_to_host_u64.
   NOTE(review): this extract is missing the signature line
   (format_vnet_combined_counters (u8 * s, va_list * args)), the va_arg
   fetch of mp, the count/sw_if_index declarations, the case labels for
   VNET_INTERFACE_COUNTER_RX/TX, and the format_vnet_sw_if_index_name
   argument of the format calls -- verify before editing. */
101 u8 *
103 {
104  stats_main_t *sm = &stats_main;
107 
108  char *counter_name;
110  int i;
111  count = ntohl (mp->count);
112  sw_if_index = ntohl (mp->first_sw_if_index);
113 
114  vlib_counter_t *vp;
115  u64 packets, bytes;
116  vp = (vlib_counter_t *) mp->data;
117 
118  switch (mp->vnet_counter_type)
119  {
121  counter_name = "rx";
122  break;
124  counter_name = "tx";
125  break;
126  default:
127  counter_name = "bogus";
128  break;
129  }
130  for (i = 0; i < count; i++)
131  {
132  packets = clib_mem_unaligned (&vp->packets, u64);
133  packets = clib_net_to_host_u64 (packets);
134  bytes = clib_mem_unaligned (&vp->bytes, u64);
135  bytes = clib_net_to_host_u64 (bytes);
136  vp++;
137  s = format (s, "%U.%s.packets %lld\n",
139  sm->vnet_main, sw_if_index, counter_name, packets);
140  s = format (s, "%U.%s.bytes %lld\n",
142  sm->vnet_main, sw_if_index, counter_name, bytes);
143  sw_if_index++;
144  }
145  return s;
146 }
147 
/* format() callback: render a simple-counters message as one
   "<ifname>.<counter> N" line per interface, starting at
   first_sw_if_index.  Values are network byte order, possibly unaligned.
   NOTE(review): this extract is missing the signature line, the
   count/sw_if_index declarations, the case labels for the
   VNET_INTERFACE_COUNTER_* switch, and part of the format arguments --
   verify against the upstream source. */
148 u8 *
150 {
151  stats_main_t *sm = &stats_main;
153  va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
154  char *counter_name;
156  count = ntohl (mp->count);
157  sw_if_index = ntohl (mp->first_sw_if_index);
158  u64 *vp, v;
159  vp = (u64 *) mp->data;
160  int i;
161 
162  switch (mp->vnet_counter_type)
163  {
165  counter_name = "drop";
166  break;
168  counter_name = "punt";
169  break;
171  counter_name = "ip4";
172  break;
174  counter_name = "ip6";
175  break;
177  counter_name = "rx-no-buff";
178  break;
180  counter_name = "rx-miss";
181  break;
183  counter_name = "rx-error (fifo-full)";
184  break;
186  counter_name = "tx-error (fifo-full)";
187  break;
188  default:
189  counter_name = "bogus";
190  break;
191  }
192  for (i = 0; i < count; i++)
193  {
194  v = clib_mem_unaligned (vp, u64);
195  v = clib_net_to_host_u64 (v);
196  vp++;
197  s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
198  sm->vnet_main, sw_if_index, counter_name, v);
199  sw_if_index++;
200  }
201 
202  return s;
203 }
204 
/* Acquire the data-structure spinlock, re-entrantly: if the calling
   thread already holds it, just bump the recursion count.  Spins with
   __sync_lock_test_and_set otherwise.  release_hint asks the current
   holder to drop the lock sooner.
   NOTE(review): the declaration of 'l' (presumably
   "data_structure_lock_t *l = sm->data_structure_lock;") is missing
   from this extract -- confirm before editing. */
205 static void
206 dslock (stats_main_t * sm, int release_hint, int tag)
207 {
208  u32 thread_index;
210 
/* No-op when the lock was never allocated (e.g. early in init). */
211  if (PREDICT_FALSE (l == 0))
212  return;
213 
214  thread_index = vlib_get_thread_index ();
/* Recursive acquisition by the same thread: count and return. */
215  if (l->lock && l->thread_index == thread_index)
216  {
217  l->count++;
218  return;
219  }
220 
221  if (release_hint)
222  l->release_hint++;
223 
/* Spin until the lock is free; tag records who holds it (debug aid). */
224  while (__sync_lock_test_and_set (&l->lock, 1))
225  /* zzzz */ ;
226  l->tag = tag;
227  l->thread_index = thread_index;
228  l->count = 1;
229 }
230 
231 void
232 stats_dslock_with_hint (int hint, int tag)
233 {
234  stats_main_t *sm = &stats_main;
235  dslock (sm, hint, tag);
236 }
237 
/* Release the data-structure spinlock: decrement the recursion count
   and only clear the lock word when it reaches zero.  Negating the tag
   marks "released by" for debugging.
   NOTE(review): this extract is missing the function-name line
   (presumably "dsunlock (stats_main_t * sm)"), the declaration of 'l',
   and likely a CLIB_MEMORY_BARRIER() before clearing l->lock -- confirm
   against the upstream source. */
238 static void
240 {
241  u32 thread_index;
243 
244  if (PREDICT_FALSE (l == 0))
245  return;
246 
247  thread_index = vlib_get_thread_index ();
/* Must only be called by the thread that holds the lock. */
248  ASSERT (l->lock && l->thread_index == thread_index);
249  l->count--;
250  if (l->count == 0)
251  {
252  l->tag = -l->tag;
253  l->release_hint = 0;
255  l->lock = 0;
256  }
257 }
258 
259 void
260 stats_dsunlock (int hint, int tag)
261 {
262  stats_main_t *sm = &stats_main;
263  dsunlock (sm);
264 }
265 
267 get_client_for_stat (u32 reg, u32 item, u32 client_index)
268 {
269  stats_main_t *sm = &stats_main;
270  vpe_client_stats_registration_t *registration;
271  uword *p;
272 
273  /* Is there anything listening for item in that reg */
274  p = hash_get (sm->stats_registration_hash[reg], item);
275 
276  if (!p)
277  return 0; // Fail
278 
279  /* If there is, is our client_index one of them */
280  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
281  p = hash_get (registration->client_hash, client_index);
282 
283  if (!p)
284  return 0; // Fail
285 
286  return pool_elt_at_index (registration->clients, p[0]);
287 
288 }
289 
/* Ensure client is registered for (reg, item): create the per-item
   registration on first use, then add the client to its client pool and
   hash if not already present.  Always returns 1, meaning "at least one
   client listens -- keep the poller enabled".
   NOTE(review): this extract is missing the signature line (presumably
   "set_client_for_stat (u32 reg, u32 item, vpe_client_registration_t *
   client)") and the declaration of 'cr' -- confirm before editing. */
290 static int
292 {
293  stats_main_t *sm = &stats_main;
294  vpe_client_stats_registration_t *registration;
296  uword *p;
297 
298  /* Is there anything listening for item in that reg */
299  p = hash_get (sm->stats_registration_hash[reg], item);
300 
301  if (!p)
302  {
/* First listener for this item: allocate and index the registration. */
303  pool_get (sm->stats_registrations[reg], registration);
304  registration->item = item;
305  registration->client_hash = NULL;
306  registration->clients = NULL;
307  hash_set (sm->stats_registration_hash[reg], item,
308  registration - sm->stats_registrations[reg]);
309  }
310  else
311  {
312  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
313  }
314 
315  p = hash_get (registration->client_hash, client->client_index);
316 
317  if (!p)
318  {
/* Client not yet registered for this item: add it. */
319  pool_get (registration->clients, cr);
320  cr->client_index = client->client_index;
321  cr->client_pid = client->client_pid;
322  hash_set (registration->client_hash, cr->client_index,
323  cr - registration->clients);
324  }
325 
326  return 1; //At least one client is doing something ... poll
327 }
328 
/* Remove one client from the registration at reg_index for (reg, item).
   When the last client for the item goes away, the registration itself
   is freed and unhooked from the item hash.
   NOTE(review): the declaration of 'client' (presumably
   "vpe_client_registration_t *client;") is missing from this extract. */
329 static void
330 clear_one_client (u32 reg_index, u32 reg, u32 item, u32 client_index)
331 {
332  stats_main_t *sm = &stats_main;
333  vpe_client_stats_registration_t *registration;
335  uword *p;
336 
337  registration = pool_elt_at_index (sm->stats_registrations[reg], reg_index);
338  p = hash_get (registration->client_hash, client_index);
339 
340  if (p)
341  {
342  client = pool_elt_at_index (registration->clients, p[0]);
343  hash_unset (registration->client_hash, client->client_index);
344  pool_put (registration->clients, client);
345 
346  /* Now check if that was the last client for that item */
347  if (0 == pool_elts (registration->clients))
348  {
349  hash_unset (sm->stats_registration_hash[reg], item);
350  hash_free (registration->client_hash);
351  pool_free (registration->clients);
352  pool_put (sm->stats_registrations[reg], registration);
353  }
354  }
355 }
356 
357 int
358 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
359 {
360  stats_main_t *sm = &stats_main;
361  uword *p;
362  int i, elts;
363 
364  /* Clear the client first */
365  /* Is there anything listening for item in that reg */
366  p = hash_get (sm->stats_registration_hash[reg], item);
367 
368  if (!p)
369  goto exit;
370 
371  /* If there is, is our client_index one of them */
372  clear_one_client (p[0], reg, item, client_index);
373 
374 exit:
375  elts = 0;
376  /* Now check if that was the last item in any of the listened to stats */
377  for (i = 0; i < STATS_REG_N_IDX; i++)
378  {
379  elts += pool_elts (sm->stats_registrations[i]);
380  }
381  return elts;
382 }
383 
/* Remove a client from every stat registration it holds, across all
   registration types, then return the total number of registrations
   remaining (non-zero keeps the poller enabled).
   NOTE(review): this extract is missing the signature line (presumably
   "clear_client_for_all_stats (u32 client_index)") and the outer loop
   header over 'reg' (presumably "for (reg = 0; reg < STATS_REG_N_IDX;
   reg++)") -- confirm against the upstream source. */
384 static int
386 {
387  stats_main_t *sm = &stats_main;
388  u32 reg_index, item, reg;
389  int i, elts;
390 
391  /* *INDENT-OFF* */
393  {
394  hash_foreach(item, reg_index, sm->stats_registration_hash[reg],
395  ({
396  clear_one_client(reg_index, reg, item, client_index);
397  }));
398  }
399  /* *INDENT-OFF* */
400 
401  elts = 0;
402  /* Now check if that was the last item in any of the listened to stats */
403  for (i = 0; i < STATS_REG_N_IDX; i++)
404  {
405  elts += pool_elts (sm->stats_registrations[i]);
406  }
407  return elts;
408 }
409 
410 static clib_error_t *
411 want_stats_reaper (u32 client_index)
412 {
413  stats_main_t *sm = &stats_main;
414 
415  sm->enable_poller = clear_client_for_all_stats (client_index);
416 
417  return (NULL);
418 }
419 
421 
422 
423 /*
424  * Return a copy of the clients list.
425  */
428 {
429  stats_main_t *sm = &stats_main;
430  vpe_client_registration_t *client, *clients = 0;
431  vpe_client_stats_registration_t *registration;
432  uword *p;
433 
434  /* Is there anything listening for item in that reg */
435  p = hash_get (sm->stats_registration_hash[reg], item);
436 
437  if (!p)
438  return 0; // Fail
439 
440  /* If there is, is our client_index one of them */
441  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
442 
443  vec_reset_length (clients);
444 
445  /* *INDENT-OFF* */
446  pool_foreach (client, registration->clients,
447  ({
448  vec_add1 (clients, *client);}
449  ));
450  /* *INDENT-ON* */
451  return clients;
452 }
453 
454 
/* Free the per-stat-type interest vectors (one stats_reg() expansion
   per type declared in stats.reg) and then the outer vector itself. */
455 static void
456 clear_client_reg (u32 ** registrations)
457 {
458  /* When registrations[x] is a vector of pool indices
459  here is a good place to clean up the pools
460  */
461 #define stats_reg(n) vec_free(registrations[IDX_##n]);
462 #include <vpp/stats/stats.reg>
463 #undef stats_reg
464 
465  vec_free (registrations);
466 }
467 
/* Allocate/refresh the per-stat-type interest vectors: validate the
   outer vector to STATS_REG_N_IDX entries, then reset each inner vector
   via one stats_reg() expansion per type in stats.reg.  Returns the
   (possibly reallocated) outer vector. */
468 u32 **
469 init_client_reg (u32 ** registrations)
470 {
471 
472  /*
473  Initialise the stats registrations for each
474  type of stat a client can register for as well as
475  a vector of "interested" indexes.
476  Initially this is a u32 of either sw_if_index or fib_index
477  but eventually this should migrate to a pool_index (u32)
478  with a type specific pool that can include more complex things
479  such as timing and structured events.
480  */
481  vec_validate (registrations, STATS_REG_N_IDX);
482 #define stats_reg(n) \
483  vec_reset_length(registrations[IDX_##n]);
484 #include <vpp/stats/stats.reg>
485 #undef stats_reg
486 
487  /*
488  When registrations[x] is a vector of pool indices, here
489  is a good place to init the pools.
490  */
491  return registrations;
492 }
493 
/* Mark interest in "everything" for every stat type by appending the
   ~0 wildcard to each per-type interest vector.  Returns the outer
   vector for chaining. */
494 u32 **
495 enable_all_client_reg (u32 ** registrations)
496 {
497 
498  /*
499  Enable all stats known by adding
500  ~0 to the index vector. Eventually this
501  should be deprecated.
502  */
503 #define stats_reg(n) \
504  vec_add1(registrations[IDX_##n], ~0);
505 #include <vpp/stats/stats.reg>
506 #undef stats_reg
507  return registrations;
508 }
509 
/* Poller body: walk every simple interface counter, batch values
   (host->net byte order) into VNET_INTERFACE_SIMPLE_COUNTERS messages of
   at most SIMPLE_COUNTER_BATCH_SIZE entries, and post them to the main
   thread's shared-memory input queue.
   NOTE(review): this extract is missing the signature line (presumably
   "do_simple_interface_counters (stats_main_t * sm)"), the declarations
   of mp/im/cm/shmem_hdr, the interface-registration lock/unlock calls,
   and the vl_msg_api_alloc_as_if_client() call that pairs with the
   "(sizeof (*mp) + ...)" argument line -- confirm before editing. */
510 static void
512 {
515  api_main_t *am = sm->api_main;
517  svm_queue_t *q = shmem_hdr->vl_input_queue;
519  u32 items_this_message = 0;
520  u64 v, *vp = 0;
521  int i, n_counts;
522 
523  /*
524  * Prevent interface registration from expanding / moving the vectors...
525  * That tends never to happen, so we can hold this lock for a while.
526  */
528 
529  vec_foreach (cm, im->sw_if_counters)
530  {
531  n_counts = vlib_simple_counter_n_counters (cm);
532  for (i = 0; i < n_counts; i++)
533  {
/* Lazily start a new batch message when none is in flight. */
534  if (mp == 0)
535  {
536  items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
537  n_counts - i);
538 
540  (sizeof (*mp) + items_this_message * sizeof (v));
541  mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
542  mp->vnet_counter_type = cm - im->sw_if_counters;
543  mp->first_sw_if_index = htonl (i);
544  mp->count = 0;
545  vp = (u64 *) mp->data;
546  }
547  v = vlib_get_simple_counter (cm, i);
548  clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
549  vp++;
550  mp->count++;
551  if (mp->count == items_this_message)
552  {
553  mp->count = htonl (items_this_message);
554  /* Send to the main thread... */
555  vl_msg_api_send_shmem (q, (u8 *) & mp);
556  mp = 0;
557  }
558  }
/* Batch sizing guarantees the last message was flushed exactly. */
559  ASSERT (mp == 0);
560  }
562 }
563 
/* Enable or disable a client's registration for one (stat, item) pair,
   updating sm->enable_poller to reflect whether anyone still listens.
   Disabling an already-disabled client only logs a warning.
   NOTE(review): the first signature line (presumably
   "handle_client_registration (vpe_client_registration_t * client,
   u32 stat," ) is missing from this extract. */
564 void
566  u32 item, int enable_disable)
567 {
568  stats_main_t *sm = &stats_main;
569  vpe_client_registration_t *rp, _rp;
570 
571  rp = get_client_for_stat (stat, item, client->client_index);
572 
573  /* Disable case */
574  if (enable_disable == 0)
575  {
576  if (!rp) // No client to disable
577  {
578  clib_warning ("pid %d: already disabled for stats...",
579  client->client_pid);
580  return;
581  }
582  sm->enable_poller =
583  clear_client_for_stat (stat, item, client->client_index);
584  return;
585  }
586  /* Enable case */
587  if (!rp)
588  {
/* Stack-local registration record; set_client_for_stat copies it. */
589  rp = &_rp;
590  rp->client_index = client->client_index;
591  rp->client_pid = client->client_pid;
592  sm->enable_poller = set_client_for_stat (stat, item, rp);
593  }
594 }
595 
596 
597 /**********************************
598  * ALL Interface Combined stats - to be deprecated
599  **********************************/
600 
601 /*
602  * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
603  */
/* Handler for WANT_INTERFACE_COMBINED_STATS.  Deprecated: internally it
   registers for the per-interface variant with sw_if_index == ~0 (all
   interfaces), then replies with retval.
   NOTE(review): this extract is missing the handler signature, the
   declarations of rp/reg, and the vl_api_client_index_to_registration()
   lookup that sets 'reg' after the reply: label -- confirm before
   editing. */
604 static void
607 {
608  stats_main_t *sm = &stats_main;
610  vl_api_want_interface_combined_stats_reply_t *rmp;
611  uword *p;
612  i32 retval = 0;
614  u32 swif;
615 
616  swif = ~0; //Using same mechanism as _per_interface_
617  rp.client_index = mp->client_index;
618  rp.client_pid = mp->pid;
619 
620  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
621  mp->enable_disable);
622 
623 reply:
/* Client vanished mid-request: unwind the registration we just made. */
625  if (!reg)
626  {
627  sm->enable_poller =
628  clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
629  mp->client_index);
630  return;
631  }
632 
633  rmp = vl_msg_api_alloc (sizeof (*rmp));
634  rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
635  rmp->context = mp->context;
636  rmp->retval = retval;
637 
638  vl_api_send_msg (reg, (u8 *) rmp);
639 }
640 
/* Fan a VNET_INTERFACE_COMBINED_COUNTERS message out to every client
   registered for the ~0 ("all interfaces") item.  The message is copied
   for all recipients but the last, which receives the original (or it
   is freed when nobody can take it).
   NOTE(review): this extract is missing the handler signature, the
   mp_copy declaration, and the vl_api_client_index_to_registration()
   call that sets 'reg' inside the loop -- confirm before editing. */
641 static void
644 {
645  vpe_client_registration_t *clients, client;
646  stats_main_t *sm = &stats_main;
647  vl_api_registration_t *reg, *reg_prev = NULL;
649  u32 mp_size;
650  int i;
651 
652  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
653 
654  clients =
655  get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
656  ~0 /*flag for all */ );
657 
658  for (i = 0; i < vec_len (clients); i++)
659  {
660  client = clients[i];
662  if (reg)
663  {
/* Send to the previous recipient and keep a fresh copy for the next. */
664  if (reg_prev && vl_api_can_send_msg (reg_prev))
665  {
666  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
667  clib_memcpy (mp_copy, mp, mp_size);
668  vl_api_send_msg (reg_prev, (u8 *) mp);
669  mp = mp_copy;
670  }
671  reg_prev = reg;
672  }
673  }
674  vec_free (clients);
675 #if STATS_DEBUG > 0
676  fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
677 #endif
678 
/* Last recipient gets the original message; otherwise free it. */
679  if (reg_prev && vl_api_can_send_msg (reg_prev))
680  {
681  vl_api_send_msg (reg_prev, (u8 *) mp);
682  }
683  else
684  {
685  vl_msg_api_free (mp);
686  }
687 }
688 
/* Poller body: walk every combined (packets+bytes) interface counter,
   batch into VNET_INTERFACE_COMBINED_COUNTERS messages of at most
   COMBINED_COUNTER_BATCH_SIZE entries, and post to the main thread's
   shared-memory queue.  Mirrors do_simple_interface_counters().
   NOTE(review): this extract is missing the signature line, the
   declarations of mp/im/cm/shmem_hdr, the lock/unlock calls, the
   vl_msg_api_alloc_as_if_client() line, the vnet_counter_type
   assignment, and the "clib_mem_unaligned (&vp->packets, u64)" lvalue
   line -- confirm before editing. */
689 static void
691 {
694  api_main_t *am = sm->api_main;
696  svm_queue_t *q = shmem_hdr->vl_input_queue;
698  u32 items_this_message = 0;
699  vlib_counter_t v, *vp = 0;
700  int i, n_counts;
701 
703 
705  {
706  n_counts = vlib_combined_counter_n_counters (cm);
707  for (i = 0; i < n_counts; i++)
708  {
709  if (mp == 0)
710  {
711  items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
712  n_counts - i);
713 
715  (sizeof (*mp) + items_this_message * sizeof (v));
716  mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
718  mp->first_sw_if_index = htonl (i);
719  mp->count = 0;
720  vp = (vlib_counter_t *) mp->data;
721  }
722  vlib_get_combined_counter (cm, i, &v);
724  = clib_host_to_net_u64 (v.packets);
725  clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
726  vp++;
727  mp->count++;
728  if (mp->count == items_this_message)
729  {
730  mp->count = htonl (items_this_message);
731  /* Send to the main thread... */
732  vl_msg_api_send_shmem (q, (u8 *) & mp);
733  mp = 0;
734  }
735  }
736  ASSERT (mp == 0);
737  }
739 }
740 
741 /**********************************
742  * Per Interface Combined stats
743  **********************************/
744 
745 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_COMBINED_STATS: validate each
   requested sw_if_index (except the ~0 wildcard), then register or
   deregister the client for each, and send a reply carrying retval.
   NOTE(review): this extract is missing the handler signature, the
   declarations of rp/reg, the sw_if_index validity check inside the
   first loop (presumably "if (pool_is_free_index (...))"), and the
   vl_api_client_index_to_registration() call after reply: -- confirm
   before editing. */
746 static void
749 {
750  stats_main_t *sm = &stats_main;
752  vl_api_want_per_interface_combined_stats_reply_t *rmp;
754  uword *p;
755  i32 retval = 0;
757  u32 i, swif, num = 0;
758 
759  num = ntohl (mp->num);
760 
761  /*
762  * Validate sw_if_indexes before registering
763  */
764  for (i = 0; i < num; i++)
765  {
766  swif = ntohl (mp->sw_ifs[i]);
767 
768  /*
769  * Check its a real sw_if_index that the client is allowed to see
770  */
771  if (swif != ~0)
772  {
774  {
775  retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
776  goto reply;
777  }
778  }
779  }
780 
781  for (i = 0; i < num; i++)
782  {
783  swif = ntohl (mp->sw_ifs[i]);
784 
785  rp.client_index = mp->client_index;
786  rp.client_pid = mp->pid;
787  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
788  swif, ntohl (mp->enable_disable));
789  }
790 
791 reply:
/* Client vanished mid-request: unwind every registration just made. */
793  if (!reg)
794  {
795  for (i = 0; i < num; i++)
796  {
797  swif = ntohl (mp->sw_ifs[i]);
798 
799  sm->enable_poller =
800  clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
801  mp->client_index);
802  }
803  return;
804  }
805 
806  rmp = vl_msg_api_alloc (sizeof (*rmp));
807  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
808  rmp->context = mp->context;
809  rmp->retval = retval;
810 
811  vl_api_send_msg (reg, (u8 *) rmp);
812 }
813 
814 /* Per Interface Combined distribution to client */
/* Poller body: for every client registered for specific interfaces'
   combined counters, build a one-entry
   VNET_PER_INTERFACE_COMBINED_COUNTERS message (all six rx/tx
   unicast/multicast/broadcast pairs, host->net order) and send it
   directly to that client's registration.  Clients that disconnected
   are pruned in-line.  The ~0 "all interfaces" item is skipped here; it
   is served by the all-interface path.
   NOTE(review): this extract is missing the signature line, the
   declarations of mp/vp/im/cm/reg/client/v, the regs_tmp/clients_tmp
   vec_reset_length calls, the vl_api_client_index_to_registration()
   lookup, the "vp = ... mp->data" assignment, and the RX/TX counter
   expansions before the RX_UNICAST line -- confirm before editing. */
815 static void
817 {
820  api_main_t *am = sm->api_main;
822  vl_api_registration_t *vl_reg;
826  u32 i, j;
829  u32 *sw_if_index = 0;
830 
832 
834 
835  /* *INDENT-OFF* */
836  pool_foreach (reg,
837  sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
838  ({ vec_add1 (sm->regs_tmp, reg); }));
839  /* *INDENT-ON* */
840 
841  for (i = 0; i < vec_len (sm->regs_tmp); i++)
842  {
843  reg = sm->regs_tmp[i];
/* Wildcard registrations are handled by the all-interfaces poller. */
844  if (reg->item == ~0)
845  {
849  continue;
850  }
852 
853  /* *INDENT-OFF* */
854  pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
855  client);}));
856  /* *INDENT-ON* */
857 
858  for (j = 0; j < vec_len (sm->clients_tmp); j++)
859  {
860  client = sm->clients_tmp[j];
861 
863 
864  //Client may have disconnected abrubtly, clean up so we don't poll nothing.
865  if (!vl_reg)
866  {
867  sm->enable_poller =
868  clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
869  reg->item, client->client_index);
870  continue;
871  }
872  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
873  memset (mp, 0, sizeof (*mp));
874 
875  mp->_vl_msg_id =
876  ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
877 
878  /*
879  * count will eventually be used to optimise the batching
880  * of per client messages for each stat. For now setting this to 1 then
881  * iterate. This will not affect API.
882  *
883  * FIXME instead of enqueueing here, this should be sent to a batch
884  * storer for per-client transmission. Each "mp" sent would be a single entry
885  * and if a client is listening to other sw_if_indexes for same, it would be
886  * appended to that *mp
887  *
888  *
889  * FIXME(s):
890  * - capturing the timestamp of the counters "when VPP knew them" is important.
891  * Less so is that the timing of the delivery to the control plane be in the same
892  * timescale.
893 
894  * i.e. As long as the control plane can delta messages from VPP and work out
895  * velocity etc based on the timestamp, it can do so in a more "batch mode".
896 
897  * It would be beneficial to keep a "per-client" message queue, and then
898  * batch all the stat messages for a client into one message, with
899  * discrete timestamps.
900 
901  * Given this particular API is for "per interface" one assumes that the scale
902  * is less than the ~0 case, which the prior API is suited for.
903  */
904 
905  /*
906  * 1 message per api call for now
907  */
908  mp->count = htonl (1);
909  mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
910 
912  vp->sw_if_index = htonl (reg->item);
913 
914  im = &vnet_get_main ()->interface_main;
915 
916 #define _(X, x) \
917  cm = im->combined_sw_if_counters + X; \
918  vlib_get_combined_counter (cm, reg->item, &v); \
919  clib_mem_unaligned (&vp->x##_packets, u64) = \
920  clib_host_to_net_u64 (v.packets); \
921  clib_mem_unaligned (&vp->x##_bytes, u64) = \
922  clib_host_to_net_u64 (v.bytes);
923 
924 
927  _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
928  _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
929  _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
930  _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
931  _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
932  _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
933 
934 #undef _
935 
936  vl_api_send_msg (vl_reg, (u8 *) mp);
937  }
938  }
939 
941 }
942 
943 /**********************************
944  * Per Interface simple stats
945  **********************************/
946 
947 /* Request from client registering interfaces it wants */
/* Handler for WANT_PER_INTERFACE_SIMPLE_STATS: validate each requested
   sw_if_index (except the ~0 wildcard), register/deregister the client
   per interface, and reply with retval.  Mirrors the combined-stats
   handler above.
   NOTE(review): this extract is missing the handler signature, the
   rp/reg declarations, the validity check inside the first loop, and
   the vl_api_client_index_to_registration() call after reply: --
   confirm before editing. */
948 static void
951 {
952  stats_main_t *sm = &stats_main;
954  vl_api_want_per_interface_simple_stats_reply_t *rmp;
956  uword *p;
957  i32 retval = 0;
959  u32 i, swif, num = 0;
960 
961  num = ntohl (mp->num);
962 
963  for (i = 0; i < num; i++)
964  {
965  swif = ntohl (mp->sw_ifs[i]);
966 
967  /* Check its a real sw_if_index that the client is allowed to see */
968  if (swif != ~0)
969  {
971  {
972  retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
973  goto reply;
974  }
975  }
976  }
977 
978  for (i = 0; i < num; i++)
979  {
980  swif = ntohl (mp->sw_ifs[i]);
981 
982  rp.client_index = mp->client_index;
983  rp.client_pid = mp->pid;
984  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
985  swif, ntohl (mp->enable_disable));
986  }
987 
988 reply:
990 
991  /* Client may have disconnected abruptly, clean up */
992  if (!reg)
993  {
994  for (i = 0; i < num; i++)
995  {
996  swif = ntohl (mp->sw_ifs[i]);
997  sm->enable_poller =
998  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
999  mp->client_index);
1000  }
1001 
1002  return;
1003  }
1004 
1005 
1006  rmp = vl_msg_api_alloc (sizeof (*rmp));
1007  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
1008  rmp->context = mp->context;
1009  rmp->retval = retval;
1010 
1011  vl_api_send_msg (reg, (u8 *) rmp);
1012 }
1013 
1014 /* Per Interface Simple distribution to client */
/* Poller body: for every client registered for specific interfaces'
   simple counters, build a one-entry
   VNET_PER_INTERFACE_SIMPLE_COUNTERS message carrying drop/punt/ip4/
   ip6/rx-no-buf/rx-miss/rx-error/tx-error/mpls values (host->net
   order) and send it to that client.  Disconnected clients are pruned
   in-line; the ~0 wildcard item is skipped (served elsewhere).
   NOTE(review): this extract is missing the signature line, the
   declarations of mp/vp/im/cm/reg/vl_reg assignment lines, the
   clients_tmp reset, the "cm = im->sw_if_counters +
   VNET_INTERFACE_COUNTER_*" selector line before each
   vlib_get_simple_counter() call, and the rx_no_buf lvalue line --
   confirm before editing. */
1015 static void
1017 {
1020  api_main_t *am = sm->api_main;
1022  vl_api_registration_t *vl_reg;
1024  u32 i, j, size;
1026  vpe_client_registration_t *client;
1027  u32 timestamp, count;
1029  counter_t v;
1030 
1032 
1033  vec_reset_length (sm->regs_tmp);
1034 
1035  /* *INDENT-OFF* */
1036  pool_foreach (reg,
1037  sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
1038  ({ vec_add1 (sm->regs_tmp, reg); }));
1039  /* *INDENT-ON* */
1040 
1041  for (i = 0; i < vec_len (sm->regs_tmp); i++)
1042  {
1043  reg = sm->regs_tmp[i];
/* Wildcard registrations are handled by the all-interfaces poller. */
1044  if (reg->item == ~0)
1045  {
1049  continue;
1050  }
1052 
1053  /* *INDENT-OFF* */
1054  pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
1055  client);}));
1056  /* *INDENT-ON* */
1057 
1058  for (j = 0; j < vec_len (sm->clients_tmp); j++)
1059  {
1060  client = sm->clients_tmp[j];
1062 
1063  /* Client may have disconnected abrubtly, clean up */
1064  if (!vl_reg)
1065  {
1066  sm->enable_poller =
1067  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1068  reg->item, client->client_index);
1069  continue;
1070  }
1071 
1072  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
1073  memset (mp, 0, sizeof (*mp));
1074  mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1075 
1076  /*
1077  * count will eventually be used to optimise the batching
1078  * of per client messages for each stat. For now setting this to 1 then
1079  * iterate. This will not affect API.
1080  *
1081  * FIXME instead of enqueueing here, this should be sent to a batch
1082  * storer for per-client transmission. Each "mp" sent would be a single entry
1083  * and if a client is listening to other sw_if_indexes for same, it would be
1084  * appended to that *mp
1085  *
1086  *
1087  * FIXME(s):
1088  * - capturing the timestamp of the counters "when VPP knew them" is important.
1089  * Less so is that the timing of the delivery to the control plane be in the same
1090  * timescale.
1091 
1092  * i.e. As long as the control plane can delta messages from VPP and work out
1093  * velocity etc based on the timestamp, it can do so in a more "batch mode".
1094 
1095  * It would be beneficial to keep a "per-client" message queue, and then
1096  * batch all the stat messages for a client into one message, with
1097  * discrete timestamps.
1098 
1099  * Given this particular API is for "per interface" one assumes that the scale
1100  * is less than the ~0 case, which the prior API is suited for.
1101  */
1102 
1103  /*
1104  * 1 message per api call for now
1105  */
1106  mp->count = htonl (1);
1107  mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
1108  vp = (vl_api_vnet_simple_counter_t *) mp->data;
1109 
1110  vp->sw_if_index = htonl (reg->item);
1111 
1112  // VNET_INTERFACE_COUNTER_DROP
1114  v = vlib_get_simple_counter (cm, reg->item);
1115  clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1116 
1117  // VNET_INTERFACE_COUNTER_PUNT
1119  v = vlib_get_simple_counter (cm, reg->item);
1120  clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1121 
1122  // VNET_INTERFACE_COUNTER_IP4
1124  v = vlib_get_simple_counter (cm, reg->item);
1125  clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1126 
1127  //VNET_INTERFACE_COUNTER_IP6
1129  v = vlib_get_simple_counter (cm, reg->item);
1130  clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1131 
1132  //VNET_INTERFACE_COUNTER_RX_NO_BUF
1134  v = vlib_get_simple_counter (cm, reg->item);
1136  clib_host_to_net_u64 (v);
1137 
1138  //VNET_INTERFACE_COUNTER_RX_MISS
1140  v = vlib_get_simple_counter (cm, reg->item);
1141  clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1142 
1143  //VNET_INTERFACE_COUNTER_RX_ERROR
1145  v = vlib_get_simple_counter (cm, reg->item);
1146  clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1147 
1148  //VNET_INTERFACE_COUNTER_TX_ERROR
1150  v = vlib_get_simple_counter (cm, reg->item);
1151  clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1152 
1153  //VNET_INTERFACE_COUNTER_MPLS
1155  v = vlib_get_simple_counter (cm, reg->item);
1156  clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1157 
1158  vl_api_send_msg (vl_reg, (u8 *) mp);
1159  }
1160  }
1161 
1163 }
1164 
1165 /**********************************
1166  * Per FIB IP4 stats
1167  **********************************/
1168 
/* Sleep for sec seconds + nsec nanoseconds, resuming after EINTR with
   the remaining time so the total delay is honoured.  Any other
   nanosleep failure is logged and the wait abandoned.
   NOTE(review): the function-name line (presumably
   "ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)") is
   missing from this extract. */
1169 static void
1171 {
1172  struct timespec _req, *req = &_req;
1173  struct timespec _rem, *rem = &_rem;
1174 
1175  req->tv_sec = sec;
1176  req->tv_nsec = nsec;
1177  while (1)
1178  {
1179  if (nanosleep (req, rem) == 0)
1180  break;
/* Interrupted: retry with the unexpired remainder. */
1181  *req = *rem;
1182  if (errno == EINTR)
1183  continue;
1184  clib_unix_warning ("nanosleep");
1185  break;
1186  }
1187 }
1188 
1189 /**
1190  * @brief The context passed when collecting adjacency counters
1191  */
/* Walk context for collecting IPv4 neighbour adjacency counters.
   NOTE(review): the member declaration lines (presumably
   "u32 sw_if_index;" and "vl_api_ip4_nbr_counter_t *counters;" plus the
   closing "} ip4_nbr_stats_ctx_t;") are missing from this extract --
   confirm against the upstream source. */
1192 typedef struct ip4_nbr_stats_ctx_t_
1193 {
1194  /**
1195  * The SW IF index all these adjs belong to
1196  */
1198 
1199  /**
1200  * A vector of ip4 nbr counters
1201  */
1204 
/* adj_nbr_walk callback: append a counter entry (network byte order for
   packets/bytes) for each adjacency that has seen traffic; adjacencies
   with zero packets are skipped.  Always continues the walk.
   NOTE(review): this extract is missing the signature line (presumably
   "ip4_nbr_stats_cb (adj_index_t ai, void *arg)") and the declaration
   of 'ctx' (presumably "ip4_nbr_stats_ctx_t *ctx;"). */
1205 static adj_walk_rc_t
1207 {
1208  vl_api_ip4_nbr_counter_t *vl_counter;
1209  vlib_counter_t adj_counter;
1211  ip_adjacency_t *adj;
1212 
1213  ctx = arg;
1214  vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
1215 
1216  if (0 != adj_counter.packets)
1217  {
1218  vec_add2 (ctx->counters, vl_counter, 1);
1219  adj = adj_get (ai);
1220 
1221  vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1222  vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
/* Address is already in network byte order in the adjacency. */
1223  vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1224  vl_counter->link_type = adj->ia_link;
1225  }
1226  return (ADJ_WALK_RC_CONTINUE);
1227 }
1228 
1229 #define MIN(x,y) (((x)<(y))?(x):(y))
1230 
/* Post a message to the shared-memory queue under the queue lock, drop
   the data-structure lock, and -- if the queue was already full --
   back off for STATS_RELEASE_DELAY_NS so the consumer can drain it.
   NOTE(review): this extract is missing the signature line (presumably
   "send_and_pause (stats_main_t * sm, svm_queue_t * q, u8 * mp)") and
   the nsec argument of the ip46_fib_stats_delay call -- confirm before
   editing. */
1231 static void
1233 {
1234  u8 pause = 0;
1235 
1236  svm_queue_lock (q);
/* Sample fullness before enqueueing; pause afterwards if it was full. */
1237  pause = svm_queue_is_full (q);
1238 
1239  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1240  svm_queue_unlock (q);
1241  dsunlock (sm);
1242 
1243  if (pause)
1244  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1246 }
1247 
/* Drain ctx->counters toward clients: repeatedly take up to a batch of
   entries from the tail of the vector, wrap them in a
   VNET_IP4_NBR_COUNTERS message, and ship via send_and_pause() (which
   may sleep when the queue is full -- hence the loop can resume where a
   previous suspend left off).  The data-structure lock is taken per
   batch and released inside send_and_pause().
   NOTE(review): this extract is missing the signature line (presumably
   "ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)"), the
   declarations of mp/shmem_hdr, the batch-size constant in the MIN()
   call, and the sizeof operand in the alloc -- confirm before editing.
   Also note 'first' is initialised to 0, so mp->begin is never 1; this
   matches the scrape but looks suspicious -- verify upstream. */
1248 static void
1250 {
1251  api_main_t *am = sm->api_main;
1253  svm_queue_t *q = shmem_hdr->vl_input_queue;
1255  int first = 0;
1256 
1257  /*
1258  * If the walk context has counters, which may be left over from the last
1259  * suspend, then we continue from there.
1260  */
1261  while (0 != vec_len (ctx->counters))
1262  {
1263  u32 n_items = MIN (vec_len (ctx->counters),
1265  u8 pause = 0;
1266 
1267  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1268 
1269  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1270  (n_items *
1271  sizeof
1273  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1274  mp->count = ntohl (n_items);
1275  mp->sw_if_index = ntohl (ctx->sw_if_index);
1276  mp->begin = first;
1277  first = 0;
1278 
1279  /*
1280  * copy the counters from the back of the context, then we can easily
1281  * 'erase' them by resetting the vector length.
1282  * The order we push the stats to the caller is not important.
1283  */
1284  clib_memcpy (mp->c,
1285  &ctx->counters[vec_len (ctx->counters) - n_items],
1286  n_items * sizeof (*ctx->counters));
1287 
1288  _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1289 
1290  /*
1291  * send to the shm q
1292  */
1293  send_and_pause (sm, q, (u8 *) & mp);
1294  }
1295 }
1296 
1297 static void
1299 {
1300  vnet_main_t *vnm = vnet_get_main ();
1302  vnet_sw_interface_t *si;
1303 
1305  .sw_if_index = 0,
1306  .counters = NULL,
1307  };
1308 
1309  /* *INDENT-OFF* */
1310  pool_foreach (si, im->sw_interfaces,
1311  ({
1312  /*
1313  * update the interface we are now concerned with
1314  */
1315  ctx.sw_if_index = si->sw_if_index;
1316 
1317  /*
1318  * we are about to walk another interface, so we shouldn't have any pending
1319  * stats to export.
1320  */
1321  ASSERT(ctx.counters == NULL);
1322 
1323  /*
1324  * visit each neighbour adjacency on the interface and collect
1325  * its current stats.
1326  * Because we hold the lock the walk is synchronous, so it is safe with
1327  * respect to routing updates. It is limited in work by the number of adjacencies on an
1328  * interface, which is typically not huge.
1329  */
1330  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1331  adj_nbr_walk (si->sw_if_index,
1332  FIB_PROTOCOL_IP4,
1333  ip4_nbr_stats_cb,
1334  &ctx);
1335  dsunlock (sm);
1336 
1337  /*
1338  * if this interface has some adjacencies with counters then ship them,
1339  * else continue to the next interface.
1340  */
1341  if (NULL != ctx.counters)
1342  {
1343  ip4_nbr_ship(sm, &ctx);
1344  }
1345  }));
1346  /* *INDENT-OFF* */
1347 }
1348 
1349 /**
1350  * @brief The context passed when collecting adjacency counters
1351  */
1352 typedef struct ip6_nbr_stats_ctx_t_
1353 {
1354  /**
1355  * The SW IF index all these adjs belong to
1356  */
1358 
1359  /**
1360  * A vector of ip6 nbr counters
1361  */
1364 
1365 static adj_walk_rc_t
1367  void *arg)
1368 {
1369  vl_api_ip6_nbr_counter_t *vl_counter;
1370  vlib_counter_t adj_counter;
1372  ip_adjacency_t *adj;
1373 
1374  ctx = arg;
1375  vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
1376 
1377  if (0 != adj_counter.packets)
1378  {
1379  vec_add2(ctx->counters, vl_counter, 1);
1380  adj = adj_get(ai);
1381 
1382  vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1383  vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1384  vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1385  vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1386  vl_counter->link_type = adj->ia_link;
1387  }
1388  return (ADJ_WALK_RC_CONTINUE);
1389 }
1390 
/*
 * Duplicate of the MIN defined earlier in this file (kept for locality of
 * the ip6 code that follows). Guarded so the redefinition — and any
 * definition pulled in from <sys/param.h> — cannot trigger
 * -Wmacro-redefined. NOTE: evaluates each argument twice; do not pass
 * expressions with side effects.
 */
#ifndef MIN
#define MIN(x,y) (((x)<(y))?(x):(y))
#endif
1392 
1393 static void
1396 {
1397  api_main_t *am = sm->api_main;
1399  svm_queue_t *q = shmem_hdr->vl_input_queue;
1401  int first = 0;
1402 
1403  /*
1404  * If the walk context has counters, which may be left over from the last
1405  * suspend, then we continue from there.
1406  */
1407  while (0 != vec_len(ctx->counters))
1408  {
1409  u32 n_items = MIN (vec_len (ctx->counters),
1411  u8 pause = 0;
1412 
1413  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1414 
1415  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1416  (n_items *
1417  sizeof
1419  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1420  mp->count = ntohl (n_items);
1421  mp->sw_if_index = ntohl (ctx->sw_if_index);
1422  mp->begin = first;
1423  first = 0;
1424 
1425  /*
1426  * copy the counters from the back of the context, then we can easily
1427  * 'erase' them by resetting the vector length.
1428  * The order we push the stats to the caller is not important.
1429  */
1430  clib_memcpy (mp->c,
1431  &ctx->counters[vec_len (ctx->counters) - n_items],
1432  n_items * sizeof (*ctx->counters));
1433 
1434  _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1435 
1436  /*
1437  * send to the shm q
1438  */
1439  send_and_pause(sm, q, (u8 *) & mp);
1440  }
1441 }
1442 
1443 static void
1445 {
1446  vnet_main_t *vnm = vnet_get_main ();
1448  vnet_sw_interface_t *si;
1449 
1451  .sw_if_index = 0,
1452  .counters = NULL,
1453  };
1454 
1455  /* *INDENT-OFF* */
1456  pool_foreach (si, im->sw_interfaces,
1457  ({
1458  /*
1459  * update the interface we are now concerned with
1460  */
1461  ctx.sw_if_index = si->sw_if_index;
1462 
1463  /*
1464  * we are about to walk another interface, so we shouldn't have any pending
1465  * stats to export.
1466  */
1467  ASSERT(ctx.counters == NULL);
1468 
1469  /*
1470  * visit each neighbour adjacency on the interface and collect
1471  * its current stats.
1472  * Because we hold the lock the walk is synchronous, so it is safe with
1473  * respect to routing updates. It is limited in work by the number of adjacencies on an
1474  * interface, which is typically not huge.
1475  */
1476  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1477  adj_nbr_walk (si->sw_if_index,
1478  FIB_PROTOCOL_IP6,
1479  ip6_nbr_stats_cb,
1480  &ctx);
1481  dsunlock (sm);
1482 
1483  /*
1484  * if this interface has some adjacencies with counters then ship them,
1485  * else continue to the next interface.
1486  */
1487  if (NULL != ctx.counters)
1488  {
1489  ip6_nbr_ship(sm, &ctx);
1490  }
1491  }));
1492  /* *INDENT-OFF* */
1493 }
1494 
1495 static void
1497 {
1498  ip4_main_t *im4 = &ip4_main;
1499  api_main_t *am = sm->api_main;
1501  svm_queue_t *q = shmem_hdr->vl_input_queue;
1502  ip4_route_t *r;
1503  fib_table_t *fib;
1504  ip4_fib_t *v4_fib;
1505  do_ip46_fibs_t *do_fibs;
1507  u32 items_this_message;
1508  vl_api_ip4_fib_counter_t *ctrp = 0;
1509  u32 start_at_fib_index = 0;
1510  int i, j, k;
1511 
1512  do_fibs = &sm->do_ip46_fibs;
1513 
1514 again:
1515  vec_reset_length (do_fibs->fibs);
1516  /* *INDENT-OFF* */
1517  pool_foreach (fib, im4->fibs,
1518  ({vec_add1(do_fibs->fibs,fib);}));
1519 
1520  /* *INDENT-ON* */
1521 
1522  for (j = 0; j < vec_len (do_fibs->fibs); j++)
1523  {
1524  fib = do_fibs->fibs[j];
1525  /* We may have bailed out due to control-plane activity */
1526  while ((fib - im4->fibs) < start_at_fib_index)
1527  continue;
1528 
1529  v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1530 
1531  if (mp == 0)
1532  {
1533  items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1535  (sizeof (*mp) +
1536  items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1537  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1538  mp->count = 0;
1539  mp->vrf_id = ntohl (fib->ft_table_id);
1540  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1541  }
1542  else
1543  {
1544  /* happens if the last FIB was empty... */
1545  ASSERT (mp->count == 0);
1546  mp->vrf_id = ntohl (fib->ft_table_id);
1547  }
1548 
1549  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1550 
1551  vec_reset_length (do_fibs->ip4routes);
1552  vec_reset_length (do_fibs->results);
1553 
1554  for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1555  {
1556  uword *hash = v4_fib->fib_entry_by_dst_address[i];
1557  hash_pair_t *p;
1558  ip4_route_t x;
1559 
1560  vec_reset_length (do_fibs->pvec);
1561 
1562  x.address_length = i;
1563 
1564  hash_foreach_pair (p, hash, (
1565  {
1566  vec_add1 (do_fibs->pvec, p);}
1567  ));
1568  for (k = 0; k < vec_len (do_fibs->pvec); k++)
1569  {
1570  p = do_fibs->pvec[k];
1571  x.address.data_u32 = p->key;
1572  x.index = p->value[0];
1573 
1574  vec_add1 (do_fibs->ip4routes, x);
1576  {
1577  start_at_fib_index = fib - im4->fibs;
1578  dsunlock (sm);
1579  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1581  mp->count = 0;
1582  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1583  goto again;
1584  }
1585  }
1586  }
1587 
1588  vec_foreach (r, do_fibs->ip4routes)
1589  {
1590  vlib_counter_t c;
1591  const dpo_id_t *dpo_id;
1592  u32 index;
1593 
1594  dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1595  index = (u32) dpo_id->dpoi_index;
1596 
1598  index, &c);
1599  /*
1600  * If it has actually
1601  * seen at least one packet, send it.
1602  */
1603  if (c.packets > 0)
1604  {
1605 
1606  /* already in net byte order */
1607  ctrp->address = r->address.as_u32;
1608  ctrp->address_length = r->address_length;
1609  ctrp->packets = clib_host_to_net_u64 (c.packets);
1610  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1611  mp->count++;
1612  ctrp++;
1613 
1614  if (mp->count == items_this_message)
1615  {
1616  mp->count = htonl (items_this_message);
1617  /*
1618  * If the main thread's input queue is stuffed,
1619  * drop the data structure lock (which the main thread
1620  * may want), and take a pause.
1621  */
1622  svm_queue_lock (q);
1623  if (svm_queue_is_full (q))
1624  {
1625  dsunlock (sm);
1626  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1627  svm_queue_unlock (q);
1628  mp = 0;
1629  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1631  goto again;
1632  }
1633  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1634  svm_queue_unlock (q);
1635 
1636  items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1638  (sizeof (*mp) +
1639  items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1640  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1641  mp->count = 0;
1642  mp->vrf_id = ntohl (fib->ft_table_id);
1643  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1644  }
1645  } /* for each (mp or single) adj */
1647  {
1648  start_at_fib_index = fib - im4->fibs;
1649  dsunlock (sm);
1650  ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1651  mp->count = 0;
1652  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1653  goto again;
1654  }
1655  } /* vec_foreach (routes) */
1656 
1657  dsunlock (sm);
1658 
1659  /* Flush any data from this fib */
1660  if (mp->count)
1661  {
1662  mp->count = htonl (mp->count);
1663  vl_msg_api_send_shmem (q, (u8 *) & mp);
1664  mp = 0;
1665  }
1666  }
1667 
1668  /* If e.g. the last FIB had no reportable routes, free the buffer */
1669  if (mp)
1670  vl_msg_api_free (mp);
1671 }
1672 
1673 static int
1675 {
1676  stats_main_t *sm = ctx;
1677  do_ip46_fibs_t *do_fibs;
1678  mfib_entry_t *entry;
1679 
1680  do_fibs = &sm->do_ip46_fibs;
1681  entry = mfib_entry_get (fei);
1682 
1683  vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
1684 
1685  return (1);
1686 }
1687 
1688 static void
1690 {
1691  ip4_main_t *im4 = &ip4_main;
1692  api_main_t *am = sm->api_main;
1694  svm_queue_t *q = shmem_hdr->vl_input_queue;
1695  mfib_prefix_t *pfx;
1696  mfib_table_t *mfib;
1697  do_ip46_fibs_t *do_fibs;
1699  u32 items_this_message;
1700  vl_api_ip4_mfib_counter_t *ctrp = 0;
1701  u32 start_at_mfib_index = 0;
1702  int i, j, k;
1703 
1704  do_fibs = &sm->do_ip46_fibs;
1705 
1706  vec_reset_length (do_fibs->mfibs);
1707  /* *INDENT-OFF* */
1708  pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1709  /* *INDENT-ON* */
1710 
1711  for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1712  {
1713  mfib = do_fibs->mfibs[j];
1714  /* We may have bailed out due to control-plane activity */
1715  while ((mfib - im4->mfibs) < start_at_mfib_index)
1716  continue;
1717 
1718  if (mp == 0)
1719  {
1720  items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1722  (sizeof (*mp) +
1723  items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1724  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1725  mp->count = 0;
1726  mp->vrf_id = ntohl (mfib->mft_table_id);
1727  ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1728  }
1729  else
1730  {
1731  /* happens if the last MFIB was empty... */
1732  ASSERT (mp->count == 0);
1733  mp->vrf_id = ntohl (mfib->mft_table_id);
1734  }
1735 
1736  vec_reset_length (do_fibs->mroutes);
1737 
1738  /*
1739  * walk the table with table updates blocked
1740  */
1741  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1742 
1743  mfib_table_walk (mfib->mft_index,
1745  dsunlock (sm);
1746 
1747  vec_foreach (pfx, do_fibs->mroutes)
1748  {
1749  const dpo_id_t *dpo_id;
1750  fib_node_index_t mfei;
1751  vlib_counter_t c;
1752  u32 index;
1753 
1754  /*
1755  * re-lookup the entry, since we suspend during the collection
1756  */
1757  mfei = mfib_table_lookup (mfib->mft_index, pfx);
1758 
1759  if (FIB_NODE_INDEX_INVALID == mfei)
1760  continue;
1761 
1762  dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1763  index = (u32) dpo_id->dpoi_index;
1764 
1766  dpo_id->dpoi_index, &c);
1767  /*
1768  * If it has seen at least one packet, send it.
1769  */
1770  if (c.packets > 0)
1771  {
1772  /* already in net byte order */
1773  memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1774  memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1775  ctrp->group_length = pfx->fp_len;
1776  ctrp->packets = clib_host_to_net_u64 (c.packets);
1777  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1778  mp->count++;
1779  ctrp++;
1780 
1781  if (mp->count == items_this_message)
1782  {
1783  mp->count = htonl (items_this_message);
1784  /*
1785  * If the main thread's input queue is stuffed,
1786  * drop the data structure lock (which the main thread
1787  * may want), and take a pause.
1788  */
1789  svm_queue_lock (q);
1790 
1791  while (svm_queue_is_full (q))
1792  {
1793  svm_queue_unlock (q);
1794  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1796  svm_queue_lock (q);
1797  }
1798  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1799  svm_queue_unlock (q);
1800 
1801  items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1803  (sizeof (*mp) +
1804  items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1805  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1806  mp->count = 0;
1807  mp->vrf_id = ntohl (mfib->mft_table_id);
1808  ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1809  }
1810  }
1811  }
1812 
1813  /* Flush any data from this mfib */
1814  if (mp->count)
1815  {
1816  mp->count = htonl (mp->count);
1817  vl_msg_api_send_shmem (q, (u8 *) & mp);
1818  mp = 0;
1819  }
1820  }
1821 
1822  /* If e.g. the last FIB had no reportable routes, free the buffer */
1823  if (mp)
1824  vl_msg_api_free (mp);
1825 }
1826 
1827 static void
1829 {
1830  ip6_main_t *im6 = &ip6_main;
1831  api_main_t *am = sm->api_main;
1833  svm_queue_t *q = shmem_hdr->vl_input_queue;
1834  mfib_prefix_t *pfx;
1835  mfib_table_t *mfib;
1836  do_ip46_fibs_t *do_fibs;
1838  u32 items_this_message;
1839  vl_api_ip6_mfib_counter_t *ctrp = 0;
1840  u32 start_at_mfib_index = 0;
1841  int i, j, k;
1842 
1843  do_fibs = &sm->do_ip46_fibs;
1844 
1845  vec_reset_length (do_fibs->mfibs);
1846  /* *INDENT-OFF* */
1847  pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1848  /* *INDENT-ON* */
1849 
1850  for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1851  {
1852  mfib = do_fibs->mfibs[j];
1853  /* We may have bailed out due to control-plane activity */
1854  while ((mfib - im6->mfibs) < start_at_mfib_index)
1855  continue;
1856 
1857  if (mp == 0)
1858  {
1859  items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1861  (sizeof (*mp) +
1862  items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1863  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1864  mp->count = 0;
1865  mp->vrf_id = ntohl (mfib->mft_table_id);
1866  ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1867  }
1868  else
1869  {
1870  /* happens if the last MFIB was empty... */
1871  ASSERT (mp->count == 0);
1872  mp->vrf_id = ntohl (mfib->mft_table_id);
1873  }
1874 
1875  vec_reset_length (do_fibs->mroutes);
1876 
1877  /*
1878  * walk the table with table updates blocked
1879  */
1880  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1881 
1882  mfib_table_walk (mfib->mft_index,
1884  dsunlock (sm);
1885 
1886  vec_foreach (pfx, do_fibs->mroutes)
1887  {
1888  const dpo_id_t *dpo_id;
1889  fib_node_index_t mfei;
1890  vlib_counter_t c;
1891  u32 index;
1892 
1893  /*
1894  * re-lookup the entry, since we suspend during the collection
1895  */
1896  mfei = mfib_table_lookup (mfib->mft_index, pfx);
1897 
1898  if (FIB_NODE_INDEX_INVALID == mfei)
1899  continue;
1900 
1901  dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1902  index = (u32) dpo_id->dpoi_index;
1903 
1905  dpo_id->dpoi_index, &c);
1906  /*
1907  * If it has seen at least one packet, send it.
1908  */
1909  if (c.packets > 0)
1910  {
1911  /* already in net byte order */
1912  memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1913  memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1914  ctrp->group_length = pfx->fp_len;
1915  ctrp->packets = clib_host_to_net_u64 (c.packets);
1916  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1917  mp->count++;
1918  ctrp++;
1919 
1920  if (mp->count == items_this_message)
1921  {
1922  mp->count = htonl (items_this_message);
1923  /*
1924  * If the main thread's input queue is stuffed,
1925  * drop the data structure lock (which the main thread
1926  * may want), and take a pause.
1927  */
1928  svm_queue_lock (q);
1929 
1930  while (svm_queue_is_full (q))
1931  {
1932  svm_queue_unlock (q);
1933  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1935  svm_queue_lock (q);
1936  }
1937  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1938  svm_queue_unlock (q);
1939 
1940  items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1942  (sizeof (*mp) +
1943  items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1944  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1945  mp->count = 0;
1946  mp->vrf_id = ntohl (mfib->mft_table_id);
1947  ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1948  }
1949  }
1950  }
1951 
1952  /* Flush any data from this mfib */
1953  if (mp->count)
1954  {
1955  mp->count = htonl (mp->count);
1956  vl_msg_api_send_shmem (q, (u8 *) & mp);
1957  mp = 0;
1958  }
1959  }
1960 
1961  /* If e.g. the last FIB had no reportable routes, free the buffer */
1962  if (mp)
1963  vl_msg_api_free (mp);
1964 }
1965 
1966 typedef struct
1967 {
1972 
/**
 * @brief Bihash walk callback for the ip6 forwarding table: for each
 * key/value pair belonging to the FIB index in @c arg, append an
 * ip6_route_t (address, prefix length, entry index) to the caller's
 * route vector.
 *
 * NOTE(review): this listing is an extraction with dropped lines — the
 * condition (original line 1979) that guards the clib_longjmp below, and
 * the declaration of @c addr (original line 1984), are missing here.
 * Presumably the longjmp fires when the data-structure lock's release
 * hint is set, aborting the walk so control-plane work can proceed —
 * confirm against the upstream source before relying on this.
 */
1973 static void
1974 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1975 {
1976  add_routes_in_fib_arg_t *ap = arg;
1977  stats_main_t *sm = ap->sm;
1978 
/* Non-local exit back to the clib_setjmp in the ip6 FIB collector. */
1980  clib_longjmp (&sm->jmp_buf, 1);
1981 
/* High 32 bits of key[2] hold the FIB index; only collect our FIB. */
1982  if (kvp->key[2] >> 32 == ap->fib_index)
1983  {
1985  ip6_route_t *r;
/* The leading bytes of the bihash key are the ip6 address itself. */
1986  addr = (ip6_address_t *) kvp;
1987  vec_add2 (*ap->routep, r, 1);
1988  r->address = addr[0];
/* Low byte of key[2] is the prefix length. */
1989  r->address_length = kvp->key[2] & 0xFF;
1990  r->index = kvp->value;
1991  }
1992 }
1993 
1994 static void
1996 {
1997  ip6_main_t *im6 = &ip6_main;
1998  api_main_t *am = sm->api_main;
2000  svm_queue_t *q = shmem_hdr->vl_input_queue;
2001  ip6_route_t *r;
2002  fib_table_t *fib;
2003  do_ip46_fibs_t *do_fibs;
2005  u32 items_this_message;
2006  vl_api_ip6_fib_counter_t *ctrp = 0;
2007  u32 start_at_fib_index = 0;
2008  BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
2009  add_routes_in_fib_arg_t _a, *a = &_a;
2010  int i;
2011 
2012  do_fibs = &sm->do_ip46_fibs;
2013 again:
2014  vec_reset_length (do_fibs->fibs);
2015  /* *INDENT-OFF* */
2016  pool_foreach (fib, im6->fibs,
2017  ({vec_add1(do_fibs->fibs,fib);}));
2018  /* *INDENT-ON* */
2019 
2020 
2021  for (i = 0; i < vec_len (do_fibs->fibs); i++)
2022  {
2023  fib = do_fibs->fibs[i];
2024  /* We may have bailed out due to control-plane activity */
2025  while ((fib - im6->fibs) < start_at_fib_index)
2026  continue;
2027 
2028  if (mp == 0)
2029  {
2030  items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2032  (sizeof (*mp) +
2033  items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2034  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2035  mp->count = 0;
2036  mp->vrf_id = ntohl (fib->ft_table_id);
2037  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2038  }
2039 
2040  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2041 
2042  vec_reset_length (do_fibs->ip6routes);
2043  vec_reset_length (do_fibs->results);
2044 
2045  a->fib_index = fib - im6->fibs;
2046  a->routep = &do_fibs->ip6routes;
2047  a->sm = sm;
2048 
2049  if (clib_setjmp (&sm->jmp_buf, 0) == 0)
2050  {
2051  start_at_fib_index = fib - im6->fibs;
2053  }
2054  else
2055  {
2056  dsunlock (sm);
2057  ip46_fib_stats_delay (sm, 0 /* sec */ ,
2059  mp->count = 0;
2060  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2061  goto again;
2062  }
2063 
2064  vec_foreach (r, do_fibs->ip6routes)
2065  {
2066  vlib_counter_t c;
2067 
2069  r->index, &c);
2070  /*
2071  * If it has actually
2072  * seen at least one packet, send it.
2073  */
2074  if (c.packets > 0)
2075  {
2076  /* already in net byte order */
2077  ctrp->address[0] = r->address.as_u64[0];
2078  ctrp->address[1] = r->address.as_u64[1];
2079  ctrp->address_length = (u8) r->address_length;
2080  ctrp->packets = clib_host_to_net_u64 (c.packets);
2081  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2082  mp->count++;
2083  ctrp++;
2084 
2085  if (mp->count == items_this_message)
2086  {
2087  mp->count = htonl (items_this_message);
2088  /*
2089  * If the main thread's input queue is stuffed,
2090  * drop the data structure lock (which the main thread
2091  * may want), and take a pause.
2092  */
2093  svm_queue_lock (q);
2094  if (svm_queue_is_full (q))
2095  {
2096  dsunlock (sm);
2097  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2098  svm_queue_unlock (q);
2099  mp = 0;
2100  ip46_fib_stats_delay (sm, 0 /* sec */ ,
2102  goto again;
2103  }
2104  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2105  svm_queue_unlock (q);
2106 
2107  items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2109  (sizeof (*mp) +
2110  items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2111  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2112  mp->count = 0;
2113  mp->vrf_id = ntohl (fib->ft_table_id);
2114  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2115  }
2116  }
2117 
2119  {
2120  start_at_fib_index = fib - im6->fibs;
2121  dsunlock (sm);
2122  ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2123  mp->count = 0;
2124  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2125  goto again;
2126  }
2127  } /* vec_foreach (routes) */
2128 
2129  dsunlock (sm);
2130 
2131  /* Flush any data from this fib */
2132  if (mp->count)
2133  {
2134  mp->count = htonl (mp->count);
2135  vl_msg_api_send_shmem (q, (u8 *) & mp);
2136  mp = 0;
2137  }
2138  }
2139 
2140  /* If e.g. the last FIB had no reportable routes, free the buffer */
2141  if (mp)
2142  vl_msg_api_free (mp);
2143 }
2144 
2146 {
2149 
2150 static walk_rc_t
2152 {
2153  udp_encap_stats_walk_t *ctx = arg;
2155  udp_encap_t *ue;
2156 
2157  vec_add2 (ctx->stats, stat, 1);
2158 
2159  udp_encap_get_stats (uei, &stat->packets, &stat->bytes);
2160 
2161  return (WALK_CONTINUE);
2162 }
2163 
2164 static void
2166 {
2169  stats_main_t *sm;
2170  api_main_t *am;
2171  svm_queue_t *q;
2172 
2173  mp = NULL;
2174  sm = &stats_main;
2175  am = sm->api_main;
2176  shmem_hdr = am->shmem_hdr;
2177  q = shmem_hdr->vl_input_queue;
2178 
2179  /*
2180  * If the walk context has counters, which may be left over from the last
2181  * suspend, then we continue from there.
2182  */
2183  while (0 != vec_len (ctx->stats))
2184  {
2185  u32 n_items = MIN (vec_len (ctx->stats),
2187  u8 pause = 0;
2188 
2189  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2190 
2191  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2192  (n_items *
2193  sizeof
2195  mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
2196  mp->count = ntohl (n_items);
2197 
2198  /*
2199  * copy the counters from the back of the context, then we can easily
2200  * 'erase' them by resetting the vector length.
2201  * The order we push the stats to the caller is not important.
2202  */
2203  clib_memcpy (mp->c,
2204  &ctx->stats[vec_len (ctx->stats) - n_items],
2205  n_items * sizeof (*ctx->stats));
2206 
2207  _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
2208 
2209  /*
2210  * send to the shm q
2211  */
2212  send_and_pause (sm, q, (u8 *) & mp);
2213  }
2214 }
2215 
2216 static void
2218 {
2220 
2222  .stats = NULL,
2223  };
2224 
2225  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2227  dsunlock (sm);
2228 
2229  udp_encap_ship (&ctx);
2230 }
2231 
2233 {
2236 
2237 static walk_rc_t
2239 {
2243  bier_table_id_t btid;
2244 
2245  vec_add2 (ctx->stats, stat, 1);
2246 
2247  bier_fmask_encode (bfmi, &btid, &rpath);
2248 
2249  stat->tbl_id.bt_set = btid.bti_set;
2250  stat->tbl_id.bt_sub_domain = btid.bti_sub_domain;
2251  stat->tbl_id.bt_hdr_len_id = btid.bti_hdr_len;
2252  fib_api_path_encode (&rpath, &stat->path);
2253  bier_fmask_get_stats (bfmi, &stat->packets, &stat->bytes);
2254 
2255  return (WALK_CONTINUE);
2256 }
2257 
2258 static void
2260 {
2263  stats_main_t *sm;
2264  api_main_t *am;
2265  svm_queue_t *q;
2266 
2267  mp = NULL;
2268  sm = &stats_main;
2269  am = sm->api_main;
2270  shmem_hdr = am->shmem_hdr;
2271  q = shmem_hdr->vl_input_queue;
2272 
2273  /*
2274  * If the walk context has counters, which may be left over from the last
2275  * suspend, then we continue from there.
2276  */
2277  while (0 != vec_len (ctx->stats))
2278  {
2279  u32 n_items = MIN (vec_len (ctx->stats),
2281  u8 pause = 0;
2282 
2283  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2284 
2285  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
2286  (n_items *
2287  sizeof
2289  mp->_vl_msg_id = ntohs (VL_API_VNET_BIER_NEIGHBOR_COUNTERS);
2290  mp->count = ntohl (n_items);
2291 
2292  /*
2293  * copy the counters from the back of the context, then we can easily
2294  * 'erase' them by resetting the vector length.
2295  * The order we push the stats to the caller is not important.
2296  */
2297  clib_memcpy (mp->c,
2298  &ctx->stats[vec_len (ctx->stats) - n_items],
2299  n_items * sizeof (*ctx->stats));
2300 
2301  _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
2302 
2303  /*
2304  * send to the shm q
2305  */
2306  send_and_pause (sm, q, (u8 *) & mp);
2307  }
2308 }
2309 
2310 static void
2312 {
2314 
2316  .stats = NULL,
2317  };
2318 
2319  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
2321  dsunlock (sm);
2322 
2323  bier_neighbor_ship (&ctx);
2324 }
2325 
2326 int
2327 stats_set_poller_delay (u32 poller_delay_sec)
2328 {
2329  stats_main_t *sm = &stats_main;
2330  if (!poller_delay_sec)
2331  {
2332  return VNET_API_ERROR_INVALID_ARGUMENT;
2333  }
2334  else
2335  {
2336  sm->stats_poll_interval_in_seconds = poller_delay_sec;
2337  return 0;
2338  }
2339 }
2340 
2341 static clib_error_t *
2343 {
2344  stats_main_t *sm = &stats_main;
2345  u32 sec;
2346 
2347  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2348  {
2349  if (unformat (input, "interval %u", &sec))
2350  {
2351  int rv = stats_set_poller_delay (sec);
2352  if (rv)
2353  {
2354  return clib_error_return (0,
2355  "`stats_set_poller_delay' API call failed, rv=%d:%U",
2356  (int) rv, format_vnet_api_errno, rv);
2357  }
2358  }
2359  else
2360  {
2361  return clib_error_return (0, "unknown input '%U'",
2362  format_unformat_error, input);
2363  }
2364  }
2365 
2366  return 0;
2367 }
2368 
2369 /* stats { ... } configuration. */
2370 /*?
2371  *
2372  * @cfgcmd{interval, &lt;seconds&gt;}
2373  * Configure stats poller delay to be @c seconds.
2374  *
2375 ?*/
2377 
2378 static void
2381 {
2382  stats_main_t *sm = &stats_main;
2383  vl_api_registration_t *reg;
2385  if (!reg)
2386  return;
2388 
2389  rmp = vl_msg_api_alloc (sizeof (*rmp));
2390  rmp->_vl_msg_id = ntohs (VL_API_STATS_GET_POLLER_DELAY_REPLY);
2391  rmp->context = mp->context;
2392  rmp->retval = 0;
2393  rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
2394 
2395  vl_api_send_msg (reg, (u8 *) rmp);
2396 
2397 }
2398 
/**
 * @brief Main loop of the dedicated stats-collection pthread.
 *
 * Blocks all signals for this thread, names the thread
 * "<prefix>_stats", then loops forever: sleep for the configured poll
 * interval, and — when the poller is enabled — run each per-category
 * collector for which at least one client registration exists.
 *
 * NOTE(review): several lines were lost in extraction — the declaration
 * of @c tm (original lines 2403-2404, presumably the vlib thread main),
 * the first argument of the per-iteration delay call (original line
 * 2421, presumably sm->stats_poll_interval_in_seconds), and the
 * combined/simple/BIER collector call lines (2429, 2433, 2454).
 * Confirm against upstream before modifying.
 */
2399 static void
2400 stats_thread_fn (void *arg)
2401 {
2402  stats_main_t *sm = &stats_main;
2405 
2406  /* stats thread wants no signals. */
2407  {
2408  sigset_t s;
2409  sigfillset (&s);
2410  pthread_sigmask (SIG_SETMASK, &s, 0);
2411  }
2412 
2414 
/* Label the thread so it is identifiable in ps/top output. */
2415  if (vec_len (tm->thread_prefix))
2416  vlib_set_thread_name ((char *)
2417  format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2418 
2419  while (1)
2420  {
2422  0 /* nsec */ );
2423 
/* Nothing registered anywhere: skip this cycle entirely. */
2424  if (!(sm->enable_poller))
2425  continue;
2426 
/* One collector per registration category; each runs only when a
 * client has asked for that counter type. */
2427  if (pool_elts
2428  (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2430 
2431  if (pool_elts
2432  (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2434 
2435  if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2436  do_ip4_fib_counters (sm);
2437 
2438  if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2439  do_ip6_fib_counters (sm);
2440 
2441  if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2442  do_ip4_mfib_counters (sm);
2443 
2444  if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2445  do_ip6_mfib_counters (sm);
2446 
2447  if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2448  do_ip4_nbr_counters (sm);
2449 
2450  if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2451  do_ip6_nbr_counters (sm);
2452 
2453  if (pool_elts (sm->stats_registrations[IDX_BIER_NEIGHBOR_COUNTERS]))
2455  }
2456 }
2457 
2458 static void
2461 {
2462  vpe_client_registration_t *clients, client;
2463  stats_main_t *sm = &stats_main;
2464  vl_api_registration_t *reg, *reg_prev = NULL;
2466  u32 mp_size;
2467  int i;
2468 
2469  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2470 
2471  clients =
2472  get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2473  ~0 /*flag for all */ );
2474 
2475  for (i = 0; i < vec_len (clients); i++)
2476  {
2477  client = clients[i];
2479  if (reg)
2480  {
2481  if (reg_prev && vl_api_can_send_msg (reg_prev))
2482  {
2483  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2484  clib_memcpy (mp_copy, mp, mp_size);
2485  vl_api_send_msg (reg_prev, (u8 *) mp);
2486  mp = mp_copy;
2487  }
2488  reg_prev = reg;
2489  }
2490  else
2491  {
2492  sm->enable_poller =
2493  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2494  client.client_index);
2495  continue;
2496  }
2497  }
2498  vec_free (clients);
2499 
2500 #if STATS_DEBUG > 0
2501  fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2502 #endif
2503 
2504  if (reg_prev && vl_api_can_send_msg (reg_prev))
2505  {
2506  vl_api_send_msg (reg_prev, (u8 *) mp);
2507  }
2508  else
2509  {
2510  vl_msg_api_free (mp);
2511  }
2512 }
2513 
2514 static void
2516 {
2517  stats_main_t *sm = &stats_main;
2518  vl_api_registration_t *reg, *reg_prev = NULL;
2520  u32 mp_size;
2521  vpe_client_registration_t *clients, client;
2522  int i;
2523 
2524  mp_size = sizeof (*mp_copy) +
2525  ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2526 
2527  clients =
2528  get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2529 
2530  for (i = 0; i < vec_len (clients); i++)
2531  {
2532  client = clients[i];
2534  if (reg)
2535  {
2536  if (reg_prev && vl_api_can_send_msg (reg_prev))
2537  {
2538  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2539  clib_memcpy (mp_copy, mp, mp_size);
2540  vl_api_send_msg (reg_prev, (u8 *) mp);
2541  mp = mp_copy;
2542  }
2543  reg_prev = reg;
2544  }
2545  else
2546  {
2547  sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2548  ~0, client.client_index);
2549  continue;
2550  }
2551  }
2552  vec_free (clients);
2553 
2554  if (reg_prev && vl_api_can_send_msg (reg_prev))
2555  {
2556  vl_api_send_msg (reg_prev, (u8 *) mp);
2557  }
2558  else
2559  {
2560  vl_msg_api_free (mp);
2561  }
2562 }
2563 
2564 static void
2566 {
2567  stats_main_t *sm = &stats_main;
2568  vl_api_registration_t *reg, *reg_prev = NULL;
2570  u32 mp_size;
2571  vpe_client_registration_t *clients, client;
2572  int i;
2573 
2574  mp_size = sizeof (*mp_copy) +
2575  ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2576 
2577  clients =
2578  get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2579 
2580  for (i = 0; i < vec_len (clients); i++)
2581  {
2582  client = clients[i];
2584  if (reg)
2585  {
2586  if (reg_prev && vl_api_can_send_msg (reg_prev))
2587  {
2588  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2589  clib_memcpy (mp_copy, mp, mp_size);
2590  vl_api_send_msg (reg_prev, (u8 *) mp);
2591  mp = mp_copy;
2592  }
2593  reg_prev = reg;
2594  }
2595  else
2596  {
2597  sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2598  ~0, client.client_index);
2599  continue;
2600  }
2601  }
2602  vec_free (clients);
2603 
2604  /* *INDENT-ON* */
2605  if (reg_prev && vl_api_can_send_msg (reg_prev))
2606  {
2607  vl_api_send_msg (reg_prev, (u8 *) mp);
2608  }
2609  else
2610  {
2611  vl_msg_api_free (mp);
2612  }
2613 }
2614 
2615 static void
2617 {
2618  stats_main_t *sm = &stats_main;
2619  vl_api_registration_t *reg, *reg_prev = NULL;
2621  u32 mp_size;
2622  vpe_client_registration_t *clients, client;
2623  int i;
2624 
2625  mp_size = sizeof (*mp_copy) +
2626  ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2627 
2628  clients =
2629  get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2630 
2631  for (i = 0; i < vec_len (clients); i++)
2632  {
2633  client = clients[i];
2635  if (reg)
2636  {
2637  if (reg_prev && vl_api_can_send_msg (reg_prev))
2638  {
2639  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2640  clib_memcpy (mp_copy, mp, mp_size);
2641  vl_api_send_msg (reg_prev, (u8 *) mp);
2642  mp = mp_copy;
2643  }
2644  reg_prev = reg;
2645  }
2646  else
2647  {
2648  sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2649  ~0, client.client_index);
2650  continue;
2651  }
2652  }
2653  vec_free (clients);
2654 
2655  /* *INDENT-ON* */
2656  if (reg_prev && vl_api_can_send_msg (reg_prev))
2657  {
2658  vl_api_send_msg (reg_prev, (u8 *) mp);
2659  }
2660  else
2661  {
2662  vl_msg_api_free (mp);
2663  }
2664 }
2665 
2666 static void
2668 {
2669  stats_main_t *sm = &stats_main;
2670  vl_api_registration_t *reg, *reg_prev = NULL;
2672  u32 mp_size;
2673  vpe_client_registration_t *clients, client;
2674  int i;
2675 
2676  mp_size = sizeof (*mp_copy) +
2677  ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2678 
2679  clients =
2680  get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2681 
2682  for (i = 0; i < vec_len (clients); i++)
2683  {
2684  client = clients[i];
2686  if (reg)
2687  {
2688  if (reg_prev && vl_api_can_send_msg (reg_prev))
2689  {
2690  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2691  clib_memcpy (mp_copy, mp, mp_size);
2692  vl_api_send_msg (reg_prev, (u8 *) mp);
2693  mp = mp_copy;
2694  }
2695  reg_prev = reg;
2696  }
2697  else
2698  {
2699  sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2700  ~0, client.client_index);
2701  continue;
2702  }
2703  }
2704  vec_free (clients);
2705 
2706  /* *INDENT-ON* */
2707  if (reg_prev && vl_api_can_send_msg (reg_prev))
2708  {
2709  vl_api_send_msg (reg_prev, (u8 *) mp);
2710  }
2711  else
2712  {
2713  vl_msg_api_free (mp);
2714  }
2715 }
2716 
2717 static void
2719 {
2720  stats_main_t *sm = &stats_main;
2722  vl_api_want_udp_encap_stats_reply_t *rmp;
2723  uword *p;
2724  i32 retval = 0;
2725  vl_api_registration_t *reg;
2726  u32 fib;
2727 
2728  fib = ~0; //Using same mechanism as _per_interface_
2729  rp.client_index = mp->client_index;
2730  rp.client_pid = mp->pid;
2731 
2732  handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
2733 
2734 reply:
2736 
2737  if (!reg)
2738  {
2739  sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
2740  fib, mp->client_index);
2741  return;
2742  }
2743 
2744  rmp = vl_msg_api_alloc (sizeof (*rmp));
2745  rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
2746  rmp->context = mp->context;
2747  rmp->retval = retval;
2748 
2749  vl_api_send_msg (reg, (u8 *) rmp);
2750 }
2751 
2752 static void
2754  mp)
2755 {
2756  stats_main_t *sm = &stats_main;
2758  vl_api_want_bier_neighbor_stats_reply_t *rmp;
2759  uword *p;
2760  i32 retval = 0;
2761  vl_api_registration_t *reg;
2762  u32 fib;
2763 
2764  fib = ~0; //Using same mechanism as _per_interface_
2765  rp.client_index = mp->client_index;
2766  rp.client_pid = mp->pid;
2767 
2768  handle_client_registration (&rp, IDX_BIER_NEIGHBOR_COUNTERS, fib,
2769  mp->enable);
2770 
2771 reply:
2773 
2774  if (!reg)
2775  {
2776  sm->enable_poller = clear_client_for_stat (IDX_BIER_NEIGHBOR_COUNTERS,
2777  fib, mp->client_index);
2778  return;
2779  }
2780 
2781  rmp = vl_msg_api_alloc (sizeof (*rmp));
2782  rmp->_vl_msg_id = ntohs (VL_API_WANT_BIER_NEIGHBOR_STATS_REPLY);
2783  rmp->context = mp->context;
2784  rmp->retval = retval;
2785 
2786  vl_api_send_msg (reg, (u8 *) rmp);
2787 }
2788 
2789 static void
2791 {
2792  stats_main_t *sm = &stats_main;
2794  vl_api_want_stats_reply_t *rmp;
2795  uword *p;
2796  i32 retval = 0;
2797  u32 item;
2798  vl_api_registration_t *reg;
2799 
2800  item = ~0; //"ALL THE THINGS IN THE THINGS
2801  rp.client_index = mp->client_index;
2802  rp.client_pid = mp->pid;
2803 
2804  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2805  item, mp->enable_disable);
2806 
2807  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2808  item, mp->enable_disable);
2809 
2810  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2811  item, mp->enable_disable);
2812 
2813  handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2814  item, mp->enable_disable);
2815 
2816  handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2817  item, mp->enable_disable);
2818 
2819  handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2820  item, mp->enable_disable);
2821 
2822 reply:
2824  if (!reg)
2825  return;
2826 
2827  rmp = vl_msg_api_alloc (sizeof (*rmp));
2828  rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2829  rmp->context = mp->context;
2830  rmp->retval = retval;
2831 
2832  vl_api_send_msg (reg, (u8 *) rmp);
2833 }
2834 
2835 static void
2838 {
2839  stats_main_t *sm = &stats_main;
2841  vl_api_want_interface_simple_stats_reply_t *rmp;
2842  uword *p;
2843  i32 retval = 0;
2844  u32 swif;
2845  vl_api_registration_t *reg;
2846 
2847  swif = ~0; //Using same mechanism as _per_interface_
2848  rp.client_index = mp->client_index;
2849  rp.client_pid = mp->pid;
2850 
2851  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2852  mp->enable_disable);
2853 
2854 reply:
2856 
2857  if (!reg)
2858  {
2859  sm->enable_poller =
2860  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2861  mp->client_index);
2862  return;
2863  }
2864 
2865  rmp = vl_msg_api_alloc (sizeof (*rmp));
2866  rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2867  rmp->context = mp->context;
2868  rmp->retval = retval;
2869 
2870  vl_api_send_msg (reg, (u8 *) rmp);
2871 }
2872 
2873 
2874 static void
2876 {
2877  stats_main_t *sm = &stats_main;
2879  vl_api_want_ip4_fib_stats_reply_t *rmp;
2880  uword *p;
2881  i32 retval = 0;
2882  vl_api_registration_t *reg;
2883  u32 fib;
2884 
2885  fib = ~0; //Using same mechanism as _per_interface_
2886  rp.client_index = mp->client_index;
2887  rp.client_pid = mp->pid;
2888 
2889  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2890  mp->enable_disable);
2891 
2892 reply:
2894 
2895  if (!reg)
2896  {
2897  sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2898  fib, mp->client_index);
2899  return;
2900  }
2901 
2902  rmp = vl_msg_api_alloc (sizeof (*rmp));
2903  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2904  rmp->context = mp->context;
2905  rmp->retval = retval;
2906 
2907  vl_api_send_msg (reg, (u8 *) rmp);
2908 }
2909 
2910 static void
2912 {
2913  stats_main_t *sm = &stats_main;
2915  vl_api_want_ip4_mfib_stats_reply_t *rmp;
2916  uword *p;
2917  i32 retval = 0;
2918  vl_api_registration_t *reg;
2919  u32 mfib;
2920 
2921  mfib = ~0; //Using same mechanism as _per_interface_
2922  rp.client_index = mp->client_index;
2923  rp.client_pid = mp->pid;
2924 
2925  handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2926  mp->enable_disable);
2927 
2928 reply:
2930  if (!reg)
2931  {
2932  sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2933  mfib, mp->client_index);
2934  return;
2935  }
2936 
2937  rmp = vl_msg_api_alloc (sizeof (*rmp));
2938  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2939  rmp->context = mp->context;
2940  rmp->retval = retval;
2941 
2942  vl_api_send_msg (reg, (u8 *) rmp);
2943 }
2944 
2945 static void
2947 {
2948  stats_main_t *sm = &stats_main;
2950  vl_api_want_ip4_fib_stats_reply_t *rmp;
2951  uword *p;
2952  i32 retval = 0;
2953  vl_api_registration_t *reg;
2954  u32 fib;
2955 
2956  fib = ~0; //Using same mechanism as _per_interface_
2957  rp.client_index = mp->client_index;
2958  rp.client_pid = mp->pid;
2959 
2960  handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2961  mp->enable_disable);
2962 
2963 reply:
2965  if (!reg)
2966  {
2967  sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2968  fib, mp->client_index);
2969  return;
2970  }
2971 
2972  rmp = vl_msg_api_alloc (sizeof (*rmp));
2973  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2974  rmp->context = mp->context;
2975  rmp->retval = retval;
2976 
2977  vl_api_send_msg (reg, (u8 *) rmp);
2978 }
2979 
2980 static void
2982 {
2983  stats_main_t *sm = &stats_main;
2985  vl_api_want_ip4_mfib_stats_reply_t *rmp;
2986  uword *p;
2987  i32 retval = 0;
2988  vl_api_registration_t *reg;
2989  u32 mfib;
2990 
2991  mfib = ~0; //Using same mechanism as _per_interface_
2992  rp.client_index = mp->client_index;
2993  rp.client_pid = mp->pid;
2994 
2995  handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2996  mp->enable_disable);
2997 
2998 reply:
3000  if (!reg)
3001  {
3002  sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
3003  mfib, mp->client_index);
3004  return;
3005  }
3006 
3007  rmp = vl_msg_api_alloc (sizeof (*rmp));
3008  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
3009  rmp->context = mp->context;
3010  rmp->retval = retval;
3011 
3012  vl_api_send_msg (reg, (u8 *) rmp);
3013 }
3014 
3015 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
3016 static void
3018 {
3019 }
3020 
3021 static void
3023 {
3024 }
3025 
3026 static void
3028 {
3029  stats_main_t *sm = &stats_main;
3033  vlib_counter_t v;
3035  int i, which;
3038  vl_api_registration_t *reg;
3039 
3041  if (!reg)
3042  return;
3043 
3044  rmp = vl_msg_api_alloc (sizeof (*rmp));
3045  rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
3046  rmp->context = mp->context;
3047  rmp->retval = 0;
3048 
3049  memset (total_pkts, 0, sizeof (total_pkts));
3050  memset (total_bytes, 0, sizeof (total_bytes));
3051 
3053 
3055  {
3056  which = cm - im->combined_sw_if_counters;
3057 
3058  for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
3059  {
3060  vlib_get_combined_counter (cm, i, &v);
3061  total_pkts[which] += v.packets;
3062  total_bytes[which] += v.bytes;
3063  }
3064  }
3066 
3068  {
3069  rmp->total_pkts[ct] = clib_host_to_net_u64 (total_pkts[ct]);
3070  rmp->total_bytes[ct] = clib_host_to_net_u64 (total_bytes[ct]);
3071  }
3072 
3074  {
3075  rmp->total_pkts[ct] = clib_host_to_net_u64 (total_pkts[ct]);
3076  rmp->total_bytes[ct] = clib_host_to_net_u64 (total_bytes[ct]);
3077  }
3078  rmp->vector_rate =
3079  clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
3080 
3081  vl_api_send_msg (reg, (u8 *) rmp);
3082 }
3083 
3084 int
3086 {
3088  stats_main_t *sm = &stats_main;
3089  uword *p;
3090 
3091  // FIXME
3092  /* p = hash_get (sm->stats_registration_hash, client_index); */
3093  /* if (p) */
3094  /* { */
3095  /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
3096  /* pool_put (sm->stats_registrations, rp); */
3097  /* hash_unset (sm->stats_registration_hash, client_index); */
3098  /* } */
3099 
3100  return 0;
3101 }
3102 
3103 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
3104 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
3105 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
3106 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
3107 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
3108 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
3109 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
3110 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
3111 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
3112 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
3113 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
3114 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
3115 
3116 static clib_error_t *
3118 {
3119  stats_main_t *sm = &stats_main;
3120  api_main_t *am = &api_main;
3121  void *vlib_worker_thread_bootstrap_fn (void *arg);
3122 
3123  sm->vlib_main = vm;
3124  sm->vnet_main = vnet_get_main ();
3126  sm->api_main = am;
3128  sm->data_structure_lock =
3131  memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
3132 
3133 #define _(N,n) \
3134  vl_msg_api_set_handlers(VL_API_##N, #n, \
3135  vl_api_##n##_t_handler, \
3136  vl_noop_handler, \
3137  vl_api_##n##_t_endian, \
3138  vl_api_##n##_t_print, \
3139  sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
3141 #undef _
3142 
3143  /* tell the msg infra not to free these messages... */
3144  am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
3145  am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
3146  am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
3147  am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
3148  am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
3149  am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
3150 
3151  /*
3152  * Set up the (msg_name, crc, message-id) table
3153  */
3155 
3158 #define stats_reg(n) \
3159  sm->stats_registrations[IDX_##n] = 0; \
3160  sm->stats_registration_hash[IDX_##n] = 0;
3161 #include <vpp/stats/stats.reg>
3162 #undef stats_reg
3163 
3164  return 0;
3165 }
3166 
3168 
3169 /* *INDENT-OFF* */
3170 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
3171  .name = "stats",
3172  .function = stats_thread_fn,
3173  .fixed_count = 1,
3174  .count = 1,
3175  .no_data_structure_clone = 1,
3176  .use_pthreads = 1,
3177 };
3178 /* *INDENT-ON* */
3179 
3180 /*
3181  * fd.io coding-style-patch-verification: ON
3182  *
3183  * Local Variables:
3184  * eval: (c-set-style "gnu")
3185  * End:
3186  */
static void ip6_nbr_ship(stats_main_t *sm, ip6_nbr_stats_ctx_t *ctx)
void udp_encap_walk(udp_encap_walk_cb_t cb, void *ctx)
Walk each of the encap objects.
Definition: udp_encap.c:511
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:437
Want Interface Simple Stats, register for detailed interface stats.
Definition: stats.api:89
#define vec_foreach_index(var, v)
Iterate over vector indices.
vpe_client_registration_t ** clients_tmp
Want Stats, enable/disable ALL stats updates.
Definition: stats.api:73
vlib_combined_counter_main_t lbm_to_counters
Definition: load_balance.h:46
static void add_routes_in_fib(BVT(clib_bihash_kv)*kvp, void *arg)
ip46_address_t fp_src_addr
Definition: mfib_types.h:47
#define hash_set(h, key, value)
Definition: hash.h:255
fib_node_index_t mfib_table_lookup(u32 fib_index, const mfib_prefix_t *prefix)
Perfom a longest prefix match in the non-forwarding table.
Definition: mfib_table.c:65
u32 vlib_simple_counter_n_counters(const vlib_simple_counter_main_t *cm)
The number of counters (not the number of per-thread counters)
Definition: counter.c:128
do_ip46_fibs_t do_ip46_fibs
#define clib_min(x, y)
Definition: clib.h:291
vl_api_vlib_counter_t data[count]
Definition: stats.api:370
The UDP encap representation.
Definition: udp_encap.h:46
int svm_queue_is_full(svm_queue_t *q)
Definition: queue.c:109
A representation of a fib path for fib_path_encode to convey the information to the caller...
Definition: fib_types.h:584
vl_api_vnet_combined_counter_t data[count]
Definition: stats.api:395
static void stats_thread_fn(void *arg)
vpe_client_registration_t * clients
#define hash_unset(h, key)
Definition: hash.h:261
#define IP4_MFIB_COUNTER_BATCH_SIZE
a
Definition: bitmap.h:538
vl_api_vnet_simple_counter_t data[count]
Definition: stats.api:383
vl_api_ip6_mfib_counter_t c[count]
Definition: stats.api:320
static void udp_encap_ship(udp_encap_stats_walk_t *ctx)
vnet_main_t * vnet_get_main(void)
Definition: misc.c:47
static void vl_api_vnet_get_summary_stats_t_handler(vl_api_vnet_get_summary_stats_t *mp)
mfib_table_t ** mfibs
vnet_interface_main_t interface_main
Definition: vnet.h:56
struct ip_adjacency_t_::@45::@46 nbr
IP_LOOKUP_NEXT_ARP/IP_LOOKUP_NEXT_REWRITE.
static void do_bier_neighbor_counters(stats_main_t *sm)
u64 as_u64[2]
Definition: ip6_packet.h:51
unsigned long u64
Definition: types.h:89
#define NULL
Definition: clib.h:57
An entry in a FIB table.
Definition: mfib_entry.h:31
Request for a single block of summary stats.
Definition: stats.api:402
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:227
IP unicast adjacency.
Definition: adj.h:185
vl_api_udp_encap_counter_t * stats
union ip_adjacency_t_::@45 sub_type
u8 * format_vnet_interface_simple_counters(u8 *s, va_list *args)
void handle_client_registration(vpe_client_registration_t *client, u32 stat, u32 item, int enable_disable)
static void vl_api_send_msg(vl_api_registration_t *rp, u8 *elem)
Definition: api.h:34
const dpo_id_t * fib_entry_contribute_ip_forwarding(fib_node_index_t fib_entry_index)
Definition: fib_entry.c:478
u8 * message_bounce
Don&#39;t automatically free message buffer vetor.
Definition: api_common.h:221
u32 sw_if_index
The SW IF index all these adjs belong to.
vlib_main_t * vlib_main
u32 index_t
A Data-Path Object is an object that represents actions that are applied to packets are they are swit...
Definition: dpo.h:41
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:523
static void ip4_nbr_ship(stats_main_t *sm, ip4_nbr_stats_ctx_t *ctx)
Combined counter to hold both packets and byte differences.
Definition: counter_types.h:26
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:562
int i
vl_api_ip4_nbr_counter_t c[count]
Definition: stats.api:289
vl_api_bier_table_id_t tbl_id
Definition: stats.api:504
static void vl_api_want_ip6_nbr_stats_t_handler(vl_api_want_ip6_nbr_stats_t *mp)
Want UDP encap Stats, register for continuous stats.
Definition: stats.api:454
void clib_longjmp(clib_longjmp_t *save, uword return_value)
void bier_fmask_db_walk(bier_fmask_walk_fn_t fn, void *ctx)
static void clear_one_client(u32 reg_index, u32 reg, u32 item, u32 client_index)
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
struct bier_neighbor_stats_walk_t_ bier_neighbor_stats_walk_t
vl_api_bier_neighbor_counter_t c[count]
Definition: stats.api:514
Combined stats counters structure.
Definition: stats.api:365
stats_main_t stats_main
The ID of a table.
Definition: bier_types.h:394
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:228
uword clib_setjmp(clib_longjmp_t *save, uword return_value_not_taken)
void * vl_msg_api_alloc(int nbytes)
VLIB_REGISTER_THREAD(stats_thread_reg, static)
vhost_vring_addr_t addr
Definition: vhost_user.h:121
format_function_t format_vnet_sw_if_index_name
unsigned char u8
Definition: types.h:56
#define COMBINED_COUNTER_BATCH_SIZE
static void ip46_fib_stats_delay(stats_main_t *sm, u32 sec, u32 nsec)
void svm_queue_unlock(svm_queue_t *q)
Definition: queue.c:103
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
uword value[0]
Definition: hash.h:165
vnet_link_t ia_link
link/ether-type 1 bytes
Definition: adj.h:206
static f64 vlib_last_vector_length_per_node(vlib_main_t *vm)
Definition: main.h:318
enum walk_rc_t_ walk_rc_t
Walk return code.
static int vl_api_can_send_msg(vl_api_registration_t *rp)
Definition: api.h:47
#define BIER_NEIGHBOR_COUNTER_BATCH_SIZE
static walk_rc_t udp_encap_stats_walk_cb(index_t uei, void *arg)
static counter_t vlib_get_simple_counter(vlib_simple_counter_main_t *cm, u32 index)
Get the value of a simple counter Scrapes the entire set of per-thread counters.
Definition: counter.h:97
static ip_adjacency_t * adj_get(adj_index_t adj_index)
Get a pointer to an adjacency object from its index.
Definition: adj.h:380
static void do_ip6_mfib_counters(stats_main_t *sm)
memset(h->entries, 0, sizeof(h->entries[0])*entries)
Combined stats counters structure per interface.
Definition: stats.api:391
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
Definition: pool.h:443
static clib_error_t * stats_init(vlib_main_t *vm)
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:163
vpe_client_stats_registration_t ** regs_tmp
u32 sw_if_index
Definition: vxlan_gbp.api:39
fib_node_index_t mft_index
Index into FIB vector.
Definition: mfib_table.h:71
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:855
static BVT(clib_bihash)
Definition: adj_nbr.c:26
enum adj_walk_rc_t_ adj_walk_rc_t
return codes from a adjacency walker callback function
uint64_t counter_t
64bit counters
Definition: counter_types.h:22
bier_table_set_id_t bti_set
The SET-ID The control plane divdies the bit-position space into sets in the case the max bit-positio...
Definition: bier_types.h:401
#define hash_foreach(key_var, value_var, h, body)
Definition: hash.h:442
int stats_memclnt_delete_callback(u32 client_index)
#define clib_error_return(e, args...)
Definition: error.h:99
static void do_ip4_mfib_counters(stats_main_t *sm)
mfib_prefix_t mfe_prefix
The prefix of the route.
Definition: mfib_entry.h:40
struct vl_shmem_hdr_ * shmem_hdr
Binary API shared-memory segment header pointer.
Definition: api_common.h:264
unsigned int u32
Definition: types.h:88
#define foreach_stats_msg
struct mfib_table_t_ * mfibs
Vector of MFIBs.
Definition: ip4.h:107
void stats_dsunlock(int hint, int tag)
struct ip4_nbr_stats_ctx_t_ ip4_nbr_stats_ctx_t
The context passed when collecting adjacency counters.
A collection of simple counters.
Definition: counter.h:57
static vpe_client_registration_t * get_client_for_stat(u32 reg, u32 item, u32 client_index)
void vlib_set_thread_name(char *name)
Definition: threads.c:182
#define UDP_ENCAP_COUNTER_BATCH_SIZE
static void do_combined_per_interface_counters(stats_main_t *sm)
static void do_ip6_fib_counters(stats_main_t *sm)
vl_shmem_hdr_t * shmem_hdr
static heap_elt_t * first(heap_header_t *h)
Definition: heap.c:59
int clear_client_for_stat(u32 reg, u32 item, u32 client_index)
The identity of a DPO is a combination of its type and its instance number/index of objects of that t...
Definition: dpo.h:168
vl_api_ip4_nbr_counter_t * counters
A vector of ip4 nbr counters.
#define hash_get(h, key)
Definition: hash.h:249
Want Per Interface simple Stats, register for continuous stats.
Definition: stats.api:105
void * thread_mheap
Definition: threads.h:95
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:464
uword size
#define IP6_MFIB_COUNTER_BATCH_SIZE
ip6_route_t * ip6routes
static void vl_api_stats_get_poller_delay_t_handler(vl_api_stats_get_poller_delay_t *mp)
static void vl_api_want_udp_encap_stats_t_handler(vl_api_want_udp_encap_stats_t *mp)
counter_t packets
packet counter
Definition: counter_types.h:28
bier_hdr_len_id_t bti_hdr_len
The size of the bit string processed by this table.
Definition: bier_types.h:419
static void vl_api_want_bier_neighbor_stats_t_handler(vl_api_want_bier_neighbor_stats_t *mp)
void svm_queue_lock(svm_queue_t *q)
Definition: queue.c:97
static void do_ip4_nbr_counters(stats_main_t *sm)
long ctx[MAX_CONNS]
Definition: main.c:144
#define v
Definition: acl.c:496
u32 vlib_combined_counter_n_counters(const vlib_combined_counter_main_t *cm)
The number of counters (not the number of per-thread counters)
Definition: counter.c:121
static void do_ip4_fib_counters(stats_main_t *sm)
struct _unformat_input_t unformat_input_t
#define hash_free(h)
Definition: hash.h:310
static void send_and_pause(stats_main_t *sm, svm_queue_t *q, u8 *mp)
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:274
struct udp_encap_stats_walk_t_ udp_encap_stats_walk_t
Stat for one UDP encap object.
Definition: stats.api:467
vlib_combined_counter_main_t repm_counters
Definition: replicate_dpo.h:35
#define PREDICT_FALSE(x)
Definition: clib.h:107
#define VLIB_CONFIG_FUNCTION(x, n,...)
Definition: init.h:172
#define MIN(x, y)
vl_api_bier_neighbor_counter_t * stats
Stat for one BIER neighbor object.
Definition: stats.api:502
vlib_simple_counter_main_t * sw_if_counters
Definition: interface.h:854
load_balance_main_t load_balance_main
The one instance of load-balance main.
Definition: load_balance.c:56
static u8 * format_vnet_api_errno(u8 *s, va_list *args)
Definition: api_errno.h:160
ip4_route_t * ip4routes
#define STATS_RELEASE_DELAY_NS
vl_api_ip6_nbr_counter_t * counters
A vector of ip6 nbr counters.
vlib_combined_counter_main_t adjacency_counters
Adjacency packet counters.
Definition: adj.c:25
vpe_client_registration_t * get_clients_for_stat(u32 reg, u32 item)
static mfib_entry_t * mfib_entry_get(fib_node_index_t index)
Definition: mfib_entry.h:154
word fformat(FILE *f, char *fmt,...)
Definition: format.c:453
The context passed when collecting adjacency counters.
API main structure, used by both vpp and binary API clients.
Definition: api_common.h:201
static int mfib_table_stats_walk_cb(fib_node_index_t fei, void *ctx)
#define pool_free(p)
Free a pool.
Definition: pool.h:357
An API client registration, only in vpp/vlib.
Definition: api_common.h:44
static void vl_api_want_ip4_mfib_stats_t_handler(vl_api_want_ip4_mfib_stats_t *mp)
void clib_bihash_foreach_key_value_pair(clib_bihash *h, void *callback, void *arg)
Visit active (key,value) pairs in a bi-hash table.
#define SIMPLE_COUNTER_BATCH_SIZE
The IPv4 FIB.
Definition: ip4_fib.h:39
int stats_set_poller_delay(u32 poller_delay_sec)
u32 sw_if_index
The SW IF index all these adjs belong to.
fib_node_index_t ft_index
Index into FIB vector.
Definition: fib_table.h:94
void mfib_table_walk(u32 fib_index, fib_protocol_t proto, mfib_table_walk_fn_t fn, void *ctx)
Walk all entries in a FIB table N.B: This is NOT safe to deletes.
Definition: mfib_table.c:599
void vl_msg_api_send_shmem(svm_queue_t *q, u8 *elem)
#define UNFORMAT_END_OF_INPUT
Definition: format.h:144
svmdb_client_t * c
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:211
u32 ft_table_id
Table ID (hash key) for this FIB.
Definition: fib_table.h:89
static void vlib_get_combined_counter(const vlib_combined_counter_main_t *cm, u32 index, vlib_counter_t *result)
Get the value of a combined counter, never called in the speed path Scrapes the entire set of per-thr...
Definition: counter.h:243
vlib_main_t * vm
Definition: buffer.c:294
static void bier_neighbor_ship(bier_neighbor_stats_walk_t *ctx)
clib_longjmp_t jmp_buf
static int clear_client_for_all_stats(u32 client_index)
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:339
#define foreach_tx_combined_interface_counter(_x)
Definition: interface.h:780
static void do_simple_per_interface_counters(stats_main_t *sm)
static void * clib_mem_set_heap(void *heap)
Definition: mem.h:261
static void vl_api_vnet_ip4_fib_counters_t_handler(vl_api_vnet_ip4_fib_counters_t *mp)
Want Interface Combined Stats, register for continuous stats.
Definition: stats.api:125
#define clib_warning(format, args...)
Definition: error.h:59
This table stores the routes that are used to forward traffic.
Definition: ip6.h:128
#define clib_memcpy(a, b, c)
Definition: string.h:75
static void vnet_interface_counter_unlock(vnet_interface_main_t *im)
Definition: interface.h:880
u32 fib_node_index_t
A typedef of a node index.
Definition: fib_types.h:30
static void vl_api_want_interface_simple_stats_t_handler(vl_api_want_interface_simple_stats_t *mp)
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
Definition: pool.h:271
static void vl_api_vnet_interface_combined_counters_t_handler(vl_api_vnet_interface_combined_counters_t *mp)
Want IP6 FIB Stats, register for continuous stats.
Definition: stats.api:172
u32 adj_index_t
An index for adjacencies.
Definition: adj_types.h:30
#define ARRAY_LEN(x)
Definition: clib.h:61
static vl_api_registration_t * vl_api_client_index_to_registration(u32 index)
Definition: api.h:56
vl_api_ip6_fib_counter_t c[count]
Definition: stats.api:304
static void vl_api_want_per_interface_simple_stats_t_handler(vl_api_want_per_interface_simple_stats_t *mp)
Simple per interface stats counters structure.
Definition: stats.api:379
vnet_interface_counter_type_t
Definition: interface.h:750
svm_queue_t * vl_input_queue
Definition: memory_shared.h:84
struct ip6_nbr_stats_ctx_t_ ip6_nbr_stats_ctx_t
The context passed when collecting adjacency counters.
Aggregrate type for a prefix.
Definition: mfib_types.h:24
static void vl_api_want_ip4_fib_stats_t_handler(vl_api_want_ip4_fib_stats_t *mp)
static void vnet_interface_counter_lock(vnet_interface_main_t *im)
Definition: interface.h:872
signed int i32
Definition: types.h:77
u32 ** init_client_reg(u32 **registrations)
#define ASSERT(truth)
static void dslock(stats_main_t *sm, int release_hint, int tag)
#define foreach_rx_combined_interface_counter(_x)
Definition: interface.h:775
ip6_main_t ip6_main
Definition: ip6_forward.c:2590
u32 ** enable_all_client_reg(u32 **registrations)
The context passed when collecting adjacency counters.
uword ** stats_registration_hash
static void vl_api_vnet_interface_simple_counters_t_handler(vl_api_vnet_interface_simple_counters_t *mp)
IPv4 main type.
Definition: ip4.h:96
const dpo_id_t * mfib_entry_contribute_ip_forwarding(fib_node_index_t mfib_entry_index)
Definition: mfib_entry.c:1252
vpe_client_stats_registration_t ** stats_registrations
struct mfib_table_t_ * mfibs
Vector of MFIBs.
Definition: ip6.h:170
static void vl_api_want_ip6_fib_stats_t_handler(vl_api_want_ip6_fib_stats_t *mp)
Combined interface counter data type for vnet_interface_combined_counters.
Definition: interface.api:285
void stats_dslock_with_hint(int hint, int tag)
size_t count
Definition: vapi.c:46
#define clib_mem_unaligned(pointer, type)
Definition: types.h:155
u32 mft_table_id
Table ID (hash key) for this FIB.
Definition: mfib_table.h:66
Simple stats counters structure.
Definition: stats.api:348
vl_api_udp_encap_counter_t c[count]
Definition: stats.api:478
Want IP6 NBR Stats, register for continuous stats.
Definition: stats.api:228
void udp_encap_get_stats(index_t uei, u64 *packets, u64 *bytes)
Definition: udp_encap.c:250
void fib_api_path_encode(const fib_route_path_encode_t *api_rpath, vl_api_fib_path_t *out)
Definition: fib_api.c:217
ip6_fib_table_instance_t ip6_table[IP6_FIB_NUM_TABLES]
The two FIB tables; fwding and non-fwding.
Definition: ip6.h:159
static void do_ip6_nbr_counters(stats_main_t *sm)
Get delay between polling statistics.
Definition: stats.api:430
static walk_rc_t bier_neighbor_stats_walk_cb(index_t bfmi, void *arg)
static adj_walk_rc_t ip4_nbr_stats_cb(adj_index_t ai, void *arg)
void vl_msg_api_free(void *)
VL_MSG_API_REAPER_FUNCTION(want_stats_reaper)
vl_api_ip4_fib_counter_t c[count]
Definition: stats.api:248
counter_t bytes
byte counter
Definition: counter_types.h:29
Want IP4 FIB Stats, register for continuous stats.
Definition: stats.api:158
data_structure_lock_t * data_structure_lock
static adj_walk_rc_t ip6_nbr_stats_cb(adj_index_t ai, void *arg)
static int set_client_for_stat(u32 reg, u32 item, vpe_client_registration_t *client)
static void dsunlock(stats_main_t *sm)
Per-neighbour (i.e.
Definition: stats.api:284
index_t dpoi_index
the index of objects of that type
Definition: dpo.h:184
Want IP6 multicast FIB Stats, register for continuous stats.
Definition: stats.api:200
#define FIB_NODE_INDEX_INVALID
Definition: fib_types.h:31
struct ip4_fib_t_ * v4_fibs
Vector of MTries.
Definition: ip4.h:104
static void vl_api_vnet_ip6_nbr_counters_t_handler(vl_api_vnet_ip6_nbr_counters_t *mp)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vnet_interface_main_t * interface_main
#define hash_foreach_pair(p, v, body)
Iterate over hash pairs.
Definition: hash.h:373
void bier_fmask_get_stats(index_t bfmi, u64 *packets, u64 *bytes)
Definition: bier_fmask.c:394
u8 * format_vnet_interface_combined_counters(u8 *s, va_list *args)
bier_table_sub_domain_id_t bti_sub_domain
The Sub-Domain-ID The control plane has the configuration option to specify multiple domains or topol...
Definition: bier_types.h:408
u64 uword
Definition: types.h:112
Want BIER neighbor Stats, register for continuous stats.
Definition: stats.api:487
vnet_sw_interface_t * sw_interfaces
Definition: interface.h:846
#define clib_unix_warning(format, args...)
Definition: error.h:68
replicate_main_t replicate_main
The one instance of replicate main.
Definition: replicate_dpo.c:62
A collection of combined counters.
Definition: counter.h:172
Want Per Interface Combined Stats, register for continuous stats.
Definition: stats.api:141
uword * fib_entry_by_dst_address[33]
Definition: ip4_fib.h:51
struct _svm_queue svm_queue_t
vl_api_ip4_mfib_counter_t c[count]
Definition: stats.api:264
static void vl_api_want_stats_t_handler(vl_api_want_stats_t *mp)
static void * clib_mem_alloc_aligned(uword size, uword align)
Definition: mem.h:140
u8 * format_unformat_error(u8 *s, va_list *va)
Definition: unformat.c:91
static void do_combined_interface_counters(stats_main_t *sm)
A protocol Independent IP multicast FIB table.
Definition: mfib_table.h:35
ip4_main_t ip4_main
Global ip4 main structure.
Definition: ip4_forward.c:900
static vlib_thread_main_t * vlib_get_thread_main()
Definition: global_funcs.h:32
Reply for vnet_get_summary_stats request.
Definition: stats.api:417
static clib_error_t * want_stats_reaper(u32 client_index)
struct fib_table_t_ * fibs
Vector of FIBs.
Definition: ip4.h:101
Get delay between polling statistics reply.
Definition: stats.api:441
#define vec_foreach(var, vec)
Vector iterator.
Want IP4 NBR Stats, register for continuous stats.
Definition: stats.api:214
void * vlib_worker_thread_bootstrap_fn(void *arg)
Definition: threads.c:589
static void vl_api_want_ip6_mfib_stats_t_handler(vl_api_want_ip6_mfib_stats_t *mp)
static void vl_api_want_ip4_nbr_stats_t_handler(vl_api_want_ip4_nbr_stats_t *mp)
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:111
void * vl_msg_api_alloc_as_if_client(int nbytes)
static void clear_client_reg(u32 **registrations)
void bier_fmask_encode(index_t bfmi, bier_table_id_t *btid, fib_route_path_encode_t *rpath)
Definition: bier_fmask.c:405
static void vl_api_want_per_interface_combined_stats_t_handler(vl_api_want_per_interface_combined_stats_t *mp)
static void do_udp_encap_counters(stats_main_t *sm)
u16 fp_len
The mask length.
Definition: mfib_types.h:28
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
vnet_main_t * vnet_main
api_main_t api_main
Definition: api_shared.c:35
struct fib_table_t_ * fibs
Definition: ip6.h:164
void vl_msg_api_send_shmem_nolock(svm_queue_t *q, u8 *elem)
static void setup_message_id_table(api_main_t *am)
static void vl_api_vnet_ip4_nbr_counters_t_handler(vl_api_vnet_ip4_nbr_counters_t *mp)
static void vl_api_want_interface_combined_stats_t_handler(vl_api_want_interface_combined_stats_t *mp)
static void vl_api_vnet_ip6_fib_counters_t_handler(vl_api_vnet_ip6_fib_counters_t *mp)
vl_api_fib_path_t path
Definition: stats.api:505
static clib_error_t * stats_config(vlib_main_t *vm, unformat_input_t *input)
uword key
Definition: hash.h:162
Simple interface counter data type for vnet_interface_simple_counters.
Definition: interface.api:319
#define IP4_FIB_COUNTER_BATCH_SIZE
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:972
vl_api_ip6_nbr_counter_t c[count]
Definition: stats.api:336
Want IP4 multicast FIB Stats, register for continuous stats.
Definition: stats.api:186
ip46_address_t fp_grp_addr
The address type is not deriveable from the fp_addr member.
Definition: mfib_types.h:46
mfib_prefix_t * mroutes
static uword unformat_check_input(unformat_input_t *i)
Definition: format.h:170
A protocol Independent FIB table.
Definition: fib_table.h:69
ip6_address_t address
static void do_simple_interface_counters(stats_main_t *sm)
#define IP6_FIB_COUNTER_BATCH_SIZE
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:128