FD.io VPP  v18.01-8-g0eacf49
Vector Packet Processing
stats.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vpp/stats/stats.h>
16 #include <signal.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/mfib/mfib_entry.h>
20 #include <vnet/dpo/load_balance.h>
21 
22 #define STATS_DEBUG 0
23 
25 
26 #include <vnet/ip/ip.h>
27 
28 #include <vpp/api/vpe_msg_enum.h>
29 
30 #define f64_endian(a)
31 #define f64_print(a,b)
32 
33 #define vl_typedefs /* define message structures */
34 #include <vpp/api/vpe_all_api_h.h>
35 #undef vl_typedefs
36 
37 #define vl_endianfun /* define message structures */
38 #include <vpp/api/vpe_all_api_h.h>
39 #undef vl_endianfun
40 
41 /* instantiate all the print functions we know about */
42 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
43 #define vl_printfun
44 #include <vpp/api/vpe_all_api_h.h>
45 #undef vl_printfun
46 
47 #define foreach_stats_msg \
48 _(WANT_STATS, want_stats) \
49 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
50 _(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
51 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
52 _(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
53 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
54 _(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
55 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
56 _(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
57 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
58 _(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
59 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats) \
60 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats) \
61 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
62 _(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
63 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
64 _(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
65 _(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)
66 
67 
68 #define vl_msg_name_crc_list
69 #include <vpp/stats/stats.api.h>
70 #undef vl_msg_name_crc_list
71 
72 static void
74 {
75 #define _(id,n,crc) \
76  vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
77  foreach_vl_msg_name_crc_stats;
78 #undef _
79 }
80 
81 /* These constants ensure msg sizes <= 1024, aka ring allocation */
82 #define SIMPLE_COUNTER_BATCH_SIZE 126
83 #define COMBINED_COUNTER_BATCH_SIZE 63
84 #define IP4_FIB_COUNTER_BATCH_SIZE 48
85 #define IP6_FIB_COUNTER_BATCH_SIZE 30
86 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
87 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
88 
89 /* 5ms */
90 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
91 /* ns/us us/ms */
92 
93 u8 *
/* Format a VNET_INTERFACE_COMBINED_COUNTERS message as human-readable
 * "<if-name>.<type>.packets/bytes" lines, one pair per interface.
 * NOTE(review): the doxygen extraction dropped the signature line
 * (original line 94); judging by the body and the later use of
 * format_vnet_combined_counters with fformat in this file, this is
 * most likely u8 *format_vnet_combined_counters (u8 *s, va_list *args)
 * -- confirm against the original stats.c. */
95 {
96  stats_main_t *sm = &stats_main;
99 
100  char *counter_name;
101  u32 count, sw_if_index;
102  int i;
 /* Message fields arrive in network byte order. */
103  count = ntohl (mp->count);
104  sw_if_index = ntohl (mp->first_sw_if_index);
105 
106  vlib_counter_t *vp;
107  u64 packets, bytes;
108  vp = (vlib_counter_t *) mp->data;
109 
 /* Pick a printable name for the counter type.
  * NOTE(review): the case labels were dropped by the extraction
  * (original lines 112/115); "rx"/"tx" presumably correspond to
  * VNET_INTERFACE_COUNTER_RX / _TX -- confirm. */
110  switch (mp->vnet_counter_type)
111  {
113  counter_name = "rx";
114  break;
116  counter_name = "tx";
117  break;
118  default:
119  counter_name = "bogus";
120  break;
121  }
 /* One (packets, bytes) pair per consecutive sw_if_index starting at
  * first_sw_if_index; values are unaligned in the message and in
  * network byte order. */
122  for (i = 0; i < count; i++)
123  {
124  packets = clib_mem_unaligned (&vp->packets, u64);
125  packets = clib_net_to_host_u64 (packets);
126  bytes = clib_mem_unaligned (&vp->bytes, u64);
127  bytes = clib_net_to_host_u64 (bytes);
128  vp++;
129  s = format (s, "%U.%s.packets %lld\n",
131  sm->vnet_main, sw_if_index, counter_name, packets);
132  s = format (s, "%U.%s.bytes %lld\n",
134  sm->vnet_main, sw_if_index, counter_name, bytes);
135  sw_if_index++;
136  }
137  return s;
138 }
139 
140 u8 *
/* Format a VNET_INTERFACE_SIMPLE_COUNTERS message as human-readable
 * "<if-name>.<type> <value>" lines, one u64 counter per interface.
 * NOTE(review): the extraction dropped the signature line (original
 * line 141) and the "mp =" half of the va_arg statement (original
 * line 144); presumably
 * u8 *format_vnet_simple_counters (u8 *s, va_list *args) -- confirm. */
142 {
143  stats_main_t *sm = &stats_main;
145  va_arg (*args, vl_api_vnet_interface_simple_counters_t *);
146  char *counter_name;
147  u32 count, sw_if_index;
 /* Message fields arrive in network byte order. */
148  count = ntohl (mp->count);
149  sw_if_index = ntohl (mp->first_sw_if_index);
150  u64 *vp, v;
151  vp = (u64 *) mp->data;
152  int i;
153 
 /* Printable name for the simple counter type.
  * NOTE(review): case labels (VNET_INTERFACE_COUNTER_*) were dropped
  * by the extraction -- confirm the mapping against original source. */
154  switch (mp->vnet_counter_type)
155  {
157  counter_name = "drop";
158  break;
160  counter_name = "punt";
161  break;
163  counter_name = "ip4";
164  break;
166  counter_name = "ip6";
167  break;
169  counter_name = "rx-no-buff";
170  break;
172  counter_name = "rx-miss";
173  break;
175  counter_name = "rx-error (fifo-full)";
176  break;
178  counter_name = "tx-error (fifo-full)";
179  break;
180  default:
181  counter_name = "bogus";
182  break;
183  }
 /* One u64 per consecutive sw_if_index, unaligned, network order. */
184  for (i = 0; i < count; i++)
185  {
186  v = clib_mem_unaligned (vp, u64);
187  v = clib_net_to_host_u64 (v);
188  vp++;
189  s = format (s, "%U.%s %lld\n", format_vnet_sw_if_index_name,
190  sm->vnet_main, sw_if_index, counter_name, v);
191  sw_if_index++;
192  }
193 
194  return s;
195 }
196 
197 static void
/* Recursive per-thread spin lock over the stats data structures.
 * If the calling thread already holds the lock, only the recursion
 * count is bumped; otherwise the current holder is optionally asked
 * to release (release_hint) and we spin until acquisition.
 * NOTE(review): the extraction dropped original line 201, which
 * declared/initialized 'l' (presumably
 * data_structure_lock_t *l = sm->data_structure_lock;) -- confirm. */
198 dslock (stats_main_t * sm, int release_hint, int tag)
199 {
200  u32 thread_index;
202 
 /* Lock object not allocated yet: nothing to take. */
203  if (PREDICT_FALSE (l == 0))
204  return;
205 
206  thread_index = vlib_get_thread_index ();
 /* Already held by this thread: recursive acquisition. */
207  if (l->lock && l->thread_index == thread_index)
208  {
209  l->count++;
210  return;
211  }
212 
 /* Ask the current holder to let go as soon as convenient. */
213  if (release_hint)
214  l->release_hint++;
215 
 /* Busy-wait until the atomic test-and-set succeeds. */
216  while (__sync_lock_test_and_set (&l->lock, 1))
217  /* zzzz */ ;
 /* Record debug tag, owning thread and initial recursion depth. */
218  l->tag = tag;
219  l->thread_index = thread_index;
220  l->count = 1;
221 }
222 
223 void
/* Public wrapper: take the stats data-structure lock with a release
 * hint and a debug tag (see dslock). */
224 stats_dslock_with_hint (int hint, int tag)
225 {
226  stats_main_t *sm = &stats_main;
227  dslock (sm, hint, tag);
228 }
229 
230 static void
/* Release one level of the recursive stats data-structure lock; the
 * lock itself is only dropped when the outermost hold is released.
 * NOTE(review): the extraction dropped the signature line (original
 * line 231, presumably dsunlock (stats_main_t * sm)) and the line
 * declaring/initializing 'l' (presumably
 * data_structure_lock_t *l = sm->data_structure_lock;) -- confirm. */
232 {
233  u32 thread_index;
235 
 /* Lock object not allocated: nothing to release. */
236  if (PREDICT_FALSE (l == 0))
237  return;
238 
239  thread_index = vlib_get_thread_index ();
 /* Only the owning thread may release. */
240  ASSERT (l->lock && l->thread_index == thread_index);
241  l->count--;
 /* Outermost release: negate the tag (debug aid), clear the hint and
  * drop the lock.  NOTE(review): original line 246 was dropped by the
  * extraction -- likely a memory barrier before clearing l->lock;
  * confirm against original source. */
242  if (l->count == 0)
243  {
244  l->tag = -l->tag;
245  l->release_hint = 0;
247  l->lock = 0;
248  }
249 }
250 
251 void
/* Public wrapper: release the stats data-structure lock.  'hint' and
 * 'tag' are accepted for signature symmetry with
 * stats_dslock_with_hint but are not used here. */
252 stats_dsunlock (int hint, int tag)
253 {
254  stats_main_t *sm = &stats_main;
255  dsunlock (sm);
256 }
257 
/* Look up the (reg, item) registration and return the stored client
 * entry for client_index, or 0 when either the item has no listeners
 * or this client is not among them.
 * NOTE(review): the extraction dropped the return-type line (original
 * line 258, presumably static vpe_client_registration_t *) -- confirm. */
259 get_client_for_stat (u32 reg, u32 item, u32 client_index)
260 {
261  stats_main_t *sm = &stats_main;
262  vpe_client_stats_registration_t *registration;
263  uword *p;
264 
265  /* Is there anything listening for item in that reg */
266  p = hash_get (sm->stats_registration_hash[reg], item);
267 
268  if (!p)
269  return 0; // Fail
270 
271  /* If there is, is our client_index one of them */
272  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
273  p = hash_get (registration->client_hash, client_index);
274 
275  if (!p)
276  return 0; // Fail
277 
278  return pool_elt_at_index (registration->clients, p[0]);
279 
280 }
281 
282 static int
/* Register 'client' as a listener for (reg, item), creating the
 * per-item registration on first use.  Always returns 1, meaning
 * "at least one client is interested, keep the poller running".
 * NOTE(review): the extraction dropped the signature line (original
 * line 283, presumably set_client_for_stat (u32 reg, u32 item,
 * vpe_client_registration_t * client)) and the declaration of 'cr'
 * (presumably vpe_client_registration_t *cr;) -- confirm. */
284 {
285  stats_main_t *sm = &stats_main;
286  vpe_client_stats_registration_t *registration;
288  uword *p;
289 
290  /* Is there anything listening for item in that reg */
291  p = hash_get (sm->stats_registration_hash[reg], item);
292 
 /* First listener for this item: create and index the registration. */
293  if (!p)
294  {
295  pool_get (sm->stats_registrations[reg], registration);
296  registration->item = item;
297  hash_set (sm->stats_registration_hash[reg], item,
298  registration - sm->stats_registrations[reg]);
299  }
300  else
301  {
302  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
303  }
304 
 /* Add the client to this registration unless already present. */
305  p = hash_get (registration->client_hash, client->client_index);
306 
307  if (!p)
308  {
309  pool_get (registration->clients, cr);
310  cr->client_index = client->client_index;
311  cr->client_pid = client->client_pid;
312  hash_set (registration->client_hash, cr->client_index,
313  cr - registration->clients);
314  }
315 
316  return 1; //At least one client is doing something ... poll
317 }
318 
319 int
/* Remove client_index from the (reg, item) registration; when the last
 * client for the item goes away, the item registration itself is
 * deleted.  Returns the total number of registrations remaining
 * across all stat types -- callers use this as the "keep polling"
 * flag (sm->enable_poller).
 * NOTE(review): the extraction dropped original line 324, which
 * declared the 'client' local (presumably
 * vpe_client_registration_t *client;) -- confirm. */
320 clear_client_for_stat (u32 reg, u32 item, u32 client_index)
321 {
322  stats_main_t *sm = &stats_main;
323  vpe_client_stats_registration_t *registration;
325  uword *p;
326  int i, elts;
327 
328  /* Clear the client first */
329  /* Is there anything listening for item in that reg */
330  p = hash_get (sm->stats_registration_hash[reg], item);
331 
332  if (!p)
333  goto exit;
334 
335  /* If there is, is our client_index one of them */
336  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
337  p = hash_get (registration->client_hash, client_index);
338 
339  if (!p)
340  goto exit;
341 
 /* Drop the client from the registration's hash and pool. */
342  client = pool_elt_at_index (registration->clients, p[0]);
343  hash_unset (registration->client_hash, client->client_index);
344  pool_put (registration->clients, client);
345 
346  /* Now check if that was the last client for that item */
347  if (0 == pool_elts (registration->clients))
348  {
349  hash_unset (sm->stats_registration_hash[reg], item);
350  pool_put (sm->stats_registrations[reg], registration);
351  }
352 
353 exit:
354  elts = 0;
355  /* Now check if that was the last item in any of the listened to stats */
356  for (i = 0; i < STATS_REG_N_IDX; i++)
357  {
358  elts += pool_elts (sm->stats_registrations[i]);
359  }
360  return elts;
361 }
362 
/* Return a newly built vector containing a copy of every client
 * registered for (reg, item), or 0 when nothing listens to that item.
 * The caller owns (and must free) the returned vector.
 * NOTE(review): the extraction dropped the signature lines (original
 * lines 363-364, presumably
 * static vpe_client_registration_t *
 * get_clients_for_stat (u32 reg, u32 item)) -- confirm. */
365 {
366  stats_main_t *sm = &stats_main;
367  vpe_client_registration_t *client, *clients = 0;
368  vpe_client_stats_registration_t *registration;
369  uword *p;
370 
371  /* Is there anything listening for item in that reg */
372  p = hash_get (sm->stats_registration_hash[reg], item);
373 
374  if (!p)
375  return 0; // Fail
376 
377  /* If there is, is our client_index one of them */
378  registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);
379 
 /* Snapshot every registered client into a flat vector. */
380  vec_reset_length (clients);
381  pool_foreach (client, registration->clients, (
382  {
383  vec_add1 (clients, *client);}
384  ));
385  return clients;
386 }
387 
388 
389 static void
/* Free the per-stat-type interest vectors (one registrations[] slot
 * per stats_reg() entry in stats.reg) and then the outer vector. */
390 clear_client_reg (u32 ** registrations)
391 {
392  /* When registrations[x] is a vector of pool indices
393  here is a good place to clean up the pools
394  */
395 #define stats_reg(n) vec_free(registrations[IDX_##n]);
396 #include <vpp/stats/stats.reg>
397 #undef stats_reg
398 
399  vec_free (registrations);
400 }
401 
402 u32 **
/* Allocate/validate one registrations[] slot per stat type declared in
 * stats.reg and reset each interest vector to empty.  Returns the
 * (possibly reallocated) registrations vector. */
403 init_client_reg (u32 ** registrations)
404 {
405 
406  /*
407  Initialise the stats registrations for each
408  type of stat a client can register for as well as
409  a vector of "interested" indexes.
410  Initially this is a u32 of either sw_if_index or fib_index
411  but eventually this should migrate to a pool_index (u32)
412  with a type specific pool that can include more complex things
413  such as timing and structured events.
414  */
415  vec_validate (registrations, STATS_REG_N_IDX);
416 #define stats_reg(n) \
417  vec_reset_length(registrations[IDX_##n]);
418 #include <vpp/stats/stats.reg>
419 #undef stats_reg
420 
421  /*
422  When registrations[x] is a vector of pool indices, here
423  is a good place to init the pools.
424  */
425  return registrations;
426 }
427 
428 u32 **
/* Mark every known stat type as "interested in everything" by adding
 * the ~0 wildcard to each per-type interest vector.  Returns the
 * registrations vector for chaining. */
429 enable_all_client_reg (u32 ** registrations)
430 {
431 
432  /*
433  Enable all stats known by adding
434  ~0 to the index vector. Eventually this
435  should be deprecated.
436  */
437 #define stats_reg(n) \
438  vec_add1(registrations[IDX_##n], ~0);
439 #include <vpp/stats/stats.reg>
440 #undef stats_reg
441  return registrations;
442 }
443 
444 static void
446 {
449  api_main_t *am = sm->api_main;
453  u32 items_this_message = 0;
454  u64 v, *vp = 0;
455  int i, n_counts;
456 
457  /*
458  * Prevent interface registration from expanding / moving the vectors...
459  * That tends never to happen, so we can hold this lock for a while.
460  */
462 
463  vec_foreach (cm, im->sw_if_counters)
464  {
465  n_counts = vlib_simple_counter_n_counters (cm);
466  for (i = 0; i < n_counts; i++)
467  {
468  if (mp == 0)
469  {
470  items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
471  n_counts - i);
472 
474  (sizeof (*mp) + items_this_message * sizeof (v));
475  mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
476  mp->vnet_counter_type = cm - im->sw_if_counters;
477  mp->first_sw_if_index = htonl (i);
478  mp->count = 0;
479  vp = (u64 *) mp->data;
480  }
481  v = vlib_get_simple_counter (cm, i);
482  clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
483  vp++;
484  mp->count++;
485  if (mp->count == items_this_message)
486  {
487  mp->count = htonl (items_this_message);
488  /* Send to the main thread... */
489  vl_msg_api_send_shmem (q, (u8 *) & mp);
490  mp = 0;
491  }
492  }
493  ASSERT (mp == 0);
494  }
496 }
497 
498 void
/* Enable or disable a client's interest in (stat, item) and refresh
 * sm->enable_poller accordingly.  Disabling when not registered only
 * logs a warning; enabling when already registered is a no-op.
 * NOTE(review): the extraction dropped the signature line (original
 * line 499, presumably handle_client_registration
 * (vpe_client_registration_t * client, u32 stat, ...)) -- confirm. */
500  u32 item, int enable_disable)
501 {
502  stats_main_t *sm = &stats_main;
503  vpe_client_registration_t *rp, _rp;
504 
505  rp = get_client_for_stat (stat, item, client->client_index);
506 
507  /* Disable case */
508  if (enable_disable == 0)
509  {
510  if (!rp) // No client to disable
511  {
512  clib_warning ("pid %d: already disabled for stats...",
513  client->client_pid);
514  return;
515  }
 /* Poller stays on only while some registration remains. */
516  sm->enable_poller =
517  clear_client_for_stat (stat, item, client->client_index);
518  return;
519  }
520  /* Enable case */
521  if (!rp)
522  {
 /* Build a stack-local registration record; set_client_for_stat
  * copies the fields it needs. */
523  rp = &_rp;
524  rp->client_index = client->client_index;
525  rp->client_pid = client->client_pid;
526  sm->enable_poller = set_client_for_stat (stat, item, rp);
527  }
528 }
529 
530 
531 /**********************************
532  * ALL Interface Combined stats - to be deprecated
533  **********************************/
534 
535 /*
536  * This API should be deprecated as _per_interface_ works with ~0 as sw_if_index.
537  */
538 static void
541 {
542  stats_main_t *sm = &stats_main;
544  vl_api_want_interface_combined_stats_reply_t *rmp;
545  uword *p;
546  i32 retval = 0;
548  u32 swif;
549 
550  swif = ~0; //Using same mechanism as _per_interface_
551  rp.client_index = mp->client_index;
552  rp.client_pid = mp->pid;
553 
554  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
555  mp->enable_disable);
556 
557 reply:
559 
560  if (!q)
561  {
562  sm->enable_poller =
563  clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
564  mp->client_index);
565  return;
566  }
567 
568  rmp = vl_msg_api_alloc (sizeof (*rmp));
569  rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_COMBINED_STATS_REPLY);
570  rmp->context = mp->context;
571  rmp->retval = retval;
572 
573  vl_msg_api_send_shmem (q, (u8 *) & rmp);
574 }
575 
576 static void
579 {
580  vpe_client_registration_t *clients, client;
581  stats_main_t *sm = &stats_main;
582  unix_shared_memory_queue_t *q, *q_prev = NULL;
584  u32 mp_size;
585  int i;
586 
587  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
588 
589  clients =
590  get_clients_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
591  ~0 /*flag for all */ );
592 
593  for (i = 0; i < vec_len (clients); i++)
594  {
595  client = clients[i];
597  if (q)
598  {
599  if (q_prev && (q_prev->cursize < q_prev->maxsize))
600  {
601  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
602  clib_memcpy (mp_copy, mp, mp_size);
603  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
604  mp = mp_copy;
605  }
606  q_prev = q;
607  }
608  }
609 #if STATS_DEBUG > 0
610  fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
611 #endif
612 
613  if (q_prev && (q_prev->cursize < q_prev->maxsize))
614  {
615  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
616  }
617  else
618  {
619  vl_msg_api_free (mp);
620  }
621 }
622 
623 static void
625 {
628  api_main_t *am = sm->api_main;
632  u32 items_this_message = 0;
633  vlib_counter_t v, *vp = 0;
634  int i, n_counts;
635 
637 
639  {
640  n_counts = vlib_combined_counter_n_counters (cm);
641  for (i = 0; i < n_counts; i++)
642  {
643  if (mp == 0)
644  {
645  items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
646  n_counts - i);
647 
649  (sizeof (*mp) + items_this_message * sizeof (v));
650  mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
652  mp->first_sw_if_index = htonl (i);
653  mp->count = 0;
654  vp = (vlib_counter_t *) mp->data;
655  }
656  vlib_get_combined_counter (cm, i, &v);
658  = clib_host_to_net_u64 (v.packets);
659  clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
660  vp++;
661  mp->count++;
662  if (mp->count == items_this_message)
663  {
664  mp->count = htonl (items_this_message);
665  /* Send to the main thread... */
666  vl_msg_api_send_shmem (q, (u8 *) & mp);
667  mp = 0;
668  }
669  }
670  ASSERT (mp == 0);
671  }
673 }
674 
675 /**********************************
676  * Per Interface Combined stats
677  **********************************/
678 
679 /* Request from client registering interfaces it wants */
680 static void
683 {
684  stats_main_t *sm = &stats_main;
686  vl_api_want_per_interface_combined_stats_reply_t *rmp;
688  uword *p;
689  i32 retval = 0;
691  int i;
692  u32 swif;
693 
694  // Validate we have good sw_if_indexes before registering
695  for (i = 0; i < mp->num; i++)
696  {
697  swif = mp->sw_ifs[i];
698 
699  /* Check its a real sw_if_index that the client is allowed to see */
700  if (swif != ~0)
701  {
703  {
704  retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
705  goto reply;
706  }
707  }
708  }
709 
710  for (i = 0; i < mp->num; i++)
711  {
712  swif = mp->sw_ifs[i];
713 
714  rp.client_index = mp->client_index;
715  rp.client_pid = mp->pid;
716  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
717  swif, mp->enable_disable);
718  }
719 
720 reply:
722 
723  if (!q)
724  {
725  for (i = 0; i < mp->num; i++)
726  {
727  swif = mp->sw_ifs[i];
728  sm->enable_poller =
729  clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
730  mp->client_index);
731  }
732  return;
733  }
734 
735  rmp = vl_msg_api_alloc (sizeof (*rmp));
736  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_COMBINED_STATS_REPLY);
737  rmp->context = mp->context;
738  rmp->retval = retval;
739 
740  vl_msg_api_send_shmem (q, (u8 *) & rmp);
741 }
742 
743 /* Per Interface Combined distribution to client */
744 static void
746 {
749  api_main_t *am = sm->api_main;
753  /*
754  * items_this_message will eventually be used to optimise the batching
755  * of per client messages for each stat. For now setting this to 1 then
756  * iterate. This will not affect API.
757  *
758  * FIXME instead of enqueueing here, this should be sent to a batch
759  * storer for per-client transmission. Each "mp" sent would be a single entry
760  * and if a client is listening to other sw_if_indexes for same, it would be
761  * appended to that *mp
762  */
763  u32 items_this_message = 1;
764  vnet_combined_counter_t *vp = 0;
766  int i, j;
767  u32 timestamp;
770  u32 *sw_if_index = 0;
771 
772  /*
773  FIXME(s):
774  - capturing the timestamp of the counters "when VPP knew them" is important.
775  Less so is that the timing of the delivery to the control plane be in the same
776  timescale.
777 
778  i.e. As long as the control plane can delta messages from VPP and work out
779  velocity etc based on the timestamp, it can do so in a more "batch mode".
780 
781  It would be beneficial to keep a "per-client" message queue, and then
782  batch all the stat messages for a client into one message, with
783  discrete timestamps.
784 
785  Given this particular API is for "per interface" one assumes that the scale
786  is less than the ~0 case, which the prior API is suited for.
787  */
789 
790  timestamp = vlib_time_now (sm->vlib_main);
791 
793  pool_foreach (reg,
794  sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
795  (
796  {
797  vec_add1 (sm->regs_tmp, reg);}));
798 
799  for (i = 0; i < vec_len (sm->regs_tmp); i++)
800  {
801  reg = sm->regs_tmp[i];
802  if (reg->item == ~0)
803  {
807  continue;
808  }
810  pool_foreach (client, reg->clients, (
811  {
812  vec_add1 (sm->clients_tmp,
813  client);}
814  ));
815 
816  //FIXME - should be doing non-variant part of mp here and managing
817  // any alloc per client in that vec_foreach
818  for (j = 0; j < vec_len (sm->clients_tmp); j++)
819  {
820  client = sm->clients_tmp[j];
822 
823  //Client may have disconnected abrubtly, clean up so we don't poll nothing.
824  if (!q)
825  {
826  sm->enable_poller =
827  clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
828  reg->item, client->client_index);
829  continue;
830  }
831 
832  mp = vl_msg_api_alloc (sizeof (*mp) +
833  (items_this_message *
834  (sizeof (*vp) /* rx */ )));
835 
836  // FIXME when optimising for items_this_message > 1 need to include a
837  // SIMPLE_INTERFACE_BATCH_SIZE check.
838  mp->_vl_msg_id =
839  ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
840 
841  mp->count = items_this_message;
842  mp->timestamp = timestamp;
843  vp = (vnet_combined_counter_t *) mp->data;
844 
845  vp->sw_if_index = htonl (reg->item);
846 
848  vlib_get_combined_counter (cm, reg->item, &v);
850  = clib_host_to_net_u64 (v.packets);
852  clib_host_to_net_u64 (v.bytes);
853 
854 
855  /* TX vlib_counter_t packets/bytes */
857  vlib_get_combined_counter (cm, reg->item, &v);
859  = clib_host_to_net_u64 (v.packets);
861  clib_host_to_net_u64 (v.bytes);
862 
863  vl_msg_api_send_shmem (q, (u8 *) & mp);
864  }
865  }
866 
868 }
869 
870 /**********************************
871  * Per Interface simple stats
872  **********************************/
873 
874 /* Request from client registering interfaces it wants */
875 static void
878 {
879  stats_main_t *sm = &stats_main;
881  vl_api_want_per_interface_simple_stats_reply_t *rmp;
883  uword *p;
884  i32 retval = 0;
886  int i;
887  u32 swif;
888 
889  for (i = 0; i < mp->num; i++)
890  {
891  swif = mp->sw_ifs[i];
892 
893  /* Check its a real sw_if_index that the client is allowed to see */
894  if (swif != ~0)
895  {
897  {
898  retval = VNET_API_ERROR_INVALID_SW_IF_INDEX;
899  goto reply;
900  }
901  }
902  }
903 
904  for (i = 0; i < mp->num; i++)
905  {
906  swif = mp->sw_ifs[i];
907 
908  rp.client_index = mp->client_index;
909  rp.client_pid = mp->pid;
910  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
911  swif, mp->enable_disable);
912  }
913 
914 reply:
916 
917  //Client may have disconnected abrubtly, clean up so we don't poll nothing.
918  if (!q)
919  {
920  for (i = 0; i < mp->num; i++)
921  {
922  swif = mp->sw_ifs[i];
923  sm->enable_poller =
924  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
925  mp->client_index);
926  }
927 
928  return;
929  }
930 
931 
932  rmp = vl_msg_api_alloc (sizeof (*rmp));
933  rmp->_vl_msg_id = ntohs (VL_API_WANT_PER_INTERFACE_SIMPLE_STATS_REPLY);
934  rmp->context = mp->context;
935  rmp->retval = retval;
936 
937  vl_msg_api_send_shmem (q, (u8 *) & rmp);
938 }
939 
940 /* Per Interface Simple distribution to client */
941 static void
943 {
946  api_main_t *am = sm->api_main;
950  /*
951  * items_this_message will eventually be used to optimise the batching
952  * of per client messages for each stat. For now setting this to 1 then
953  * iterate. This will not affect API.
954  *
955  * FIXME instead of enqueueing here, this should be sent to a batch
956  * storer for per-client transmission. Each "mp" sent would be a single entry
957  * and if a client is listening to other sw_if_indexes for same, it would be
958  * appended to that *mp
959  */
960  u32 items_this_message = 1;
961  int i, j, size;
964  u32 timestamp;
965  u32 count;
966  vnet_simple_counter_t *vp = 0;
967  counter_t v;
968 
969  /*
970  FIXME(s):
971  - capturing the timestamp of the counters "when VPP knew them" is important.
972  Less so is that the timing of the delivery to the control plane be in the same
973  timescale.
974 
975  i.e. As long as the control plane can delta messages from VPP and work out
976  velocity etc based on the timestamp, it can do so in a more "batch mode".
977 
978  It would be beneficial to keep a "per-client" message queue, and then
979  batch all the stat messages for a client into one message, with
980  discrete timestamps.
981 
982  Given this particular API is for "per interface" one assumes that the scale
983  is less than the ~0 case, which the prior API is suited for.
984  */
986 
987  timestamp = vlib_time_now (sm->vlib_main);
988 
990  pool_foreach (reg,
991  sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS], (
992  {
993  vec_add1
994  (sm->regs_tmp,
995  reg);}));
996 
997  for (i = 0; i < vec_len (sm->regs_tmp); i++)
998  {
999  reg = sm->regs_tmp[i];
1000  if (reg->item == ~0)
1001  {
1005  continue;
1006  }
1008  pool_foreach (client, reg->clients, (
1009  {
1010  vec_add1 (sm->clients_tmp,
1011  client);}
1012  ));
1013 
1014  //FIXME - should be doing non-variant part of mp here and managing
1015  // any alloc per client in that vec_foreach
1016  for (j = 0; j < vec_len (sm->clients_tmp); j++)
1017  {
1018  client = sm->clients_tmp[j];
1020 
1021  //Client may have disconnected abrubtly, clean up so we don't poll nothing.
1022  if (!q)
1023  {
1024  sm->enable_poller =
1025  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
1026  reg->item, client->client_index);
1027  continue;
1028  }
1029 
1030  size = (sizeof (*mp) + (items_this_message * (sizeof (u64) * 10)));
1031  mp = vl_msg_api_alloc (size);
1032  // FIXME when optimising for items_this_message > 1 need to include a
1033  // SIMPLE_INTERFACE_BATCH_SIZE check.
1034  mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
1035 
1036  mp->count = items_this_message;
1037  mp->timestamp = timestamp;
1038  vp = (vnet_simple_counter_t *) mp->data;
1039 
1040  vp->sw_if_index = htonl (reg->item);
1041 
1042  //FIXME will be simpler with a preprocessor macro
1043  // VNET_INTERFACE_COUNTER_DROP
1045  v = vlib_get_simple_counter (cm, reg->item);
1046  clib_mem_unaligned (&vp->drop, u64) = clib_host_to_net_u64 (v);
1047 
1048  // VNET_INTERFACE_COUNTER_PUNT
1050  v = vlib_get_simple_counter (cm, reg->item);
1051  clib_mem_unaligned (&vp->punt, u64) = clib_host_to_net_u64 (v);
1052 
1053  // VNET_INTERFACE_COUNTER_IP4
1055  v = vlib_get_simple_counter (cm, reg->item);
1056  clib_mem_unaligned (&vp->rx_ip4, u64) = clib_host_to_net_u64 (v);
1057 
1058  //VNET_INTERFACE_COUNTER_IP6
1060  v = vlib_get_simple_counter (cm, reg->item);
1061  clib_mem_unaligned (&vp->rx_ip6, u64) = clib_host_to_net_u64 (v);
1062 
1063  //VNET_INTERFACE_COUNTER_RX_NO_BUF
1065  v = vlib_get_simple_counter (cm, reg->item);
1067  clib_host_to_net_u64 (v);
1068 
1069  //VNET_INTERFACE_COUNTER_RX_MISS
1071  v = vlib_get_simple_counter (cm, reg->item);
1072  clib_mem_unaligned (&vp->rx_miss, u64) = clib_host_to_net_u64 (v);
1073 
1074  //VNET_INTERFACE_COUNTER_RX_ERROR
1076  v = vlib_get_simple_counter (cm, reg->item);
1077  clib_mem_unaligned (&vp->rx_error, u64) = clib_host_to_net_u64 (v);
1078 
1079  //VNET_INTERFACE_COUNTER_TX_ERROR
1081  v = vlib_get_simple_counter (cm, reg->item);
1082  clib_mem_unaligned (&vp->tx_error, u64) = clib_host_to_net_u64 (v);
1083 
1084  //VNET_INTERFACE_COUNTER_MPLS
1086  v = vlib_get_simple_counter (cm, reg->item);
1087  clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
1088 
1089  vl_msg_api_send_shmem (q, (u8 *) & mp);
1090  }
1091  }
1092 
1094 }
1095 
1096 /**********************************
1097  * Per FIB IP4 stats
1098  **********************************/
1099 
1100 static void
/* Sleep for sec seconds + nsec nanoseconds, restarting nanosleep with
 * the remaining time whenever it is interrupted by a signal (EINTR);
 * any other nanosleep failure is logged and the wait is abandoned.
 * NOTE(review): the extraction dropped the signature line (original
 * line 1101, presumably ip46_fib_stats_delay (stats_main_t * sm,
 * u32 sec, u32 nsec)) -- confirm. */
1102 {
1103  struct timespec _req, *req = &_req;
1104  struct timespec _rem, *rem = &_rem;
1105 
1106  req->tv_sec = sec;
1107  req->tv_nsec = nsec;
1108  while (1)
1109  {
1110  if (nanosleep (req, rem) == 0)
1111  break;
 /* On EINTR, *rem holds the unslept time; copy it and retry.
  * (When errno != EINTR the copied value is unused -- we break.) */
1112  *req = *rem;
1113  if (errno == EINTR)
1114  continue;
1115  clib_unix_warning ("nanosleep");
1116  break;
1117  }
1118 }
1119 
1120 /**
1121  * @brief The context passed when collecting adjacency counters
1122  */
1123 typedef struct ip4_nbr_stats_ctx_t_
1124 {
1125  /**
1126  * The SW IF index all these adjs belong to
1127  */
1129 
1130  /**
1131  * A vector of ip4 nbr counters
1132  */
1135 
1136 static adj_walk_rc_t
/* Adjacency-walk callback: for adjacency 'ai', append one counter
 * record (packets/bytes converted to network order, next-hop IPv4
 * address copied without byte-order conversion, link type) to the walk
 * context.  Adjacencies with zero packets are skipped.  Always asks
 * the walk to continue.
 * NOTE(review): the extraction dropped the signature line (original
 * line 1137, presumably ip4_nbr_stats_cb (adj_index_t ai, void *arg))
 * and the declaration of 'ctx' (presumably ip4_nbr_stats_ctx_t *ctx;)
 * -- confirm. */
1138 {
1139  vl_api_ip4_nbr_counter_t *vl_counter;
1140  vlib_counter_t adj_counter;
1142  ip_adjacency_t *adj;
1143 
1144  ctx = arg;
1145  vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
1146 
1147  if (0 != adj_counter.packets)
1148  {
1149  vec_add2 (ctx->counters, vl_counter, 1);
1150  adj = adj_get (ai);
1151 
1152  vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
1153  vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
1154  vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
1155  vl_counter->link_type = adj->ia_link;
1156  }
1157  return (ADJ_WALK_RC_CONTINUE);
1158 }
1159 
1160 #define MIN(x,y) (((x)<(y))?(x):(y))
1161 
1162 static void
1164 {
1165  api_main_t *am = sm->api_main;
1169  int first = 0;
1170 
1171  /*
1172  * If the walk context has counters, which may be left over from the last
1173  * suspend, then we continue from there.
1174  */
1175  while (0 != vec_len (ctx->counters))
1176  {
1177  u32 n_items = MIN (vec_len (ctx->counters),
1179  u8 pause = 0;
1180 
1181  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1182 
1183  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1184  (n_items *
1185  sizeof
1187  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
1188  mp->count = ntohl (n_items);
1189  mp->sw_if_index = ntohl (ctx->sw_if_index);
1190  mp->begin = first;
1191  first = 0;
1192 
1193  /*
1194  * copy the counters from the back of the context, then we can easily
1195  * 'erase' them by resetting the vector length.
1196  * The order we push the stats to the caller is not important.
1197  */
1198  clib_memcpy (mp->c,
1199  &ctx->counters[vec_len (ctx->counters) - n_items],
1200  n_items * sizeof (*ctx->counters));
1201 
1202  _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1203 
1204  /*
1205  * send to the shm q
1206  */
1209 
1210  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1212  dsunlock (sm);
1213 
1214  if (pause)
1215  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1217  }
1218 }
1219 
1220 static void
1222 {
1223  vnet_main_t *vnm = vnet_get_main ();
1225  vnet_sw_interface_t *si;
1226 
1228  .sw_if_index = 0,
1229  .counters = NULL,
1230  };
1231 
1232  /* *INDENT-OFF* */
1233  pool_foreach (si, im->sw_interfaces,
1234  ({
1235  /*
1236  * update the interface we are now concerned with
1237  */
1238  ctx.sw_if_index = si->sw_if_index;
1239 
1240  /*
1241  * we are about to walk another interface, so we shouldn't have any pending
1242  * stats to export.
1243  */
1244  ASSERT(ctx.counters == NULL);
1245 
1246  /*
1247  * visit each neighbour adjacency on the interface and collect
1248  * its current stats.
1249  * Because we hold the lock the walk is synchronous, so safe to routing
1250  * updates. It's limited in work by the number of adjacenies on an
1251  * interface, which is typically not huge.
1252  */
1253  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1254  adj_nbr_walk (si->sw_if_index,
1255  FIB_PROTOCOL_IP4,
1256  ip4_nbr_stats_cb,
1257  &ctx);
1258  dsunlock (sm);
1259 
1260  /*
1261  * if this interface has some adjacencies with counters then ship them,
1262  * else continue to the next interface.
1263  */
1264  if (NULL != ctx.counters)
1265  {
1266  ip4_nbr_ship(sm, &ctx);
1267  }
1268  }));
1269  /* *INDENT-OFF* */
1270 }
1271 
1272 /**
1273  * @brief The context passed when collecting adjacency counters
1274  */
1275 typedef struct ip6_nbr_stats_ctx_t_
1276 {
1277  /**
1278  * The SW IF index all these adjs belong to
1279  */
1281 
1282  /**
1283  * A vector of ip6 nbr counters
1284  */
1287 
/*
 * adj_nbr_walk() callback for IP6: fold one adjacency's combined
 * counter into the walk context if it has seen traffic.
 * NOTE(review): the line carrying the callback name, the adj_index_t
 * parameter and the ip6_nbr_stats_ctx_t *ctx declaration is missing
 * from this extraction; the IPv6 walk below registers it as
 * ip6_nbr_stats_cb -- verify against upstream stats.c.
 */
1288 static adj_walk_rc_t
1290  void *arg)
1291 {
1292  vl_api_ip6_nbr_counter_t *vl_counter;
1293  vlib_counter_t adj_counter;
1295  ip_adjacency_t *adj;
1296 
1297  ctx = arg;
1298  vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
1299 
 /* only export adjacencies that have actually passed packets */
1300  if (0 != adj_counter.packets)
1301  {
1302  vec_add2(ctx->counters, vl_counter, 1);
1303  adj = adj_get(ai);
1304 
 /* counters are byte-swapped here; the ip6 next-hop words are copied
  * without swapping (presumably already network order -- verify) */
1305  vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
1306  vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
1307  vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
1308  vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
1309  vl_counter->link_type = adj->ia_link;
1310  }
 /* always keep walking; every adjacency on the interface is visited */
1311  return (ADJ_WALK_RC_CONTINUE);
1312 }
1313 
/*
 * Classic MIN macro: evaluates both arguments twice, so callers must
 * avoid side-effecting operands (the uses below pass plain values).
 */
1314 #define MIN(x,y) (((x)<(y))?(x):(y))
1315 
/*
 * Drain the IP6 neighbour counters accumulated in the walk context,
 * batching them into VL_API_VNET_IP6_NBR_COUNTERS messages on the shm
 * queue.  May suspend between batches, so the context can carry
 * left-over counters into the next call.
 * NOTE(review): the function-name line and the declarations of mp, the
 * queue q, the per-message batch limit and the congestion test that
 * sets 'pause' are missing from this extraction; the caller suggests
 * this is ip6_nbr_ship(stats_main_t *sm, ip6_nbr_stats_ctx_t *ctx) --
 * verify against upstream stats.c.
 */
1316 static void
1319 {
1320  api_main_t *am = sm->api_main;
1324  int first = 0;
1325 
1326  /*
1327  * If the walk context has counters, which may be left over from the last
1328  * suspend, then we continue from there.
1329  */
1330  while (0 != vec_len(ctx->counters))
1331  {
1332  u32 n_items = MIN (vec_len (ctx->counters),
1334  u8 pause = 0;
1335 
1336  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1337 
1338  mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
1339  (n_items *
1340  sizeof
1342  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
1343  mp->count = ntohl (n_items);
1344  mp->sw_if_index = ntohl (ctx->sw_if_index);
 /* NOTE(review): 'first' is initialised to 0 above, so 'begin' is
  * never set in this view; upstream may initialise it differently --
  * verify */
1345  mp->begin = first;
1346  first = 0;
1347 
1348  /*
1349  * copy the counters from the back of the context, then we can easily
1350  * 'erase' them by resetting the vector length.
1351  * The order we push the stats to the caller is not important.
1352  */
1353  clib_memcpy (mp->c,
1354  &ctx->counters[vec_len (ctx->counters) - n_items],
1355  n_items * sizeof (*ctx->counters));
1356 
1357  _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
1358 
1359  /*
1360  * send to the shm q
1361  */
1364 
1365  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1367  dsunlock (sm);
1368 
 /* back off briefly if the consumer queue looked congested */
1369  if (pause)
1370  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1372  }
1373 }
1374 
/*
 * IPv6 twin of the interface walk above: collect and export IP6
 * neighbour (adjacency) counters for every software interface.
 * NOTE(review): the function-name line, the vnet_interface_main_t
 * pointer and the ip6_nbr_stats_ctx_t initialiser are missing from
 * this extraction; the caller in stats_thread_fn suggests this is
 * do_ip6_nbr_counters(stats_main_t *sm) -- verify against upstream.
 */
1375 static void
1377 {
1378  vnet_main_t *vnm = vnet_get_main ();
1380  vnet_sw_interface_t *si;
1381 
1383  .sw_if_index = 0,
1384  .counters = NULL,
1385  };
1386 
1387  /* *INDENT-OFF* */
1388  pool_foreach (si, im->sw_interfaces,
1389  ({
1390  /*
1391  * update the interface we are now concerned with
1392  */
1393  ctx.sw_if_index = si->sw_if_index;
1394 
1395  /*
1396  * we are about to walk another interface, so we shouldn't have any pending
1397  * stats to export.
1398  */
1399  ASSERT(ctx.counters == NULL);
1400 
1401  /*
1402  * visit each neighbour adjacency on the interface and collect
1403  * its current stats.
1404  * Because we hold the lock the walk is synchronous, so safe to routing
1405  * updates. It's limited in work by the number of adjacenies on an
1406  * interface, which is typically not huge.
1407  */
1408  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1409  adj_nbr_walk (si->sw_if_index,
1410  FIB_PROTOCOL_IP6,
1411  ip6_nbr_stats_cb,
1412  &ctx);
1413  dsunlock (sm);
1414 
1415  /*
1416  * if this interface has some adjacencies with counters then ship them,
1417  * else continue to the next interface.
1418  */
1419  if (NULL != ctx.counters)
1420  {
1421  ip6_nbr_ship(sm, &ctx);
1422  }
1423  }));
1424  /* *INDENT-OFF* */
/* NOTE(review): the marker above should presumably read *INDENT-ON* */
1425 }
1426 
/*
 * Collect per-route IP4 FIB counters and stream them to registered
 * stats clients, IP4_FIB_COUNTER_BATCH_SIZE entries per message.
 * Pass 1 (under the data-structure lock): snapshot all routes of a fib
 * into do_fibs->ip4routes.  Pass 2: fetch each route's combined
 * counter and batch non-zero ones.  If the main thread wants the lock
 * or the queue congests, the function backs off and restarts at
 * 'again', resuming from start_at_fib_index.
 * NOTE(review): the function-name line and several lines (the mp/q
 * declarations, the vl_msg_api_alloc_as_if_client call lines, the
 * release-hint / queue-congestion condition lines) are missing from
 * this extraction; the caller suggests this is
 * do_ip4_fib_counters(stats_main_t *sm) -- verify against upstream.
 */
1427 static void
1429 {
1430  ip4_main_t *im4 = &ip4_main;
1431  api_main_t *am = sm->api_main;
1434  ip4_route_t *r;
1435  fib_table_t *fib;
1436  ip4_fib_t *v4_fib;
1437  do_ip46_fibs_t *do_fibs;
1439  u32 items_this_message;
1440  vl_api_ip4_fib_counter_t *ctrp = 0;
1441  u32 start_at_fib_index = 0;
1442  int i, j, k;
1443 
1444  do_fibs = &sm->do_ip46_fibs;
1445 
1446 again:
1447  vec_reset_length (do_fibs->fibs);
1448  /* *INDENT-OFF* */
1449  pool_foreach (fib, im4->fibs,
1450  ({vec_add1(do_fibs->fibs,fib);}));
1451 
1452  /* *INDENT-ON* */
1453 
1454  for (j = 0; j < vec_len (do_fibs->fibs); j++)
1455  {
1456  fib = do_fibs->fibs[j];
1457  /* We may have bailed out due to control-plane activity */
 /* BUG(review): 'while (...) continue;' re-tests an unchanged
  * condition, so if it is ever true this spins forever; 'if' to skip
  * this fib was almost certainly intended */
1458  while ((fib - im4->fibs) < start_at_fib_index)
1459  continue;
1460 
1461  v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
1462 
1463  if (mp == 0)
1464  {
1465  items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1467  (sizeof (*mp) +
1468  items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1469  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1470  mp->count = 0;
1471  mp->vrf_id = ntohl (fib->ft_table_id);
1472  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1473  }
1474  else
1475  {
1476  /* happens if the last FIB was empty... */
1477  ASSERT (mp->count == 0);
1478  mp->vrf_id = ntohl (fib->ft_table_id);
1479  }
1480 
1481  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1482 
1483  vec_reset_length (do_fibs->ip4routes);
1484  vec_reset_length (do_fibs->results);
1485 
 /* snapshot the fib: one bucket of the per-prefix-length hash tables
  * at a time, collecting (address, masklen, entry index) triples */
1486  for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
1487  {
1488  uword *hash = v4_fib->fib_entry_by_dst_address[i];
1489  hash_pair_t *p;
1490  ip4_route_t x;
1491 
1492  vec_reset_length (do_fibs->pvec);
1493 
1494  x.address_length = i;
1495 
1496  hash_foreach_pair (p, hash, (
1497  {
1498  vec_add1 (do_fibs->pvec, p);}
1499  ));
1500  for (k = 0; k < vec_len (do_fibs->pvec); k++)
1501  {
1502  p = do_fibs->pvec[k];
1503  x.address.data_u32 = p->key;
1504  x.index = p->value[0];
1505 
1506  vec_add1 (do_fibs->ip4routes, x);
 /* NOTE(review): the condition guarding this bail-out (presumably the
  * data-structure-lock release hint) is on a line missing from this
  * extraction -- verify */
1508  {
1509  start_at_fib_index = fib - im4->fibs;
1510  dsunlock (sm);
1511  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1513  mp->count = 0;
1514  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1515  goto again;
1516  }
1517  }
1518  }
1519 
 /* pass 2: read the counter of each snapshotted route and batch the
  * non-zero ones into the outgoing message */
1520  vec_foreach (r, do_fibs->ip4routes)
1521  {
1522  vlib_counter_t c;
1523  const dpo_id_t *dpo_id;
1524  u32 index;
1525 
1526  dpo_id = fib_entry_contribute_ip_forwarding (r->index);
1527  index = (u32) dpo_id->dpoi_index;
1528 
1530  index, &c);
1531  /*
1532  * If it has actually
1533  * seen at least one packet, send it.
1534  */
1535  if (c.packets > 0)
1536  {
1537 
1538  /* already in net byte order */
1539  ctrp->address = r->address.as_u32;
1540  ctrp->address_length = r->address_length;
1541  ctrp->packets = clib_host_to_net_u64 (c.packets);
1542  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1543  mp->count++;
1544  ctrp++;
1545 
1546  if (mp->count == items_this_message)
1547  {
1548  mp->count = htonl (items_this_message);
1549  /*
1550  * If the main thread's input queue is stuffed,
1551  * drop the data structure lock (which the main thread
1552  * may want), and take a pause.
1553  */
1556  {
1557  dsunlock (sm);
1558  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1560  mp = 0;
1561  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1563  goto again;
1564  }
1565  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1567 
1568  items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
1570  (sizeof (*mp) +
1571  items_this_message * sizeof (vl_api_ip4_fib_counter_t));
1572  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
1573  mp->count = 0;
1574  mp->vrf_id = ntohl (fib->ft_table_id);
1575  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1576  }
1577  } /* for each (mp or single) adj */
 /* NOTE(review): the condition for this second bail-out is also on a
  * dropped line -- verify against upstream */
1579  {
1580  start_at_fib_index = fib - im4->fibs;
1581  dsunlock (sm);
1582  ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
1583  mp->count = 0;
1584  ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
1585  goto again;
1586  }
1587  } /* vec_foreach (routes) */
1588 
1589  dsunlock (sm);
1590 
1591  /* Flush any data from this fib */
1592  if (mp->count)
1593  {
1594  mp->count = htonl (mp->count);
1595  vl_msg_api_send_shmem (q, (u8 *) & mp);
1596  mp = 0;
1597  }
1598  }
1599 
1600  /* If e.g. the last FIB had no reportable routes, free the buffer */
1601  if (mp)
1602  vl_msg_api_free (mp);
1603 }
1604 
/*
 * mfib_table_walk() callback: record each entry's prefix in
 * do_fibs->mroutes so counters can be fetched after the lock is
 * dropped.
 * NOTE(review): the name/parameter line is missing from this
 * extraction; usage implies (fib_node_index_t fei, void *ctx) --
 * verify against upstream stats.c.
 */
1605 static int
1607 {
1608  stats_main_t *sm = ctx;
1609  do_ip46_fibs_t *do_fibs;
1610  mfib_entry_t *entry;
1611 
1612  do_fibs = &sm->do_ip46_fibs;
1613  entry = mfib_entry_get (fei);
1614 
1615  vec_add1 (do_fibs->mroutes, entry->mfe_prefix);
1616 
 /* non-zero return keeps the table walk going */
1617  return (1);
1618 }
1619 
/*
 * Collect per-(S,G) IP4 multicast FIB counters and stream them to
 * registered clients, IP4_MFIB_COUNTER_BATCH_SIZE per message.  The
 * table is walked under the data-structure lock to snapshot prefixes;
 * each prefix is then re-looked-up (the collection may suspend) before
 * its counter is read.
 * NOTE(review): the function-name line and several lines (mp/q
 * declarations, alloc calls, the walk callback argument, congestion
 * conditions) are missing from this extraction; the caller suggests
 * this is do_ip4_mfib_counters(stats_main_t *sm) -- verify upstream.
 */
1620 static void
1622 {
1623  ip4_main_t *im4 = &ip4_main;
1624  api_main_t *am = sm->api_main;
1627  mfib_prefix_t *pfx;
1628  mfib_table_t *mfib;
1629  do_ip46_fibs_t *do_fibs;
1631  u32 items_this_message;
1632  vl_api_ip4_mfib_counter_t *ctrp = 0;
1633  u32 start_at_mfib_index = 0;
1634  int i, j, k;
1635 
1636  do_fibs = &sm->do_ip46_fibs;
1637 
1638  vec_reset_length (do_fibs->mfibs);
1639  /* *INDENT-OFF* */
1640  pool_foreach (mfib, im4->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1641  /* *INDENT-ON* */
1642 
1643  for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1644  {
1645  mfib = do_fibs->mfibs[j];
1646  /* We may have bailed out due to control-plane activity */
 /* BUG(review): 'while (...) continue;' spins forever if the
  * condition is ever true; 'if' was almost certainly intended */
1647  while ((mfib - im4->mfibs) < start_at_mfib_index)
1648  continue;
1649 
1650  if (mp == 0)
1651  {
1652  items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1654  (sizeof (*mp) +
1655  items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1656  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1657  mp->count = 0;
1658  mp->vrf_id = ntohl (mfib->mft_table_id);
1659  ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1660  }
1661  else
1662  {
1663  /* happens if the last MFIB was empty... */
1664  ASSERT (mp->count == 0);
1665  mp->vrf_id = ntohl (mfib->mft_table_id);
1666  }
1667 
1668  vec_reset_length (do_fibs->mroutes);
1669 
1670  /*
1671  * walk the table with table updates blocked
1672  */
1673  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1674 
1675  mfib_table_walk (mfib->mft_index,
1677  dsunlock (sm);
1678 
1679  vec_foreach (pfx, do_fibs->mroutes)
1680  {
1681  const dpo_id_t *dpo_id;
1682  fib_node_index_t mfei;
1683  vlib_counter_t c;
1684  u32 index;
1685 
1686  /*
1687  * re-lookup the entry, since we suspend during the collection
1688  */
1689  mfei = mfib_table_lookup (mfib->mft_index, pfx);
1690 
 /* route may have been deleted while we were suspended */
1691  if (FIB_NODE_INDEX_INVALID == mfei)
1692  continue;
1693 
1694  dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1695  index = (u32) dpo_id->dpoi_index;
1696 
1698  dpo_id->dpoi_index, &c);
1699  /*
1700  * If it has seen at least one packet, send it.
1701  */
1702  if (c.packets > 0)
1703  {
1704  /* already in net byte order */
1705  memcpy (ctrp->group, &pfx->fp_grp_addr.ip4, 4);
1706  memcpy (ctrp->source, &pfx->fp_src_addr.ip4, 4);
1707  ctrp->group_length = pfx->fp_len;
1708  ctrp->packets = clib_host_to_net_u64 (c.packets);
1709  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1710  mp->count++;
1711  ctrp++;
1712 
1713  if (mp->count == items_this_message)
1714  {
1715  mp->count = htonl (items_this_message);
1716  /*
1717  * If the main thread's input queue is stuffed,
1718  * drop the data structure lock (which the main thread
1719  * may want), and take a pause.
1720  */
1722 
 /* NOTE(review): the congestion condition is on dropped lines */
1724  {
1726  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1729  }
1730  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1732 
1733  items_this_message = IP4_MFIB_COUNTER_BATCH_SIZE;
1735  (sizeof (*mp) +
1736  items_this_message * sizeof (vl_api_ip4_mfib_counter_t));
1737  mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_MFIB_COUNTERS);
1738  mp->count = 0;
1739  mp->vrf_id = ntohl (mfib->mft_table_id);
1740  ctrp = (vl_api_ip4_mfib_counter_t *) mp->c;
1741  }
1742  }
1743  }
1744 
1745  /* Flush any data from this mfib */
1746  if (mp->count)
1747  {
1748  mp->count = htonl (mp->count);
1749  vl_msg_api_send_shmem (q, (u8 *) & mp);
1750  mp = 0;
1751  }
1752  }
1753 
1754  /* If e.g. the last FIB had no reportable routes, free the buffer */
1755  if (mp)
1756  vl_msg_api_free (mp);
1757 }
1758 
/*
 * IPv6 twin of the mfib collector above: per-(S,G) IP6 multicast FIB
 * counters, IP6_MFIB_COUNTER_BATCH_SIZE per message, 16-byte group and
 * source addresses.
 * NOTE(review): the function-name line and several lines (mp/q
 * declarations, alloc calls, the walk callback argument, congestion
 * conditions) are missing from this extraction; the caller suggests
 * this is do_ip6_mfib_counters(stats_main_t *sm) -- verify upstream.
 */
1759 static void
1761 {
1762  ip6_main_t *im6 = &ip6_main;
1763  api_main_t *am = sm->api_main;
1766  mfib_prefix_t *pfx;
1767  mfib_table_t *mfib;
1768  do_ip46_fibs_t *do_fibs;
1770  u32 items_this_message;
1771  vl_api_ip6_mfib_counter_t *ctrp = 0;
1772  u32 start_at_mfib_index = 0;
1773  int i, j, k;
1774 
1775  do_fibs = &sm->do_ip46_fibs;
1776 
1777  vec_reset_length (do_fibs->mfibs);
1778  /* *INDENT-OFF* */
1779  pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
1780  /* *INDENT-ON* */
1781 
1782  for (j = 0; j < vec_len (do_fibs->mfibs); j++)
1783  {
1784  mfib = do_fibs->mfibs[j];
1785  /* We may have bailed out due to control-plane activity */
 /* BUG(review): 'while (...) continue;' spins forever if the
  * condition is ever true; 'if' was almost certainly intended */
1786  while ((mfib - im6->mfibs) < start_at_mfib_index)
1787  continue;
1788 
1789  if (mp == 0)
1790  {
1791  items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1793  (sizeof (*mp) +
1794  items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1795  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1796  mp->count = 0;
1797  mp->vrf_id = ntohl (mfib->mft_table_id);
1798  ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1799  }
1800  else
1801  {
1802  /* happens if the last MFIB was empty... */
1803  ASSERT (mp->count == 0);
1804  mp->vrf_id = ntohl (mfib->mft_table_id);
1805  }
1806 
1807  vec_reset_length (do_fibs->mroutes);
1808 
1809  /*
1810  * walk the table with table updates blocked
1811  */
1812  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1813 
1814  mfib_table_walk (mfib->mft_index,
1816  dsunlock (sm);
1817 
1818  vec_foreach (pfx, do_fibs->mroutes)
1819  {
1820  const dpo_id_t *dpo_id;
1821  fib_node_index_t mfei;
1822  vlib_counter_t c;
1823  u32 index;
1824 
1825  /*
1826  * re-lookup the entry, since we suspend during the collection
1827  */
1828  mfei = mfib_table_lookup (mfib->mft_index, pfx);
1829 
 /* route may have been deleted while we were suspended */
1830  if (FIB_NODE_INDEX_INVALID == mfei)
1831  continue;
1832 
1833  dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
1834  index = (u32) dpo_id->dpoi_index;
1835 
1837  dpo_id->dpoi_index, &c);
1838  /*
1839  * If it has seen at least one packet, send it.
1840  */
1841  if (c.packets > 0)
1842  {
1843  /* already in net byte order */
1844  memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
1845  memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
1846  ctrp->group_length = pfx->fp_len;
1847  ctrp->packets = clib_host_to_net_u64 (c.packets);
1848  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
1849  mp->count++;
1850  ctrp++;
1851 
1852  if (mp->count == items_this_message)
1853  {
1854  mp->count = htonl (items_this_message);
1855  /*
1856  * If the main thread's input queue is stuffed,
1857  * drop the data structure lock (which the main thread
1858  * may want), and take a pause.
1859  */
1861 
 /* NOTE(review): the congestion condition is on dropped lines */
1863  {
1865  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1868  }
1869  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
1871 
1872  items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
1874  (sizeof (*mp) +
1875  items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
1876  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
1877  mp->count = 0;
1878  mp->vrf_id = ntohl (mfib->mft_table_id);
1879  ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
1880  }
1881  }
1882  }
1883 
1884  /* Flush any data from this mfib */
1885  if (mp->count)
1886  {
1887  mp->count = htonl (mp->count);
1888  vl_msg_api_send_shmem (q, (u8 *) & mp);
1889  mp = 0;
1890  }
1891  }
1892 
1893  /* If e.g. the last FIB had no reportable routes, free the buffer */
1894  if (mp)
1895  vl_msg_api_free (mp);
1896 }
1897 
/*
 * Argument bundle for the add_routes_in_fib() bihash walk below.
 * NOTE(review): the member declarations (per usage below: a fib_index,
 * a pointer to the ip6 route vector 'routep', and the stats_main 'sm')
 * and the closing "} add_routes_in_fib_arg_t;" are missing from this
 * extraction -- verify against upstream stats.c.
 */
1898 typedef struct
1899 {
1904 
/*
 * Bihash walk callback: snapshot every ip6 forwarding entry belonging
 * to ap->fib_index into *ap->routep.  Aborts the walk via longjmp back
 * to the collector when interrupted (the guarding condition is on a
 * line missing from this extraction -- verify upstream).
 */
1905 static void
1906 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
1907 {
1908  add_routes_in_fib_arg_t *ap = arg;
1909  stats_main_t *sm = ap->sm;
1910 
 /* NOTE(review): the condition for this non-local exit is on a
  * dropped line (presumably the data-structure-lock release hint) */
1912  clib_longjmp (&sm->jmp_buf, 1);
1913 
 /* key layout: the address occupies the leading words, the fib index
  * lives in the upper 32 bits of key[2], the prefix length in its low
  * byte */
1914  if (kvp->key[2] >> 32 == ap->fib_index)
1915  {
1917  ip6_route_t *r;
1918  addr = (ip6_address_t *) kvp;
1919  vec_add2 (*ap->routep, r, 1);
1920  r->address = addr[0];
1921  r->address_length = kvp->key[2] & 0xFF;
1922  r->index = kvp->value;
1923  }
1924 }
1925 
/*
 * Collect per-route IP6 FIB counters and stream them to registered
 * clients, IP6_FIB_COUNTER_BATCH_SIZE entries per message.  Routes are
 * snapshotted by walking the forwarding bihash under the lock; the
 * walk callback longjmps back here if interrupted, which restarts the
 * whole collection at 'again' from start_at_fib_index.
 * NOTE(review): the function-name line and several lines (mp/q
 * declarations, alloc calls, the bihash foreach call at the setjmp
 * site, congestion conditions) are missing from this extraction; the
 * caller suggests this is do_ip6_fib_counters(stats_main_t *sm).
 */
1926 static void
1928 {
1929  ip6_main_t *im6 = &ip6_main;
1930  api_main_t *am = sm->api_main;
1933  ip6_route_t *r;
1934  fib_table_t *fib;
1935  do_ip46_fibs_t *do_fibs;
1937  u32 items_this_message;
1938  vl_api_ip6_fib_counter_t *ctrp = 0;
1939  u32 start_at_fib_index = 0;
1940  BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
1941  add_routes_in_fib_arg_t _a, *a = &_a;
1942  int i;
1943 
1944  do_fibs = &sm->do_ip46_fibs;
1945 again:
1946  vec_reset_length (do_fibs->fibs);
1947  /* *INDENT-OFF* */
1948  pool_foreach (fib, im6->fibs,
1949  ({vec_add1(do_fibs->fibs,fib);}));
1950  /* *INDENT-ON* */
1951 
1952 
1953  for (i = 0; i < vec_len (do_fibs->fibs); i++)
1954  {
1955  fib = do_fibs->fibs[i];
1956  /* We may have bailed out due to control-plane activity */
 /* BUG(review): 'while (...) continue;' spins forever if the
  * condition is ever true; 'if' was almost certainly intended */
1957  while ((fib - im6->fibs) < start_at_fib_index)
1958  continue;
1959 
1960  if (mp == 0)
1961  {
1962  items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
1964  (sizeof (*mp) +
1965  items_this_message * sizeof (vl_api_ip6_fib_counter_t));
1966  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
1967  mp->count = 0;
1968  mp->vrf_id = ntohl (fib->ft_table_id);
1969  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1970  }
1971 
1972  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
1973 
1974  vec_reset_length (do_fibs->ip6routes);
1975  vec_reset_length (do_fibs->results);
1976 
1977  a->fib_index = fib - im6->fibs;
1978  a->routep = &do_fibs->ip6routes;
1979  a->sm = sm;
1980 
 /* setjmp==0: first pass -- run the bihash walk (the foreach call is
  * on a dropped line); non-zero: the walk longjmp'd out, so back off
  * and restart the snapshot */
1981  if (clib_setjmp (&sm->jmp_buf, 0) == 0)
1982  {
1983  start_at_fib_index = fib - im6->fibs;
1985  }
1986  else
1987  {
1988  dsunlock (sm);
1989  ip46_fib_stats_delay (sm, 0 /* sec */ ,
1991  mp->count = 0;
1992  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
1993  goto again;
1994  }
1995 
1996  vec_foreach (r, do_fibs->ip6routes)
1997  {
1998  vlib_counter_t c;
1999 
2001  r->index, &c);
2002  /*
2003  * If it has actually
2004  * seen at least one packet, send it.
2005  */
2006  if (c.packets > 0)
2007  {
2008  /* already in net byte order */
2009  ctrp->address[0] = r->address.as_u64[0];
2010  ctrp->address[1] = r->address.as_u64[1];
2011  ctrp->address_length = (u8) r->address_length;
2012  ctrp->packets = clib_host_to_net_u64 (c.packets);
2013  ctrp->bytes = clib_host_to_net_u64 (c.bytes);
2014  mp->count++;
2015  ctrp++;
2016 
2017  if (mp->count == items_this_message)
2018  {
2019  mp->count = htonl (items_this_message);
2020  /*
2021  * If the main thread's input queue is stuffed,
2022  * drop the data structure lock (which the main thread
2023  * may want), and take a pause.
2024  */
 /* NOTE(review): the congestion condition is on dropped lines */
2027  {
2028  dsunlock (sm);
2029  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2031  mp = 0;
2032  ip46_fib_stats_delay (sm, 0 /* sec */ ,
2034  goto again;
2035  }
2036  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
2038 
2039  items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
2041  (sizeof (*mp) +
2042  items_this_message * sizeof (vl_api_ip6_fib_counter_t));
2043  mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
2044  mp->count = 0;
2045  mp->vrf_id = ntohl (fib->ft_table_id);
2046  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2047  }
2048  }
2049 
 /* NOTE(review): the condition for this bail-out is on a dropped
  * line -- verify against upstream */
2051  {
2052  start_at_fib_index = fib - im6->fibs;
2053  dsunlock (sm);
2054  ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
2055  mp->count = 0;
2056  ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
2057  goto again;
2058  }
2059  } /* vec_foreach (routes) */
2060 
2061  dsunlock (sm);
2062 
2063  /* Flush any data from this fib */
2064  if (mp->count)
2065  {
2066  mp->count = htonl (mp->count);
2067  vl_msg_api_send_shmem (q, (u8 *) & mp);
2068  mp = 0;
2069  }
2070  }
2071 
2072  /* If e.g. the last FIB had no reportable routes, free the buffer */
2073  if (mp)
2074  vl_msg_api_free (mp);
2075 }
2076 
/*
 * Main loop of the dedicated stats pthread: every 10 seconds, if the
 * poller is enabled, run each collector whose registration pool has at
 * least one client.  Never returns.
 * NOTE(review): the vlib_thread_main pointer declaration and the
 * per-interface collector call lines are missing from this extraction.
 */
2077 static void
2078 stats_thread_fn (void *arg)
2079 {
2080  stats_main_t *sm = &stats_main;
2083 
2084  /* stats thread wants no signals. */
2085  {
2086  sigset_t s;
2087  sigfillset (&s);
2088  pthread_sigmask (SIG_SETMASK, &s, 0);
2089  }
2090 
2091  if (vec_len (tm->thread_prefix))
2092  vlib_set_thread_name ((char *)
2093  format (0, "%v_stats%c", tm->thread_prefix, '\0'));
2094 
2096 
2097  while (1)
2098  {
2099  /* 10 second poll interval */
2100  ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
2101 
 /* nothing registered anywhere: skip this round entirely */
2102  if (!(sm->enable_poller))
2103  {
2104  continue;
2105  }
 /* each collector runs only if at least one client registered for
  * that statistic (the two interface-counter call lines are on
  * dropped lines) */
2106  if (pool_elts
2107  (sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS]))
2109 
2110  if (pool_elts
2111  (sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS]))
2113 
2114  if (pool_elts (sm->stats_registrations[IDX_IP4_FIB_COUNTERS]))
2115  do_ip4_fib_counters (sm);
2116 
2117  if (pool_elts (sm->stats_registrations[IDX_IP6_FIB_COUNTERS]))
2118  do_ip6_fib_counters (sm);
2119 
2120  if (pool_elts (sm->stats_registrations[IDX_IP4_MFIB_COUNTERS]))
2121  do_ip4_mfib_counters (sm);
2122 
2123  if (pool_elts (sm->stats_registrations[IDX_IP6_MFIB_COUNTERS]))
2124  do_ip6_mfib_counters (sm);
2125 
2126  if (pool_elts (sm->stats_registrations[IDX_IP4_NBR_COUNTERS]))
2127  do_ip4_nbr_counters (sm);
2128 
2129  if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
2130  do_ip6_nbr_counters (sm);
2131  }
2132 }
2133 
/*
 * Fan a simple-counters message out to every registered client.
 * Pattern: for each live client queue, if a *previous* queue has room,
 * clone the message, send the clone's predecessor, and keep the fresh
 * copy; the final send (or free) happens after the loop.  Dead clients
 * are pruned via clear_client_for_stat.
 * NOTE(review): the handler-name line and the mp/mp_copy declarations
 * are missing from this extraction; the registration index used below
 * suggests this is vl_api_vnet_interface_simple_counters_t_handler().
 */
2134 static void
2137 {
2138  vpe_client_registration_t *clients, client;
2139  stats_main_t *sm = &stats_main;
2140  unix_shared_memory_queue_t *q, *q_prev = NULL;
2142  u32 mp_size;
2143  int i;
2144 
 /* simple counters are an array of u64 values after the header */
2145  mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
2146 
2147  clients =
2148  get_clients_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2149  ~0 /*flag for all */ );
2150 
2151  for (i = 0; i < vec_len (clients); i++)
2152  {
2153  client = clients[i];
 /* NOTE(review): the queue lookup for this client is on a dropped
  * line (presumably vl_api_client_index_to_input_queue) */
2155  if (q)
2156  {
2157  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2158  {
2159  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2160  clib_memcpy (mp_copy, mp, mp_size);
2161  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2162  mp = mp_copy;
2163  }
2164  q_prev = q;
2165  }
2166  else
2167  {
 /* client has gone away: drop its registration */
2168  sm->enable_poller =
2169  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, ~0,
2170  client.client_index);
2171  continue;
2172  }
2173  }
2174 
2175 #if STATS_DEBUG > 0
2176  fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
2177 #endif
2178 
 /* ship the last copy, or free it if the last queue is full/absent */
2179  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2180  {
2181  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2182  }
2183  else
2184  {
2185  vl_msg_api_free (mp);
2186  }
2187 }
2188 
/*
 * Fan an IP4 FIB counters message out to every client registered for
 * IDX_IP4_FIB_COUNTERS (same clone-and-forward pattern as the simple
 * counters handler above).
 * NOTE(review): the handler-name line and the mp/mp_copy declarations
 * are missing from this extraction.
 */
2189 static void
2191 {
2192  stats_main_t *sm = &stats_main;
2193  unix_shared_memory_queue_t *q, *q_prev = NULL;
2195  u32 mp_size;
2196  vpe_client_registration_t *clients, client;
2197  int i;
2198 
2199  mp_size = sizeof (*mp_copy) +
2200  ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
2201 
2202  clients =
2203  get_clients_for_stat (IDX_IP4_FIB_COUNTERS, ~0 /*flag for all */ );
2204 
2205  for (i = 0; i < vec_len (clients); i++)
2206  {
2207  client = clients[i];
 /* NOTE(review): the client queue lookup is on a dropped line */
2209  if (q)
2210  {
2211  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2212  {
2213  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2214  clib_memcpy (mp_copy, mp, mp_size);
2215  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2216  mp = mp_copy;
2217  }
2218  q_prev = q;
2219  }
2220  else
2221  {
 /* client has gone away: drop its registration */
2222  sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2223  ~0, client.client_index);
2224  continue;
2225  }
2226  }
2227 
 /* ship the last copy, or free it if the last queue is full/absent */
2228  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2229  {
2230  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2231  }
2232  else
2233  {
2234  vl_msg_api_free (mp);
2235  }
2236 }
2237 
/*
 * Fan an IP4 neighbour counters message out to every client registered
 * for IDX_IP4_NBR_COUNTERS (clone-and-forward pattern, see the simple
 * counters handler).
 * NOTE(review): the handler-name line and the mp/mp_copy declarations
 * are missing from this extraction.
 */
2238 static void
2240 {
2241  stats_main_t *sm = &stats_main;
2242  unix_shared_memory_queue_t *q, *q_prev = NULL;
2244  u32 mp_size;
2245  vpe_client_registration_t *clients, client;
2246  int i;
2247 
2248  mp_size = sizeof (*mp_copy) +
2249  ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
2250 
2251  clients =
2252  get_clients_for_stat (IDX_IP4_NBR_COUNTERS, ~0 /*flag for all */ );
2253 
2254  for (i = 0; i < vec_len (clients); i++)
2255  {
2256  client = clients[i];
 /* NOTE(review): the client queue lookup is on a dropped line */
2258  if (q)
2259  {
2260  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2261  {
2262  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2263  clib_memcpy (mp_copy, mp, mp_size);
2264  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2265  mp = mp_copy;
2266  }
2267  q_prev = q;
2268  }
2269  else
2270  {
 /* client has gone away: drop its registration */
2271  sm->enable_poller = clear_client_for_stat (IDX_IP4_NBR_COUNTERS,
2272  ~0, client.client_index);
2273  continue;
2274  }
2275  }
2276 
2277  /* *INDENT-ON* */
2278  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2279  {
2280  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2281  }
2282  else
2283  {
2284  vl_msg_api_free (mp);
2285  }
2286 }
2287 
/*
 * Fan an IP6 FIB counters message out to every client registered for
 * IDX_IP6_FIB_COUNTERS (clone-and-forward pattern).
 * NOTE(review): the handler-name line and the mp/mp_copy declarations
 * are missing from this extraction.
 */
2288 static void
2290 {
2291  stats_main_t *sm = &stats_main;
2292  unix_shared_memory_queue_t *q, *q_prev = NULL;
2294  u32 mp_size;
2295  vpe_client_registration_t *clients, client;
2296  int i;
2297 
2298  mp_size = sizeof (*mp_copy) +
2299  ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
2300 
2301  clients =
2302  get_clients_for_stat (IDX_IP6_FIB_COUNTERS, ~0 /*flag for all */ );
2303 
2304  for (i = 0; i < vec_len (clients); i++)
2305  {
2306  client = clients[i];
 /* NOTE(review): the client queue lookup is on a dropped line */
2308  if (q)
2309  {
2310  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2311  {
2312  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2313  clib_memcpy (mp_copy, mp, mp_size);
2314  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2315  mp = mp_copy;
2316  }
2317  q_prev = q;
2318  }
2319  else
2320  {
 /* client has gone away: drop its registration */
2321  sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2322  ~0, client.client_index);
2323  continue;
2324  }
2325  }
2326  /* *INDENT-ON* */
2327  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2328  {
2329  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2330  }
2331  else
2332  {
2333  vl_msg_api_free (mp);
2334  }
2335 }
2336 
/*
 * Fan an IP6 neighbour counters message out to every client registered
 * for IDX_IP6_NBR_COUNTERS (clone-and-forward pattern).
 * NOTE(review): the handler-name line and the mp/mp_copy declarations
 * are missing from this extraction.
 */
2337 static void
2339 {
2340  stats_main_t *sm = &stats_main;
2341  unix_shared_memory_queue_t *q, *q_prev = NULL;
2343  u32 mp_size;
2344  vpe_client_registration_t *clients, client;
2345  int i;
2346 
2347  mp_size = sizeof (*mp_copy) +
2348  ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
2349 
2350  clients =
2351  get_clients_for_stat (IDX_IP6_NBR_COUNTERS, ~0 /*flag for all */ );
2352 
2353  for (i = 0; i < vec_len (clients); i++)
2354  {
2355  client = clients[i];
 /* NOTE(review): the client queue lookup is on a dropped line */
2357  if (q)
2358  {
2359  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2360  {
2361  mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
2362  clib_memcpy (mp_copy, mp, mp_size);
2363  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2364  mp = mp_copy;
2365  }
2366  q_prev = q;
2367  }
2368  else
2369  {
 /* client has gone away: drop its registration */
2370  sm->enable_poller = clear_client_for_stat (IDX_IP6_NBR_COUNTERS,
2371  ~0, client.client_index);
2372  continue;
2373  }
2374  }
2375  /* *INDENT-ON* */
2376  if (q_prev && (q_prev->cursize < q_prev->maxsize))
2377  {
2378  vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
2379  }
2380  else
2381  {
2382  vl_msg_api_free (mp);
2383  }
2384 }
2385 
/*
 * Legacy WANT_STATS handler: (un)registers the client for ALL stats
 * categories at once (item == ~0), then replies with WANT_STATS_REPLY.
 * NOTE(review): the handler-name line and the vpe_client_registration_t
 * rp / queue q declarations are missing from this extraction.  The
 * 'reply:' label has no visible goto, and 'p' is unused in this view.
 */
2386 static void
2388 {
2389  stats_main_t *sm = &stats_main;
2391  vl_api_want_stats_reply_t *rmp;
2392  uword *p;
2393  i32 retval = 0;
2394  u32 item;
2396 
2397  item = ~0; //"ALL THE THINGS IN THE THINGS
2398  rp.client_index = mp->client_index;
2399  rp.client_pid = mp->pid;
2400 
2401  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
2402  item, mp->enable_disable);
2403 
2404  handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
2405  item, mp->enable_disable);
2406 
2407  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS,
2408  item, mp->enable_disable);
2409 
2410  handle_client_registration (&rp, IDX_IP4_NBR_COUNTERS,
2411  item, mp->enable_disable);
2412 
2413  handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS,
2414  item, mp->enable_disable);
2415 
2416  handle_client_registration (&rp, IDX_IP6_NBR_COUNTERS,
2417  item, mp->enable_disable);
2418 
2419 reply:
 /* NOTE(review): the queue lookup for the reply is on a dropped line */
2421 
2422  if (!q)
2423  return;
2424 
2425  rmp = vl_msg_api_alloc (sizeof (*rmp));
2426  rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
2427  rmp->context = mp->context;
2428  rmp->retval = retval;
2429 
2430  vl_msg_api_send_shmem (q, (u8 *) & rmp);
2431 }
2432 
/*
 * (Un)register a client for per-interface simple counters (swif == ~0
 * means "all interfaces") and ack with
 * WANT_INTERFACE_SIMPLE_STATS_REPLY.  If the client's queue is already
 * gone, its registration is scrubbed instead.
 * NOTE(review): the handler-name line and the rp/q declarations are
 * missing from this extraction.
 */
2433 static void
2436 {
2437  stats_main_t *sm = &stats_main;
2439  vl_api_want_interface_simple_stats_reply_t *rmp;
2440  uword *p;
2441  i32 retval = 0;
2442  u32 swif;
2444 
2445  swif = ~0; //Using same mechanism as _per_interface_
2446  rp.client_index = mp->client_index;
2447  rp.client_pid = mp->pid;
2448 
2449  handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2450  mp->enable_disable);
2451 
2452 reply:
 /* NOTE(review): the queue lookup for the reply is on a dropped line */
2454 
2455  if (!q)
2456  {
2457  sm->enable_poller =
2458  clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
2459  mp->client_index);
2460  return;
2461  }
2462 
2463  rmp = vl_msg_api_alloc (sizeof (*rmp));
2464  rmp->_vl_msg_id = ntohs (VL_API_WANT_INTERFACE_SIMPLE_STATS_REPLY);
2465  rmp->context = mp->context;
2466  rmp->retval = retval;
2467 
2468  vl_msg_api_send_shmem (q, (u8 *) & rmp);
2469 }
2470 
2471 
/*
 * (Un)register a client for IP4 FIB counters (fib == ~0 means "all
 * fibs") and ack with WANT_IP4_FIB_STATS_REPLY.
 * NOTE(review): the handler-name line and the rp/q declarations are
 * missing from this extraction.
 */
2472 static void
2474 {
2475  stats_main_t *sm = &stats_main;
2477  vl_api_want_ip4_fib_stats_reply_t *rmp;
2478  uword *p;
2479  i32 retval = 0;
2481  u32 fib;
2482 
2483  fib = ~0; //Using same mechanism as _per_interface_
2484  rp.client_index = mp->client_index;
2485  rp.client_pid = mp->pid;
2486 
2487  handle_client_registration (&rp, IDX_IP4_FIB_COUNTERS, fib,
2488  mp->enable_disable);
2489 
2490 reply:
 /* NOTE(review): the queue lookup for the reply is on a dropped line */
2492 
2493  if (!q)
2494  {
2495  sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
2496  fib, mp->client_index);
2497  return;
2498  }
2499 
2500  rmp = vl_msg_api_alloc (sizeof (*rmp));
2501  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_FIB_STATS_REPLY);
2502  rmp->context = mp->context;
2503  rmp->retval = retval;
2504 
2505  vl_msg_api_send_shmem (q, (u8 *) & rmp);
2506 }
2507 
/*
 * (Un)register a client for IP4 multicast FIB counters (mfib == ~0
 * means "all mfibs") and ack with WANT_IP4_MFIB_STATS_REPLY.
 * NOTE(review): the handler-name line and the rp/q declarations are
 * missing from this extraction.
 */
2508 static void
2510 {
2511  stats_main_t *sm = &stats_main;
2513  vl_api_want_ip4_mfib_stats_reply_t *rmp;
2514  uword *p;
2515  i32 retval = 0;
2517  u32 mfib;
2518 
2519  mfib = ~0; //Using same mechanism as _per_interface_
2520  rp.client_index = mp->client_index;
2521  rp.client_pid = mp->pid;
2522 
2523  handle_client_registration (&rp, IDX_IP4_MFIB_COUNTERS, mfib,
2524  mp->enable_disable);
2525 
2526 reply:
 /* NOTE(review): the queue lookup for the reply is on a dropped line */
2528 
2529  if (!q)
2530  {
2531  sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
2532  mfib, mp->client_index);
2533  return;
2534  }
2535 
2536  rmp = vl_msg_api_alloc (sizeof (*rmp));
2537  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP4_MFIB_STATS_REPLY);
2538  rmp->context = mp->context;
2539  rmp->retval = retval;
2540 
2541  vl_msg_api_send_shmem (q, (u8 *) & rmp);
2542 }
2543 
/*
 * (Un)register a client for IP6 FIB counters and ack with
 * WANT_IP6_FIB_STATS_REPLY.
 * NOTE(review): the handler-name line and the rp/q declarations are
 * missing from this extraction.  Also note rmp is declared with the
 * *ip4* reply typedef below -- harmless only if the two reply layouts
 * are identical; the ip6 typedef should be used.
 */
2544 static void
2546 {
2547  stats_main_t *sm = &stats_main;
2549  vl_api_want_ip4_fib_stats_reply_t *rmp;
2550  uword *p;
2551  i32 retval = 0;
2553  u32 fib;
2554 
2555  fib = ~0; //Using same mechanism as _per_interface_
2556  rp.client_index = mp->client_index;
2557  rp.client_pid = mp->pid;
2558 
2559  handle_client_registration (&rp, IDX_IP6_FIB_COUNTERS, fib,
2560  mp->enable_disable);
2561 
2562 reply:
 /* NOTE(review): the queue lookup for the reply is on a dropped line */
2564 
2565  if (!q)
2566  {
2567  sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
2568  fib, mp->client_index);
2569  return;
2570  }
2571 
2572  rmp = vl_msg_api_alloc (sizeof (*rmp));
2573  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_FIB_STATS_REPLY);
2574  rmp->context = mp->context;
2575  rmp->retval = retval;
2576 
2577  vl_msg_api_send_shmem (q, (u8 *) & rmp);
2578 }
2579 
/*
 * (Un)register a client for IP6 multicast FIB counters and ack with
 * WANT_IP6_MFIB_STATS_REPLY.
 * NOTE(review): the handler-name line and the rp/q declarations are
 * missing from this extraction.  As with the ip6 fib handler, rmp is
 * declared with the *ip4* mfib reply typedef -- should be the ip6 one.
 */
2580 static void
2582 {
2583  stats_main_t *sm = &stats_main;
2585  vl_api_want_ip4_mfib_stats_reply_t *rmp;
2586  uword *p;
2587  i32 retval = 0;
2589  u32 mfib;
2590 
2591  mfib = ~0; //Using same mechanism as _per_interface_
2592  rp.client_index = mp->client_index;
2593  rp.client_pid = mp->pid;
2594 
2595  handle_client_registration (&rp, IDX_IP6_MFIB_COUNTERS, mfib,
2596  mp->enable_disable);
2597 
2598 reply:
 /* NOTE(review): the queue lookup for the reply is on a dropped line */
2600 
2601  if (!q)
2602  {
2603  sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
2604  mfib, mp->client_index);
2605  return;
2606  }
2607 
2608  rmp = vl_msg_api_alloc (sizeof (*rmp));
2609  rmp->_vl_msg_id = ntohs (VL_API_WANT_IP6_MFIB_STATS_REPLY);
2610  rmp->context = mp->context;
2611  rmp->retval = retval;
2612 
2613  vl_msg_api_send_shmem (q, (u8 *) & rmp);
2614 }
2615 
2616 /* FIXME - NBR stats broken - this will be fixed in subsequent patch */
/*
 * Deliberately-empty handlers (per the FIXME above) -- presumably the
 * want_ip4_nbr_stats / want_ip6_nbr_stats handlers; their name lines
 * are missing from this extraction.  Note no reply is sent, so clients
 * get no ack until these are implemented.
 */
2617 static void
2619 {
2620 }
2621 
2622 static void
2624 {
2625 }
2626 
2627 static void
2629 {
2630  stats_main_t *sm = &stats_main;
2634  vlib_counter_t v;
2635  int i, which;
2636  u64 total_pkts[VLIB_N_RX_TX];
2637  u64 total_bytes[VLIB_N_RX_TX];
2638 
2641 
2642  if (!q)
2643  {
2644  return;
2645  }
2646 
2647  rmp = vl_msg_api_alloc (sizeof (*rmp));
2648  rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
2649  rmp->context = mp->context;
2650  rmp->retval = 0;
2651 
2652  memset (total_pkts, 0, sizeof (total_pkts));
2653  memset (total_bytes, 0, sizeof (total_bytes));
2654 
2656 
2658  {
2659  which = cm - im->combined_sw_if_counters;
2660 
2661  for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
2662  {
2663  vlib_get_combined_counter (cm, i, &v);
2664  total_pkts[which] += v.packets;
2665  total_bytes[which] += v.bytes;
2666  }
2667  }
2669 
2670  rmp->total_pkts[VLIB_RX] = clib_host_to_net_u64 (total_pkts[VLIB_RX]);
2671  rmp->total_bytes[VLIB_RX] = clib_host_to_net_u64 (total_bytes[VLIB_RX]);
2672  rmp->total_pkts[VLIB_TX] = clib_host_to_net_u64 (total_pkts[VLIB_TX]);
2673  rmp->total_bytes[VLIB_TX] = clib_host_to_net_u64 (total_bytes[VLIB_TX]);
2674  rmp->vector_rate =
2675  clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
2676 
2677  vl_msg_api_send_shmem (q, (u8 *) & rmp);
2678 }
2679 
2680 int
2682 {
2684  stats_main_t *sm = &stats_main;
2685  uword *p;
2686 
2687  // FIXME
2688  /* p = hash_get (sm->stats_registration_hash, client_index); */
2689  /* if (p) */
2690  /* { */
2691  /* rp = pool_elt_at_index (sm->stats_registrations, p[0]); */
2692  /* pool_put (sm->stats_registrations, rp); */
2693  /* hash_unset (sm->stats_registration_hash, client_index); */
2694  /* } */
2695 
2696  return 0;
2697 }
2698 
/*
 * The counter messages below are hand-endianed and hand-printed by the
 * handlers in this file; suppress the auto-generated endian/print
 * functions for them.
 */
#define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
#define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
#define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
#define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
#define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
#define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
#define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
#define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
2711 
2712 static clib_error_t *
2714 {
2715  stats_main_t *sm = &stats_main;
2716  api_main_t *am = &api_main;
2717  void *vlib_worker_thread_bootstrap_fn (void *arg);
2718 
2719  sm->vlib_main = vm;
2720  sm->vnet_main = vnet_get_main ();
2722  sm->api_main = am;
2724  sm->data_structure_lock =
2727  memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
2728 
2729 #define _(N,n) \
2730  vl_msg_api_set_handlers(VL_API_##N, #n, \
2731  vl_api_##n##_t_handler, \
2732  vl_noop_handler, \
2733  vl_api_##n##_t_endian, \
2734  vl_api_##n##_t_print, \
2735  sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
2737 #undef _
2738 
2739  /* tell the msg infra not to free these messages... */
2740  am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
2741  am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
2742  am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
2743  am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
2744  am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
2745  am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
2746 
2747  /*
2748  * Set up the (msg_name, crc, message-id) table
2749  */
2751 
2754 #define stats_reg(n) \
2755  sm->stats_registrations[IDX_##n] = 0; \
2756  sm->stats_registration_hash[IDX_##n] = 0;
2757 #include <vpp/stats/stats.reg>
2758 #undef stats_reg
2759 
2760  return 0;
2761 }
2762 
2764 
2765 /* *INDENT-OFF* */
2766 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
2767  .name = "stats",
2768  .function = stats_thread_fn,
2769  .fixed_count = 1,
2770  .count = 1,
2771  .no_data_structure_clone = 1,
2772  .use_pthreads = 1,
2773 };
2774 /* *INDENT-ON* */
2775 
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
#define IP4_MFIB_COUNTER_BATCH_SIZE
Definition: stats.c:86
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:432
static void vl_api_vnet_get_summary_stats_t_handler(vl_api_vnet_get_summary_stats_t *mp)
Definition: stats.c:2628
Want Interface Simple Stats, register for detailed interface stats.
Definition: stats.api:45
static void stats_thread_fn(void *arg)
Definition: stats.c:2078
void * vl_msg_api_alloc_as_if_client(int nbytes)
static int set_client_for_stat(u32 reg, u32 item, vpe_client_registration_t *client)
Definition: stats.c:283
vpe_client_registration_t ** clients_tmp
Definition: stats.h:182
Want Stats, enable/disable ALL stats updates.
Definition: stats.api:29
vlib_combined_counter_main_t lbm_to_counters
Definition: load_balance.h:46
ip46_address_t fp_src_addr
Definition: mfib_types.h:47
#define hash_set(h, key, value)
Definition: hash.h:254
fib_node_index_t mfib_table_lookup(u32 fib_index, const mfib_prefix_t *prefix)
Perfom a longest prefix match in the non-forwarding table.
Definition: mfib_table.c:65
u32 vlib_simple_counter_n_counters(const vlib_simple_counter_main_t *cm)
The number of counters (not the number of per-thread counters)
Definition: counter.c:107
do_ip46_fibs_t do_ip46_fibs
Definition: stats.h:175
sll srl srl sll sra u16x4 i
Definition: vector_sse2.h:337
#define clib_min(x, y)
Definition: clib.h:340
vl_api_vlib_counter_t data[count]
Definition: interface.api:307
vl_api_vnet_combined_counter_t data[count]
Definition: interface.api:333
vpe_client_registration_t * clients
Definition: stats.h:117
#define hash_unset(h, key)
Definition: hash.h:260
a
Definition: bitmap.h:516
int unix_shared_memory_queue_is_full(unix_shared_memory_queue_t *q)
vl_api_vnet_simple_counter_t data[count]
Definition: interface.api:321
vl_api_ip6_mfib_counter_t c[count]
Definition: stats.api:276
static adj_walk_rc_t ip6_nbr_stats_cb(adj_index_t ai, void *arg)
Definition: stats.c:1289
hash_pair_t ** pvec
Definition: stats.h:92
vnet_main_t * vnet_get_main(void)
Definition: misc.c:47
mfib_table_t ** mfibs
Definition: stats.h:91
static void vl_api_want_interface_simple_stats_t_handler(vl_api_want_interface_simple_stats_t *mp)
Definition: stats.c:2435
vnet_interface_main_t interface_main
Definition: vnet.h:56
static void vl_api_vnet_interface_simple_counters_t_handler(vl_api_vnet_interface_simple_counters_t *mp)
Definition: stats.c:2136
u64 as_u64[2]
Definition: ip6_packet.h:51
unix_shared_memory_queue_t * vl_input_queue
Definition: api_common.h:68
#define NULL
Definition: clib.h:55
An entry in a FIB table.
Definition: mfib_entry.h:31
Request for a single block of summary stats.
Definition: stats.api:300
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:224
IP unicast adjacency.
Definition: adj.h:174
const dpo_id_t * fib_entry_contribute_ip_forwarding(fib_node_index_t fib_entry_index)
Definition: fib_entry.c:459
u8 * message_bounce
Don&#39;t automatically free message buffer vetor.
Definition: api_common.h:218
u32 enable_poller
Definition: stats.h:130
u32 sw_if_index
The SW IF index all these adjs belong to.
Definition: stats.c:1280
vlib_main_t * vlib_main
Definition: stats.h:185
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:518
Combined counter to hold both packets and byte differences.
Definition: counter.h:139
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:557
vl_api_ip4_nbr_counter_t c[count]
Definition: stats.api:245
VLIB_REGISTER_THREAD(stats_thread_reg, static)
void clib_longjmp(clib_longjmp_t *save, uword return_value)
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
static void ip46_fib_stats_delay(stats_main_t *sm, u32 sec, u32 nsec)
Definition: stats.c:1101
void handle_client_registration(vpe_client_registration_t *client, u32 stat, u32 item, int enable_disable)
Definition: stats.c:499
Combined stats counters structure.
Definition: interface.api:302
#define MIN(x, y)
Definition: stats.c:1314
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:225
u64 rx_bytes
byte counter
Definition: stats.h:73
uword clib_setjmp(clib_longjmp_t *save, uword return_value_not_taken)
static void vl_api_vnet_ip4_fib_counters_t_handler(vl_api_vnet_ip4_fib_counters_t *mp)
Definition: stats.c:2190
static clib_error_t * stats_init(vlib_main_t *vm)
Definition: stats.c:2713
format_function_t format_vnet_sw_if_index_name
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
uword value[0]
Definition: hash.h:164
vnet_link_t ia_link
link/ether-type 1 bytes
Definition: adj.h:195
static void do_ip6_nbr_counters(stats_main_t *sm)
Definition: stats.c:1376
static void vl_api_vnet_ip4_nbr_counters_t_handler(vl_api_vnet_ip4_nbr_counters_t *mp)
Definition: stats.c:2239
static f64 vlib_last_vector_length_per_node(vlib_main_t *vm)
Definition: main.h:315
#define IP6_MFIB_COUNTER_BATCH_SIZE
Definition: stats.c:87
volatile u32 release_hint
Definition: stats.h:34
static counter_t vlib_get_simple_counter(vlib_simple_counter_main_t *cm, u32 index)
Get the value of a simple counter Scrapes the entire set of per-thread counters.
Definition: counter.h:97
static ip_adjacency_t * adj_get(adj_index_t adj_index)
Get a pointer to an adjacency object from its index.
Definition: adj.h:365
Combined stats counters structure per interface.
Definition: interface.api:329
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
Definition: pool.h:438
static void vl_api_want_ip4_mfib_stats_t_handler(vl_api_want_ip4_mfib_stats_t *mp)
Definition: stats.c:2509
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:111
vpe_client_stats_registration_t ** regs_tmp
Definition: stats.h:181
fib_node_index_t mft_index
Index into FIB vector.
Definition: mfib_table.h:66
static void do_ip4_fib_counters(stats_main_t *sm)
Definition: stats.c:1428
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:672
static BVT(clib_bihash)
Definition: adj_nbr.c:26
enum adj_walk_rc_t_ adj_walk_rc_t
return codes from a adjacency walker callback function
int i32
Definition: types.h:81
#define IP6_FIB_COUNTER_BATCH_SIZE
Definition: stats.c:85
mfib_prefix_t mfe_prefix
The prefix of the route.
Definition: mfib_entry.h:40
unsigned long u64
Definition: types.h:89
struct vl_shmem_hdr_ * shmem_hdr
Binary API shared-memory segment header pointer.
Definition: api_common.h:261
volatile u32 lock
Definition: stats.h:33
struct mfib_table_t_ * mfibs
Vector of MFIBs.
Definition: ip4.h:106
A collection of simple counters.
Definition: counter.h:58
static adj_walk_rc_t ip4_nbr_stats_cb(adj_index_t ai, void *arg)
Definition: stats.c:1137
static void vl_api_want_ip6_mfib_stats_t_handler(vl_api_want_ip6_mfib_stats_t *mp)
Definition: stats.c:2581
void vlib_set_thread_name(char *name)
Definition: threads.c:267
void * vl_msg_api_alloc(int nbytes)
stats_main_t stats_main
Definition: stats.c:24
#define foreach_stats_msg
Definition: stats.c:47
static void vl_api_want_ip4_fib_stats_t_handler(vl_api_want_ip4_fib_stats_t *mp)
Definition: stats.c:2473
vl_shmem_hdr_t * shmem_hdr
static heap_elt_t * first(heap_header_t *h)
Definition: heap.c:59
The identity of a DPO is a combination of its type and its instance number/index of objects of that t...
Definition: dpo.h:166
struct ip4_nbr_stats_ctx_t_ ip4_nbr_stats_ctx_t
The context passed when collecting adjacency counters.
vl_api_ip4_nbr_counter_t * counters
A vector of ip4 nbr counters.
Definition: stats.c:1133
#define hash_get(h, key)
Definition: hash.h:248
Want Per Interface simple Stats, register for continuous stats.
Definition: stats.api:61
static void vl_api_want_per_interface_combined_stats_t_handler(vl_api_want_per_interface_combined_stats_t *mp)
Definition: stats.c:682
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:459
ip6_route_t * ip6routes
Definition: stats.h:88
u32 ** enable_all_client_reg(u32 **registrations)
Definition: stats.c:429
counter_t packets
packet counter
Definition: counter.h:141
static void vl_api_want_ip6_fib_stats_t_handler(vl_api_want_ip6_fib_stats_t *mp)
Definition: stats.c:2545
struct ip_adjacency_t_::@38::@39 nbr
IP_LOOKUP_NEXT_ARP/IP_LOOKUP_NEXT_REWRITE.
static void vl_api_want_ip4_nbr_stats_t_handler(vl_api_want_ip4_nbr_stats_t *mp)
Definition: stats.c:2618
#define v
Definition: acl.c:341
u32 vlib_combined_counter_n_counters(const vlib_combined_counter_main_t *cm)
The number of counters (not the number of per-thread counters)
Definition: counter.c:100
static void vl_api_want_ip6_nbr_stats_t_handler(vl_api_want_ip6_nbr_stats_t *mp)
Definition: stats.c:2623
void vl_msg_api_free(void *)
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:271
vlib_combined_counter_main_t repm_counters
Definition: replicate_dpo.h:35
#define PREDICT_FALSE(x)
Definition: clib.h:105
vlib_simple_counter_main_t * sw_if_counters
Definition: interface.h:671
load_balance_main_t load_balance_main
The one instance of load-balance main.
Definition: load_balance.c:56
static void ip4_nbr_ship(stats_main_t *sm, ip4_nbr_stats_ctx_t *ctx)
Definition: stats.c:1163
#define SIMPLE_COUNTER_BATCH_SIZE
Definition: stats.c:82
ip4_route_t * ip4routes
Definition: stats.h:87
void unix_shared_memory_queue_lock(unix_shared_memory_queue_t *q)
#define COMBINED_COUNTER_BATCH_SIZE
Definition: stats.c:83
vl_api_ip6_nbr_counter_t * counters
A vector of ip6 nbr counters.
Definition: stats.c:1285
vlib_combined_counter_main_t adjacency_counters
Adjacency packet counters.
Definition: adj.c:25
static void do_simple_interface_counters(stats_main_t *sm)
Definition: stats.c:445
static mfib_entry_t * mfib_entry_get(fib_node_index_t index)
Definition: mfib_entry.h:153
u32 index
Definition: stats.h:82
word fformat(FILE *f, char *fmt,...)
Definition: format.c:453
The context passed when collecting adjacency counters.
Definition: stats.c:1123
API main structure, used by both vpp and binary API clients.
Definition: api_common.h:198
int clear_client_for_stat(u32 reg, u32 item, u32 client_index)
Definition: stats.c:320
union ip_adjacency_t_::@38 sub_type
void clib_bihash_foreach_key_value_pair(clib_bihash *h, void *callback, void *arg)
Visit active (key,value) pairs in a bi-hash table.
The IPv4 FIB.
Definition: ip4_fib.h:39
u8 * format_vnet_interface_combined_counters(u8 *s, va_list *args)
Definition: stats.c:94
u32 sw_if_index
The SW IF index all these adjs belong to.
Definition: stats.c:1128
fib_node_index_t ft_index
Index into FIB vector.
Definition: fib_table.h:55
void mfib_table_walk(u32 fib_index, fib_protocol_t proto, mfib_table_walk_fn_t fn, void *ctx)
Walk all entries in a FIB table N.B: This is NOT safe to deletes.
Definition: mfib_table.c:599
static void add_routes_in_fib(BVT(clib_bihash_kv)*kvp, void *arg)
Definition: stats.c:1906
stats_main_t * sm
Definition: stats.c:1902
static vpe_client_registration_t * get_client_for_stat(u32 reg, u32 item, u32 client_index)
Definition: stats.c:259
api_main_t api_main
Definition: api_shared.c:35
svmdb_client_t * c
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:221
u32 ft_table_id
Table ID (hash key) for this FIB.
Definition: fib_table.h:50
static void vlib_get_combined_counter(const vlib_combined_counter_main_t *cm, u32 index, vlib_counter_t *result)
Get the value of a combined counter, never called in the speed path Scrapes the entire set of per-thr...
Definition: counter.h:250
vlib_main_t * vm
Definition: buffer.c:283
vec_header_t h
Definition: buffer.c:282
uword * results
Definition: stats.h:93
clib_longjmp_t jmp_buf
Definition: stats.h:172
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:336
#define IP4_FIB_COUNTER_BATCH_SIZE
Definition: stats.c:84
static void * clib_mem_set_heap(void *heap)
Definition: mem.h:226
Want Interface Combined Stats, register for continuous stats.
Definition: stats.api:81
#define clib_warning(format, args...)
Definition: error.h:59
This table stores the routes that are used to forward traffic.
Definition: ip6.h:125
#define clib_memcpy(a, b, c)
Definition: string.h:75
unix_shared_memory_queue_t * vl_api_client_index_to_input_queue(u32 index)
static void vnet_interface_counter_unlock(vnet_interface_main_t *im)
Definition: interface.h:697
u32 fib_node_index_t
A typedef of a node index.
Definition: fib_types.h:29
static void vl_api_vnet_ip6_fib_counters_t_handler(vl_api_vnet_ip6_fib_counters_t *mp)
Definition: stats.c:2289
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
Definition: pool.h:268
Want IP6 FIB Stats, register for continuous stats.
Definition: stats.api:128
api_main_t * api_main
Definition: stats.h:188
u32 adj_index_t
An index for adjacencies.
Definition: adj_types.h:30
#define ARRAY_LEN(x)
Definition: clib.h:59
vl_api_ip6_fib_counter_t c[count]
Definition: stats.api:260
Simple per interface stats counters structure.
Definition: interface.api:317
Aggregrate type for a prefix.
Definition: mfib_types.h:24
static void vnet_interface_counter_lock(vnet_interface_main_t *im)
Definition: interface.h:689
struct ip6_nbr_stats_ctx_t_ ip6_nbr_stats_ctx_t
The context passed when collecting adjacency counters.
void vl_msg_api_send_shmem(unix_shared_memory_queue_t *q, u8 *elem)
#define ASSERT(truth)
void stats_dslock_with_hint(int hint, int tag)
Definition: stats.c:224
u64 tx_packets
packet counter
Definition: stats.h:74
unsigned int u32
Definition: types.h:88
ip6_main_t ip6_main
Definition: ip6_forward.c:3009
static void ip6_nbr_ship(stats_main_t *sm, ip6_nbr_stats_ctx_t *ctx)
Definition: stats.c:1317
long ctx[MAX_CONNS]
Definition: main.c:122
#define STATS_RELEASE_DELAY_NS
Definition: stats.c:90
The context passed when collecting adjacency counters.
Definition: stats.c:1275
static void setup_message_id_table(api_main_t *am)
Definition: stats.c:73
uword ** stats_registration_hash
Definition: stats.h:165
IPv4 main type.
Definition: ip4.h:95
const dpo_id_t * mfib_entry_contribute_ip_forwarding(fib_node_index_t mfib_entry_index)
Definition: mfib_entry.c:1230
int stats_memclnt_delete_callback(u32 client_index)
Definition: stats.c:2681
static void do_combined_interface_counters(stats_main_t *sm)
Definition: stats.c:624
u64 size
Definition: vhost-user.h:76
vpe_client_stats_registration_t ** stats_registrations
Definition: stats.h:166
struct mfib_table_t_ * mfibs
Vector of MFIBs.
Definition: ip6.h:167
fib_table_t ** fibs
Definition: stats.h:90
size_t count
Definition: vapi.c:42
u64 counter_t
64bit counters
Definition: counter.h:54
u32 mft_table_id
Table ID (hash key) for this FIB.
Definition: mfib_table.h:61
Simple stats counters structure.
Definition: interface.api:285
Want IP6 NBR Stats, register for continuous stats.
Definition: stats.api:184
u32 stats_poll_interval_in_seconds
Definition: stats.h:129
ip6_fib_table_instance_t ip6_table[IP6_FIB_NUM_TABLES]
The two FIB tables; fwding and non-fwding.
Definition: ip6.h:156
u64 uword
Definition: types.h:112
vl_api_ip4_fib_counter_t c[count]
Definition: stats.api:204
counter_t bytes
byte counter
Definition: counter.h:142
Want IP4 FIB Stats, register for continuous stats.
Definition: stats.api:114
data_structure_lock_t * data_structure_lock
Definition: stats.h:169
Definition: defs.h:47
u32 ** init_client_reg(u32 **registrations)
Definition: stats.c:403
Per-neighbour (i.e.
Definition: stats.api:240
index_t dpoi_index
the index of objects of that type
Definition: dpo.h:182
Want IP6 multicast FIB Stats, register for continuous stats.
Definition: stats.api:156
void vl_msg_api_send_shmem_nolock(unix_shared_memory_queue_t *q, u8 *elem)
#define FIB_NODE_INDEX_INVALID
Definition: fib_types.h:30
struct ip4_fib_t_ * v4_fibs
Vector of MTries.
Definition: ip4.h:103
u8 * format_vnet_interface_simple_counters(u8 *s, va_list *args)
Definition: stats.c:141
void unix_shared_memory_queue_unlock(unix_shared_memory_queue_t *q)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vpe_client_registration_t * get_clients_for_stat(u32 reg, u32 item)
Definition: stats.c:364
vnet_interface_main_t * interface_main
Definition: stats.h:187
unsigned char u8
Definition: types.h:56
static void vl_api_want_stats_t_handler(vl_api_want_stats_t *mp)
Definition: stats.c:2387
#define hash_foreach_pair(p, v, body)
Iterate over hash pairs.
Definition: hash.h:372
static void clear_client_reg(u32 **registrations)
Definition: stats.c:390
static void do_ip4_mfib_counters(stats_main_t *sm)
Definition: stats.c:1621
vnet_sw_interface_t * sw_interfaces
Definition: interface.h:663
static int mfib_table_stats_walk_cb(fib_node_index_t fei, void *ctx)
Definition: stats.c:1606
#define clib_unix_warning(format, args...)
Definition: error.h:68
u32 address_length
Definition: stats.h:81
replicate_main_t replicate_main
The one instance of replicate main.
Definition: replicate_dpo.c:62
A collection of combined counters.
Definition: counter.h:180
Want Per Interface Combined Stats, register for continuous stats.
Definition: stats.api:97
uword * fib_entry_by_dst_address[33]
Definition: ip4_fib.h:48
#define clib_mem_unaligned(pointer, type)
Definition: types.h:155
vl_api_ip4_mfib_counter_t c[count]
Definition: stats.api:220
static void vl_api_vnet_ip6_nbr_counters_t_handler(vl_api_vnet_ip6_nbr_counters_t *mp)
Definition: stats.c:2338
static void * clib_mem_alloc_aligned(uword size, uword align)
Definition: mem.h:120
static void do_ip6_fib_counters(stats_main_t *sm)
Definition: stats.c:1927
static void do_simple_per_interface_counters(stats_main_t *sm)
Definition: stats.c:942
A protocol Independent IP multicast FIB table.
Definition: mfib_table.h:35
ip4_main_t ip4_main
Global ip4 main structure.
Definition: ip4_forward.c:1181
static vlib_thread_main_t * vlib_get_thread_main()
Definition: global_funcs.h:32
static void do_ip6_mfib_counters(stats_main_t *sm)
Definition: stats.c:1760
Reply for vnet_get_summary_stats request.
Definition: stats.api:313
ip6_route_t ** routep
Definition: stats.c:1901
struct fib_table_t_ * fibs
Vector of FIBs.
Definition: ip4.h:100
#define vec_foreach(var, vec)
Vector iterator.
Want IP4 NBR Stats, register for continuous stats.
Definition: stats.api:170
static void vl_api_want_interface_combined_stats_t_handler(vl_api_want_interface_combined_stats_t *mp)
Definition: stats.c:540
void * vlib_worker_thread_bootstrap_fn(void *arg)
Definition: threads.c:670
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:109
static void do_combined_per_interface_counters(stats_main_t *sm)
Definition: stats.c:745
static void vl_api_vnet_interface_combined_counters_t_handler(vl_api_vnet_interface_combined_counters_t *mp)
Definition: stats.c:578
vhost_vring_addr_t addr
Definition: vhost-user.h:83
void stats_dsunlock(int hint, int tag)
Definition: stats.c:252
u64 tx_bytes
byte counter
Definition: stats.h:75
u16 fp_len
The mask length.
Definition: mfib_types.h:28
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:67
vnet_main_t * vnet_main
Definition: stats.h:186
struct fib_table_t_ * fibs
Definition: ip6.h:161
static void vl_api_want_per_interface_simple_stats_t_handler(vl_api_want_per_interface_simple_stats_t *mp)
Definition: stats.c:877
static void dsunlock(stats_main_t *sm)
Definition: stats.c:231
static void do_ip4_nbr_counters(stats_main_t *sm)
Definition: stats.c:1221
static void dslock(stats_main_t *sm, int release_hint, int tag)
Definition: stats.c:198
uword key
Definition: hash.h:161
vl_api_ip6_nbr_counter_t c[count]
Definition: stats.api:292
Definition: defs.h:46
Want IP4 muilticast FIB Stats, register for continuous stats.
Definition: stats.api:142
ip46_address_t fp_grp_addr
The address type is not deriveable from the fp_addr member.
Definition: mfib_types.h:46
mfib_prefix_t * mroutes
Definition: stats.h:89
A protocol Independent FIB table.
Definition: fib_table.h:35
struct _unix_shared_memory_queue unix_shared_memory_queue_t
ip6_address_t address
Definition: stats.h:80
u64 rx_packets
packet counter
Definition: stats.h:72
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:128