FD.io VPP  v20.01-48-g3e0dafb74
Vector Packet Processing
ioam_export.h
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_ioam_export_h__
#define __included_ioam_export_h__

#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip6_hop_by_hop.h>
#include <vnet/udp/udp.h>
#include <vnet/ipfix-export/ipfix_packet.h>

#include <vppinfra/pool.h>
#include <vppinfra/hash.h>
#include <vppinfra/error.h>
#include <vppinfra/elog.h>
#include <vppinfra/lock.h>

#include <vlib/threads.h>

typedef struct ioam_export_buffer
{
  /** Required for pool_get_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* Allocated buffer */
  u32 buffer_index;
  u64 touched_at;
  u8 records_in_this_buffer;
} ioam_export_buffer_t;

typedef struct
{
  /* API message ID base */
  u16 msg_id_base;
  u16 set_id;

  /* TODO: to support multiple collectors all this has to be grouped and create a vector here */
  u8 *record_header;
  u32 sequence_number;
  u32 domain_id;

  /* ipfix collector, our ip address */
  ip4_address_t ipfix_collector;
  ip4_address_t src_address;

  /* Pool of ioam_export_buffer_t */
  ioam_export_buffer_t *buffer_pool;
  /* Vector of per thread ioam_export_buffer_t to buffer pool index */
  u32 *buffer_per_thread;
  /* Lock per thread to swap buffers between worker and timer process */
  clib_spinlock_t *lockp;

  /* time scale transform */
  u32 unix_time_0;
  f64 vlib_time_0;

  /* convenience */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
  ethernet_main_t *ethernet_main;
  u32 next_node_index;

  uword my_hbh_slot;
  u32 export_process_node_index;
} ioam_export_main_t;

#define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES)
/*
 * Number of records in a buffer
 * ~(MTU (1500) - [ip hdr(40) + UDP(8) + ipfix (24)]) / DEFAULT_EXPORT_SIZE
 */
#define DEFAULT_EXPORT_RECORDS 7
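
/*
 * Worked example (editorial illustration, assuming the usual
 * CLIB_CACHE_LINE_BYTES of 64): DEFAULT_EXPORT_SIZE = 3 * 64 = 192 bytes per
 * record, and (1500 - (40 + 8 + 24)) / 192 = 1428 / 192 ~= 7.4, so 7 whole
 * records fit in one export packet after the IP/UDP/IPFIX headers.
 */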

inline static void
ioam_export_set_next_node (ioam_export_main_t * em, u8 * next_node_name)
{
  vlib_node_t *next_node;

  next_node = vlib_get_node_by_name (em->vlib_main, next_node_name);
  em->next_node_index = next_node->index;
}

inline static void
ioam_export_reset_next_node (ioam_export_main_t * em)
{
  vlib_node_t *next_node;

  next_node = vlib_get_node_by_name (em->vlib_main, (u8 *) "ip4-lookup");
  em->next_node_index = next_node->index;
}

always_inline ioam_export_buffer_t *
ioam_export_get_my_buffer (ioam_export_main_t * em, u32 thread_id)
{

  if (vec_len (em->buffer_per_thread) > thread_id)
    return (pool_elt_at_index
	    (em->buffer_pool, em->buffer_per_thread[thread_id]));
  return (0);
}

inline static int
ioam_export_buffer_add_header (ioam_export_main_t * em, vlib_buffer_t * b0)
{
  /* Copy the pre-built IP/UDP/IPFIX rewrite to the front of the buffer */
  clib_memcpy_fast (b0->data, em->record_header, vec_len (em->record_header));
  b0->current_data = 0;
  b0->current_length = vec_len (em->record_header);
  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return (1);
}

inline static int
ioam_export_init_buffer (ioam_export_main_t * em, vlib_main_t * vm,
			 ioam_export_buffer_t * eb)
{
  vlib_buffer_t *b = 0;

  if (!eb)
    return (-1);
  /* TODO: Perhaps buffer init from template here */
  if (vlib_buffer_alloc (vm, &(eb->buffer_index), 1) != 1)
    return (-2);
  eb->records_in_this_buffer = 0;
  eb->touched_at = vlib_time_now (vm);
  b = vlib_get_buffer (vm, eb->buffer_index);
  (void) ioam_export_buffer_add_header (em, b);
  vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
  vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
  return (1);
}

inline static void
ioam_export_thread_buffer_free (ioam_export_main_t * em)
{
  vlib_main_t *vm = em->vlib_main;
  ioam_export_buffer_t *eb = 0;
  int i;
  for (i = 0; i < vec_len (em->buffer_per_thread); i++)
    {
      eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
      if (eb)
	vlib_buffer_free (vm, &(eb->buffer_index), 1);
    }
  for (i = 0; i < vec_len (em->lockp); i++)
    clib_mem_free ((void *) em->lockp[i]);
  vec_free (em->buffer_per_thread);
  pool_free (em->buffer_pool);
  vec_free (em->lockp);
  em->buffer_per_thread = 0;
  em->buffer_pool = 0;
  em->lockp = 0;
}

inline static int
ioam_export_thread_buffer_init (ioam_export_main_t * em, vlib_main_t * vm)
{
  int no_of_threads = vec_len (vlib_worker_threads);
  int i;
  ioam_export_buffer_t *eb = 0;

  pool_alloc_aligned (em->buffer_pool,
		      no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (em->buffer_per_thread,
			no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (em->lockp, no_of_threads - 1, CLIB_CACHE_LINE_BYTES);

  if (!em->buffer_per_thread || !em->buffer_pool || !em->lockp)
    {
      return (-1);
    }
  for (i = 0; i < no_of_threads; i++)
    {
      eb = 0;
      pool_get_aligned (em->buffer_pool, eb, CLIB_CACHE_LINE_BYTES);
      clib_memset (eb, 0, sizeof (*eb));
      em->buffer_per_thread[i] = eb - em->buffer_pool;
      if (ioam_export_init_buffer (em, vm, eb) != 1)
	{
	  ioam_export_thread_buffer_free (em);
	  return (-2);
	}
      clib_spinlock_init (&em->lockp[i]);
    }
  return (1);
}
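
/*
 * Illustrative sketch (not part of the original header): a plugin would
 * typically create the per-thread export buffers when export is enabled and
 * release them when it is disabled.  The wrapper names below are
 * hypothetical.
 */
#if 0
static int
my_export_enable (ioam_export_main_t * em, vlib_main_t * vm)
{
  /* One export buffer plus one spinlock per thread (main + workers) */
  if (ioam_export_thread_buffer_init (em, vm) != 1)
    {
      clib_warning ("iOAM export buffer init failed");
      return -1;
    }
  return 0;
}

static void
my_export_disable (ioam_export_main_t * em)
{
  /* Frees the per-thread buffers, the buffer pool and the spinlocks */
  ioam_export_thread_buffer_free (em);
}
#endif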

#define IPFIX_IOAM_EXPORT_ID 272
#define IPFIX_VXLAN_IOAM_EXPORT_ID 273

/* Used to build the rewrite */
/* data set packet */
typedef struct
{
  ipfix_message_header_t h;
  ipfix_set_header_t s;
} ipfix_data_packet_t;

typedef struct
{
  ip4_header_t ip4;
  udp_header_t udp;
  ipfix_data_packet_t ipfix;
} ip4_ipfix_data_packet_t;


inline static void
ioam_export_header_cleanup (ioam_export_main_t * em,
			    ip4_address_t * collector_address,
			    ip4_address_t * src_address)
{
  vec_free (em->record_header);
  em->record_header = 0;
}

inline static int
ioam_export_header_create (ioam_export_main_t * em,
			   ip4_address_t * collector_address,
			   ip4_address_t * src_address)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  u8 *rewrite = 0;
  ip4_ipfix_data_packet_t *tp;


  /* allocate rewrite space */
  vec_validate_aligned (rewrite,
			sizeof (ip4_ipfix_data_packet_t) - 1,
			CLIB_CACHE_LINE_BYTES);

  tp = (ip4_ipfix_data_packet_t *) rewrite;
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  ip->ip_version_and_header_length = 0x45;
  ip->ttl = 254;
  ip->protocol = IP_PROTOCOL_UDP;
  ip->src_address.as_u32 = src_address->as_u32;
  ip->dst_address.as_u32 = collector_address->as_u32;
  udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
  /* FIXUP: UDP length */
  udp->length = clib_host_to_net_u16 (vec_len (rewrite) +
				      (DEFAULT_EXPORT_RECORDS *
				       DEFAULT_EXPORT_SIZE) - sizeof (*ip));

  /* FIXUP: message header export_time */
  /* FIXUP: message header sequence_number */
  h->domain_id = clib_host_to_net_u32 (em->domain_id);

  /* FIXUP: Setid length in octets if records exported are not default */
  s->set_id_length = ipfix_set_id_length (em->set_id,
					  (sizeof (*s) +
					   (DEFAULT_EXPORT_RECORDS *
					    DEFAULT_EXPORT_SIZE)));

  /* FIXUP: h version and length length in octets if records exported are not default */
  h->version_length = version_length (sizeof (*h) +
				      (sizeof (*s) +
				       (DEFAULT_EXPORT_RECORDS *
					DEFAULT_EXPORT_SIZE)));

  /* FIXUP: ip length if records exported are not default */
  /* FIXUP: ip checksum if records exported are not default */
  ip->length = clib_host_to_net_u16 (vec_len (rewrite) +
				     (DEFAULT_EXPORT_RECORDS *
				      DEFAULT_EXPORT_SIZE));
  ip->checksum = ip4_header_checksum (ip);
  _vec_len (rewrite) = sizeof (ip4_ipfix_data_packet_t);
  em->record_header = rewrite;
  return (1);
}
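
/*
 * Illustrative sketch (not part of the original header): building the
 * IPv4/UDP/IPFIX rewrite once the collector and source addresses are known.
 * "collector" and "src" are hypothetical ip4_address_t variables filled in
 * from plugin configuration.
 */
#if 0
  ip4_address_t collector, src;
  /* ... collector.as_u32 and src.as_u32 set from configuration ... */
  ioam_export_header_cleanup (em, &collector, &src);	/* drop any previous rewrite */
  if (ioam_export_header_create (em, &collector, &src) != 1)
    return -1;			/* could not build the export rewrite */
#endif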

inline static int
ioam_export_send_buffer (ioam_export_main_t * em, vlib_main_t * vm,
			 ioam_export_buffer_t * eb)
{
  ip4_header_t *ip;
  udp_header_t *udp;
  ipfix_message_header_t *h;
  ipfix_set_header_t *s;
  ip4_ipfix_data_packet_t *tp;
  vlib_buffer_t *b0;
  u16 new_l0, old_l0;
  ip_csum_t sum0;
  vlib_frame_t *nf = 0;
  u32 *to_next;

  b0 = vlib_get_buffer (vm, eb->buffer_index);
  tp = vlib_buffer_get_current (b0);
  ip = (ip4_header_t *) & tp->ip4;
  udp = (udp_header_t *) (ip + 1);
  h = (ipfix_message_header_t *) (udp + 1);
  s = (ipfix_set_header_t *) (h + 1);

  /* FIXUP: message header export_time */
  h->export_time = clib_host_to_net_u32 ((u32)
					 (((f64) em->unix_time_0) +
					  (vlib_time_now (em->vlib_main) -
					   em->vlib_time_0)));

  /* FIXUP: message header sequence_number */
  h->sequence_number = clib_host_to_net_u32 (em->sequence_number++);

  /* FIXUP: lengths if different from default */
  if (PREDICT_FALSE (eb->records_in_this_buffer != DEFAULT_EXPORT_RECORDS))
    {
      s->set_id_length = ipfix_set_id_length (em->set_id /* set_id */ ,
					      b0->current_length -
					      (sizeof (*ip) + sizeof (*udp) +
					       sizeof (*h)));
      h->version_length =
	version_length (b0->current_length - (sizeof (*ip) + sizeof (*udp)));
      sum0 = ip->checksum;
      old_l0 = ip->length;
      new_l0 = clib_host_to_net_u16 ((u16) b0->current_length);
      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
			     length /* changed member */ );
      ip->checksum = ip_csum_fold (sum0);
      ip->length = new_l0;
      udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
    }

  /* Enqueue pkts to ip4-lookup */
  nf = vlib_get_frame_to_node (vm, em->next_node_index);
  nf->n_vectors = 0;
  to_next = vlib_frame_vector_args (nf);
  nf->n_vectors = 1;
  to_next[0] = eb->buffer_index;
  vlib_put_frame_to_node (vm, em->next_node_index, nf);
  return (1);
}

#define EXPORT_TIMEOUT (20.0)
#define THREAD_PERIOD (30.0)
inline static uword
ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
			    vlib_node_runtime_t * rt, vlib_frame_t * f,
			    u32 index)
{
  f64 now;
  f64 timeout = 30.0;
  uword event_type;
  uword *event_data = 0;
  int i;
  ioam_export_buffer_t *eb = 0, *new_eb = 0;
  u32 *vec_buffer_indices = 0;
  u32 *vec_buffer_to_be_sent = 0;
  u32 *thread_index = 0;
  u32 new_pool_index = 0;

  em->export_process_node_index = index;
  /* Wait for Godot... */
  vlib_process_wait_for_event_or_clock (vm, 1e9);
  event_type = vlib_process_get_events (vm, &event_data);
  if (event_type != 1)
    clib_warning ("bogus kickoff event received, %d", event_type);
  vec_reset_length (event_data);

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      event_type = vlib_process_get_events (vm, &event_data);
      switch (event_type)
	{
	case 2:		/* Stop and Wait for kickoff again */
	  timeout = 1e9;
	  break;
	case 1:		/* kickoff : Check for unsent buffers */
	  timeout = THREAD_PERIOD;
	  break;
	case ~0:		/* timeout */
	  break;
	}
      vec_reset_length (event_data);
      now = vlib_time_now (vm);
      /*
       * Create buffers for threads that are not active enough
       * to send out the export records
       */
      for (i = 0; i < vec_len (em->buffer_per_thread); i++)
	{
	  /* If the worker thread is processing export records ignore further checks */
	  if (CLIB_SPINLOCK_IS_LOCKED (&em->lockp[i]))
	    continue;
	  eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
	  if (eb->records_in_this_buffer > 0
	      && now > (eb->touched_at + EXPORT_TIMEOUT))
	    {
	      pool_get_aligned (em->buffer_pool, new_eb,
				CLIB_CACHE_LINE_BYTES);
	      clib_memset (new_eb, 0, sizeof (*new_eb));
	      if (ioam_export_init_buffer (em, vm, new_eb) == 1)
		{
		  new_pool_index = new_eb - em->buffer_pool;
		  vec_add (vec_buffer_indices, &new_pool_index, 1);
		  vec_add (vec_buffer_to_be_sent, &em->buffer_per_thread[i],
			   1);
		  vec_add (thread_index, &i, 1);
		}
	      else
		{
		  pool_put (em->buffer_pool, new_eb);
		  /*Give up */
		  goto CLEANUP;
		}
	    }
	}
      if (vec_len (thread_index) != 0)
	{
	  /*
	   * Now swap the buffers out
	   */
	  for (i = 0; i < vec_len (thread_index); i++)
	    {
	      clib_spinlock_lock (&em->lockp[thread_index[i]]);
	      em->buffer_per_thread[thread_index[i]] =
		vec_pop (vec_buffer_indices);
	      clib_spinlock_unlock (&em->lockp[thread_index[i]]);
	    }

	  /* Send the buffers */
	  for (i = 0; i < vec_len (vec_buffer_to_be_sent); i++)
	    {
	      eb =
		pool_elt_at_index (em->buffer_pool, vec_buffer_to_be_sent[i]);
	      ioam_export_send_buffer (em, vm, eb);
	      pool_put (em->buffer_pool, eb);
	    }
	}

    CLEANUP:
      /* Free any leftover/unused buffers and everything that was allocated */
      for (i = 0; i < vec_len (vec_buffer_indices); i++)
	{
	  new_eb = pool_elt_at_index (em->buffer_pool, vec_buffer_indices[i]);
	  vlib_buffer_free (vm, &new_eb->buffer_index, 1);
	  pool_put (em->buffer_pool, new_eb);
	}
      vec_free (vec_buffer_indices);
      vec_free (vec_buffer_to_be_sent);
      vec_free (thread_index);
    }
  return 0;			/* not so much */
}
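
/*
 * Illustrative sketch (not part of the original header): a plugin wraps this
 * common routine in its own VLIB process node.  The names "ioam_export_main"
 * and "ioam_export_process_node" below are hypothetical stand-ins for the
 * plugin's globals.
 */
#if 0
static uword
ioam_export_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
		     vlib_frame_t * f)
{
  return ioam_export_process_common (&ioam_export_main, vm, rt, f,
				     ioam_export_process_node.index);
}
#endif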

#define ioam_export_node_common(EM, VM, N, F, HTYPE, L, V, NEXT, FIXUP_FUNC) \
do { \
  u32 n_left_from, *from, *to_next; \
  export_next_t next_index; \
  u32 pkts_recorded = 0; \
  ioam_export_buffer_t *my_buf = 0; \
  vlib_buffer_t *eb0 = 0; \
  u32 ebi0 = 0; \
  from = vlib_frame_vector_args (F); \
  n_left_from = (F)->n_vectors; \
  next_index = (N)->cached_next_index; \
  clib_spinlock_lock (&(EM)->lockp[(VM)->thread_index]); \
  my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index); \
  my_buf->touched_at = vlib_time_now (VM); \
  while (n_left_from > 0) \
    { \
      u32 n_left_to_next; \
      vlib_get_next_frame (VM, N, next_index, to_next, n_left_to_next); \
      while (n_left_from >= 4 && n_left_to_next >= 2) \
	{ \
	  u32 next0 = NEXT; \
	  u32 next1 = NEXT; \
	  u32 bi0, bi1; \
	  HTYPE *ip0, *ip1; \
	  vlib_buffer_t *p0, *p1; \
	  u32 ip_len0, ip_len1; \
	  { \
	    vlib_buffer_t *p2, *p3; \
	    p2 = vlib_get_buffer (VM, from[2]); \
	    p3 = vlib_get_buffer (VM, from[3]); \
	    vlib_prefetch_buffer_header (p2, LOAD); \
	    vlib_prefetch_buffer_header (p3, LOAD); \
	    CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \
	    CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD); \
	  } \
	  to_next[0] = bi0 = from[0]; \
	  to_next[1] = bi1 = from[1]; \
	  from += 2; \
	  to_next += 2; \
	  n_left_from -= 2; \
	  n_left_to_next -= 2; \
	  p0 = vlib_get_buffer (VM, bi0); \
	  p1 = vlib_get_buffer (VM, bi1); \
	  ip0 = vlib_buffer_get_current (p0); \
	  ip1 = vlib_buffer_get_current (p1); \
	  ip_len0 = \
	    clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE); \
	  ip_len1 = \
	    clib_net_to_host_u16 (ip1->L) + sizeof (HTYPE); \
	  ebi0 = my_buf->buffer_index; \
	  eb0 = vlib_get_buffer (VM, ebi0); \
	  if (PREDICT_FALSE (eb0 == 0)) \
	    goto NO_BUFFER1; \
	  ip_len0 = \
	    ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0; \
	  ip_len1 = \
	    ip_len1 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len1; \
	  copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \
	  FIXUP_FUNC(eb0, p0); \
	  eb0->current_length += DEFAULT_EXPORT_SIZE; \
	  my_buf->records_in_this_buffer++; \
	  if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \
	    { \
	      ioam_export_send_buffer (EM, VM, my_buf); \
	      ioam_export_init_buffer (EM, VM, my_buf); \
	    } \
	  ebi0 = my_buf->buffer_index; \
	  eb0 = vlib_get_buffer (VM, ebi0); \
	  if (PREDICT_FALSE (eb0 == 0)) \
	    goto NO_BUFFER1; \
	  copy3cachelines (eb0->data + eb0->current_length, ip1, ip_len1); \
	  FIXUP_FUNC(eb0, p1); \
	  eb0->current_length += DEFAULT_EXPORT_SIZE; \
	  my_buf->records_in_this_buffer++; \
	  if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \
	    { \
	      ioam_export_send_buffer (EM, VM, my_buf); \
	      ioam_export_init_buffer (EM, VM, my_buf); \
	    } \
	  pkts_recorded += 2; \
	  if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE))) \
	    { \
	      if (p0->flags & VLIB_BUFFER_IS_TRACED) \
		{ \
		  export_trace_t *t = \
		    vlib_add_trace (VM, N, p0, sizeof (*t)); \
		  t->flow_label = \
		    clib_net_to_host_u32 (ip0->V); \
		  t->next_index = next0; \
		} \
	      if (p1->flags & VLIB_BUFFER_IS_TRACED) \
		{ \
		  export_trace_t *t = \
		    vlib_add_trace (VM, N, p1, sizeof (*t)); \
		  t->flow_label = \
		    clib_net_to_host_u32 (ip1->V); \
		  t->next_index = next1; \
		} \
	    } \
	NO_BUFFER1: \
	  vlib_validate_buffer_enqueue_x2 (VM, N, next_index, \
					   to_next, n_left_to_next, \
					   bi0, bi1, next0, next1); \
	} \
      while (n_left_from > 0 && n_left_to_next > 0) \
	{ \
	  u32 bi0; \
	  vlib_buffer_t *p0; \
	  u32 next0 = NEXT; \
	  HTYPE *ip0; \
	  u32 ip_len0; \
	  bi0 = from[0]; \
	  to_next[0] = bi0; \
	  from += 1; \
	  to_next += 1; \
	  n_left_from -= 1; \
	  n_left_to_next -= 1; \
	  p0 = vlib_get_buffer (VM, bi0); \
	  ip0 = vlib_buffer_get_current (p0); \
	  ip_len0 = \
	    clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE); \
	  ebi0 = my_buf->buffer_index; \
	  eb0 = vlib_get_buffer (VM, ebi0); \
	  if (PREDICT_FALSE (eb0 == 0)) \
	    goto NO_BUFFER; \
	  ip_len0 = \
	    ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0; \
	  copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0); \
	  FIXUP_FUNC(eb0, p0); \
	  eb0->current_length += DEFAULT_EXPORT_SIZE; \
	  my_buf->records_in_this_buffer++; \
	  if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS) \
	    { \
	      ioam_export_send_buffer (EM, VM, my_buf); \
	      ioam_export_init_buffer (EM, VM, my_buf); \
	    } \
	  if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE) \
			     && (p0->flags & VLIB_BUFFER_IS_TRACED))) \
	    { \
	      export_trace_t *t = vlib_add_trace (VM, (N), p0, sizeof (*t)); \
	      t->flow_label = \
		clib_net_to_host_u32 (ip0->V); \
	      t->next_index = next0; \
	    } \
	  pkts_recorded += 1; \
	NO_BUFFER: \
	  vlib_validate_buffer_enqueue_x1 (VM, N, next_index, \
					   to_next, n_left_to_next, \
					   bi0, next0); \
	} \
      vlib_put_next_frame (VM, N, next_index, n_left_to_next); \
    } \
  vlib_node_increment_counter (VM, export_node.index, \
			       EXPORT_ERROR_RECORDED, pkts_recorded); \
  clib_spinlock_unlock (&(EM)->lockp[(VM)->thread_index]); \
} while(0)
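
/*
 * Illustrative sketch (not part of the original header): how a node function
 * might invoke ioam_export_node_common for IPv6 packets.  export_next_t,
 * export_trace_t, export_node, EXPORT_ERROR_RECORDED, copy3cachelines and the
 * fixup macro are assumed to be supplied by the .c file that includes this
 * header; the names used here are placeholders.
 */
#if 0
#define ip6_export_fixup_func(eb, p)	/* nothing to patch for plain ip6 */

static uword
ip6_export_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame)
{
  ioam_export_main_t *em = &ioam_export_main;
  /* Record up to DEFAULT_EXPORT_SIZE bytes of each ip6 packet and forward it */
  ioam_export_node_common (em, vm, node, frame, ip6_header_t, payload_length,
			   ip_version_traffic_class_and_flow_label,
			   EXPORT_NEXT_POP_HBYH, ip6_export_fixup_func);
  return frame->n_vectors;
}
#endif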

#endif /* __included_ioam_export_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */