FD.io VPP  v20.09-rc2-28-g3c5414029
Vector Packet Processing
svm_fifo.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3  * Copyright (c) 2019 Arm Limited
4  * Copyright (c) 2010-2017 Intel Corporation and/or its affiliates.
5  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
6  * Inspired from DPDK rte_ring.h (SPSC only) (derived from freebsd bufring.h).
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at:
10  *
11  * http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  */
19 
20 #include <svm/svm_fifo.h>
21 #include <svm/fifo_segment.h>
22 #include <vppinfra/cpu.h>
23 
25  svm_fifo_chunk_t * c, u32 tail_idx, const u8 * src, u32 len,
27 {
28  u32 n_chunk;
29 
30  ASSERT (f_pos_geq (tail_idx, c->start_byte)
31  && f_pos_lt (tail_idx, c->start_byte + c->length));
32 
33  tail_idx -= c->start_byte;
34  n_chunk = c->length - tail_idx;
35  if (n_chunk <= len)
36  {
37  u32 to_copy = len;
38  clib_memcpy_fast (&c->data[tail_idx], src, n_chunk);
39  c = c->next;
40  while ((to_copy -= n_chunk))
41  {
42  n_chunk = clib_min (c->length, to_copy);
43  clib_memcpy_fast (&c->data[0], src + (len - to_copy), n_chunk);
44  c = c->length <= to_copy ? c->next : c;
45  }
46  if (*last)
47  *last = c;
48  }
49  else
50  {
51  clib_memcpy_fast (&c->data[tail_idx], src, len);
52  }
53 }
54 
56  svm_fifo_chunk_t * c, u32 head_idx, u8 * dst, u32 len,
58 {
59  u32 n_chunk;
60 
61  ASSERT (f_pos_geq (head_idx, c->start_byte)
62  && f_pos_lt (head_idx, c->start_byte + c->length));
63 
64  head_idx -= c->start_byte;
65  n_chunk = c->length - head_idx;
66  if (n_chunk <= len)
67  {
68  u32 to_copy = len;
69  clib_memcpy_fast (dst, &c->data[head_idx], n_chunk);
70  c = c->next;
71  while ((to_copy -= n_chunk))
72  {
73  n_chunk = clib_min (c->length, to_copy);
74  clib_memcpy_fast (dst + (len - to_copy), &c->data[0], n_chunk);
75  c = c->length <= to_copy ? c->next : c;
76  }
77  if (*last)
78  *last = c;
79  }
80  else
81  {
82  clib_memcpy_fast (dst, &c->data[head_idx], len);
83  }
84 }
85 
86 #ifndef CLIB_MARCH_VARIANT
87 
88 static inline void
90  const u8 * src, u32 len, svm_fifo_chunk_t ** last)
91 {
93  last);
94 }
95 
96 static inline void
99 {
101  last);
102 }
103 
104 static inline u32
106 {
107  return (s->start + s->length);
108 }
109 
110 void
112 {
113  pool_free (f->ooo_segments);
114 }
115 
116 static inline ooo_segment_t *
118 {
120  return 0;
121  return pool_elt_at_index (f->ooo_segments, s->prev);
122 }
123 
124 static inline ooo_segment_t *
126 {
128  return 0;
129  return pool_elt_at_index (f->ooo_segments, s->next);
130 }
131 
132 static inline ooo_segment_t *
133 ooo_segment_alloc (svm_fifo_t * f, u32 start, u32 length)
134 {
135  ooo_segment_t *s;
136 
137  pool_get (f->ooo_segments, s);
138 
139  s->start = start;
140  s->length = length;
142 
143  return s;
144 }
145 
146 static inline void
148 {
149  ooo_segment_t *cur, *prev = 0, *next = 0;
150  cur = pool_elt_at_index (f->ooo_segments, index);
151 
152  if (cur->next != OOO_SEGMENT_INVALID_INDEX)
153  {
154  next = pool_elt_at_index (f->ooo_segments, cur->next);
155  next->prev = cur->prev;
156  }
157 
158  if (cur->prev != OOO_SEGMENT_INVALID_INDEX)
159  {
160  prev = pool_elt_at_index (f->ooo_segments, cur->prev);
161  prev->next = cur->next;
162  }
163  else
164  {
165  f->ooos_list_head = cur->next;
166  }
167 
168  pool_put (f->ooo_segments, cur);
169 }
170 
171 /**
172  * Add segment to fifo's out-of-order segment list. Takes care of merging
173  * adjacent segments and removing overlapping ones.
174  */
175 static void
176 ooo_segment_add (svm_fifo_t * f, u32 offset, u32 head, u32 tail, u32 length)
177 {
178  ooo_segment_t *s, *new_s, *prev, *next, *it;
179  u32 new_index, s_end_pos, s_index;
180  u32 offset_pos, offset_end_pos;
181 
182  ASSERT (offset + length <= f_free_count (f, head, tail));
183 
184  offset_pos = tail + offset;
185  offset_end_pos = tail + offset + length;
186 
187  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
188 
189  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
190  {
191  s = ooo_segment_alloc (f, offset_pos, length);
192  f->ooos_list_head = s - f->ooo_segments;
193  f->ooos_newest = f->ooos_list_head;
194  return;
195  }
196 
197  /* Find first segment that starts after new segment */
198  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
199  while (s->next != OOO_SEGMENT_INVALID_INDEX
200  && f_pos_lt (s->start, offset_pos))
201  s = pool_elt_at_index (f->ooo_segments, s->next);
202 
203  /* If we have a previous and we overlap it, use it as starting point */
204  prev = ooo_segment_prev (f, s);
205  if (prev && f_pos_leq (offset_pos, ooo_segment_end_pos (prev)))
206  {
207  s = prev;
208  s_end_pos = ooo_segment_end_pos (s);
209 
210  /* Since we have previous, offset start position cannot be smaller
211  * than prev->start. Check tail */
212  ASSERT (f_pos_lt (s->start, offset_pos));
213  goto check_tail;
214  }
215 
216  s_index = s - f->ooo_segments;
217  s_end_pos = ooo_segment_end_pos (s);
218 
219  /* No overlap, add before current segment */
220  if (f_pos_lt (offset_end_pos, s->start))
221  {
222  new_s = ooo_segment_alloc (f, offset_pos, length);
223  new_index = new_s - f->ooo_segments;
224 
225  /* Pool might've moved, get segment again */
226  s = pool_elt_at_index (f->ooo_segments, s_index);
228  {
229  new_s->prev = s->prev;
230  prev = pool_elt_at_index (f->ooo_segments, new_s->prev);
231  prev->next = new_index;
232  }
233  else
234  {
235  /* New head */
236  f->ooos_list_head = new_index;
237  }
238 
239  new_s->next = s_index;
240  s->prev = new_index;
241  f->ooos_newest = new_index;
242  return;
243  }
244  /* No overlap, add after current segment */
245  else if (f_pos_gt (offset_pos, s_end_pos))
246  {
247  new_s = ooo_segment_alloc (f, offset_pos, length);
248  new_index = new_s - f->ooo_segments;
249 
250  /* Pool might've moved, get segment again */
251  s = pool_elt_at_index (f->ooo_segments, s_index);
252 
253  /* Needs to be last */
255 
256  new_s->prev = s_index;
257  s->next = new_index;
258  f->ooos_newest = new_index;
259 
260  return;
261  }
262 
263  /*
264  * Merge needed
265  */
266 
267  /* Merge at head */
268  if (f_pos_lt (offset_pos, s->start))
269  {
270  s->start = offset_pos;
271  s->length = s_end_pos - s->start;
272  f->ooos_newest = s - f->ooo_segments;
273  }
274 
275 check_tail:
276 
277  /* Overlapping tail */
278  if (f_pos_gt (offset_end_pos, s_end_pos))
279  {
280  s->length = offset_end_pos - s->start;
281 
282  /* Remove the completely overlapped segments in the tail */
283  it = ooo_segment_next (f, s);
284  while (it && f_pos_leq (ooo_segment_end_pos (it), offset_end_pos))
285  {
286  next = ooo_segment_next (f, it);
287  ooo_segment_free (f, it - f->ooo_segments);
288  it = next;
289  }
290 
291  /* If partial overlap with last, merge */
292  if (it && f_pos_leq (it->start, offset_end_pos))
293  {
294  s->length = ooo_segment_end_pos (it) - s->start;
295  ooo_segment_free (f, it - f->ooo_segments);
296  }
297  f->ooos_newest = s - f->ooo_segments;
298  }
299 }
300 
301 /**
302  * Removes segments that can now be enqueued because the fifo's tail has
303  * advanced. Returns the number of bytes added to tail.
304  */
305 static int
306 ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued, u32 * tail)
307 {
308  u32 s_index, bytes = 0;
309  ooo_segment_t *s;
310  i32 diff;
311 
312  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
313  diff = *tail - s->start;
314 
315  ASSERT (diff != n_bytes_enqueued);
316 
317  if (diff > n_bytes_enqueued)
318  return 0;
319 
320  /* If last tail update overlaps one/multiple ooo segments, remove them */
321  while (0 <= diff && diff < n_bytes_enqueued)
322  {
323  s_index = s - f->ooo_segments;
324 
325  /* Segment end is beyond the tail. Advance tail and remove segment */
326  if (s->length > diff)
327  {
328  bytes = s->length - diff;
329  *tail = *tail + bytes;
330  ooo_segment_free (f, s_index);
331  break;
332  }
333 
334  /* If we have next go on */
336  {
337  s = pool_elt_at_index (f->ooo_segments, s->next);
338  diff = *tail - s->start;
339  ooo_segment_free (f, s_index);
340  }
341  /* End of search */
342  else
343  {
344  ooo_segment_free (f, s_index);
345  break;
346  }
347  }
348 
349  ASSERT (bytes <= f->size);
350  return bytes;
351 }
352 
353 __clib_unused static ooo_segment_t *
355 {
356  ooo_segment_t *s;
357 
358  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
359  return 0;
360 
362  while (s->next != OOO_SEGMENT_INVALID_INDEX)
363  s = pool_elt_at_index (f->ooo_segments, s->next);
364  return s;
365 }
366 
367 void
369 {
370  svm_fifo_chunk_t *c, *prev;
371  u32 min_alloc;
372 
373  f->size = size;
374  f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;
375  f->segment_index = SVM_FIFO_INVALID_INDEX;
376  f->refcnt = 1;
377  f->head = f->tail = f->flags = 0;
378  f->head_chunk = f->tail_chunk = f->start_chunk;
379  f->ooo_deq = f->ooo_enq = 0;
380 
381  min_alloc = size > 32 << 10 ? size >> 3 : 4096;
382  min_alloc = clib_min (min_alloc, 64 << 10);
383  f->min_alloc = min_alloc;
384 
385  /*
386  * Initialize chunks
387  */
388  f->start_chunk->start_byte = 0;
389  prev = f->start_chunk;
390  c = prev->next;
391 
392  while (c)
393  {
394  c->start_byte = prev->start_byte + prev->length;
395  prev = c;
396  c = c->next;
397  }
398 }
399 
400 void
402 {
403  if (ooo_type == 0)
404  {
405  ASSERT (!rb_tree_is_init (&f->ooo_enq_lookup));
406  rb_tree_init (&f->ooo_enq_lookup);
407  }
408  else
409  {
410  ASSERT (!rb_tree_is_init (&f->ooo_deq_lookup));
411  rb_tree_init (&f->ooo_deq_lookup);
412  }
413 }
414 
415 /**
416  * Creates a fifo in the current heap. Fails vs blow up the process
417  */
418 svm_fifo_t *
419 svm_fifo_alloc (u32 data_size_in_bytes)
420 {
421  u32 rounded_data_size;
423  svm_fifo_t *f;
424 
426  if (f == 0)
427  return 0;
428 
429  clib_memset (f, 0, sizeof (*f));
430 
431  /* always round fifo data size to the next highest power-of-two */
432  rounded_data_size = (1 << (max_log2 (data_size_in_bytes)));
433  c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_data_size,
435  if (!c)
436  {
437  clib_mem_free (f);
438  return 0;
439  }
440 
441  clib_memset (c, 0, sizeof (*c));
442  c->start_byte = 0;
443  c->length = data_size_in_bytes;
446  f->start_chunk = f->end_chunk = c;
447 
448  return f;
449 }
450 
451 /**
452  * Creates a fifo chunk in the current heap
453  */
456 {
458  u32 rounded_size;
459 
460  /* round chunk size to the next highest power-of-two */
461  rounded_size = (1 << (max_log2 (size)));
462  c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_size,
464  if (c == 0)
465  return 0;
466 
467  clib_memset (c, 0, sizeof (*c));
468  c->length = rounded_size;
469  return c;
470 }
471 
472 /**
473  * Find chunk for given byte position
474  *
475  * @param f fifo
476  * @param pos normalized position in fifo
477  *
478  * @return chunk that includes given position or 0
479  */
480 static svm_fifo_chunk_t *
482 {
484 
485  c = f->start_chunk;
486  while (c && !f_chunk_includes_pos (c, pos))
487  c = c->next;
488 
489  return c;
490 }
491 
492 static svm_fifo_chunk_t *
494 {
496 
497  ASSERT (start != 0);
498 
499  c = start;
500  while (c && !f_chunk_includes_pos (c, pos))
501  c = c->next;
502 
503  return c;
504 }
505 
506 u32
508 {
509  u32 head, tail, end_chunk;
510 
511  f_load_head_tail_cons (f, &head, &tail);
512  ASSERT (!f->head_chunk || f_chunk_includes_pos (f->head_chunk, head));
513 
514  if (!f->head_chunk)
515  {
516  f->head_chunk = svm_fifo_find_chunk (f, head);
517  if (PREDICT_FALSE (!f->head_chunk))
518  return 0;
519  }
520 
521  end_chunk = f_chunk_end (f->head_chunk);
522 
523  return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head;
524 }
525 
526 u32
528 {
529  u32 head, tail;
530 
531  f_load_head_tail_prod (f, &head, &tail);
532  ASSERT (!f->tail_chunk || f_chunk_includes_pos (f->tail_chunk, tail));
533 
534  return f->tail_chunk ? f_chunk_end (f->tail_chunk) - tail : 0;
535 }
536 
537 static rb_node_t *
539 {
540  rb_node_t *cur, *prev;
541 
542  cur = rb_node (rt, rt->root);
543  if (PREDICT_FALSE (rb_node_is_tnil (rt, cur)))
544  return 0;
545 
546  while (pos != cur->key)
547  {
548  prev = cur;
549  if (f_pos_lt (pos, cur->key))
550  {
551  cur = rb_node_left (rt, cur);
552  if (rb_node_is_tnil (rt, cur))
553  {
554  cur = rb_tree_predecessor (rt, prev);
555  break;
556  }
557  }
558  else
559  {
560  cur = rb_node_right (rt, cur);
561  if (rb_node_is_tnil (rt, cur))
562  {
563  cur = prev;
564  break;
565  }
566  }
567  }
568 
569  if (rb_node_is_tnil (rt, cur))
570  return 0;
571 
572  return cur;
573 }
574 
575 static svm_fifo_chunk_t *
577 {
579  rb_node_t *n;
580 
581  if (!rb_tree_is_init (rt))
582  return 0;
583 
584  n = f_find_node_rbtree (rt, pos);
585  if (!n)
586  return 0;
588  if (f_chunk_includes_pos (c, pos))
589  return c;
590 
591  return 0;
592 }
593 
594 static void
595 f_update_ooo_enq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
596 {
597  rb_tree_t *rt = &f->ooo_enq_lookup;
599  rb_node_t *cur;
600 
601  /* Use linear search if rbtree is not initialized */
602  if (PREDICT_FALSE (!rb_tree_is_init (rt)))
603  {
604  f->ooo_enq = svm_fifo_find_next_chunk (f, f->tail_chunk, start_pos);
605  return;
606  }
607 
608  if (rt->root == RBTREE_TNIL_INDEX)
609  {
610  c = f->tail_chunk;
614  }
615  else
616  {
617  cur = f_find_node_rbtree (rt, start_pos);
619  ASSERT (f_pos_leq (c->start_byte, start_pos));
620  }
621 
622  if (f_chunk_includes_pos (c, start_pos))
623  f->ooo_enq = c;
624 
625  if (f_chunk_includes_pos (c, end_pos))
626  return;
627 
628  do
629  {
630  c = c->next;
631  if (!c || c->enq_rb_index != RBTREE_TNIL_INDEX)
632  break;
633 
636 
637  if (f_chunk_includes_pos (c, start_pos))
638  f->ooo_enq = c;
639  }
640  while (!f_chunk_includes_pos (c, end_pos));
641 }
642 
643 static void
644 f_update_ooo_deq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
645 {
646  rb_tree_t *rt = &f->ooo_deq_lookup;
647  rb_node_t *cur;
649 
650  /* Use linear search if rbtree is not initialized */
651  if (PREDICT_FALSE (!rb_tree_is_init (rt)))
652  {
653  f->ooo_deq = svm_fifo_find_chunk (f, start_pos);
654  return;
655  }
656 
657  if (rt->root == RBTREE_TNIL_INDEX)
658  {
659  c = f->start_chunk;
663  }
664  else
665  {
666  cur = f_find_node_rbtree (rt, start_pos);
668  ASSERT (f_pos_leq (c->start_byte, start_pos));
669  }
670 
671  if (f_chunk_includes_pos (c, start_pos))
672  f->ooo_deq = c;
673 
674  if (f_chunk_includes_pos (c, end_pos))
675  return;
676 
677  do
678  {
679  c = c->next;
680  if (!c || c->deq_rb_index != RBTREE_TNIL_INDEX)
681  break;
682 
685 
686  if (f_chunk_includes_pos (c, start_pos))
687  f->ooo_deq = c;
688  }
689  while (!f_chunk_includes_pos (c, end_pos));
690 }
691 
692 static svm_fifo_chunk_t *
694  u32 end_pos)
695 {
696  rb_tree_t *rt = &f->ooo_enq_lookup;
698  rb_node_t *n;
699 
700  c = start;
701  while (c && !f_chunk_includes_pos (c, end_pos))
702  {
704  {
705  n = rb_node (rt, c->enq_rb_index);
706  rb_tree_del_node (rt, n);
708  }
709 
710  c = c->next;
711  }
712 
713  /* No ooo segments left, so make sure the current chunk
714  * is not tracked in the enq rbtree */
715  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX
716  && c && c->enq_rb_index != RBTREE_TNIL_INDEX)
717  {
718  n = rb_node (rt, c->enq_rb_index);
719  rb_tree_del_node (rt, n);
721  }
722 
723  return c;
724 }
725 
726 static svm_fifo_chunk_t *
728  u32 end_pos)
729 {
730  rb_tree_t *rt = &f->ooo_deq_lookup;
732  rb_node_t *n;
733 
734  c = start;
735  while (c && !f_chunk_includes_pos (c, end_pos))
736  {
738  {
739  n = rb_node (rt, c->deq_rb_index);
740  rb_tree_del_node (rt, n);
742  }
743 
744  c = c->next;
745  }
746 
747  return c;
748 }
749 
750 void
752 {
753  rb_tree_free_nodes (&f->ooo_enq_lookup);
754  rb_tree_free_nodes (&f->ooo_deq_lookup);
755 }
756 
757 void
759 {
760  ASSERT (f->refcnt > 0);
761 
762  if (--f->refcnt == 0)
763  {
764  /* ooo data is not allocated on segment heap */
766  clib_mem_free (f);
767  }
768 }
769 
770 void
772 {
773  u32 n_chunk;
774  u32 head, tail, head_idx;
776 
777  ASSERT (len <= f->size);
778 
779  f_load_head_tail_cons (f, &head, &tail);
780 
781  if (!f->head_chunk)
782  f->head_chunk = svm_fifo_find_chunk (f, head);
783 
784  c = f->head_chunk;
785  head_idx = head - c->start_byte;
786  n_chunk = c->length - head_idx;
787  if (len <= n_chunk)
788  clib_memcpy_fast (&c->data[head_idx], src, len);
789  else
790  {
791  ASSERT (len - n_chunk <= c->next->length);
792  clib_memcpy_fast (&c->data[head_idx], src, n_chunk);
793  clib_memcpy_fast (&c->next->data[0], src + n_chunk, len - n_chunk);
794  }
795 }
796 
797 static int
799 {
800  svm_fifo_chunk_t *c, *cur, *prev;
801  u32 alloc_size, free_alloced;
802 
803  free_alloced = f_chunk_end (f->end_chunk) - tail;
804 
805  alloc_size = clib_min (f->min_alloc, f->size - (tail - head));
806  alloc_size = clib_max (alloc_size, len - free_alloced);
807 
808  c = fsh_alloc_chunk (f->fs_hdr, f->slice_index, alloc_size);
809  if (PREDICT_FALSE (!c))
810  return -1;
811 
812  cur = c;
813  prev = f->end_chunk;
814 
815  while (cur)
816  {
817  cur->start_byte = prev->start_byte + prev->length;
820 
821  prev = cur;
822  cur = cur->next;
823  }
824 
825  prev->next = 0;
826  f->end_chunk->next = c;
827  f->end_chunk = prev;
828 
829  if (!f->tail_chunk)
830  f->tail_chunk = c;
831 
832  return 0;
833 }
834 
835 int
837 {
838  u32 tail, head, free_count;
839  svm_fifo_chunk_t *old_tail_c;
840 
841  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
842 
843  f_load_head_tail_prod (f, &head, &tail);
844 
845  /* free space in fifo can only increase during enqueue: SPSC */
846  free_count = f_free_count (f, head, tail);
847 
848  if (PREDICT_FALSE (free_count == 0))
849  return SVM_FIFO_EFULL;
850 
851  /* number of bytes we're going to copy */
852  len = clib_min (free_count, len);
853 
854  if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
855  {
856  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
857  {
858  len = f_chunk_end (f->end_chunk) - tail;
859  if (!len)
860  return SVM_FIFO_EGROW;
861  }
862  }
863 
864  old_tail_c = f->tail_chunk;
865 
866  svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, src, len, &f->tail_chunk);
867  tail = tail + len;
868 
869  svm_fifo_trace_add (f, head, len, 2);
870 
871  /* collect out-of-order segments */
872  if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
873  {
874  len += ooo_segment_try_collect (f, len, &tail);
875  /* Tail chunk might've changed even if nothing was collected */
876  f->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail);
877  f->ooo_enq = 0;
878  }
879 
880  /* store-rel: producer owned index (paired with load-acq in consumer) */
881  clib_atomic_store_rel_n (&f->tail, tail);
882 
883  return len;
884 }
885 
886 /**
887  * Enqueue a future segment.
888  *
889  * Two choices: either copies the entire segment, or copies nothing
890  * Returns 0 of the entire segment was copied
891  * Returns -1 if none of the segment was copied due to lack of space
892  */
893 int
895 {
896  u32 tail, head, free_count, enq_pos;
897 
898  f_load_head_tail_prod (f, &head, &tail);
899 
900  /* free space in fifo can only increase during enqueue: SPSC */
901  free_count = f_free_count (f, head, tail);
902  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
903 
904  /* will this request fit? */
905  if ((len + offset) > free_count)
906  return SVM_FIFO_EFULL;
907 
908  enq_pos = tail + offset;
909 
910  if (f_pos_gt (enq_pos + len, f_chunk_end (f->end_chunk)))
911  {
912  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, offset + len)))
913  return SVM_FIFO_EGROW;
914  }
915 
916  svm_fifo_trace_add (f, offset, len, 1);
917  ooo_segment_add (f, offset, head, tail, len);
918 
919  if (!f->ooo_enq || !f_chunk_includes_pos (f->ooo_enq, enq_pos))
920  f_update_ooo_enq (f, enq_pos, enq_pos + len);
921 
922  svm_fifo_copy_to_chunk (f, f->ooo_enq, enq_pos, src, len, &f->ooo_enq);
923 
924  return 0;
925 }
926 
927 /**
928  * Advance tail
929  */
930 void
932 {
933  u32 tail;
934 
935  ASSERT (len <= svm_fifo_max_enqueue_prod (f));
936  /* load-relaxed: producer owned index */
937  tail = f->tail;
938  tail = tail + len;
939 
940  if (rb_tree_is_init (&f->ooo_enq_lookup))
941  {
942  f->tail_chunk = f_lookup_clear_enq_chunks (f, f->tail_chunk, tail);
943  f->ooo_enq = 0;
944  }
945  else
946  {
947  f->tail_chunk = svm_fifo_find_next_chunk (f, f->tail_chunk, tail);
948  }
949 
950  /* store-rel: producer owned index (paired with load-acq in consumer) */
951  clib_atomic_store_rel_n (&f->tail, tail);
952 }
953 
955 f_unlink_chunks (svm_fifo_t * f, u32 end_pos, u8 maybe_ooo)
956 {
957  svm_fifo_chunk_t *start, *prev = 0, *c;
958  rb_tree_t *rt;
959  rb_node_t *n;
960 
961  ASSERT (!f_chunk_includes_pos (f->start_chunk, end_pos));
962 
963  if (maybe_ooo)
964  rt = &f->ooo_deq_lookup;
965 
966  c = f->start_chunk;
967 
968  do
969  {
970  if (maybe_ooo && c->deq_rb_index != RBTREE_TNIL_INDEX)
971  {
972  n = rb_node (rt, c->deq_rb_index);
973  ASSERT (n == f_find_node_rbtree (rt, c->start_byte));
974  rb_tree_del_node (rt, n);
975  c->deq_rb_index = RBTREE_TNIL_INDEX;
976  }
977  if (!c->next)
978  break;
979  prev = c;
980  c = c->next;
981  }
982  while (!f_chunk_includes_pos (c, end_pos));
983 
984  if (maybe_ooo)
985  {
986  if (f->ooo_deq && f_pos_lt (f->ooo_deq->start_byte, f_chunk_end (c)))
987  f->ooo_deq = 0;
988  }
989  else
990  {
991  if (PREDICT_FALSE (f->ooo_deq != 0))
992  f->ooo_deq = 0;
993  }
994 
995  /* Avoid unlinking the last chunk */
996  if (!prev)
997  return 0;
998 
999  prev->next = 0;
1000  start = f->start_chunk;
1001  f->start_chunk = c;
1002 
1003  return start;
1004 }
1005 
1006 int
1008 {
1009  u32 tail, head, cursize;
1010 
1011  f_load_head_tail_cons (f, &head, &tail);
1012 
1013  /* current size of fifo can only increase during dequeue: SPSC */
1014  cursize = f_cursize (f, head, tail);
1015 
1016  if (PREDICT_FALSE (cursize == 0))
1017  return SVM_FIFO_EEMPTY;
1018 
1019  len = clib_min (cursize, len);
1020 
1021  if (!f->head_chunk)
1022  f->head_chunk = svm_fifo_find_chunk (f, head);
1023 
1024  svm_fifo_copy_from_chunk (f, f->head_chunk, head, dst, len, &f->head_chunk);
1025  head = head + len;
1026 
1027  /* In order dequeues are not supported in combination with ooo peeking.
1028  * Use svm_fifo_dequeue_drop instead. */
1029  ASSERT (rb_tree_n_nodes (&f->ooo_deq_lookup) <= 1);
1030 
1031  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
1032  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1033  f_unlink_chunks (f, head, 0));
1034 
1035  /* store-rel: consumer owned index (paired with load-acq in producer) */
1036  clib_atomic_store_rel_n (&f->head, head);
1037 
1038  return len;
1039 }
1040 
1041 int
1043 {
1044  u32 tail, head, cursize, head_idx;
1045 
1046  f_load_head_tail_cons (f, &head, &tail);
1047 
1048  /* current size of fifo can only increase during peek: SPSC */
1049  cursize = f_cursize (f, head, tail);
1050 
1051  if (PREDICT_FALSE (cursize < offset))
1052  return SVM_FIFO_EEMPTY;
1053 
1054  len = clib_min (cursize - offset, len);
1055  head_idx = head + offset;
1056 
1057  if (!f->ooo_deq || !f_chunk_includes_pos (f->ooo_deq, head_idx))
1058  f_update_ooo_deq (f, head_idx, head_idx + len);
1059 
1060  svm_fifo_copy_from_chunk (f, f->ooo_deq, head_idx, dst, len, &f->ooo_deq);
1061  return len;
1062 }
1063 
1064 int
1066 {
1067  u32 total_drop_bytes, tail, head, cursize;
1068 
1069  f_load_head_tail_cons (f, &head, &tail);
1070 
1071  /* number of bytes available */
1072  cursize = f_cursize (f, head, tail);
1073  if (PREDICT_FALSE (cursize == 0))
1074  return SVM_FIFO_EEMPTY;
1075 
1076  /* number of bytes we're going to drop */
1077  total_drop_bytes = clib_min (cursize, len);
1078 
1079  svm_fifo_trace_add (f, tail, total_drop_bytes, 3);
1080 
1081  /* move head */
1082  head = head + total_drop_bytes;
1083 
1084  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
1085  {
1086  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1087  f_unlink_chunks (f, head, 1));
1088  f->head_chunk =
1089  f_chunk_includes_pos (f->start_chunk, head) ? f->start_chunk : 0;
1090  }
1091 
1092  /* store-rel: consumer owned index (paired with load-acq in producer) */
1093  clib_atomic_store_rel_n (&f->head, head);
1094 
1095  return total_drop_bytes;
1096 }
1097 
1098 /**
1099  * Drop all data from fifo
1100  *
1101  */
1102 void
1104 {
1105  u32 head, tail;
1106 
1107  f_load_head_tail_all_acq (f, &head, &tail);
1108 
1109  if (!f->head_chunk || !f_chunk_includes_pos (f->head_chunk, head))
1110  f->head_chunk = svm_fifo_find_chunk (f, head);
1111 
1112  f->head_chunk = f_lookup_clear_deq_chunks (f, f->head_chunk, tail);
1113 
1114  if (f_pos_geq (tail, f_chunk_end (f->start_chunk)))
1115  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1116  f_unlink_chunks (f, tail, 0));
1117 
1118  /* store-rel: consumer owned index (paired with load-acq in producer) */
1119  clib_atomic_store_rel_n (&f->head, tail);
1120 }
1121 
1122 int
1124 {
1125  u32 head, tail;
1126 
1127  f_load_head_tail_prod (f, &head, &tail);
1128 
1129  if (f_chunk_end (f->end_chunk) - head >= f->size)
1130  return 0;
1131 
1132  if (f_try_chunk_alloc (f, head, tail, f->size - (tail - head)))
1133  return SVM_FIFO_EGROW;
1134 
1135  return 0;
1136 }
1137 
1138 int
1140 {
1141  u32 cursize, head, tail, head_idx;
1142 
1143  f_load_head_tail_cons (f, &head, &tail);
1144 
1145  /* consumer function, cursize can only increase while we're working */
1146  cursize = f_cursize (f, head, tail);
1147 
1148  if (PREDICT_FALSE (cursize == 0))
1149  return SVM_FIFO_EEMPTY;
1150 
1151  head_idx = head;
1152 
1153  if (tail < head)
1154  {
1155  fs[0].len = f->size - head_idx;
1156  fs[0].data = f->head_chunk->data + head_idx;
1157  fs[1].len = cursize - fs[0].len;
1158  fs[1].data = f->head_chunk->data;
1159  }
1160  else
1161  {
1162  fs[0].len = cursize;
1163  fs[0].data = f->head_chunk->data + head_idx;
1164  fs[1].len = 0;
1165  fs[1].data = 0;
1166  }
1167  return cursize;
1168 }
1169 
1170 void
1172 {
1173  u32 head;
1174 
1175  /* consumer owned index */
1176  head = f->head;
1177 
1178  ASSERT (fs[0].data == f->head_chunk->data + head);
1179  head = (head + fs[0].len + fs[1].len);
1180  /* store-rel: consumer owned index (paired with load-acq in producer) */
1181  clib_atomic_store_rel_n (&f->head, head);
1182 }
1183 
1184 /**
1185  * Clones fifo
1186  *
1187  * Assumptions:
1188  * - no prod and cons are accessing either dest or src fifo
1189  * - fifo is not multi chunk
1190  */
1191 void
1193 {
1194  u32 head, tail;
1195 
1196  /* Support only single chunk clones for now */
1197  ASSERT (svm_fifo_n_chunks (sf) == 1);
1198 
1199  clib_memcpy_fast (df->head_chunk->data, sf->head_chunk->data, sf->size);
1200 
1201  f_load_head_tail_all_acq (sf, &head, &tail);
1202  clib_atomic_store_rel_n (&df->head, head);
1203  clib_atomic_store_rel_n (&df->tail, tail);
1204 }
1205 
1206 u32
1208 {
1209  return pool_elts (f->ooo_segments);
1210 }
1211 
1212 ooo_segment_t *
1214 {
1215  return pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
1216 }
1217 
1218 /**
1219  * Set fifo pointers to requested offset
1220  */
1221 void
1223 {
1225 
1226  clib_atomic_store_rel_n (&f->head, head);
1227  clib_atomic_store_rel_n (&f->tail, tail);
1228 
1229  c = svm_fifo_find_chunk (f, head);
1230  ASSERT (c != 0);
1231  f->head_chunk = f->ooo_deq = c;
1232  c = svm_fifo_find_chunk (f, tail);
1233  ASSERT (c != 0);
1234  f->tail_chunk = f->ooo_enq = c;
1235 }
1236 
1237 void
1239 {
1240  if (f->n_subscribers >= SVM_FIFO_MAX_EVT_SUBSCRIBERS)
1241  return;
1242  f->subscribers[f->n_subscribers++] = subscriber;
1243 }
1244 
1245 void
1247 {
1248  int i;
1249 
1250  for (i = 0; i < f->n_subscribers; i++)
1251  {
1252  if (f->subscribers[i] != subscriber)
1253  continue;
1254  f->subscribers[i] = f->subscribers[f->n_subscribers - 1];
1255  f->n_subscribers--;
1256  break;
1257  }
1258 }
1259 
1260 u8
1262 {
1263  svm_fifo_chunk_t *tmp;
1264 
1265  if (f->head_chunk && !f_chunk_includes_pos (f->head_chunk, f->head))
1266  return 0;
1267  if (f->tail_chunk && !f_chunk_includes_pos (f->tail_chunk, f->tail))
1268  return 0;
1269  if (f->ooo_deq)
1270  {
1271  if (rb_tree_is_init (&f->ooo_deq_lookup))
1272  {
1273  if (f_pos_lt (f->ooo_deq->start_byte, f->start_chunk->start_byte)
1274  || f_pos_gt (f->ooo_deq->start_byte,
1275  f_chunk_end (f->end_chunk)))
1276  return 0;
1277 
1278  tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup,
1279  f->ooo_deq->start_byte);
1280  }
1281  else
1282  tmp = svm_fifo_find_chunk (f, f->ooo_deq->start_byte);
1283  if (tmp != f->ooo_deq)
1284  return 0;
1285  }
1286  if (f->ooo_enq)
1287  {
1288  if (rb_tree_is_init (&f->ooo_enq_lookup))
1289  {
1290  if (f_pos_lt (f->ooo_enq->start_byte, f->start_chunk->start_byte)
1291  || f_pos_gt (f->ooo_enq->start_byte,
1292  f_chunk_end (f->end_chunk)))
1293  return 0;
1294 
1295  tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup,
1296  f->ooo_enq->start_byte);
1297  }
1298  else
1299  {
1300  tmp = svm_fifo_find_next_chunk (f, f->tail_chunk,
1301  f->ooo_enq->start_byte);
1302  }
1303  if (tmp != f->ooo_enq)
1304  return 0;
1305  }
1306 
1307  if (f->start_chunk->next)
1308  {
1309  svm_fifo_chunk_t *c, *prev = 0, *tmp;
1310  u32 chunks_bytes = 0;
1311 
1312  c = f->start_chunk;
1313  do
1314  {
1315  tmp = svm_fifo_find_chunk (f, c->start_byte);
1316  if (tmp != c)
1317  return 0;
1318  if (prev && (prev->start_byte + prev->length != c->start_byte))
1319  return 0;
1320 
1321  if (c->enq_rb_index != RBTREE_TNIL_INDEX)
1322  {
1323  tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup, c->start_byte);
1324  if (tmp)
1325  {
1326  if (tmp != c)
1327  return 0;
1328  }
1329  }
1330  if (c->deq_rb_index != RBTREE_TNIL_INDEX)
1331  {
1332  tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup, c->start_byte);
1333  if (tmp)
1334  {
1335  if (tmp != c)
1336  return 0;
1337  }
1338  }
1339 
1340  chunks_bytes += c->length;
1341  prev = c;
1342  c = c->next;
1343  }
1344  while (c);
1345 
1346  if (chunks_bytes < f->tail - f->head)
1347  return 0;
1348  }
1349 
1350  return 1;
1351 }
1352 
1353 u32
1355 {
1357  int n_chunks = 0;
1358 
1359  c = f->start_chunk;
1360  while (c)
1361  {
1362  n_chunks++;
1363  c = c->next;
1364  }
1365 
1366  return n_chunks;
1367 }
1368 
1369 u8 *
1370 format_ooo_segment (u8 * s, va_list * args)
1371 {
1372  svm_fifo_t __clib_unused *f = va_arg (*args, svm_fifo_t *);
1373  ooo_segment_t *seg = va_arg (*args, ooo_segment_t *);
1374  s = format (s, "[%u, %u], len %u, next %d, prev %d", seg->start,
1375  seg->start + seg->length, seg->length, seg->next, seg->prev);
1376  return s;
1377 }
1378 
1379 u8 *
1381 {
1382 #if SVM_FIFO_TRACE
1383  svm_fifo_trace_elem_t *seg = 0;
1384  int i = 0;
1385 
1386  if (f->trace)
1387  {
1388  vec_foreach (seg, f->trace)
1389  {
1390  s = format (s, "{%u, %u, %u}, ", seg->offset, seg->len, seg->action);
1391  i++;
1392  if (i % 5 == 0)
1393  s = format (s, "\n");
1394  }
1395  s = format (s, "\n");
1396  }
1397  return s;
1398 #else
1399  return 0;
1400 #endif
1401 }
1402 
1403 u8 *
1404 svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose)
1405 {
1406  int i, trace_len;
1407  u8 *data = 0;
1409  u32 offset;
1410  svm_fifo_t *placeholder_fifo;
1411 
1412  if (!f)
1413  return s;
1414 
1415 #if SVM_FIFO_TRACE
1416  trace = f->trace;
1417  trace_len = vec_len (trace);
1418 #else
1419  trace = 0;
1420  trace_len = 0;
1421 #endif
1422 
1423  placeholder_fifo = svm_fifo_alloc (f->size);
1424  svm_fifo_init (f, f->size);
1425  clib_memset (f->head_chunk->data, 0xFF, f->size);
1426  vec_validate (data, f->size);
1427  for (i = 0; i < vec_len (data); i++)
1428  data[i] = i;
1429 
1430  for (i = 0; i < trace_len; i++)
1431  {
1432  offset = trace[i].offset;
1433  if (trace[i].action == 1)
1434  {
1435  if (verbose)
1436  s = format (s, "adding [%u, %u]:", trace[i].offset,
1437  (trace[i].offset + trace[i].len));
1438  svm_fifo_enqueue_with_offset (placeholder_fifo, trace[i].offset,
1439  trace[i].len, &data[offset]);
1440  }
1441  else if (trace[i].action == 2)
1442  {
1443  if (verbose)
1444  s = format (s, "adding [%u, %u]:", 0, trace[i].len);
1445  svm_fifo_enqueue (placeholder_fifo, trace[i].len, &data[offset]);
1446  }
1447  else if (!no_read)
1448  {
1449  if (verbose)
1450  s = format (s, "read: %u", trace[i].len);
1451  svm_fifo_dequeue_drop (placeholder_fifo, trace[i].len);
1452  }
1453  if (verbose)
1454  s = format (s, "%U", format_svm_fifo, placeholder_fifo, 1);
1455  }
1456 
1457  s = format (s, "result: %U", format_svm_fifo, placeholder_fifo, 1);
1458 
1459  return s;
1460 }
1461 
1462 u8 *
1463 format_ooo_list (u8 * s, va_list * args)
1464 {
1465  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
1466  u32 indent = va_arg (*args, u32);
1467  u32 ooo_segment_index = f->ooos_list_head;
1468  ooo_segment_t *seg;
1469 
1470  while (ooo_segment_index != OOO_SEGMENT_INVALID_INDEX)
1471  {
1472  seg = pool_elt_at_index (f->ooo_segments, ooo_segment_index);
1473  s = format (s, "%U%U\n", format_white_space, indent, format_ooo_segment,
1474  f, seg);
1475  ooo_segment_index = seg->next;
1476  }
1477 
1478  return s;
1479 }
1480 
1481 u8 *
1482 format_svm_fifo (u8 * s, va_list * args)
1483 {
1484  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
1485  int verbose = va_arg (*args, int);
1486  u32 indent;
1487 
1488  if (!s)
1489  return s;
1490 
1491  indent = format_get_indent (s);
1492  s = format (s, "cursize %u nitems %u has_event %d min_alloc %u\n",
1493  svm_fifo_max_dequeue (f), f->size, f->has_event, f->min_alloc);
1494  s = format (s, "%Uhead %u tail %u segment manager %u\n", format_white_space,
1495  indent, f->head, f->tail, f->segment_manager);
1496 
1497  if (verbose > 1)
1498  s = format (s, "%Uvpp session %d thread %d app session %d thread %d\n",
1499  format_white_space, indent, f->master_session_index,
1500  f->master_thread_index, f->client_session_index,
1501  f->client_thread_index);
1502 
1503  if (verbose)
1504  {
1505  s = format (s, "%Uooo pool %d active elts newest %u\n",
1506  format_white_space, indent, pool_elts (f->ooo_segments),
1507  f->ooos_newest);
1508  if (svm_fifo_has_ooo_data (f))
1509  s = format (s, " %U", format_ooo_list, f, indent, verbose);
1510  }
1511  return s;
1512 }
1513 
1514 #endif
1515 /*
1516  * fd.io coding-style-patch-verification: ON
1517  *
1518  * Local Variables:
1519  * eval: (c-set-style "gnu")
1520  * End:
1521  */
u32 length
length of chunk in bytes
Definition: fifo_types.h:32
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:509
static void svm_fifo_copy_to_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:89
rb_node_t * rb_tree_predecessor(rb_tree_t *rt, rb_node_t *x)
Definition: rbtree.c:287
static void f_load_head_tail_all_acq(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail independent of producer/consumer role.
Definition: svm_fifo.h:107
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
Definition: vlib_api_cli.c:899
#define clib_min(x, y)
Definition: clib.h:327
static int ooo_segment_try_collect(svm_fifo_t *f, u32 n_bytes_enqueued, u32 *tail)
Removes segments that can now be enqueued because the fifo's tail has advanced.
Definition: svm_fifo.c:306
int svm_fifo_segments(svm_fifo_t *f, svm_fifo_seg_t *fs)
Definition: svm_fifo.c:1139
static u32 svm_fifo_max_enqueue_prod(svm_fifo_t *f)
Maximum number of bytes that can be enqueued into fifo.
Definition: svm_fifo.h:524
static ooo_segment_t * ooo_segment_next(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.c:125
static rb_node_t * rb_node_left(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:94
void svm_fifo_free_chunk_lookup(svm_fifo_t *f)
Cleanup fifo chunk lookup rb tree.
Definition: svm_fifo.c:751
static u8 svm_fifo_has_ooo_data(svm_fifo_t *f)
Check if fifo has out-of-order data.
Definition: svm_fifo.h:630
void svm_fifo_init_pointers(svm_fifo_t *f, u32 head, u32 tail)
Set fifo pointers to requested offset.
Definition: svm_fifo.c:1222
static u32 f_free_count(svm_fifo_t *f, u32 head, u32 tail)
Fifo free bytes, i.e., number of free bytes.
Definition: svm_fifo.h:132
void svm_fifo_segments_free(svm_fifo_t *f, svm_fifo_seg_t *fs)
Definition: svm_fifo.c:1171
void svm_fifo_free(svm_fifo_t *f)
Free fifo and associated state.
Definition: svm_fifo.c:758
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
u32 prev
Previous linked-list element pool index.
Definition: fifo_types.h:42
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static void f_load_head_tail_cons(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for consumer.
Definition: svm_fifo.h:80
static void svm_fifo_copy_from_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 head_idx, u8 *dst, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:97
#define CLIB_MARCH_FN_SELECT(fn)
Definition: cpu.h:421
svm_fifo_chunk_t * fsh_alloc_chunk(fifo_segment_header_t *fsh, u32 slice_index, u32 chunk_size)
Allocate chunks in fifo segment.
Definition: fifo_segment.c:695
vl_api_address_t src
Definition: gre.api:54
static heap_elt_t * last(heap_header_t *h)
Definition: heap.c:53
int svm_fifo_peek(svm_fifo_t *f, u32 offset, u32 len, u8 *dst)
Peek data from fifo.
Definition: svm_fifo.c:1042
static svm_fifo_chunk_t * f_lookup_clear_deq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
Definition: svm_fifo.c:727
void svm_fifo_enqueue_nocopy(svm_fifo_t *f, u32 len)
Advance tail.
Definition: svm_fifo.c:931
void svm_fifo_init(svm_fifo_t *f, u32 size)
Initialize fifo.
Definition: svm_fifo.c:368
static rb_node_t * rb_node(rb_tree_t *rt, rb_node_index_t ri)
Definition: rbtree.h:82
static u32 format_get_indent(u8 *s)
Definition: format.h:72
#define RBTREE_TNIL_INDEX
Definition: rbtree.h:22
ooo_segment_t * svm_fifo_first_ooo_segment(svm_fifo_t *f)
First out-of-order segment for fifo.
Definition: svm_fifo.c:1213
void svm_fifo_dequeue_drop_all(svm_fifo_t *f)
Drop all data from fifo.
Definition: svm_fifo.c:1103
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
static rb_node_t * rb_node_right(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:88
u32 rb_tree_n_nodes(rb_tree_t *rt)
Definition: rbtree.c:470
void svm_fifo_overwrite_head(svm_fifo_t *f, u8 *src, u32 len)
Overwrite fifo head with new data.
Definition: svm_fifo.c:771
#define SVM_FIFO_INVALID_INDEX
Definition: svm_fifo.h:30
void rb_tree_free_nodes(rb_tree_t *rt)
Definition: rbtree.c:476
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:252
unsigned char u8
Definition: types.h:56
u8 data[128]
Definition: ipsec_types.api:89
int rb_tree_is_init(rb_tree_t *rt)
Definition: rbtree.c:496
static ooo_segment_t * ooo_segment_alloc(svm_fifo_t *f, u32 start, u32 length)
Definition: svm_fifo.c:133
static svm_fifo_chunk_t * svm_fifo_find_chunk(svm_fifo_t *f, u32 pos)
Find chunk for given byte position.
Definition: svm_fifo.c:481
void svm_fifo_clone(svm_fifo_t *df, svm_fifo_t *sf)
Clones fifo.
Definition: svm_fifo.c:1192
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
Fifo max bytes to dequeue.
Definition: svm_fifo.h:433
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
u32 svm_fifo_n_chunks(svm_fifo_t *f)
Number of chunks linked into the fifo.
Definition: svm_fifo.c:1354
svm_fifo_chunk_t * svm_fifo_chunk_alloc(u32 size)
Creates a fifo chunk in the current heap.
Definition: svm_fifo.c:455
u8 * format_ooo_list(u8 *s, va_list *args)
Definition: svm_fifo.c:1463
void svm_fifo_free_ooo_data(svm_fifo_t *f)
Cleanup fifo ooo data.
Definition: svm_fifo.c:111
static void ooo_segment_free(svm_fifo_t *f, u32 index)
Definition: svm_fifo.c:147
unsigned int u32
Definition: types.h:88
int svm_fifo_dequeue(svm_fifo_t *f, u32 len, u8 *dst)
Dequeue data from fifo.
Definition: svm_fifo.c:1007
u32 key
node key
Definition: rbtree.h:38
static svm_fifo_chunk_t * f_unlink_chunks(svm_fifo_t *f, u32 end_pos, u8 maybe_ooo)
Definition: svm_fifo.c:955
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:534
static int f_pos_gt(u32 a, u32 b)
Definition: svm_fifo.h:156
uword opaque
value stored by node
Definition: rbtree.h:39
static ooo_segment_t * ooo_segment_prev(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.c:117
struct svm_fifo_chunk_ * next
pointer to next chunk in linked-lists
Definition: fifo_types.h:33
void fsh_collect_chunks(fifo_segment_header_t *fsh, u32 slice_index, svm_fifo_chunk_t *c)
Return chunks to fifo segment.
Definition: fifo_segment.c:804
u32 size
Definition: vhost_user.h:106
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:302
int svm_fifo_enqueue(svm_fifo_t *f, u32 len, const u8 *src)
Enqueue data to fifo.
Definition: svm_fifo.c:836
#define SVM_FIFO_MAX_EVT_SUBSCRIBERS
Definition: fifo_types.h:25
#define PREDICT_FALSE(x)
Definition: clib.h:120
void rb_tree_init(rb_tree_t *rt)
Definition: rbtree.c:483
#define always_inline
Definition: ipsec.h:28
static u32 f_chunk_end(svm_fifo_chunk_t *c)
Definition: svm_fifo.h:138
#define svm_fifo_trace_add(_f, _s, _l, _t)
Definition: svm_fifo.h:68
vl_api_address_t dst
Definition: gre.api:55
u8 * svm_fifo_replay(u8 *s, svm_fifo_t *f, u8 no_read, u8 verbose)
Definition: svm_fifo.c:1404
CLIB_MARCH_FN(svm_fifo_copy_to_chunk, void, svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:24
u8 len
Definition: ip_types.api:92
void svm_fifo_add_subscriber(svm_fifo_t *f, u8 subscriber)
Add io events subscriber to list.
Definition: svm_fifo.c:1238
#define pool_free(p)
Free a pool.
Definition: pool.h:427
svmdb_client_t * c
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
static u32 f_cursize(svm_fifo_t *f, u32 head, u32 tail)
Fifo current size, i.e., number of bytes enqueued.
Definition: svm_fifo.h:121
static void f_load_head_tail_prod(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for producer.
Definition: svm_fifo.h:93
u32 start_byte
chunk start byte
Definition: fifo_types.h:31
u8 svm_fifo_is_sane(svm_fifo_t *f)
Check if fifo is sane.
Definition: svm_fifo.c:1261
static svm_fifo_chunk_t * svm_fifo_find_next_chunk(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 pos)
Definition: svm_fifo.c:493
#define OOO_SEGMENT_INVALID_INDEX
Definition: svm_fifo.h:28
u8 * format_ooo_segment(u8 *s, va_list *args)
Definition: svm_fifo.c:1370
static svm_fifo_chunk_t * f_lookup_clear_enq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
Definition: svm_fifo.c:693
u32 svm_fifo_n_ooo_segments(svm_fifo_t *f)
Number of out-of-order segments for fifo.
Definition: svm_fifo.c:1207
static void * clib_mem_alloc_aligned_or_null(uword size, uword align)
Definition: mem.h:181
signed int i32
Definition: types.h:77
#define uword_to_pointer(u, type)
Definition: types.h:136
u8 * format_svm_fifo(u8 *s, va_list *args)
Definition: svm_fifo.c:1482
#define ASSERT(truth)
static u8 f_chunk_includes_pos(svm_fifo_chunk_t *c, u32 pos)
Definition: svm_fifo.h:168
static int f_pos_geq(u32 a, u32 b)
Definition: svm_fifo.h:162
rb_node_index_t deq_rb_index
deq node index if chunk in rbtree
Definition: fifo_types.h:35
static void clib_mem_free(void *p)
Definition: mem.h:215
u8 data[0]
start of chunk data
Definition: fifo_types.h:36
void svm_fifo_del_subscriber(svm_fifo_t *f, u8 subscriber)
Remove io events subscriber form list.
Definition: svm_fifo.c:1246
int svm_fifo_enqueue_with_offset(svm_fifo_t *f, u32 offset, u32 len, u8 *src)
Enqueue a future segment.
Definition: svm_fifo.c:894
static u32 ooo_segment_end_pos(ooo_segment_t *s)
Definition: svm_fifo.c:105
static void f_update_ooo_deq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
Definition: svm_fifo.c:644
static uword pointer_to_uword(const void *p)
Definition: types.h:131
#define clib_atomic_store_rel_n(a, b)
Definition: atomics.h:49
#define clib_max(x, y)
Definition: clib.h:320
u32 length
Length of segment.
Definition: fifo_types.h:44
u8 * svm_fifo_dump_trace(u8 *s, svm_fifo_t *f)
Definition: svm_fifo.c:1380
u32 next
Next linked-list element pool index.
Definition: fifo_types.h:41
u32 svm_fifo_max_read_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be read.
Definition: svm_fifo.c:507
static int f_pos_leq(u32 a, u32 b)
Definition: svm_fifo.h:150
template key/value backing page structure
Definition: bihash_doc.h:44
int svm_fifo_fill_chunk_list(svm_fifo_t *f)
Ensure the whole fifo size is writeable.
Definition: svm_fifo.c:1123
static u8 rb_node_is_tnil(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:76
vl_api_mac_event_action_t action
Definition: l2.api:181
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static uword max_log2(uword x)
Definition: clib.h:208
static rb_node_t * f_find_node_rbtree(rb_tree_t *rt, u32 pos)
Definition: svm_fifo.c:538
u32 index
Definition: flow_types.api:221
static svm_fifo_chunk_t * f_find_chunk_rbtree(rb_tree_t *rt, u32 pos)
Definition: svm_fifo.c:576
rb_node_index_t enq_rb_index
enq node index if chunk in rbtree
Definition: fifo_types.h:34
struct clib_bihash_value offset
template key/value backing page structure
static int f_try_chunk_alloc(svm_fifo_t *f, u32 head, u32 tail, u32 len)
Definition: svm_fifo.c:798
static __clib_unused ooo_segment_t * ooo_segment_last(svm_fifo_t *f)
Definition: svm_fifo.c:354
#define vec_foreach(var, vec)
Vector iterator.
save_rewrite_length must be aligned so that reass doesn't overwrite it
Definition: buffer.h:401
u32 svm_fifo_max_write_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be written.
Definition: svm_fifo.c:527
void rb_tree_del_node(rb_tree_t *rt, rb_node_t *z)
Definition: rbtree.c:445
rb_node_index_t rb_tree_add_custom(rb_tree_t *rt, u32 key, uword opaque, rb_tree_lt_fn ltfn)
Definition: rbtree.c:195
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
int svm_fifo_dequeue_drop(svm_fifo_t *f, u32 len)
Dequeue and drop bytes from fifo.
Definition: svm_fifo.c:1065
struct _svm_fifo svm_fifo_t
rb_node_index_t root
root index
Definition: rbtree.h:45
u32 start
Start of segment, normalized.
Definition: fifo_types.h:43
void svm_fifo_init_ooo_lookup(svm_fifo_t *f, u8 ooo_type)
Initialize rbtrees used for ooo lookups.
Definition: svm_fifo.c:401
static int f_pos_lt(u32 a, u32 b)
Definition: svm_fifo.h:144
static void ooo_segment_add(svm_fifo_t *f, u32 offset, u32 head, u32 tail, u32 length)
Add segment to fifo's out-of-order segment list.
Definition: svm_fifo.c:176
svm_fifo_t * svm_fifo_alloc(u32 data_size_in_bytes)
Creates a fifo in the current heap.
Definition: svm_fifo.c:419
static void f_update_ooo_enq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
Definition: svm_fifo.c:595
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:128