FD.io VPP  v20.05.1-5-g09f167997
Vector Packet Processing
svm_fifo.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3  * Copyright (c) 2019 Arm Limited
4  * Copyright (c) 2010-2017 Intel Corporation and/or its affiliates.
5  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
6  * Inspired from DPDK rte_ring.h (SPSC only) (derived from freebsd bufring.h).
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at:
10  *
11  * http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  */
19 
20 #include <svm/svm_fifo.h>
21 #include <svm/fifo_segment.h>
22 #include <vppinfra/cpu.h>
23 
25  svm_fifo_chunk_t * c, u32 tail_idx, const u8 * src, u32 len,
27 {
28  u32 n_chunk;
29 
30  ASSERT (f_pos_geq (tail_idx, c->start_byte)
31  && f_pos_lt (tail_idx, c->start_byte + c->length));
32 
33  tail_idx -= c->start_byte;
34  n_chunk = c->length - tail_idx;
35  if (n_chunk <= len)
36  {
37  u32 to_copy = len;
38  clib_memcpy_fast (&c->data[tail_idx], src, n_chunk);
39  c = c->next;
40  while ((to_copy -= n_chunk))
41  {
42  n_chunk = clib_min (c->length, to_copy);
43  clib_memcpy_fast (&c->data[0], src + (len - to_copy), n_chunk);
44  c = c->length <= to_copy ? c->next : c;
45  }
46  if (*last)
47  *last = c;
48  }
49  else
50  {
51  clib_memcpy_fast (&c->data[tail_idx], src, len);
52  }
53 }
54 
56  svm_fifo_chunk_t * c, u32 head_idx, u8 * dst, u32 len,
58 {
59  u32 n_chunk;
60 
61  ASSERT (f_pos_geq (head_idx, c->start_byte)
62  && f_pos_lt (head_idx, c->start_byte + c->length));
63 
64  head_idx -= c->start_byte;
65  n_chunk = c->length - head_idx;
66  if (n_chunk <= len)
67  {
68  u32 to_copy = len;
69  clib_memcpy_fast (dst, &c->data[head_idx], n_chunk);
70  c = c->next;
71  while ((to_copy -= n_chunk))
72  {
73  n_chunk = clib_min (c->length, to_copy);
74  clib_memcpy_fast (dst + (len - to_copy), &c->data[0], n_chunk);
75  c = c->length <= to_copy ? c->next : c;
76  }
77  if (*last)
78  *last = c;
79  }
80  else
81  {
82  clib_memcpy_fast (dst, &c->data[head_idx], len);
83  }
84 }
85 
86 #ifndef CLIB_MARCH_VARIANT
87 
88 static inline void
90  const u8 * src, u32 len, svm_fifo_chunk_t ** last)
91 {
93  last);
94 }
95 
96 static inline void
99 {
101  last);
102 }
103 
104 static inline u32
106 {
107  return (s->start + s->length);
108 }
109 
110 void
112 {
113  pool_free (f->ooo_segments);
114 }
115 
116 static inline ooo_segment_t *
118 {
120  return 0;
121  return pool_elt_at_index (f->ooo_segments, s->prev);
122 }
123 
124 static inline ooo_segment_t *
126 {
128  return 0;
129  return pool_elt_at_index (f->ooo_segments, s->next);
130 }
131 
132 static inline ooo_segment_t *
133 ooo_segment_alloc (svm_fifo_t * f, u32 start, u32 length)
134 {
135  ooo_segment_t *s;
136 
137  pool_get (f->ooo_segments, s);
138 
139  s->start = start;
140  s->length = length;
142 
143  return s;
144 }
145 
146 static inline void
148 {
149  ooo_segment_t *cur, *prev = 0, *next = 0;
150  cur = pool_elt_at_index (f->ooo_segments, index);
151 
152  if (cur->next != OOO_SEGMENT_INVALID_INDEX)
153  {
154  next = pool_elt_at_index (f->ooo_segments, cur->next);
155  next->prev = cur->prev;
156  }
157 
158  if (cur->prev != OOO_SEGMENT_INVALID_INDEX)
159  {
160  prev = pool_elt_at_index (f->ooo_segments, cur->prev);
161  prev->next = cur->next;
162  }
163  else
164  {
165  f->ooos_list_head = cur->next;
166  }
167 
168  pool_put (f->ooo_segments, cur);
169 }
170 
171 /**
172  * Add segment to fifo's out-of-order segment list. Takes care of merging
173  * adjacent segments and removing overlapping ones.
174  */
/* NOTE(review): this listing is a doxygen rendering; interior source lines
 * 227 and 254 were dropped by the extraction -- see the inline notes below
 * and confirm against the upstream svm_fifo.c before relying on them. */
175 static void
176 ooo_segment_add (svm_fifo_t * f, u32 offset, u32 head, u32 tail, u32 length)
177 {
178  ooo_segment_t *s, *new_s, *prev, *next, *it;
179  u32 new_index, s_end_pos, s_index;
180  u32 offset_pos, offset_end_pos;
181 
 /* New data must fit in the currently free space of the fifo. */
182  ASSERT (offset + length <= f_free_count (f, head, tail));
183 
 /* Convert the (offset, length) pair into absolute fifo positions,
  * measured from the current tail. */
184  offset_pos = tail + offset;
185  offset_end_pos = tail + offset + length;
186 
187  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
188 
 /* Empty list: the new segment becomes the head. */
189  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
190  {
191  s = ooo_segment_alloc (f, offset_pos, length);
192  f->ooos_list_head = s - f->ooo_segments;
193  f->ooos_newest = f->ooos_list_head;
194  return;
195  }
196 
197  /* Find first segment that starts after new segment */
198  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
199  while (s->next != OOO_SEGMENT_INVALID_INDEX
200  && f_pos_lt (s->start, offset_pos))
201  s = pool_elt_at_index (f->ooo_segments, s->next);
202 
203  /* If we have a previous and we overlap it, use it as starting point */
204  prev = ooo_segment_prev (f, s);
205  if (prev && f_pos_leq (offset_pos, ooo_segment_end_pos (prev)))
206  {
207  s = prev;
208  s_end_pos = ooo_segment_end_pos (s);
209 
210  /* Since we have previous, offset start position cannot be smaller
211  * than prev->start. Check tail */
212  ASSERT (f_pos_lt (s->start, offset_pos));
 /* Head side cannot need merging here; only the tail side may. */
213  goto check_tail;
214  }
215 
216  s_index = s - f->ooo_segments;
217  s_end_pos = ooo_segment_end_pos (s);
218 
219  /* No overlap, add before current segment */
220  if (f_pos_lt (offset_end_pos, s->start))
221  {
222  new_s = ooo_segment_alloc (f, offset_pos, length);
223  new_index = new_s - f->ooo_segments;
224 
225  /* Pool might've moved, get segment again */
226  s = pool_elt_at_index (f->ooo_segments, s_index);
 /* NOTE(review): the conditional guarding the block below (original
  * line 227) was dropped by the extraction; presumably it tests whether
  * s has a valid previous segment -- confirm against upstream. */
228  {
229  new_s->prev = s->prev;
230  prev = pool_elt_at_index (f->ooo_segments, new_s->prev);
231  prev->next = new_index;
232  }
233  else
234  {
235  /* New head */
236  f->ooos_list_head = new_index;
237  }
238 
 /* Link new segment just before s. */
239  new_s->next = s_index;
240  s->prev = new_index;
241  f->ooos_newest = new_index;
242  return;
243  }
244  /* No overlap, add after current segment */
245  else if (f_pos_gt (offset_pos, s_end_pos))
246  {
247  new_s = ooo_segment_alloc (f, offset_pos, length);
248  new_index = new_s - f->ooo_segments;
249 
250  /* Pool might've moved, get segment again */
251  s = pool_elt_at_index (f->ooo_segments, s_index);
252 
253  /* Needs to be last */
 /* NOTE(review): original line 254 (likely an ASSERT that s is the last
  * list element) was dropped by the extraction -- confirm upstream. */
255 
 /* Link new segment just after s, as the new list tail. */
256  new_s->prev = s_index;
257  s->next = new_index;
258  f->ooos_newest = new_index;
259 
260  return;
261  }
262 
263  /*
264  * Merge needed
265  */
266 
267  /* Merge at head */
268  if (f_pos_lt (offset_pos, s->start))
269  {
 /* Grow s backwards to cover the new data's start. */
270  s->start = offset_pos;
271  s->length = s_end_pos - s->start;
272  f->ooos_newest = s - f->ooo_segments;
273  }
274 
275 check_tail:
276 
277  /* Overlapping tail */
278  if (f_pos_gt (offset_end_pos, s_end_pos))
279  {
 /* Grow s forward to cover the new data's end. */
280  s->length = offset_end_pos - s->start;
281 
282  /* Remove the completely overlapped segments in the tail */
283  it = ooo_segment_next (f, s);
284  while (it && f_pos_leq (ooo_segment_end_pos (it), offset_end_pos))
285  {
286  next = ooo_segment_next (f, it);
287  ooo_segment_free (f, it - f->ooo_segments);
288  it = next;
289  }
290 
291  /* If partial overlap with last, merge */
292  if (it && f_pos_leq (it->start, offset_end_pos))
293  {
294  s->length = ooo_segment_end_pos (it) - s->start;
295  ooo_segment_free (f, it - f->ooo_segments);
296  }
297  f->ooos_newest = s - f->ooo_segments;
298  }
299 }
300 
301 /**
302  * Removes segments that can now be enqueued because the fifo's tail has
303  * advanced. Returns the number of bytes added to tail.
304  */
/* NOTE(review): doxygen rendering; original source line 335 (the
 * if-condition before the block at 336) was dropped by the extraction. */
305 static int
306 ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued, u32 * tail)
307 {
308  u32 s_index, bytes = 0;
309  ooo_segment_t *s;
310  i32 diff;
311 
 /* Start from the first (lowest position) ooo segment. */
312  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
 /* Signed distance from the first ooo segment's start to the new tail. */
313  diff = *tail - s->start;
314 
 /* NOTE(review): i32 vs u32 comparison here promotes diff to unsigned;
  * presumably intentional given position arithmetic -- confirm upstream. */
315  ASSERT (diff != n_bytes_enqueued);
316 
 /* First ooo segment still starts beyond the new tail: nothing to do. */
317  if (diff > n_bytes_enqueued)
318  return 0;
319 
320  /* If last tail update overlaps one/multiple ooo segments, remove them */
321  while (0 <= diff && diff < n_bytes_enqueued)
322  {
323  s_index = s - f->ooo_segments;
324 
325  /* Segment end is beyond the tail. Advance tail and remove segment */
326  if (s->length > diff)
327  {
 /* Only the part of s past the tail adds new in-order bytes. */
328  bytes = s->length - diff;
329  *tail = *tail + bytes;
330  ooo_segment_free (f, s_index);
331  break;
332  }
333 
334  /* If we have next go on */
 /* NOTE(review): the if-condition (original line 335) is missing from
  * this extraction; it presumably checks that s->next is valid before
  * advancing -- confirm against upstream. */
336  {
337  s = pool_elt_at_index (f->ooo_segments, s->next);
338  diff = *tail - s->start;
339  ooo_segment_free (f, s_index);
340  }
341  /* End of search */
342  else
343  {
344  ooo_segment_free (f, s_index);
345  break;
346  }
347  }
348 
 /* Can never collect more than the fifo can hold. */
349  ASSERT (bytes <= f->size);
350  return bytes;
351 }
352 
353 __clib_unused static ooo_segment_t *
355 {
356  ooo_segment_t *s;
357 
358  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
359  return 0;
360 
362  while (s->next != OOO_SEGMENT_INVALID_INDEX)
363  s = pool_elt_at_index (f->ooo_segments, s->next);
364  return s;
365 }
366 
367 void
369 {
370  svm_fifo_chunk_t *c, *prev;
371  u32 min_alloc;
372 
373  f->size = size;
374  f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;
375  f->segment_index = SVM_FIFO_INVALID_INDEX;
376  f->refcnt = 1;
377  f->head = f->tail = f->flags = 0;
378  f->head_chunk = f->tail_chunk = f->start_chunk;
379  f->ooo_deq = f->ooo_enq = 0;
380 
381  min_alloc = size > 32 << 10 ? size >> 3 : 4096;
382  min_alloc = clib_min (min_alloc, 64 << 10);
383  f->min_alloc = min_alloc;
384 
385  /*
386  * Initialize chunks
387  */
388  f->start_chunk->start_byte = 0;
389  prev = f->start_chunk;
390  c = prev->next;
391 
392  while (c)
393  {
394  c->start_byte = prev->start_byte + prev->length;
395  prev = c;
396  c = c->next;
397  }
398 }
399 
400 void
402 {
403  if (ooo_type == 0)
404  {
405  ASSERT (!rb_tree_is_init (&f->ooo_enq_lookup));
406  rb_tree_init (&f->ooo_enq_lookup);
407  }
408  else
409  {
410  ASSERT (!rb_tree_is_init (&f->ooo_deq_lookup));
411  rb_tree_init (&f->ooo_deq_lookup);
412  }
413 }
414 
415 /**
416  * Creates a fifo in the current heap. Fails vs blow up the process
417  */
418 svm_fifo_t *
419 svm_fifo_alloc (u32 data_size_in_bytes)
420 {
421  u32 rounded_data_size;
423  svm_fifo_t *f;
424 
426  if (f == 0)
427  return 0;
428 
429  clib_memset (f, 0, sizeof (*f));
430 
431  /* always round fifo data size to the next highest power-of-two */
432  rounded_data_size = (1 << (max_log2 (data_size_in_bytes)));
433  c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_data_size,
435  if (!c)
436  {
437  clib_mem_free (f);
438  return 0;
439  }
440 
441  clib_memset (c, 0, sizeof (*c));
442  c->start_byte = 0;
443  c->length = data_size_in_bytes;
446  f->start_chunk = f->end_chunk = c;
447 
448  return f;
449 }
450 
451 /**
452  * Creates a fifo chunk in the current heap
453  */
456 {
458  u32 rounded_size;
459 
460  /* round chunk size to the next highest power-of-two */
461  rounded_size = (1 << (max_log2 (size)));
462  c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_size,
464  if (c == 0)
465  return 0;
466 
467  clib_memset (c, 0, sizeof (*c));
468  c->length = rounded_size;
469  return c;
470 }
471 
472 /**
473  * Find chunk for given byte position
474  *
475  * @param f fifo
476  * @param pos normalized position in fifo
477  *
478  * @return chunk that includes given position or 0
479  */
480 static svm_fifo_chunk_t *
482 {
484 
485  c = f->start_chunk;
486  while (c && !f_chunk_includes_pos (c, pos))
487  c = c->next;
488 
489  return c;
490 }
491 
492 static svm_fifo_chunk_t *
494 {
496 
497  ASSERT (start != 0);
498 
499  c = start;
500  while (c && !f_chunk_includes_pos (c, pos))
501  c = c->next;
502 
503  return c;
504 }
505 
506 u32
508 {
509  u32 head, tail, end_chunk;
510 
511  f_load_head_tail_cons (f, &head, &tail);
512  ASSERT (!f->head_chunk || f_chunk_includes_pos (f->head_chunk, head));
513 
514  if (!f->head_chunk)
515  {
516  f->head_chunk = svm_fifo_find_chunk (f, head);
517  if (PREDICT_FALSE (!f->head_chunk))
518  return 0;
519  }
520 
521  end_chunk = f_chunk_end (f->head_chunk);
522 
523  return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head;
524 }
525 
526 u32
528 {
529  u32 head, tail;
530 
531  f_load_head_tail_prod (f, &head, &tail);
532  ASSERT (!f->tail_chunk || f_chunk_includes_pos (f->tail_chunk, tail));
533 
534  return f->tail_chunk ? f_chunk_end (f->tail_chunk) - tail : 0;
535 }
536 
537 static rb_node_t *
539 {
540  rb_node_t *cur, *prev;
541 
542  cur = rb_node (rt, rt->root);
543  if (PREDICT_FALSE (rb_node_is_tnil (rt, cur)))
544  return 0;
545 
546  while (pos != cur->key)
547  {
548  prev = cur;
549  if (f_pos_lt (pos, cur->key))
550  {
551  cur = rb_node_left (rt, cur);
552  if (rb_node_is_tnil (rt, cur))
553  {
554  cur = rb_tree_predecessor (rt, prev);
555  break;
556  }
557  }
558  else
559  {
560  cur = rb_node_right (rt, cur);
561  if (rb_node_is_tnil (rt, cur))
562  {
563  cur = prev;
564  break;
565  }
566  }
567  }
568 
569  if (rb_node_is_tnil (rt, cur))
570  return 0;
571 
572  return cur;
573 }
574 
575 static svm_fifo_chunk_t *
577 {
579  rb_node_t *n;
580 
581  if (!rb_tree_is_init (rt))
582  return 0;
583 
584  n = f_find_node_rbtree (rt, pos);
585  if (!n)
586  return 0;
588  if (f_chunk_includes_pos (c, pos))
589  return c;
590 
591  return 0;
592 }
593 
594 static void
595 f_update_ooo_enq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
596 {
597  rb_tree_t *rt = &f->ooo_enq_lookup;
599  rb_node_t *cur;
600 
601  /* Use linear search if rbtree is not initialized */
602  if (PREDICT_FALSE (!rb_tree_is_init (rt)))
603  {
604  f->ooo_enq = svm_fifo_find_next_chunk (f, f->tail_chunk, start_pos);
605  return;
606  }
607 
608  if (rt->root == RBTREE_TNIL_INDEX)
609  {
610  c = f->tail_chunk;
614  }
615  else
616  {
617  cur = f_find_node_rbtree (rt, start_pos);
619  ASSERT (f_pos_leq (c->start_byte, start_pos));
620  }
621 
622  if (f_chunk_includes_pos (c, start_pos))
623  f->ooo_enq = c;
624 
625  if (f_chunk_includes_pos (c, end_pos))
626  return;
627 
628  do
629  {
630  c = c->next;
631  if (!c || c->enq_rb_index != RBTREE_TNIL_INDEX)
632  break;
633 
636 
637  if (f_chunk_includes_pos (c, start_pos))
638  f->ooo_enq = c;
639  }
640  while (!f_chunk_includes_pos (c, end_pos));
641 }
642 
643 static void
644 f_update_ooo_deq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
645 {
646  rb_tree_t *rt = &f->ooo_deq_lookup;
647  rb_node_t *cur;
649 
650  /* Use linear search if rbtree is not initialized */
651  if (PREDICT_FALSE (!rb_tree_is_init (rt)))
652  {
653  f->ooo_deq = svm_fifo_find_chunk (f, start_pos);
654  return;
655  }
656 
657  if (rt->root == RBTREE_TNIL_INDEX)
658  {
659  c = f->start_chunk;
663  }
664  else
665  {
666  cur = f_find_node_rbtree (rt, start_pos);
668  ASSERT (f_pos_leq (c->start_byte, start_pos));
669  }
670 
671  if (f_chunk_includes_pos (c, start_pos))
672  f->ooo_deq = c;
673 
674  if (f_chunk_includes_pos (c, end_pos))
675  return;
676 
677  do
678  {
679  c = c->next;
680  if (!c || c->deq_rb_index != RBTREE_TNIL_INDEX)
681  break;
682 
685 
686  if (f_chunk_includes_pos (c, start_pos))
687  f->ooo_deq = c;
688  }
689  while (!f_chunk_includes_pos (c, end_pos));
690 }
691 
692 static svm_fifo_chunk_t *
694  u32 end_pos)
695 {
696  rb_tree_t *rt = &f->ooo_enq_lookup;
698  rb_node_t *n;
699 
700  c = start;
701  while (c && !f_chunk_includes_pos (c, end_pos))
702  {
704  {
705  n = rb_node (rt, c->enq_rb_index);
706  rb_tree_del_node (rt, n);
708  }
709 
710  c = c->next;
711  }
712 
713  /* No ooo segments left, so make sure the current chunk
714  * is not tracked in the enq rbtree */
715  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX
716  && c && c->enq_rb_index != RBTREE_TNIL_INDEX)
717  {
718  n = rb_node (rt, c->enq_rb_index);
719  rb_tree_del_node (rt, n);
721  }
722 
723  return c;
724 }
725 
726 static svm_fifo_chunk_t *
728  u32 end_pos)
729 {
730  rb_tree_t *rt = &f->ooo_deq_lookup;
732  rb_node_t *n;
733 
734  c = start;
735  while (c && !f_chunk_includes_pos (c, end_pos))
736  {
738  {
739  n = rb_node (rt, c->deq_rb_index);
740  rb_tree_del_node (rt, n);
742  }
743 
744  c = c->next;
745  }
746 
747  return c;
748 }
749 
750 void
752 {
753  rb_tree_free_nodes (&f->ooo_enq_lookup);
754  rb_tree_free_nodes (&f->ooo_deq_lookup);
755 }
756 
757 void
759 {
760  ASSERT (f->refcnt > 0);
761 
762  if (--f->refcnt == 0)
763  {
764  /* ooo data is not allocated on segment heap */
766  clib_mem_free (f);
767  }
768 }
769 
770 void
772 {
773  u32 n_chunk;
774  u32 head, tail, head_idx;
776 
777  ASSERT (len <= f->size);
778 
779  f_load_head_tail_cons (f, &head, &tail);
780 
781  if (!f->head_chunk)
782  f->head_chunk = svm_fifo_find_chunk (f, head);
783 
784  c = f->head_chunk;
785  head_idx = head - c->start_byte;
786  n_chunk = c->length - head_idx;
787  if (len <= n_chunk)
788  clib_memcpy_fast (&c->data[head_idx], src, len);
789  else
790  {
791  ASSERT (len - n_chunk <= c->next->length);
792  clib_memcpy_fast (&c->data[head_idx], src, n_chunk);
793  clib_memcpy_fast (&c->next->data[0], src + n_chunk, len - n_chunk);
794  }
795 }
796 
797 static int
799 {
800  svm_fifo_chunk_t *c, *cur, *prev;
801  u32 alloc_size, free_alloced;
802 
803  free_alloced = f_chunk_end (f->end_chunk) - tail;
804 
805  alloc_size = clib_min (f->min_alloc, f->size - (tail - head));
806  alloc_size = clib_max (alloc_size, len - free_alloced);
807 
808  c = fsh_alloc_chunk (f->fs_hdr, f->slice_index, alloc_size);
809  if (PREDICT_FALSE (!c))
810  return -1;
811 
812  cur = c;
813  prev = f->end_chunk;
814 
815  while (cur)
816  {
817  cur->start_byte = prev->start_byte + prev->length;
820 
821  prev = cur;
822  cur = cur->next;
823  }
824 
825  prev->next = 0;
826  f->end_chunk->next = c;
827  f->end_chunk = prev;
828 
829  if (!f->tail_chunk)
830  f->tail_chunk = c;
831 
832  return 0;
833 }
834 
835 int
837 {
838  u32 tail, head, free_count;
839  svm_fifo_chunk_t *old_tail_c;
840 
841  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
842 
843  f_load_head_tail_prod (f, &head, &tail);
844 
845  /* free space in fifo can only increase during enqueue: SPSC */
846  free_count = f_free_count (f, head, tail);
847 
848  if (PREDICT_FALSE (free_count == 0))
849  return SVM_FIFO_EFULL;
850 
851  /* number of bytes we're going to copy */
852  len = clib_min (free_count, len);
853 
854  if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
855  {
856  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
857  {
858  len = f_chunk_end (f->end_chunk) - tail;
859  if (!len)
860  return SVM_FIFO_EGROW;
861  }
862  }
863 
864  old_tail_c = f->tail_chunk;
865 
866  svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, src, len, &f->tail_chunk);
867  tail = tail + len;
868 
869  svm_fifo_trace_add (f, head, len, 2);
870 
871  /* collect out-of-order segments */
872  if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
873  {
874  len += ooo_segment_try_collect (f, len, &tail);
875  /* Tail chunk might've changed even if nothing was collected */
876  f->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail);
877  f->ooo_enq = 0;
878  }
879 
880  /* store-rel: producer owned index (paired with load-acq in consumer) */
881  clib_atomic_store_rel_n (&f->tail, tail);
882 
883  return len;
884 }
885 
886 /**
887  * Enqueue a future segment.
888  *
889  * Two choices: either copies the entire segment, or copies nothing
890  * Returns 0 of the entire segment was copied
891  * Returns -1 if none of the segment was copied due to lack of space
892  */
893 int
895 {
896  u32 tail, head, free_count, enq_pos;
897 
898  f_load_head_tail_prod (f, &head, &tail);
899 
900  /* free space in fifo can only increase during enqueue: SPSC */
901  free_count = f_free_count (f, head, tail);
902  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
903 
904  /* will this request fit? */
905  if ((len + offset) > free_count)
906  return SVM_FIFO_EFULL;
907 
908  enq_pos = tail + offset;
909 
910  if (f_pos_gt (enq_pos + len, f_chunk_end (f->end_chunk)))
911  {
912  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, offset + len)))
913  return SVM_FIFO_EGROW;
914  }
915 
916  svm_fifo_trace_add (f, offset, len, 1);
917  ooo_segment_add (f, offset, head, tail, len);
918 
919  if (!f->ooo_enq || !f_chunk_includes_pos (f->ooo_enq, enq_pos))
920  f_update_ooo_enq (f, enq_pos, enq_pos + len);
921 
922  svm_fifo_copy_to_chunk (f, f->ooo_enq, enq_pos, src, len, &f->ooo_enq);
923 
924  return 0;
925 }
926 
927 /**
928  * Advance tail
929  */
930 void
932 {
933  u32 tail;
934 
935  ASSERT (len <= svm_fifo_max_enqueue_prod (f));
936  /* load-relaxed: producer owned index */
937  tail = f->tail;
938  tail = tail + len;
939 
940  if (rb_tree_is_init (&f->ooo_enq_lookup))
941  {
942  f->tail_chunk = f_lookup_clear_enq_chunks (f, f->tail_chunk, tail);
943  f->ooo_enq = 0;
944  }
945  else
946  {
947  f->tail_chunk = svm_fifo_find_next_chunk (f, f->tail_chunk, tail);
948  }
949 
950  /* store-rel: producer owned index (paired with load-acq in consumer) */
951  clib_atomic_store_rel_n (&f->tail, tail);
952 }
953 
955 f_unlink_chunks (svm_fifo_t * f, u32 end_pos, u8 maybe_ooo)
956 {
957  svm_fifo_chunk_t *start, *prev = 0, *c;
958  rb_tree_t *rt;
959  rb_node_t *n;
960 
961  ASSERT (!f_chunk_includes_pos (f->start_chunk, end_pos));
962 
963  if (maybe_ooo)
964  rt = &f->ooo_deq_lookup;
965 
966  c = f->start_chunk;
967 
968  do
969  {
970  if (maybe_ooo && c->deq_rb_index != RBTREE_TNIL_INDEX)
971  {
972  n = rb_node (rt, c->deq_rb_index);
973  ASSERT (n == f_find_node_rbtree (rt, c->start_byte));
974  rb_tree_del_node (rt, n);
975  c->deq_rb_index = RBTREE_TNIL_INDEX;
976  }
977  if (!c->next)
978  break;
979  prev = c;
980  c = c->next;
981  }
982  while (!f_chunk_includes_pos (c, end_pos));
983 
984  if (maybe_ooo)
985  {
986  if (f->ooo_deq && f_pos_lt (f->ooo_deq->start_byte, f_chunk_end (c)))
987  f->ooo_deq = 0;
988  }
989  else
990  {
991  if (PREDICT_FALSE (f->ooo_deq != 0))
992  f->ooo_deq = 0;
993  }
994 
995  /* Avoid unlinking the last chunk */
996  if (!prev)
997  return 0;
998 
999  prev->next = 0;
1000  start = f->start_chunk;
1001  f->start_chunk = c;
1002 
1003  return start;
1004 }
1005 
1006 int
1008 {
1009  u32 tail, head, cursize;
1010 
1011  f_load_head_tail_cons (f, &head, &tail);
1012 
1013  /* current size of fifo can only increase during dequeue: SPSC */
1014  cursize = f_cursize (f, head, tail);
1015 
1016  if (PREDICT_FALSE (cursize == 0))
1017  return SVM_FIFO_EEMPTY;
1018 
1019  len = clib_min (cursize, len);
1020 
1021  if (!f->head_chunk)
1022  f->head_chunk = svm_fifo_find_chunk (f, head);
1023 
1024  svm_fifo_copy_from_chunk (f, f->head_chunk, head, dst, len, &f->head_chunk);
1025  head = head + len;
1026 
1027  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
1028  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1029  f_unlink_chunks (f, head, 0));
1030 
1031  /* store-rel: consumer owned index (paired with load-acq in producer) */
1032  clib_atomic_store_rel_n (&f->head, head);
1033 
1034  return len;
1035 }
1036 
1037 int
1039 {
1040  u32 tail, head, cursize, head_idx;
1041 
1042  f_load_head_tail_cons (f, &head, &tail);
1043 
1044  /* current size of fifo can only increase during peek: SPSC */
1045  cursize = f_cursize (f, head, tail);
1046 
1047  if (PREDICT_FALSE (cursize < offset))
1048  return SVM_FIFO_EEMPTY;
1049 
1050  len = clib_min (cursize - offset, len);
1051  head_idx = head + offset;
1052 
1053  if (!f->ooo_deq || !f_chunk_includes_pos (f->ooo_deq, head_idx))
1054  f_update_ooo_deq (f, head_idx, head_idx + len);
1055 
1056  svm_fifo_copy_from_chunk (f, f->ooo_deq, head_idx, dst, len, &f->ooo_deq);
1057  return len;
1058 }
1059 
1060 int
1062 {
1063  u32 total_drop_bytes, tail, head, cursize;
1064 
1065  f_load_head_tail_cons (f, &head, &tail);
1066 
1067  /* number of bytes available */
1068  cursize = f_cursize (f, head, tail);
1069  if (PREDICT_FALSE (cursize == 0))
1070  return SVM_FIFO_EEMPTY;
1071 
1072  /* number of bytes we're going to drop */
1073  total_drop_bytes = clib_min (cursize, len);
1074 
1075  svm_fifo_trace_add (f, tail, total_drop_bytes, 3);
1076 
1077  /* move head */
1078  head = head + total_drop_bytes;
1079 
1080  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
1081  {
1082  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1083  f_unlink_chunks (f, head, 1));
1084  f->head_chunk =
1085  f_chunk_includes_pos (f->start_chunk, head) ? f->start_chunk : 0;
1086  }
1087 
1088  /* store-rel: consumer owned index (paired with load-acq in producer) */
1089  clib_atomic_store_rel_n (&f->head, head);
1090 
1091  return total_drop_bytes;
1092 }
1093 
1094 /**
1095  * Drop all data from fifo
1096  *
1097  */
1098 void
1100 {
1101  u32 head, tail;
1102 
1103  f_load_head_tail_all_acq (f, &head, &tail);
1104 
1105  if (!f->head_chunk || !f_chunk_includes_pos (f->head_chunk, head))
1106  f->head_chunk = svm_fifo_find_chunk (f, head);
1107 
1108  f->head_chunk = f_lookup_clear_deq_chunks (f, f->head_chunk, tail);
1109 
1110  if (f_pos_geq (tail, f_chunk_end (f->start_chunk)))
1111  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1112  f_unlink_chunks (f, tail, 0));
1113 
1114  /* store-rel: consumer owned index (paired with load-acq in producer) */
1115  clib_atomic_store_rel_n (&f->head, tail);
1116 }
1117 
1118 int
1120 {
1121  u32 head, tail;
1122 
1123  f_load_head_tail_prod (f, &head, &tail);
1124 
1125  if (f_chunk_end (f->end_chunk) - head >= f->size)
1126  return 0;
1127 
1128  if (f_try_chunk_alloc (f, head, tail, f->size - (tail - head)))
1129  return SVM_FIFO_EGROW;
1130 
1131  return 0;
1132 }
1133 
1134 int
1136 {
1137  u32 cursize, head, tail, head_idx;
1138 
1139  f_load_head_tail_cons (f, &head, &tail);
1140 
1141  /* consumer function, cursize can only increase while we're working */
1142  cursize = f_cursize (f, head, tail);
1143 
1144  if (PREDICT_FALSE (cursize == 0))
1145  return SVM_FIFO_EEMPTY;
1146 
1147  head_idx = head;
1148 
1149  if (tail < head)
1150  {
1151  fs[0].len = f->size - head_idx;
1152  fs[0].data = f->head_chunk->data + head_idx;
1153  fs[1].len = cursize - fs[0].len;
1154  fs[1].data = f->head_chunk->data;
1155  }
1156  else
1157  {
1158  fs[0].len = cursize;
1159  fs[0].data = f->head_chunk->data + head_idx;
1160  fs[1].len = 0;
1161  fs[1].data = 0;
1162  }
1163  return cursize;
1164 }
1165 
1166 void
1168 {
1169  u32 head;
1170 
1171  /* consumer owned index */
1172  head = f->head;
1173 
1174  ASSERT (fs[0].data == f->head_chunk->data + head);
1175  head = (head + fs[0].len + fs[1].len);
1176  /* store-rel: consumer owned index (paired with load-acq in producer) */
1177  clib_atomic_store_rel_n (&f->head, head);
1178 }
1179 
1180 /**
1181  * Clones fifo
1182  *
1183  * Assumptions:
1184  * - no prod and cons are accessing either dest or src fifo
1185  * - fifo is not multi chunk
1186  */
1187 void
1189 {
1190  u32 head, tail;
1191 
1192  /* Support only single chunk clones for now */
1193  ASSERT (svm_fifo_n_chunks (sf) == 1);
1194 
1195  clib_memcpy_fast (df->head_chunk->data, sf->head_chunk->data, sf->size);
1196 
1197  f_load_head_tail_all_acq (sf, &head, &tail);
1198  clib_atomic_store_rel_n (&df->head, head);
1199  clib_atomic_store_rel_n (&df->tail, tail);
1200 }
1201 
1202 u32
1204 {
1205  return pool_elts (f->ooo_segments);
1206 }
1207 
1208 ooo_segment_t *
1210 {
1211  return pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
1212 }
1213 
1214 /**
1215  * Set fifo pointers to requested offset
1216  */
1217 void
1219 {
1221 
1222  clib_atomic_store_rel_n (&f->head, head);
1223  clib_atomic_store_rel_n (&f->tail, tail);
1224 
1225  c = svm_fifo_find_chunk (f, head);
1226  ASSERT (c != 0);
1227  f->head_chunk = f->ooo_deq = c;
1228  c = svm_fifo_find_chunk (f, tail);
1229  ASSERT (c != 0);
1230  f->tail_chunk = f->ooo_enq = c;
1231 }
1232 
1233 void
1235 {
1236  if (f->n_subscribers >= SVM_FIFO_MAX_EVT_SUBSCRIBERS)
1237  return;
1238  f->subscribers[f->n_subscribers++] = subscriber;
1239 }
1240 
1241 void
1243 {
1244  int i;
1245 
1246  for (i = 0; i < f->n_subscribers; i++)
1247  {
1248  if (f->subscribers[i] != subscriber)
1249  continue;
1250  f->subscribers[i] = f->subscribers[f->n_subscribers - 1];
1251  f->n_subscribers--;
1252  break;
1253  }
1254 }
1255 
1256 u8
1258 {
1259  svm_fifo_chunk_t *tmp;
1260 
1261  if (f->head_chunk && !f_chunk_includes_pos (f->head_chunk, f->head))
1262  return 0;
1263  if (f->tail_chunk && !f_chunk_includes_pos (f->tail_chunk, f->tail))
1264  return 0;
1265  if (f->ooo_deq)
1266  {
1267  if (rb_tree_is_init (&f->ooo_deq_lookup))
1268  {
1269  if (f_pos_lt (f->ooo_deq->start_byte, f->start_chunk->start_byte)
1270  || f_pos_gt (f->ooo_deq->start_byte,
1271  f_chunk_end (f->end_chunk)))
1272  return 0;
1273 
1274  tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup,
1275  f->ooo_deq->start_byte);
1276  }
1277  else
1278  tmp = svm_fifo_find_chunk (f, f->ooo_deq->start_byte);
1279  if (tmp != f->ooo_deq)
1280  return 0;
1281  }
1282  if (f->ooo_enq)
1283  {
1284  if (rb_tree_is_init (&f->ooo_enq_lookup))
1285  {
1286  if (f_pos_lt (f->ooo_enq->start_byte, f->start_chunk->start_byte)
1287  || f_pos_gt (f->ooo_enq->start_byte,
1288  f_chunk_end (f->end_chunk)))
1289  return 0;
1290 
1291  tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup,
1292  f->ooo_enq->start_byte);
1293  }
1294  else
1295  {
1296  tmp = svm_fifo_find_next_chunk (f, f->tail_chunk,
1297  f->ooo_enq->start_byte);
1298  }
1299  if (tmp != f->ooo_enq)
1300  return 0;
1301  }
1302 
1303  if (f->start_chunk->next)
1304  {
1305  svm_fifo_chunk_t *c, *prev = 0, *tmp;
1306  u32 chunks_bytes = 0;
1307 
1308  c = f->start_chunk;
1309  do
1310  {
1311  tmp = svm_fifo_find_chunk (f, c->start_byte);
1312  if (tmp != c)
1313  return 0;
1314  if (prev && (prev->start_byte + prev->length != c->start_byte))
1315  return 0;
1316 
1317  if (c->enq_rb_index != RBTREE_TNIL_INDEX)
1318  {
1319  tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup, c->start_byte);
1320  if (tmp)
1321  {
1322  if (tmp != c)
1323  return 0;
1324  }
1325  }
1326  if (c->deq_rb_index != RBTREE_TNIL_INDEX)
1327  {
1328  tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup, c->start_byte);
1329  if (tmp)
1330  {
1331  if (tmp != c)
1332  return 0;
1333  }
1334  }
1335 
1336  chunks_bytes += c->length;
1337  prev = c;
1338  c = c->next;
1339  }
1340  while (c);
1341 
1342  if (chunks_bytes < f->tail - f->head)
1343  return 0;
1344  }
1345 
1346  return 1;
1347 }
1348 
1349 u32
1351 {
1353  int n_chunks = 0;
1354 
1355  c = f->start_chunk;
1356  while (c)
1357  {
1358  n_chunks++;
1359  c = c->next;
1360  }
1361 
1362  return n_chunks;
1363 }
1364 
1365 u8 *
1366 format_ooo_segment (u8 * s, va_list * args)
1367 {
1368  svm_fifo_t __clib_unused *f = va_arg (*args, svm_fifo_t *);
1369  ooo_segment_t *seg = va_arg (*args, ooo_segment_t *);
1370  s = format (s, "[%u, %u], len %u, next %d, prev %d", seg->start,
1371  seg->start + seg->length, seg->length, seg->next, seg->prev);
1372  return s;
1373 }
1374 
1375 u8 *
1377 {
1378 #if SVM_FIFO_TRACE
1379  svm_fifo_trace_elem_t *seg = 0;
1380  int i = 0;
1381 
1382  if (f->trace)
1383  {
1384  vec_foreach (seg, f->trace)
1385  {
1386  s = format (s, "{%u, %u, %u}, ", seg->offset, seg->len, seg->action);
1387  i++;
1388  if (i % 5 == 0)
1389  s = format (s, "\n");
1390  }
1391  s = format (s, "\n");
1392  }
1393  return s;
1394 #else
1395  return 0;
1396 #endif
1397 }
1398 
1399 u8 *
1400 svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose)
1401 {
1402  int i, trace_len;
1403  u8 *data = 0;
1405  u32 offset;
1406  svm_fifo_t *dummy_fifo;
1407 
1408  if (!f)
1409  return s;
1410 
1411 #if SVM_FIFO_TRACE
1412  trace = f->trace;
1413  trace_len = vec_len (trace);
1414 #else
1415  trace = 0;
1416  trace_len = 0;
1417 #endif
1418 
1419  dummy_fifo = svm_fifo_alloc (f->size);
1420  svm_fifo_init (f, f->size);
1421  clib_memset (f->head_chunk->data, 0xFF, f->size);
1422  vec_validate (data, f->size);
1423  for (i = 0; i < vec_len (data); i++)
1424  data[i] = i;
1425 
1426  for (i = 0; i < trace_len; i++)
1427  {
1428  offset = trace[i].offset;
1429  if (trace[i].action == 1)
1430  {
1431  if (verbose)
1432  s = format (s, "adding [%u, %u]:", trace[i].offset,
1433  (trace[i].offset + trace[i].len));
1434  svm_fifo_enqueue_with_offset (dummy_fifo, trace[i].offset,
1435  trace[i].len, &data[offset]);
1436  }
1437  else if (trace[i].action == 2)
1438  {
1439  if (verbose)
1440  s = format (s, "adding [%u, %u]:", 0, trace[i].len);
1441  svm_fifo_enqueue (dummy_fifo, trace[i].len, &data[offset]);
1442  }
1443  else if (!no_read)
1444  {
1445  if (verbose)
1446  s = format (s, "read: %u", trace[i].len);
1447  svm_fifo_dequeue_drop (dummy_fifo, trace[i].len);
1448  }
1449  if (verbose)
1450  s = format (s, "%U", format_svm_fifo, dummy_fifo, 1);
1451  }
1452 
1453  s = format (s, "result: %U", format_svm_fifo, dummy_fifo, 1);
1454 
1455  return s;
1456 }
1457 
1458 u8 *
1459 format_ooo_list (u8 * s, va_list * args)
1460 {
1461  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
1462  u32 indent = va_arg (*args, u32);
1463  u32 ooo_segment_index = f->ooos_list_head;
1464  ooo_segment_t *seg;
1465 
1466  while (ooo_segment_index != OOO_SEGMENT_INVALID_INDEX)
1467  {
1468  seg = pool_elt_at_index (f->ooo_segments, ooo_segment_index);
1469  s = format (s, "%U%U\n", format_white_space, indent, format_ooo_segment,
1470  f, seg);
1471  ooo_segment_index = seg->next;
1472  }
1473 
1474  return s;
1475 }
1476 
/**
 * Format fifo state: usage counters, head/tail, and -- with increasing
 * verbosity -- session bindings and the out-of-order segment list.
 */
1477 u8 *
1478 format_svm_fifo (u8 * s, va_list * args)
1479 {
1480  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
1481  int verbose = va_arg (*args, int);
1482  u32 indent;
1483 
 /* Nothing to append to; bail out early. */
1484  if (!s)
1485  return s;
1486 
1487  indent = format_get_indent (s);
1488  s = format (s, "cursize %u nitems %u has_event %d min_alloc %u\n",
1489  svm_fifo_max_dequeue (f), f->size, f->has_event, f->min_alloc);
1490  s = format (s, "%Uhead %u tail %u segment manager %u\n", format_white_space,
1491  indent, f->head, f->tail, f->segment_manager);
1492 
 /* verbose > 1: also show vpp/app session bindings. */
1493  if (verbose > 1)
1494  s = format (s, "%Uvpp session %d thread %d app session %d thread %d\n",
1495  format_white_space, indent, f->master_session_index,
1496  f->master_thread_index, f->client_session_index,
1497  f->client_thread_index);
1498 
 /* verbose: ooo pool summary, plus the segment list itself if non-empty.
  * NOTE(review): `verbose` is passed to format_ooo_list, which consumes
  * only (fifo, indent); the extra vararg appears to be ignored -- confirm
  * against upstream. */
1499  if (verbose)
1500  {
1501  s = format (s, "%Uooo pool %d active elts newest %u\n",
1502  format_white_space, indent, pool_elts (f->ooo_segments),
1503  f->ooos_newest);
1504  if (svm_fifo_has_ooo_data (f))
1505  s = format (s, " %U", format_ooo_list, f, indent, verbose);
1506  }
1507  return s;
1508 }
1509 
1510 #endif
1511 /*
1512  * fd.io coding-style-patch-verification: ON
1513  *
1514  * Local Variables:
1515  * eval: (c-set-style "gnu")
1516  * End:
1517  */
u32 length
length of chunk in bytes
Definition: fifo_types.h:32
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:507
static void svm_fifo_copy_to_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:89
rb_node_t * rb_tree_predecessor(rb_tree_t *rt, rb_node_t *x)
Definition: rbtree.c:287
static void f_load_head_tail_all_acq(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail independent of producer/consumer role.
Definition: svm_fifo.h:107
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
Definition: vlib_api_cli.c:899
#define clib_min(x, y)
Definition: clib.h:319
static int ooo_segment_try_collect(svm_fifo_t *f, u32 n_bytes_enqueued, u32 *tail)
Removes segments that can now be enqueued because the fifo's tail has advanced.
Definition: svm_fifo.c:306
int svm_fifo_segments(svm_fifo_t *f, svm_fifo_seg_t *fs)
Definition: svm_fifo.c:1135
static u32 svm_fifo_max_enqueue_prod(svm_fifo_t *f)
Maximum number of bytes that can be enqueued into fifo.
Definition: svm_fifo.h:522
static ooo_segment_t * ooo_segment_next(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.c:125
static rb_node_t * rb_node_left(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:94
void svm_fifo_free_chunk_lookup(svm_fifo_t *f)
Cleanup fifo chunk lookup rb tree.
Definition: svm_fifo.c:751
static u8 svm_fifo_has_ooo_data(svm_fifo_t *f)
Check if fifo has out-of-order data.
Definition: svm_fifo.h:628
void svm_fifo_init_pointers(svm_fifo_t *f, u32 head, u32 tail)
Set fifo pointers to requested offset.
Definition: svm_fifo.c:1218
static u32 f_free_count(svm_fifo_t *f, u32 head, u32 tail)
Fifo free bytes, i.e., number of free bytes.
Definition: svm_fifo.h:132
void svm_fifo_segments_free(svm_fifo_t *f, svm_fifo_seg_t *fs)
Definition: svm_fifo.c:1167
void svm_fifo_free(svm_fifo_t *f)
Free fifo and associated state.
Definition: svm_fifo.c:758
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
u32 prev
Previous linked-list element pool index.
Definition: fifo_types.h:42
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static void f_load_head_tail_cons(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for consumer.
Definition: svm_fifo.h:80
static void svm_fifo_copy_from_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 head_idx, u8 *dst, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:97
#define CLIB_MARCH_FN_SELECT(fn)
Definition: cpu.h:409
svm_fifo_chunk_t * fsh_alloc_chunk(fifo_segment_header_t *fsh, u32 slice_index, u32 chunk_size)
Allocate chunks in fifo segment.
Definition: fifo_segment.c:695
vl_api_address_t src
Definition: gre.api:54
static heap_elt_t * last(heap_header_t *h)
Definition: heap.c:53
int svm_fifo_peek(svm_fifo_t *f, u32 offset, u32 len, u8 *dst)
Peek data from fifo.
Definition: svm_fifo.c:1038
static svm_fifo_chunk_t * f_lookup_clear_deq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
Definition: svm_fifo.c:727
void svm_fifo_enqueue_nocopy(svm_fifo_t *f, u32 len)
Advance tail.
Definition: svm_fifo.c:931
void svm_fifo_init(svm_fifo_t *f, u32 size)
Initialize fifo.
Definition: svm_fifo.c:368
static rb_node_t * rb_node(rb_tree_t *rt, rb_node_index_t ri)
Definition: rbtree.h:82
static u32 format_get_indent(u8 *s)
Definition: format.h:72
#define RBTREE_TNIL_INDEX
Definition: rbtree.h:22
ooo_segment_t * svm_fifo_first_ooo_segment(svm_fifo_t *f)
First out-of-order segment for fifo.
Definition: svm_fifo.c:1209
void svm_fifo_dequeue_drop_all(svm_fifo_t *f)
Drop all data from fifo.
Definition: svm_fifo.c:1099
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
static rb_node_t * rb_node_right(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:88
void svm_fifo_overwrite_head(svm_fifo_t *f, u8 *src, u32 len)
Overwrite fifo head with new data.
Definition: svm_fifo.c:771
#define SVM_FIFO_INVALID_INDEX
Definition: svm_fifo.h:30
void rb_tree_free_nodes(rb_tree_t *rt)
Definition: rbtree.c:476
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:252
unsigned char u8
Definition: types.h:56
int rb_tree_is_init(rb_tree_t *rt)
Definition: rbtree.c:496
static ooo_segment_t * ooo_segment_alloc(svm_fifo_t *f, u32 start, u32 length)
Definition: svm_fifo.c:133
static svm_fifo_chunk_t * svm_fifo_find_chunk(svm_fifo_t *f, u32 pos)
Find chunk for given byte position.
Definition: svm_fifo.c:481
void svm_fifo_clone(svm_fifo_t *df, svm_fifo_t *sf)
Clones fifo.
Definition: svm_fifo.c:1188
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
Fifo max bytes to dequeue.
Definition: svm_fifo.h:431
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
u32 svm_fifo_n_chunks(svm_fifo_t *f)
Number of chunks linked into the fifo.
Definition: svm_fifo.c:1350
svm_fifo_chunk_t * svm_fifo_chunk_alloc(u32 size)
Creates a fifo chunk in the current heap.
Definition: svm_fifo.c:455
u8 * format_ooo_list(u8 *s, va_list *args)
Definition: svm_fifo.c:1459
void svm_fifo_free_ooo_data(svm_fifo_t *f)
Cleanup fifo ooo data.
Definition: svm_fifo.c:111
static void ooo_segment_free(svm_fifo_t *f, u32 index)
Definition: svm_fifo.c:147
unsigned int u32
Definition: types.h:88
int svm_fifo_dequeue(svm_fifo_t *f, u32 len, u8 *dst)
Dequeue data from fifo.
Definition: svm_fifo.c:1007
u32 key
node key
Definition: rbtree.h:38
static svm_fifo_chunk_t * f_unlink_chunks(svm_fifo_t *f, u32 end_pos, u8 maybe_ooo)
Definition: svm_fifo.c:955
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:534
static int f_pos_gt(u32 a, u32 b)
Definition: svm_fifo.h:156
uword opaque
value stored by node
Definition: rbtree.h:39
static ooo_segment_t * ooo_segment_prev(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.c:117
struct svm_fifo_chunk_ * next
pointer to next chunk in linked-lists
Definition: fifo_types.h:33
void fsh_collect_chunks(fifo_segment_header_t *fsh, u32 slice_index, svm_fifo_chunk_t *c)
Return chunks to fifo segment.
Definition: fifo_segment.c:804
u64 size
Definition: vhost_user.h:150
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:302
int svm_fifo_enqueue(svm_fifo_t *f, u32 len, const u8 *src)
Enqueue data to fifo.
Definition: svm_fifo.c:836
#define SVM_FIFO_MAX_EVT_SUBSCRIBERS
Definition: fifo_types.h:25
#define PREDICT_FALSE(x)
Definition: clib.h:118
void rb_tree_init(rb_tree_t *rt)
Definition: rbtree.c:483
#define always_inline
Definition: ipsec.h:28
static u32 f_chunk_end(svm_fifo_chunk_t *c)
Definition: svm_fifo.h:138
#define svm_fifo_trace_add(_f, _s, _l, _t)
Definition: svm_fifo.h:68
vl_api_address_t dst
Definition: gre.api:55
u8 * svm_fifo_replay(u8 *s, svm_fifo_t *f, u8 no_read, u8 verbose)
Definition: svm_fifo.c:1400
CLIB_MARCH_FN(svm_fifo_copy_to_chunk, void, svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:24
u8 len
Definition: ip_types.api:92
void svm_fifo_add_subscriber(svm_fifo_t *f, u8 subscriber)
Add io events subscriber to list.
Definition: svm_fifo.c:1234
#define pool_free(p)
Free a pool.
Definition: pool.h:427
svmdb_client_t * c
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
static u32 f_cursize(svm_fifo_t *f, u32 head, u32 tail)
Fifo current size, i.e., number of bytes enqueued.
Definition: svm_fifo.h:121
static void f_load_head_tail_prod(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for producer.
Definition: svm_fifo.h:93
u32 start_byte
chunk start byte
Definition: fifo_types.h:31
u8 svm_fifo_is_sane(svm_fifo_t *f)
Check if fifo is sane.
Definition: svm_fifo.c:1257
static svm_fifo_chunk_t * svm_fifo_find_next_chunk(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 pos)
Definition: svm_fifo.c:493
#define OOO_SEGMENT_INVALID_INDEX
Definition: svm_fifo.h:28
u8 * format_ooo_segment(u8 *s, va_list *args)
Definition: svm_fifo.c:1366
static svm_fifo_chunk_t * f_lookup_clear_enq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
Definition: svm_fifo.c:693
u32 svm_fifo_n_ooo_segments(svm_fifo_t *f)
Number of out-of-order segments for fifo.
Definition: svm_fifo.c:1203
static void * clib_mem_alloc_aligned_or_null(uword size, uword align)
Definition: mem.h:181
signed int i32
Definition: types.h:77
#define uword_to_pointer(u, type)
Definition: types.h:136
u8 * format_svm_fifo(u8 *s, va_list *args)
Definition: svm_fifo.c:1478
#define ASSERT(truth)
static u8 f_chunk_includes_pos(svm_fifo_chunk_t *c, u32 pos)
Definition: svm_fifo.h:168
u8 data[128]
Definition: ipsec_types.api:89
static int f_pos_geq(u32 a, u32 b)
Definition: svm_fifo.h:162
rb_node_index_t deq_rb_index
deq node index if chunk in rbtree
Definition: fifo_types.h:35
static void clib_mem_free(void *p)
Definition: mem.h:215
u8 data[0]
start of chunk data
Definition: fifo_types.h:36
void svm_fifo_del_subscriber(svm_fifo_t *f, u8 subscriber)
Remove io events subscriber form list.
Definition: svm_fifo.c:1242
int svm_fifo_enqueue_with_offset(svm_fifo_t *f, u32 offset, u32 len, u8 *src)
Enqueue a future segment.
Definition: svm_fifo.c:894
static u32 ooo_segment_end_pos(ooo_segment_t *s)
Definition: svm_fifo.c:105
static void f_update_ooo_deq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
Definition: svm_fifo.c:644
static uword pointer_to_uword(const void *p)
Definition: types.h:131
#define clib_atomic_store_rel_n(a, b)
Definition: atomics.h:49
#define clib_max(x, y)
Definition: clib.h:312
u32 length
Length of segment.
Definition: fifo_types.h:44
u8 * svm_fifo_dump_trace(u8 *s, svm_fifo_t *f)
Definition: svm_fifo.c:1376
u32 next
Next linked-list element pool index.
Definition: fifo_types.h:41
u32 svm_fifo_max_read_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be read.
Definition: svm_fifo.c:507
static int f_pos_leq(u32 a, u32 b)
Definition: svm_fifo.h:150
template key/value backing page structure
Definition: bihash_doc.h:44
int svm_fifo_fill_chunk_list(svm_fifo_t *f)
Ensure the whole fifo size is writeable.
Definition: svm_fifo.c:1119
static u8 rb_node_is_tnil(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:76
vl_api_mac_event_action_t action
Definition: l2.api:181
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static uword max_log2(uword x)
Definition: clib.h:206
static rb_node_t * f_find_node_rbtree(rb_tree_t *rt, u32 pos)
Definition: svm_fifo.c:538
static svm_fifo_chunk_t * f_find_chunk_rbtree(rb_tree_t *rt, u32 pos)
Definition: svm_fifo.c:576
rb_node_index_t enq_rb_index
enq node index if chunk in rbtree
Definition: fifo_types.h:34
struct clib_bihash_value offset
template key/value backing page structure
static int f_try_chunk_alloc(svm_fifo_t *f, u32 head, u32 tail, u32 len)
Definition: svm_fifo.c:798
static __clib_unused ooo_segment_t * ooo_segment_last(svm_fifo_t *f)
Definition: svm_fifo.c:354
#define vec_foreach(var, vec)
Vector iterator.
save_rewrite_length must be aligned so that reass doesn't overwrite it
Definition: buffer.h:401
u32 svm_fifo_max_write_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be written.
Definition: svm_fifo.c:527
void rb_tree_del_node(rb_tree_t *rt, rb_node_t *z)
Definition: rbtree.c:445
rb_node_index_t rb_tree_add_custom(rb_tree_t *rt, u32 key, uword opaque, rb_tree_lt_fn ltfn)
Definition: rbtree.c:195
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
int svm_fifo_dequeue_drop(svm_fifo_t *f, u32 len)
Dequeue and drop bytes from fifo.
Definition: svm_fifo.c:1061
struct _svm_fifo svm_fifo_t
rb_node_index_t root
root index
Definition: rbtree.h:45
u32 start
Start of segment, normalized.
Definition: fifo_types.h:43
void svm_fifo_init_ooo_lookup(svm_fifo_t *f, u8 ooo_type)
Initialize rbtrees used for ooo lookups.
Definition: svm_fifo.c:401
static int f_pos_lt(u32 a, u32 b)
Definition: svm_fifo.h:144
static void ooo_segment_add(svm_fifo_t *f, u32 offset, u32 head, u32 tail, u32 length)
Add segment to fifo's out-of-order segment list.
Definition: svm_fifo.c:176
svm_fifo_t * svm_fifo_alloc(u32 data_size_in_bytes)
Creates a fifo in the current heap.
Definition: svm_fifo.c:419
static void f_update_ooo_enq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
Definition: svm_fifo.c:595
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:128