FD.io VPP  v17.07-30-g839fa73
Vector Packet Processing
svm_fifo.c
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <svm/svm_fifo.h>
#include <vppinfra/cpu.h>

static inline u8
position_lt (svm_fifo_t * f, u32 a, u32 b)
{
  return (ooo_segment_distance_to_tail (f, a)
          < ooo_segment_distance_to_tail (f, b));
}

static inline u8
position_leq (svm_fifo_t * f, u32 a, u32 b)
{
  return (ooo_segment_distance_to_tail (f, a)
          <= ooo_segment_distance_to_tail (f, b));
}

static inline u8
position_gt (svm_fifo_t * f, u32 a, u32 b)
{
  return (ooo_segment_distance_to_tail (f, a)
          > ooo_segment_distance_to_tail (f, b));
}

static inline u32
position_diff (svm_fifo_t * f, u32 posa, u32 posb)
{
  return ooo_segment_distance_to_tail (f, posa)
    - ooo_segment_distance_to_tail (f, posb);
}

static inline u32
ooo_segment_end_pos (svm_fifo_t * f, ooo_segment_t * s)
{
  return (s->start + s->length) % f->nitems;
}

u8 *
format_ooo_segment (u8 * s, va_list * args)
{
  ooo_segment_t *seg = va_arg (*args, ooo_segment_t *);

  s = format (s, "pos %u, len %u, next %d, prev %d",
              seg->start, seg->length, seg->next, seg->prev);
  return s;
}

u8 *
format_ooo_list (u8 * s, va_list * args)
{
  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
  u32 ooo_segment_index = f->ooos_list_head;
  ooo_segment_t *seg;

  while (ooo_segment_index != OOO_SEGMENT_INVALID_INDEX)
    {
      seg = pool_elt_at_index (f->ooo_segments, ooo_segment_index);
      s = format (s, " %U\n", format_ooo_segment, seg);
      ooo_segment_index = seg->next;
    }
  return s;
}

u8 *
format_svm_fifo (u8 * s, va_list * args)
{
  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
  int verbose = va_arg (*args, int);

  s = format (s, "cursize %u nitems %u has_event %d\n",
              f->cursize, f->nitems, f->has_event);
  s = format (s, " head %d tail %d\n", f->head, f->tail);

  if (verbose > 1)
    s = format
      (s, " server session %d thread %d client session %d thread %d\n",
       f->master_session_index, f->master_thread_index,
       f->client_session_index, f->client_thread_index);

  if (verbose)
    {
      s = format (s, " ooo pool %d active elts\n",
                  pool_elts (f->ooo_segments));
      if (svm_fifo_has_ooo_data (f))
        s = format (s, " %U", format_ooo_list, f);
    }
  return s;
}

/** Create an svm fifo, in the current heap. Fails rather than blowing up
 *  the process on allocation failure. */
svm_fifo_t *
svm_fifo_create (u32 data_size_in_bytes)
{
  svm_fifo_t *f;

  f = clib_mem_alloc_aligned_or_null (sizeof (*f) + data_size_in_bytes,
                                      CLIB_CACHE_LINE_BYTES);
  if (f == 0)
    return 0;

  memset (f, 0, sizeof (*f) + data_size_in_bytes);
  f->nitems = data_size_in_bytes;
  f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;

  return (f);
}

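/*
 * Usage sketch (illustrative only, not part of the upstream file): a minimal
 * in-order round trip through the API defined in this file.  The helper name
 * svm_fifo_example_roundtrip is hypothetical; error handling is elided and a
 * single producer/consumer is assumed.
 */
static inline int
svm_fifo_example_roundtrip (void)
{
  u8 in[4] = { 1, 2, 3, 4 }, out[4];
  svm_fifo_t *f = svm_fifo_create (4096);      /* 4096 data bytes */

  if (!f)
    return -1;

  /* Copy up to 4 bytes at the tail; returns the number actually enqueued */
  svm_fifo_enqueue_nowait (f, 4, in);

  /* Copy up to 4 bytes from the head; returns the number actually dequeued */
  svm_fifo_dequeue_nowait (f, 4, out);

  svm_fifo_free (f);
  return 0;
}
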
void
svm_fifo_free (svm_fifo_t * f)
{
  pool_free (f->ooo_segments);
  clib_mem_free (f);
}

always_inline ooo_segment_t *
ooo_segment_new (svm_fifo_t * f, u32 start, u32 length)
{
  ooo_segment_t *s;

  pool_get (f->ooo_segments, s);

  s->start = start;
  s->length = length;

  s->prev = s->next = OOO_SEGMENT_INVALID_INDEX;

  return s;
}

always_inline void
ooo_segment_del (svm_fifo_t * f, u32 index)
{
  ooo_segment_t *cur, *prev = 0, *next = 0;
  cur = pool_elt_at_index (f->ooo_segments, index);

  if (cur->next != OOO_SEGMENT_INVALID_INDEX)
    {
      next = pool_elt_at_index (f->ooo_segments, cur->next);
      next->prev = cur->prev;
    }

  if (cur->prev != OOO_SEGMENT_INVALID_INDEX)
    {
      prev = pool_elt_at_index (f->ooo_segments, cur->prev);
      prev->next = cur->next;
    }
  else
    {
      f->ooos_list_head = cur->next;
    }

  pool_put (f->ooo_segments, cur);
}

/**
 * Add segment to fifo's out-of-order segment list. Takes care of merging
 * adjacent segments and removing overlapping ones.
 */
static void
ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length)
{
  ooo_segment_t *s, *new_s, *prev, *next, *it;
  u32 new_index, s_end_pos, s_index;
  u32 normalized_position, normalized_end_position;

  normalized_position = (f->tail + offset) % f->nitems;
  normalized_end_position = (f->tail + offset + length) % f->nitems;

  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;

  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
    {
      s = ooo_segment_new (f, normalized_position, length);
      f->ooos_list_head = s - f->ooo_segments;
      f->ooos_newest = f->ooos_list_head;
      return;
    }

  /* Find first segment that starts after new segment */
  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
  while (s->next != OOO_SEGMENT_INVALID_INDEX
         && position_lt (f, s->start, normalized_position))
    s = pool_elt_at_index (f->ooo_segments, s->next);

  /* If we have a previous and we overlap it, use it as starting point */
  prev = ooo_segment_get_prev (f, s);
  if (prev
      && position_leq (f, normalized_position, ooo_segment_end_pos (f, prev)))
    {
      s = prev;
      s_end_pos = ooo_segment_end_pos (f, s);
      goto merge;
    }

  s_index = s - f->ooo_segments;
  s_end_pos = ooo_segment_end_pos (f, s);

  /* No overlap, add before current segment */
  if (position_lt (f, normalized_end_position, s->start))
    {
      new_s = ooo_segment_new (f, normalized_position, length);
      new_index = new_s - f->ooo_segments;

      /* Pool might've moved, get segment again */
      s = pool_elt_at_index (f->ooo_segments, s_index);
      if (s->prev != OOO_SEGMENT_INVALID_INDEX)
        {
          new_s->prev = s->prev;
          prev = pool_elt_at_index (f->ooo_segments, new_s->prev);
          prev->next = new_index;
        }
      else
        {
          /* New head */
          f->ooos_list_head = new_index;
        }

      new_s->next = s_index;
      s->prev = new_index;
      f->ooos_newest = new_index;
      return;
    }
  /* No overlap, add after current segment */
  else if (position_gt (f, normalized_position, s_end_pos))
    {
      new_s = ooo_segment_new (f, normalized_position, length);
      new_index = new_s - f->ooo_segments;

      /* Pool might've moved, get segment again */
      s = pool_elt_at_index (f->ooo_segments, s_index);

      ASSERT (s->next == OOO_SEGMENT_INVALID_INDEX);

      new_s->prev = s_index;
      s->next = new_index;
      f->ooos_newest = new_index;

      return;
    }

  /*
   * Merge needed
   */

merge:

  /* Merge at head */
  if (position_lt (f, normalized_position, s->start))
    {
      s->start = normalized_position;
      s->length = position_diff (f, s_end_pos, s->start);
    }
  /* Overlapping tail */
  else if (position_gt (f, normalized_end_position, s_end_pos))
    {
      s->length = position_diff (f, normalized_end_position, s->start);
    }
  /* New segment completely covered by current one */
  else
    {
      /* Do Nothing */
      s = 0;
      goto done;
    }

  /* The new segment's tail may cover multiple smaller ones */
  if (position_gt (f, normalized_end_position, s_end_pos))
    {
      /* Remove the completely overlapped segments */
      it = (s->next != OOO_SEGMENT_INVALID_INDEX) ?
        pool_elt_at_index (f->ooo_segments, s->next) : 0;
      while (it && position_leq (f, ooo_segment_end_pos (f, it),
                                 normalized_end_position))
        {
          next = (it->next != OOO_SEGMENT_INVALID_INDEX) ?
            pool_elt_at_index (f->ooo_segments, it->next) : 0;
          ooo_segment_del (f, it - f->ooo_segments);
          it = next;
        }

      /* If partial overlap with last, merge */
      if (it && position_leq (f, it->start, normalized_end_position))
        {
          s->length = ooo_segment_end_pos (f, it) - s->start;
          ooo_segment_del (f, it - f->ooo_segments);
        }
    }

done:
  /* Most recently updated segment */
  if (s)
    f->ooos_newest = s - f->ooo_segments;
}

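/*
 * Worked example (illustrative only, not part of the upstream file): two
 * out-of-order writes whose ranges touch are coalesced by ooo_segment_add().
 * The helper name svm_fifo_example_ooo_merge is hypothetical; f is assumed
 * empty and data must point to at least 4 readable bytes.
 */
static inline void
svm_fifo_example_ooo_merge (svm_fifo_t * f, u8 * data)
{
  /* Bytes [tail+4, tail+8): creates a single ooo segment */
  svm_fifo_enqueue_with_offset (f, 4 /* offset */ , 4 /* bytes */ , data);

  /* Bytes [tail+8, tail+12): adjacent to the first range, so it is merged */
  svm_fifo_enqueue_with_offset (f, 8, 4, data);

  ASSERT (svm_fifo_number_ooo_segments (f) == 1);
}
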
/**
 * Removes segments that can now be enqueued because the fifo's tail has
 * advanced. Returns the number of bytes added to tail.
 */
static int
ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued)
{
  ooo_segment_t *s;
  u32 index, bytes = 0;
  i32 diff;

  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);

  diff = (f->tail >= s->start) ?
    f->tail - s->start : f->nitems + f->tail - s->start;

  if (diff > n_bytes_enqueued)
    return 0;

  /* If last tail update overlaps one/multiple ooo segments, remove them */
  while (0 <= diff && diff < n_bytes_enqueued)
    {
      index = s - f->ooo_segments;

      /* Segment end is beyond the tail. Advance tail and remove segment */
      if (s->length > diff)
        {
          bytes = s->length - diff;
          f->tail += bytes;
          f->tail %= f->nitems;
          ooo_segment_del (f, index);
          break;
        }

      /* If we have next go on */
      if (s->next != OOO_SEGMENT_INVALID_INDEX)
        {
          s = pool_elt_at_index (f->ooo_segments, s->next);
          diff = (f->tail >= s->start) ?
            f->tail - s->start : f->nitems + f->tail - s->start;
          ooo_segment_del (f, index);
        }
      /* End of search */
      else
        {
          ooo_segment_del (f, index);
          break;
        }
    }

  return bytes;
}

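/*
 * Worked example (illustrative only, not part of the upstream file): once an
 * in-order enqueue fills the hole in front of an ooo segment, the segment is
 * collected and its bytes are counted as enqueued too.  The helper name
 * svm_fifo_example_ooo_collect is hypothetical; f is assumed empty and data
 * must point to at least 4 readable bytes.
 */
static inline void
svm_fifo_example_ooo_collect (svm_fifo_t * f, u8 * data)
{
  int rv;

  /* Bytes [tail+4, tail+8) arrive early and sit in an ooo segment */
  svm_fifo_enqueue_with_offset (f, 4, 4, data);

  /* Bytes [tail, tail+4) fill the hole; the ooo segment is collected, so
   * the fifo advances by 8 bytes even though only 4 were passed in */
  rv = svm_fifo_enqueue_nowait (f, 4, data);
  ASSERT (rv == 8);
}
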
static int
svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here)
{
  u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
  u32 cursize, nitems;

  /* read cursize, which can only increase while we're working */
  cursize = svm_fifo_max_dequeue (f);
  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;

  if (PREDICT_FALSE (cursize == f->nitems))
    return -2;                  /* fifo stuffed */

  nitems = f->nitems;

  /* Number of bytes we're going to copy */
  total_copy_bytes = (nitems - cursize) < max_bytes ?
    (nitems - cursize) : max_bytes;

  if (PREDICT_TRUE (copy_from_here != 0))
    {
      /* Number of bytes in first copy segment */
      first_copy_bytes = ((nitems - f->tail) < total_copy_bytes)
        ? (nitems - f->tail) : total_copy_bytes;

      clib_memcpy (&f->data[f->tail], copy_from_here, first_copy_bytes);
      f->tail += first_copy_bytes;
      f->tail = (f->tail == nitems) ? 0 : f->tail;

      /* Number of bytes in second copy segment, if any */
      second_copy_bytes = total_copy_bytes - first_copy_bytes;
      if (second_copy_bytes)
        {
          clib_memcpy (&f->data[f->tail], copy_from_here + first_copy_bytes,
                       second_copy_bytes);
          f->tail += second_copy_bytes;
          f->tail = (f->tail == nitems) ? 0 : f->tail;
        }
    }
  else
    {
      /* Account for a zero-copy enqueue done elsewhere */
      ASSERT (max_bytes <= (nitems - cursize));
      f->tail += max_bytes;
      f->tail = f->tail % nitems;
      total_copy_bytes = max_bytes;
    }

  /* Any out-of-order segments to collect? */
  if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
    total_copy_bytes += ooo_segment_try_collect (f, total_copy_bytes);

  /* Atomically increase the queue length */
  __sync_fetch_and_add (&f->cursize, total_copy_bytes);

  return (total_copy_bytes);
}

#define SVM_ENQUEUE_CLONE_TEMPLATE(arch, fn, tgt)                       \
  uword                                                                 \
  __attribute__ ((flatten))                                             \
  __attribute__ ((target (tgt)))                                        \
  CLIB_CPU_OPTIMIZED                                                    \
  fn ## _ ## arch ( svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) \
  { return fn (f, max_bytes, copy_from_here);}

static int
svm_fifo_enqueue_nowait_ma (svm_fifo_t * f, u32 max_bytes,
                            u8 * copy_from_here)
{
  return svm_fifo_enqueue_internal (f, max_bytes, copy_from_here);
}

foreach_march_variant (SVM_ENQUEUE_CLONE_TEMPLATE, svm_fifo_enqueue_nowait_ma);
CLIB_MULTIARCH_SELECT_FN (svm_fifo_enqueue_nowait_ma);

int
svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here)
{
#if CLIB_DEBUG > 0
  return svm_fifo_enqueue_nowait_ma (f, max_bytes, copy_from_here);
#else
  static int (*fp) (svm_fifo_t *, u32, u8 *);

  if (PREDICT_FALSE (fp == 0))
    fp = (void *) svm_fifo_enqueue_nowait_ma_multiarch_select ();

  return (*fp) (f, max_bytes, copy_from_here);
#endif
}

/**
 * Enqueue a future segment.
 *
 * Two choices: either copies the entire segment, or copies nothing.
 * Returns 0 if the entire segment was copied.
 * Returns -1 if none of the segment was copied due to lack of space.
 */
static int
svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f,
                                       u32 offset,
                                       u32 required_bytes,
                                       u8 * copy_from_here)
{
  u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
  u32 cursize, nitems, normalized_offset;
  u32 offset_from_tail;

  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;

  /* read cursize, which can only increase while we're working */
  cursize = svm_fifo_max_dequeue (f);
  nitems = f->nitems;

  normalized_offset = (f->tail + offset) % nitems;

  /* Will this request fit? */
  offset_from_tail = (nitems + normalized_offset - f->tail) % nitems;
  if ((required_bytes + offset_from_tail) > (nitems - cursize))
    return -1;

  ooo_segment_add (f, offset, required_bytes);

  /* Number of bytes we're going to copy */
  total_copy_bytes = required_bytes;

  /* Number of bytes in first copy segment */
  first_copy_bytes = ((nitems - normalized_offset) < total_copy_bytes)
    ? (nitems - normalized_offset) : total_copy_bytes;

  clib_memcpy (&f->data[normalized_offset], copy_from_here, first_copy_bytes);

  /* Number of bytes in second copy segment, if any */
  second_copy_bytes = total_copy_bytes - first_copy_bytes;
  if (second_copy_bytes)
    {
      normalized_offset += first_copy_bytes;
      normalized_offset %= nitems;

      ASSERT (normalized_offset == 0);

      clib_memcpy (&f->data[normalized_offset],
                   copy_from_here + first_copy_bytes, second_copy_bytes);
    }

  return (0);
}

int
svm_fifo_enqueue_with_offset (svm_fifo_t * f,
                              u32 offset,
                              u32 required_bytes, u8 * copy_from_here)
{
  return svm_fifo_enqueue_with_offset_internal (f, offset, required_bytes,
                                                copy_from_here);
}

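/*
 * Usage sketch (illustrative only, not part of the upstream file): the offset
 * enqueue is all-or-nothing.  With a fresh 16-byte fifo, 8 bytes placed 12
 * bytes past the tail cannot fit, so nothing is copied and -1 is returned.
 * The helper name svm_fifo_example_offset_rejected is hypothetical.
 */
static inline void
svm_fifo_example_offset_rejected (u8 * data)
{
  svm_fifo_t *f = svm_fifo_create (16);
  int rv;

  if (!f)
    return;

  rv = svm_fifo_enqueue_with_offset (f, 12 /* offset */ , 8 /* bytes */ ,
                                     data);
  ASSERT (rv == -1);

  svm_fifo_free (f);
}
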
static int
svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here)
{
  u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
  u32 cursize, nitems;

  /* read cursize, which can only increase while we're working */
  cursize = svm_fifo_max_dequeue (f);
  if (PREDICT_FALSE (cursize == 0))
    return -2;                  /* nothing in the fifo */

  nitems = f->nitems;

  /* Number of bytes we're going to copy */
  total_copy_bytes = (cursize < max_bytes) ? cursize : max_bytes;

  if (PREDICT_TRUE (copy_here != 0))
    {
      /* Number of bytes in first copy segment */
      first_copy_bytes = ((nitems - f->head) < total_copy_bytes)
        ? (nitems - f->head) : total_copy_bytes;
      clib_memcpy (copy_here, &f->data[f->head], first_copy_bytes);
      f->head += first_copy_bytes;
      f->head = (f->head == nitems) ? 0 : f->head;

      /* Number of bytes in second copy segment, if any */
      second_copy_bytes = total_copy_bytes - first_copy_bytes;
      if (second_copy_bytes)
        {
          clib_memcpy (copy_here + first_copy_bytes,
                       &f->data[f->head], second_copy_bytes);
          f->head += second_copy_bytes;
          f->head = (f->head == nitems) ? 0 : f->head;
        }
    }
  else
    {
      /* Account for a zero-copy dequeue done elsewhere */
      ASSERT (max_bytes <= cursize);
      f->head += max_bytes;
      f->head = f->head % nitems;
      cursize -= max_bytes;
      total_copy_bytes = max_bytes;
    }

  __sync_fetch_and_sub (&f->cursize, total_copy_bytes);

  return (total_copy_bytes);
}

static int
svm_fifo_dequeue_nowait_ma (svm_fifo_t * f, u32 max_bytes, u8 * copy_here)
{
  return svm_fifo_dequeue_internal (f, max_bytes, copy_here);
}

#define SVM_FIFO_DEQUEUE_CLONE_TEMPLATE(arch, fn, tgt)          \
  uword                                                         \
  __attribute__ ((flatten))                                     \
  __attribute__ ((target (tgt)))                                \
  CLIB_CPU_OPTIMIZED                                            \
  fn ## _ ## arch ( svm_fifo_t * f, u32 max_bytes,              \
                    u8 * copy_here)                             \
  { return fn (f, max_bytes, copy_here);}

foreach_march_variant (SVM_FIFO_DEQUEUE_CLONE_TEMPLATE,
                       svm_fifo_dequeue_nowait_ma);
CLIB_MULTIARCH_SELECT_FN (svm_fifo_dequeue_nowait_ma);

int
svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here)
{
#if CLIB_DEBUG > 0
  return svm_fifo_dequeue_nowait_ma (f, max_bytes, copy_here);
#else
  static int (*fp) (svm_fifo_t *, u32, u8 *);

  if (PREDICT_FALSE (fp == 0))
    fp = (void *) svm_fifo_dequeue_nowait_ma_multiarch_select ();

  return (*fp) (f, max_bytes, copy_here);
#endif
}

static int
svm_fifo_peek_ma (svm_fifo_t * f, u32 relative_offset, u32 max_bytes,
                  u8 * copy_here)
{
  u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
  u32 cursize, nitems, real_head;

  /* read cursize, which can only increase while we're working */
  cursize = svm_fifo_max_dequeue (f);
  if (PREDICT_FALSE (cursize < relative_offset))
    return -2;                  /* nothing in the fifo */

  nitems = f->nitems;
  real_head = f->head + relative_offset;
  real_head = real_head >= nitems ? real_head - nitems : real_head;

  /* Number of bytes we're going to copy */
  total_copy_bytes = (cursize - relative_offset < max_bytes) ?
    cursize - relative_offset : max_bytes;

  if (PREDICT_TRUE (copy_here != 0))
    {
      /* Number of bytes in first copy segment */
      first_copy_bytes =
        ((nitems - real_head) < total_copy_bytes) ?
        (nitems - real_head) : total_copy_bytes;
      clib_memcpy (copy_here, &f->data[real_head], first_copy_bytes);

      /* Number of bytes in second copy segment, if any */
      second_copy_bytes = total_copy_bytes - first_copy_bytes;
      if (second_copy_bytes)
        {
          clib_memcpy (copy_here + first_copy_bytes, &f->data[0],
                       second_copy_bytes);
        }
    }
  return total_copy_bytes;
}

#define SVM_FIFO_PEEK_CLONE_TEMPLATE(arch, fn, tgt)                     \
  uword                                                                 \
  __attribute__ ((flatten))                                             \
  __attribute__ ((target (tgt)))                                        \
  CLIB_CPU_OPTIMIZED                                                    \
  fn ## _ ## arch ( svm_fifo_t * f, u32 relative_offset, u32 max_bytes, \
                    u8 * copy_here)                                     \
  { return fn (f, relative_offset, max_bytes, copy_here);}

foreach_march_variant (SVM_FIFO_PEEK_CLONE_TEMPLATE, svm_fifo_peek_ma);
CLIB_MULTIARCH_SELECT_FN (svm_fifo_peek_ma);

int
svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes,
               u8 * copy_here)
{
#if CLIB_DEBUG > 0
  return svm_fifo_peek_ma (f, relative_offset, max_bytes, copy_here);
#else
  static int (*fp) (svm_fifo_t *, u32, u32, u8 *);

  if (PREDICT_FALSE (fp == 0))
    fp = (void *) svm_fifo_peek_ma_multiarch_select ();

  return (*fp) (f, relative_offset, max_bytes, copy_here);
#endif
}

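/*
 * Usage sketch (illustrative only, not part of the upstream file):
 * svm_fifo_peek() copies data without moving the head, so the same bytes can
 * be re-read or later discarded with svm_fifo_dequeue_drop().  The helper
 * name svm_fifo_example_peek_then_drop is hypothetical; f is assumed to hold
 * at least 4 bytes.
 */
static inline void
svm_fifo_example_peek_then_drop (svm_fifo_t * f)
{
  u8 scratch[4];

  /* Non-destructive read of the 4 bytes at the head */
  svm_fifo_peek (f, 0 /* relative_offset */ , 4, scratch);

  /* The head advances only when the bytes are explicitly dropped */
  svm_fifo_dequeue_drop (f, 4);
}
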
int
svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes)
{
  u32 total_drop_bytes, first_drop_bytes, second_drop_bytes;
  u32 cursize, nitems;

  /* read cursize, which can only increase while we're working */
  cursize = svm_fifo_max_dequeue (f);
  if (PREDICT_FALSE (cursize == 0))
    return -2;                  /* nothing in the fifo */

  nitems = f->nitems;

  /* Number of bytes we're going to drop */
  total_drop_bytes = (cursize < max_bytes) ? cursize : max_bytes;

  /* Number of bytes in first drop segment */
  first_drop_bytes =
    ((nitems - f->head) < total_drop_bytes) ?
    (nitems - f->head) : total_drop_bytes;
  f->head += first_drop_bytes;
  f->head = (f->head == nitems) ? 0 : f->head;

  /* Number of bytes in second drop segment, if any */
  second_drop_bytes = total_drop_bytes - first_drop_bytes;
  if (second_drop_bytes)
    {
      f->head += second_drop_bytes;
      f->head = (f->head == nitems) ? 0 : f->head;
    }

  __sync_fetch_and_sub (&f->cursize, total_drop_bytes);

  return total_drop_bytes;
}

u32
svm_fifo_number_ooo_segments (svm_fifo_t * f)
{
  return pool_elts (f->ooo_segments);
}

ooo_segment_t *
svm_fifo_first_ooo_segment (svm_fifo_t * f)
{
  return pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
}

/**
 * Set fifo pointers to requested offset
 */
void
svm_fifo_init_pointers (svm_fifo_t * f, u32 pointer)
{
  f->head = f->tail = pointer % f->nitems;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */