FD.io VPP v20.09-rc2-28-g3c5414029
Vector Packet Processing
pmalloc.c
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/mempolicy.h>
#include <linux/memfd.h>
#include <sched.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/syscall.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif

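/* Overview (annotation, not part of the original file): pmalloc is a
   NUMA-aware allocator for physically mapped memory. It reserves a large
   virtual region up front, maps (huge)pages into it on demand, carves each
   page into fixed-size blocks grouped into chunks, and maintains a VA->PA
   lookup table built from /proc/self/pagemap. */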
static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}
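
/* e.g. pmalloc_size2pages (9 << 20, 21) rounds 9MB up to the next 2MB
   page boundary and returns 5 pages */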

static inline int
pmalloc_validate_numa_node (u32 * numa_node)
{
  if (*numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    {
      u32 cpu;
      if (getcpu (&cpu, numa_node) != 0)
	return 1;
    }
  return 0;
}

int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword base_addr, uword size)
{
  uword base, pagesize;
  u64 *pt = 0;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->sys_log2_page_sz = min_log2 (sysconf (_SC_PAGESIZE));
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  /* check if pagemap is accessible */
  pt = clib_mem_vm_get_paddr (&pt, pm->sys_log2_page_sz, 1);
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  base = clib_mem_vm_reserve (base_addr, size, pm->def_log2_page_sz);

  if (base == ~0)
    {
      pm->error = clib_error_return (0, "failed to reserve %u pages",
				     pm->max_pages);
      return -1;
    }

  pm->base = uword_to_pointer (base, void *);
  return 0;
}
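
/* Illustrative usage sketch (not part of the original file): callers
   typically zero a clib_pmalloc_main_t and initialize it once; passing 0
   for base_addr lets the kernel pick the base address, and 0 for size
   reserves DEFAULT_RESERVED_MB of virtual address space:

     static clib_pmalloc_main_t pm;
     if (clib_pmalloc_init (&pm, 0, 0) != 0)
       clib_error_report (pm.error);
*/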

static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
		       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
	{
	  pool_get (pp->chunks, c);
	  c->start = start;
	  c->prev = prev;
	  c->size = pp->n_free_blocks / a->subpages_per_page;
	  start += c->size;
	  if (prev == ~0)
	    pp->first_chunk_index = c - pp->chunks;
	  else
	    pp->chunks[prev].next = c - pp->chunks;
	  prev = c - pp->chunks;
	}
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  if (pp->n_free_blocks < n_blocks)
    return 0;

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
	return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed create new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
	pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}

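/* Annotation (not part of the original file): each 64-bit
   /proc/self/pagemap entry describes one system page of virtual address
   space; bit 63 is the page-present flag and bits 0-54 hold the physical
   frame number. The lookup table below stores va - pa per lookup page, so
   a physical address can later be recovered by a single subtraction. */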
static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
			elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
    {
      while (p < (uword) elts_per_page * count)
	{
	  pm->lookup_table[p] = pointer_to_uword (pm->base) +
	    (p << pm->lookup_log2_page_sz);
	  p++;
	}
      return;
    }

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  while (p < (uword) elts_per_page * count)
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      pa = 0;
      seek = (va >> pm->sys_log2_page_sz) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
	  read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
	  pa & (1ULL << 63) /* page present bit */ )
	{
	  pa = (pa & pow2_mask (55)) << pm->sys_log2_page_sz;
	}
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
}
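
/* A minimal sketch (assumed, not from the original file): once the table
   is populated, translating a pmalloc VA back to its physical address is
   one subtraction, which mirrors clib_pmalloc_get_pa () in pmalloc.h:

     uword index = (pointer_to_uword (va) - pointer_to_uword (pm->base))
		   >> pm->lookup_log2_page_sz;
     uword pa = pointer_to_uword (va) - pm->lookup_table[index];
*/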

static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
		   u32 numa_node, u32 n_pages)
{
  clib_pmalloc_page_t *pp = 0;
  int status, rv, i, mmap_flags;
  void *va = MAP_FAILED;
  int old_mpol = -1;
  long unsigned int mask[16] = { 0 };
  long unsigned int old_mask[16] = { 0 };
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

  if (a->log2_subpage_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
						 a->log2_subpage_sz, n_pages);

      if (pm->error)
	return 0;
    }

  rv = get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0, 0);
  /* failure to get mempolicy means we can only proceed with numa 0 maps */
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to get mempolicy");
      return 0;
    }

  mask[0] = 1 << numa_node;
  rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
					  "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      if (a->log2_subpage_sz != pm->sys_log2_page_sz)
	pm->error = clib_mem_create_hugetlb_fd ((char *) a->name, &a->fd);
      else
	pm->error = clib_mem_create_fd ((char *) a->name, &a->fd);
      if (a->fd == -1)
	goto error;
      if ((ftruncate (a->fd, size)) == -1)
	goto error;
    }
  else
    {
      if (a->log2_subpage_sz != pm->sys_log2_page_sz)
	mmap_flags |= MAP_HUGETLB;

      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
      a->fd = -1;
    }

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
					  "fd %d numa %d flags 0x%x", n_pages,
					  va, a->fd, numa_node, mmap_flags);
      va = MAP_FAILED;
      goto error;
    }

  if (a->log2_subpage_sz != pm->sys_log2_page_sz && mlock (va, size) != 0)
    {
      pm->error = clib_error_return_unix (0, "Unable to lock pages");
      goto error;
    }

  clib_memset (va, 0, size);

  rv = set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1);
  if (rv == -1 && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* we tolerate move_pages failure only if request is for numa node 0
     to support non-numa kernels */
  rv = move_pages (0, 1, &va, 0, &status, 0);
  if ((rv == 0 && status != numa_node) || (rv != 0 && numa_node != 0))
    {
      pm->error = rv == -1 ?
	clib_error_return_unix (0, "page allocated on wrong node, numa node "
				"%u status %d", numa_node, status) :
	clib_error_return (0, "page allocated on wrong node, numa node "
			   "%u status %d", numa_node, status);

      goto error;
    }

  for (i = 0; i < n_pages; i++)
    {
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
      a->n_pages++;
    }

  /* if new arena is using smaller page size, we need to rebuild whole
     lookup table */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
    {
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages,
				   n_pages);
    }
  else
    pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages, n_pages);

  /* return pointer to 1st page */
  return pp - (n_pages - 1);

error:
  if (va != MAP_FAILED)
    {
      /* unmap & reserve */
      munmap (va, size);
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
	    -1, 0);
    }
  if (a->fd != -1)
    close (a->fd);
  return 0;
}

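/* Illustrative sketch (not from the original file), assuming the system
   default hugepage size is 2MB: a shared arena can be created and then
   carved up with arena-scoped allocations:

     void *base = clib_pmalloc_create_shared_arena (&pm, "buffers",
						    64 << 20, 21, 0);
     void *obj = clib_pmalloc_alloc_from_arena (&pm, base, 4096, 64);

   log2_page_sz 21 selects 2MB subpages; passing 0 would select the
   allocator's default page size. */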
void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
				  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
	   log2_page_sz != pm->sys_log2_page_sz)
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
				     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
}

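/* Annotation (not part of the original file): allocation requests are
   expressed in PMALLOC_BLOCK_SZ-sized blocks (1 << PMALLOC_LOG2_BLOCK_SZ
   bytes); size is rounded up to whole blocks and the byte alignment is
   converted to a block alignment before the first-fit chunk search. */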
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
			   uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (pmalloc_validate_numa_node (&numa_node))
    return 0;

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
	return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
			       numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
	{
	  pool_get (pm->arenas, a);
	  pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
	  a->name = format (0, "default-numa-%u%c", numa_node, 0);
	  a->numa_node = numa_node;
	  a->log2_subpage_sz = pm->def_log2_page_sz;
	  a->subpages_per_page = 1;
	}
      else
	a = pool_elt_at_index (pm->arenas,
			       pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
  {
    pp = vec_elt_at_index (pm->pages, *page_index);
    void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
				      numa_node);

    if (rv)
      return rv;
  }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}

void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
				    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t * pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
				    CLIB_PMALLOC_NUMA_LOCAL);
}

void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t * pm, void *arena_va,
			       uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}
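
/* Illustrative sketch (not from the original file): the public entry
   points differ only in how the arena and NUMA node are chosen:

     void *p1 = clib_pmalloc_alloc_aligned (&pm, 1024, 64);
     void *p2 = clib_pmalloc_alloc_aligned_on_numa (&pm, 1024, 64, 1);

   p1 comes from the default arena of the caller's local NUMA node
   (CLIB_PMALLOC_NUMA_LOCAL is resolved via getcpu), p2 from the default
   arena of node 1. */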

static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
			  u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}
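
/* Annotation (not part of the original file): two free neighbors are
   merged only when they fall within the same subpage, presumably because
   blocks are physically contiguous only within a single subpage, so a
   chunk must never span a subpage boundary. */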

void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  if (p == 0)
    os_panic ();

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
	get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
	get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}

static u8 *
format_log2_page_size (u8 * s, va_list * va)
{
  u32 log2_page_sz = va_arg (*va, u32);

  if (log2_page_sz >= 30)
    return format (s, "%uGB", 1 << (log2_page_sz - 30));

  if (log2_page_sz >= 20)
    return format (s, "%uMB", 1 << (log2_page_sz - 20));

  if (log2_page_sz >= 10)
    return format (s, "%uKB", 1 << (log2_page_sz - 10));

  return format (s, "%uB", 1 << log2_page_sz);
}

static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
	      (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
	      pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
		  format_white_space, indent + 2,
		  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
	{
	  s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
		      format_white_space, indent + 2,
		      c->start << PMALLOC_LOG2_BLOCK_SZ,
		      c->size << PMALLOC_LOG2_BLOCK_SZ,
		      c->used ? "yes" : "no",
		      c - pp->chunks, c->prev, c->next);
	  if (c->next == ~0)
	    break;
	  c = pool_elt_at_index (pp->chunks, c->next);
	}
    }
  return s;
}

u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
	      "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
	      format_log2_page_size, pm->def_log2_page_sz,
	      format_log2_page_size, pm->lookup_log2_page_sz,
	      pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
		format_clib_error, pm->error);

  /* *INDENT-OFF* */
  pool_foreach (a, pm->arenas,
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
		  format_white_space, indent + 2, a->name,
		  vec_len (a->page_indices), format_log2_page_size,
		  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
	s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
	vec_foreach (page_index, a->page_indices)
	  {
	    pp = vec_elt_at_index (pm->pages, *page_index);
	    s = format (s, "\n%U%U", format_white_space, indent + 4,
			format_pmalloc_page, pp, verbose);
	  }
    });
  /* *INDENT-ON* */

  return s;
}

u8 *
format_pmalloc_map (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);

  u32 index;
  s = format (s, "%16s %13s %8s", "virtual-addr", "physical-addr", "size");
  vec_foreach_index (index, pm->lookup_table)
  {
    uword *lookup_val, pa, va;
    lookup_val = vec_elt_at_index (pm->lookup_table, index);
    va =
      pointer_to_uword (pm->base) +
      ((uword) index << pm->lookup_log2_page_sz);
    pa = va - *lookup_val;
    s =
      format (s, "\n %16p %13p %8U", uword_to_pointer (va, u64),
	      uword_to_pointer (pa, u64), format_log2_page_size,
	      pm->lookup_log2_page_sz);
  }
  return s;
}
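
/* A minimal usage sketch (assumed, not from the original file): these
   formatters plug into vppinfra's format (), e.g.

     u8 *s = format (0, "%U", format_pmalloc, &pm, 2);

   prints the arena/page/chunk summary at verbosity 2, while
   format_pmalloc_map dumps the per-lookup-page VA->PA mapping. */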

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */