FD.io VPP  v19.08.1-401-g8e4ed521a
Vector Packet Processing
mem.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#if USE_DLMALLOC == 0
#include <vppinfra/mheap_bootstrap.h>
#else
#include <vppinfra/dlmalloc.h>
#endif

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/valgrind.h>

#define CLIB_MAX_MHEAPS 256

/* Per CPU heaps. */
extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}
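
/*
 * Illustrative sketch (not part of the original header): the push/pop
 * pattern described in the comment above.  "tmp_heap" is a hypothetical
 * heap pointer obtained elsewhere (e.g. from clib_mem_init); allocations
 * made while it is installed come from that heap.
 *
 *   void *old_heap = clib_mem_set_per_cpu_heap (tmp_heap);
 *   void *obj = clib_mem_alloc (128);      // served from tmp_heap
 *   clib_mem_set_per_cpu_heap (old_heap);  // restore the previous heap
 */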

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

#if USE_DLMALLOC == 0
  uword offset;
  heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
  clib_per_cpu_mheaps[cpu] = heap;

  if (offset != ~0)
    {
      p = heap + offset;
#if CLIB_DEBUG > 0
      VALGRIND_MALLOCLIKE_BLOCK (p, mheap_data_bytes (heap, offset), 0, 0);
#endif
      return p;
    }
  else
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }
#else
  p = mspace_get_aligned (heap, size, align, align_offset);
  if (PREDICT_FALSE (p == 0))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  return p;
#endif /* USE_DLMALLOC */
}

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which returns 0 (instead of calling os_out_of_memory)
   when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align) \
({ \
  uword _clib_mem_alloc_size = (size); \
  void * _clib_mem_alloc_p; \
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
  if (! _clib_mem_alloc_p) \
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size); \
  _clib_mem_alloc_p; \
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

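/*
 * Illustrative sketch (not part of the original header): because the
 * *_no_fail variants are macros, a failed allocation panics with the
 * caller's own __FUNCTION__ and __LINE__ in the log, e.g.:
 *
 *   u8 *buf = clib_mem_alloc_no_fail (1024);  // panics at this call site
 */
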
always_inline uword
clib_mem_is_heap_object (void *p)
{
#if USE_DLMALLOC == 0
  void *heap = clib_mem_get_per_cpu_heap ();
  uword offset = (uword) p - (uword) heap;
  mheap_elt_t *e, *n;

  if (offset >= vec_len (heap))
    return 0;

  e = mheap_elt_at_uoffset (heap, offset);
  n = mheap_next_elt (e);

  /* Check that heap forward and reverse pointers agree. */
  return e->n_user_data == n->prev_n_user_data;
#else
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
#endif /* USE_DLMALLOC */
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

#if USE_DLMALLOC == 0
  mheap_put (heap, (u8 *) p - heap);
#else
  mspace_put (heap, p);
#endif

#if CLIB_DEBUG > 0
  VALGRIND_FREELIKE_BLOCK (p, 0);
#endif
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
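
/*
 * Illustrative note (not part of the original header): unlike realloc(),
 * the caller must supply the old object size, since the old contents are
 * copied with clib_memcpy_fast() before the old block is freed, e.g.:
 *
 *   u8 *v = clib_mem_alloc (64);
 *   v = clib_mem_realloc (v, 128, 64);   // grow from 64 to 128 bytes
 */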

always_inline uword
clib_mem_size (void *p)
{
#if USE_DLMALLOC == 0
  mheap_elt_t *e = mheap_user_pointer_to_elt (p);
  ASSERT (clib_mem_is_heap_object (p));
  return mheap_elt_data_bytes (e);
#else
  ASSERT (clib_mem_is_heap_object (p));
  return mspace_usable_size_with_delta (p);
#endif
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);

u8 *format_clib_mem_usage (u8 * s, va_list * args);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}
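
/*
 * Illustrative sketch (not part of the original header): reserve a region,
 * release its pages while keeping the address range reserved, then re-arm
 * the same range, as the comment above describes:
 *
 *   void *base = clib_mem_vm_alloc (1 << 20);   // reserve 1 MB
 *   ... use the region ...
 *   clib_mem_vm_unmap (base, 1 << 20);          // pages freed, VA kept
 *   clib_mem_vm_map (base, 1 << 20);            // remap the same range
 */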

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags;		/**< vm allocation flags:
			     <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
			     descriptor will be provided on successful allocation.
			     <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
			     <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
			     numa node preference.
			     <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
			     <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
			     number of available pages is not sufficient.
			     <br> CLIB_MEM_VM_F_LOCKED: request locked memory.
			*/
  char *name;		/**< Name for memory allocation, set by caller. */
  uword size;		/**< Allocation size, set by caller. */
  int numa_node;	/**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr;		/**< Pointer to allocated memory, set on successful allocation. */
  int fd;		/**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size;	/* Page size in log2 format, set on successful allocation. */
  int n_pages;		/* Number of pages. */
  uword requested_va;	/**< Request fixed position mapping */
} clib_mem_vm_alloc_t;

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);

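/*
 * Illustrative sketch (not part of the original header): requesting a
 * shared hugepage allocation through clib_mem_vm_ext_alloc().  The flag
 * values and field names come from clib_mem_vm_alloc_t above; error
 * handling is elided.
 *
 *   clib_mem_vm_alloc_t alloc = { 0 };
 *   alloc.name = "example";
 *   alloc.size = 2ULL << 20;
 *   alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
 *   if (clib_mem_vm_ext_alloc (&alloc) == 0)
 *     {
 *       // alloc.addr, alloc.fd and alloc.log2_page_size are now set
 *       clib_mem_vm_ext_free (&alloc);
 *     }
 */
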
typedef struct
{
  uword size;		/**< Map size */
  int fd;		/**< File descriptor to be mapped */
  uword requested_va;	/**< Request fixed position mapping */
  void *addr;		/**< Pointer to mapped memory, if successful */
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);
void clib_mem_trace (int enable);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */