FD.io VPP  v19.04.4-rc0-5-ge88582fac
Vector Packet Processing
svm.c
1 /*
2  *------------------------------------------------------------------
3  * svm.c - shared VM allocation, mmap(...MAP_FIXED...)
4  * library
5  *
6  * Copyright (c) 2009 Cisco and/or its affiliates.
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at:
10  *
11  * http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *------------------------------------------------------------------
19  */
20 
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <sys/types.h>
24 #include <sys/mman.h>
25 #include <sys/stat.h>
26 #include <netinet/in.h>
27 #include <signal.h>
28 #include <pthread.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <fcntl.h>
32 #include <string.h>
33 #include <vppinfra/clib.h>
34 #include <vppinfra/vec.h>
35 #include <vppinfra/hash.h>
36 #include <vppinfra/bitmap.h>
37 #include <vppinfra/fifo.h>
38 #include <vppinfra/time.h>
39 #include <vppinfra/mheap.h>
40 #include <vppinfra/heap.h>
41 #include <vppinfra/pool.h>
42 #include <vppinfra/format.h>
43 
44 #include "svm.h"
45 
46 static svm_region_t *root_rp;
47 static int root_rp_refcount;
48 
49 #define MAXLOCK 2
50 static pthread_mutex_t *mutexes_held[MAXLOCK];
51 static int nheld;
52 
53 svm_region_t *
54 svm_get_root_rp (void)
55 {
56  return root_rp;
57 }
58 
59 #define MUTEX_DEBUG
60 
61 u64
62 svm_get_global_region_base_va ()
63 {
64 #if __aarch64__
65  /* On AArch64 VA space can have different size, from 36 to 48 bits.
66  Here we are trying to detect VA bits by parsing /proc/self/maps
67  address ranges */
68  int fd;
69  unformat_input_t input;
70  u64 start, end = 0;
71  u8 bits = 0;
72 
73  if ((fd = open ("/proc/self/maps", 0)) < 0)
74  clib_unix_error ("open '/proc/self/maps'");
75 
76  unformat_init_clib_file (&input, fd);
77  while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
78  {
79  if (unformat (&input, "%llx-%llx", &start, &end))
80  end--;
81  unformat_skip_line (&input);
82  }
83  unformat_free (&input);
84  close (fd);
85 
86  bits = count_leading_zeros (end);
87  bits = 64 - bits;
88  if (bits >= 36 && bits <= 48)
89  return ((1ul << bits) / 4) - (2 * SVM_GLOBAL_REGION_SIZE);
90  else
91  clib_unix_error ("unexpected va bits '%u'", bits);
92 #endif
93 
94  /* default value */
95  return 0x130000000ULL;
96 }
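/* Illustrative arithmetic (not part of the upstream file): with 48 usable VA
 * bits the AArch64 branch above returns ((1ul << 48) / 4) minus twice
 * SVM_GLOBAL_REGION_SIZE, i.e. a base just under 0x400000000000 (64 TiB);
 * on other platforms the fixed default 0x130000000 is used. */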
97 
98 static void
99 region_lock (svm_region_t * rp, int tag)
100 {
101  pthread_mutex_lock (&rp->mutex);
102 #ifdef MUTEX_DEBUG
103  rp->mutex_owner_pid = getpid ();
104  rp->mutex_owner_tag = tag;
105 #endif
106  ASSERT (nheld < MAXLOCK);
107  /*
108  * Keep score of held mutexes so we can try to exit
109  * cleanly if the world comes to an end at the worst possible
110  * moment
111  */
112  mutexes_held[nheld++] = &rp->mutex;
113 }
114 
115 static void
116 region_unlock (svm_region_t * rp)
117 {
118  int i, j;
119 #ifdef MUTEX_DEBUG
120  rp->mutex_owner_pid = 0;
121  rp->mutex_owner_tag = 0;
122 #endif
123 
124  for (i = nheld - 1; i >= 0; i--)
125  {
126  if (mutexes_held[i] == &rp->mutex)
127  {
128  for (j = i; j < MAXLOCK - 1; j++)
129  mutexes_held[j] = mutexes_held[j + 1];
130  nheld--;
131  goto found;
132  }
133  }
134  ASSERT (0);
135 
136 found:
137  CLIB_MEMORY_BARRIER ();
138  pthread_mutex_unlock (&rp->mutex);
139 }
140 
141 
142 static u8 *
143 format_svm_flags (u8 * s, va_list * args)
144 {
145  uword f = va_arg (*args, uword);
146 
147  if (f & SVM_FLAGS_MHEAP)
148  s = format (s, "MHEAP ");
149  if (f & SVM_FLAGS_FILE)
150  s = format (s, "FILE ");
151  if (f & SVM_FLAGS_NODATA)
152  s = format (s, "NODATA ");
153  if (f & SVM_FLAGS_NEED_DATA_INIT)
154  s = format (s, "INIT ");
155 
156  return (s);
157 }
158 
159 static u8 *
160 format_svm_size (u8 * s, va_list * args)
161 {
162  uword size = va_arg (*args, uword);
163 
164  if (size >= (1 << 20))
165  {
166  s = format (s, "(%d mb)", size >> 20);
167  }
168  else if (size >= (1 << 10))
169  {
170  s = format (s, "(%d kb)", size >> 10);
171  }
172  else
173  {
174  s = format (s, "(%d bytes)", size);
175  }
176  return (s);
177 }
178 
179 u8 *
180 format_svm_region (u8 * s, va_list * args)
181 {
182  svm_region_t *rp = va_arg (*args, svm_region_t *);
183  int verbose = va_arg (*args, int);
184  int i;
185  uword lo, hi;
186 
187  s = format (s, "%s: base va 0x%x size 0x%x %U\n",
188  rp->region_name, rp->virtual_base,
189  rp->virtual_size, format_svm_size, rp->virtual_size);
190  s = format (s, " user_ctx 0x%x, bitmap_size %d\n",
191  rp->user_ctx, rp->bitmap_size);
192 
193  if (verbose)
194  {
195  s = format (s, " flags: 0x%x %U\n", rp->flags,
196  format_svm_flags, rp->flags);
197  s = format (s,
198  " region_heap 0x%x data_base 0x%x data_heap 0x%x\n",
199  rp->region_heap, rp->data_base, rp->data_heap);
200  }
201 
202  s = format (s, " %d clients, pids: ", vec_len (rp->client_pids));
203 
204  for (i = 0; i < vec_len (rp->client_pids); i++)
205  s = format (s, "%d ", rp->client_pids[i]);
206 
207  s = format (s, "\n");
208 
209  if (verbose)
210  {
211  lo = hi = ~0;
212 
213  s = format (s, " VM in use: ");
214 
215  for (i = 0; i < rp->bitmap_size; i++)
216  {
217  if (clib_bitmap_get_no_check (rp->bitmap, i) != 0)
218  {
219  if (lo == ~0)
220  {
221  hi = lo = rp->virtual_base + i * MMAP_PAGESIZE;
222  }
223  else
224  {
225  hi = rp->virtual_base + i * MMAP_PAGESIZE;
226  }
227  }
228  else
229  {
230  if (lo != ~0)
231  {
232  hi = rp->virtual_base + i * MMAP_PAGESIZE - 1;
233  s = format (s, " 0x%x - 0x%x (%dk)\n", lo, hi,
234  (hi - lo) >> 10);
235  lo = hi = ~0;
236  }
237  }
238  }
239 #if USE_DLMALLOC == 0
240  s = format (s, " rgn heap stats: %U", format_mheap,
241  rp->region_heap, 0);
242  if ((rp->flags & SVM_FLAGS_MHEAP) && rp->data_heap)
243  {
244  s = format (s, "\n data heap stats: %U", format_mheap,
245  rp->data_heap, 1);
246  }
247  s = format (s, "\n");
248 #endif
249  }
250 
251  return (s);
252 }
253 
254 /*
255  * rnd_pagesize
256  * Round to a pagesize multiple, presumably 4k works
257  */
258 static u64
259 rnd_pagesize (u64 size)
260 {
261  u64 rv;
262 
263  rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1);
264  return (rv);
265 }
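/* Worked example, assuming the usual 4 kB MMAP_PAGESIZE:
 * rnd_pagesize (10000) = (10000 + 4095) & ~4095 = 12288, while sizes that
 * are already a page multiple pass through unchanged. */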
266 
267 /*
268  * svm_data_region_setup
269  */
270 static int
271 svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)
272 {
273  int fd;
274  u8 junk = 0;
275  uword map_size;
276 
277  map_size = rp->virtual_size - (MMAP_PAGESIZE +
278  (a->pvt_heap_size ? a->pvt_heap_size :
279  SVM_PVT_MHEAP_SIZE));
280 
281  if (a->flags & SVM_FLAGS_FILE)
282  {
283  struct stat statb;
284 
285  fd = open (a->backing_file, O_RDWR | O_CREAT, 0777);
286 
287  if (fd < 0)
288  {
289  clib_unix_warning ("open");
290  return -1;
291  }
292 
293  if (fstat (fd, &statb) < 0)
294  {
295  clib_unix_warning ("fstat");
296  close (fd);
297  return -2;
298  }
299 
300  if (statb.st_mode & S_IFREG)
301  {
302  if (statb.st_size == 0)
303  {
304  if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
305  {
306  clib_unix_warning ("seek region size");
307  close (fd);
308  return -3;
309  }
310  if (write (fd, &junk, 1) != 1)
311  {
312  clib_unix_warning ("set region size");
313  close (fd);
314  return -3;
315  }
316  }
317  else
318  {
319  map_size = rnd_pagesize (statb.st_size);
320  }
321  }
322  else
323  {
324  map_size = a->backing_mmap_size;
325  }
326 
327  ASSERT (map_size <= rp->virtual_size -
328  (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE));
329 
330  if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
331  MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
332  {
333  clib_unix_warning ("mmap");
334  close (fd);
335  return -3;
336  }
337  close (fd);
338  rp->backing_file = (char *) format (0, "%s\0", a->backing_file);
339  rp->flags |= SVM_FLAGS_FILE;
340  }
341 
342  if (a->flags & SVM_FLAGS_MHEAP)
343  {
344 #if USE_DLMALLOC == 0
345  mheap_t *heap_header;
346  rp->data_heap =
347  mheap_alloc_with_flags ((void *) (rp->data_base), map_size,
348  MHEAP_FLAG_DISABLE_VM);
349  heap_header = mheap_header (rp->data_heap);
350  heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
351 #else
352  rp->data_heap = create_mspace_with_base (rp->data_base,
353  map_size, 1 /* locked */ );
354  mspace_disable_expand (rp->data_heap);
355 #endif
356 
357  rp->flags |= SVM_FLAGS_MHEAP;
358  }
359  return 0;
360 }
361 
362 static int
363 svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
364 {
365  int fd;
366  u8 junk = 0;
367  uword map_size;
368  struct stat statb;
369 
370  map_size = rp->virtual_size -
371  (MMAP_PAGESIZE +
372  (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));
373 
374  if (a->flags & SVM_FLAGS_FILE)
375  {
376 
377  fd = open (a->backing_file, O_RDWR, 0777);
378 
379  if (fd < 0)
380  {
381  clib_unix_warning ("open");
382  return -1;
383  }
384 
385  if (fstat (fd, &statb) < 0)
386  {
387  clib_unix_warning ("fstat");
388  close (fd);
389  return -2;
390  }
391 
392  if (statb.st_mode & S_IFREG)
393  {
394  if (statb.st_size == 0)
395  {
396  if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
397  {
398  clib_unix_warning ("seek region size");
399  close (fd);
400  return -3;
401  }
402  if (write (fd, &junk, 1) != 1)
403  {
404  clib_unix_warning ("set region size");
405  close (fd);
406  return -3;
407  }
408  }
409  else
410  {
411  map_size = rnd_pagesize (statb.st_size);
412  }
413  }
414  else
415  {
416  map_size = a->backing_mmap_size;
417  }
418 
419  ASSERT (map_size <= rp->virtual_size
420  - (MMAP_PAGESIZE
421  +
422  (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)));
423 
424  if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
425  MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
426  {
427  clib_unix_warning ("mmap");
428  close (fd);
429  return -3;
430  }
431  close (fd);
432  }
433  return 0;
434 }
435 
436 u8 *
437 shm_name_from_svm_map_region_args (svm_map_region_args_t * a)
438 {
439  u8 *path;
440  u8 *shm_name;
441  u8 *split_point;
442  u8 *mkdir_arg = 0;
443  int root_path_offset = 0;
444  int name_offset = 0;
445 
446  if (a->root_path)
447  {
448  /* Tolerate present or absent slashes */
449  if (a->root_path[0] == '/')
450  root_path_offset++;
451 
452  /* create the root_path under /dev/shm
453  iterate through path creating directories */
454 
455  path = format (0, "/dev/shm/%s%c", &a->root_path[root_path_offset], 0);
456  split_point = path + 1;
457  vec_add1 (mkdir_arg, '-');
458 
459  while (*split_point)
460  {
461  while (*split_point && *split_point != '/')
462  {
463  vec_add1 (mkdir_arg, *split_point);
464  split_point++;
465  }
466  vec_add1 (mkdir_arg, 0);
467 
468  /* ready to descend another level */
469  mkdir_arg[vec_len (mkdir_arg) - 1] = '-';
470  split_point++;
471  }
472  vec_free (mkdir_arg);
473  vec_free (path);
474 
475  if (a->name[0] == '/')
476  name_offset = 1;
477 
478  shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset],
479  &a->name[name_offset], 0);
480  }
481  else
482  shm_name = format (0, "%s%c", a->name, 0);
483  return (shm_name);
484 }
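/* Example of the naming scheme above (names are illustrative only): with
 * root_path "vpp/prod" and name "/global_vm" the function returns the shm
 * name "/vpp/prod-global_vm"; with no root_path the region name is used as
 * the shm name unchanged. */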
485 
486 void
487 svm_region_init_mapped_region (svm_map_region_args_t * a, svm_region_t * rp)
488 {
489  pthread_mutexattr_t attr;
490  pthread_condattr_t cattr;
491  int nbits, words, bit;
492  int overhead_space;
493  void *oldheap;
494  uword data_base;
495  ASSERT (rp);
496  int rv;
497 
498  clib_memset (rp, 0, sizeof (*rp));
499 
500  if (pthread_mutexattr_init (&attr))
501  clib_unix_warning ("mutexattr_init");
502 
503  if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
504  clib_unix_warning ("mutexattr_setpshared");
505 
506  if (pthread_mutex_init (&rp->mutex, &attr))
507  clib_unix_warning ("mutex_init");
508 
509  if (pthread_mutexattr_destroy (&attr))
510  clib_unix_warning ("mutexattr_destroy");
511 
512  if (pthread_condattr_init (&cattr))
513  clib_unix_warning ("condattr_init");
514 
515  if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED))
516  clib_unix_warning ("condattr_setpshared");
517 
518  if (pthread_cond_init (&rp->condvar, &cattr))
519  clib_unix_warning ("cond_init");
520 
521  if (pthread_condattr_destroy (&cattr))
522  clib_unix_warning ("condattr_destroy");
523 
524  region_lock (rp, 1);
525 
526  rp->virtual_base = a->baseva;
527  rp->virtual_size = a->size;
528 
529 #if USE_DLMALLOC == 0
530  rp->region_heap =
531  mheap_alloc_with_flags (uword_to_pointer
532  (a->baseva + MMAP_PAGESIZE, void *),
533  (a->pvt_heap_size !=
534  0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
535  MHEAP_FLAG_DISABLE_VM);
536 #else
537  rp->region_heap = create_mspace_with_base
538  (uword_to_pointer (a->baseva + MMAP_PAGESIZE, void *),
539  (a->pvt_heap_size !=
540  0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, 1 /* locked */ );
541 
542  mspace_disable_expand (rp->region_heap);
543 #endif
544 
545  oldheap = svm_push_pvt_heap (rp);
546 
547  rp->region_name = (char *) format (0, "%s%c", a->name, 0);
548  vec_add1 (rp->client_pids, getpid ());
549 
550  nbits = rp->virtual_size / MMAP_PAGESIZE;
551 
552  ASSERT (nbits > 0);
553  rp->bitmap_size = nbits;
554  words = (nbits + BITS (uword) - 1) / BITS (uword);
555  vec_validate (rp->bitmap, words - 1);
556 
557  overhead_space = MMAP_PAGESIZE /* header */ +
558  ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
559 
560  bit = 0;
561  data_base = (uword) rp->virtual_base;
562 
563  if (a->flags & SVM_FLAGS_NODATA)
564  rp->flags |= SVM_FLAGS_NEED_DATA_INIT;
565 
566  do
567  {
568  clib_bitmap_set_no_check (rp->bitmap, bit, 1);
569  bit++;
570  overhead_space -= MMAP_PAGESIZE;
571  data_base += MMAP_PAGESIZE;
572  }
573  while (overhead_space > 0);
574 
575  rp->data_base = (void *) data_base;
576 
577  /*
578  * Note: although the POSIX spec guarantees that only one
579  * process enters this block, we have to play games
580  * to hold off clients until e.g. the mutex is ready
581  */
582  rp->version = SVM_VERSION;
583 
584  /* setup the data portion of the region */
585 
586  rv = svm_data_region_create (a, rp);
587  if (rv)
588  {
589  clib_warning ("data_region_create: %d", rv);
590  }
591 
592  region_unlock (rp);
593 
594  svm_pop_heap (oldheap);
595 }
596 
597 /*
598  * svm_map_region
599  */
600 void *
601 svm_map_region (svm_map_region_args_t * a)
602 {
603  int svm_fd;
604  svm_region_t *rp;
605  int deadman = 0;
606  u8 junk = 0;
607  void *oldheap;
608  int rv;
609  int pid_holding_region_lock;
610  u8 *shm_name;
611  int dead_region_recovery = 0;
612  int time_left;
613  struct stat stat;
614  struct timespec ts, tsrem;
615 
616  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
617  ASSERT (a->name);
618 
619  shm_name = shm_name_from_svm_map_region_args (a);
620 
621  if (CLIB_DEBUG > 1)
622  clib_warning ("[%d] map region %s: shm_open (%s)",
623  getpid (), a->name, shm_name);
624 
625  svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777);
626 
627  if (svm_fd >= 0)
628  {
629  if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
630  clib_unix_warning ("segment chmod");
631  /* This turns out to fail harmlessly if the client starts first */
632  if (fchown (svm_fd, a->uid, a->gid) < 0)
633  clib_unix_warning ("segment chown [ok if client starts first]");
634 
635  vec_free (shm_name);
636 
637  if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1)
638  {
639  clib_warning ("seek region size");
640  close (svm_fd);
641  return (0);
642  }
643  if (write (svm_fd, &junk, 1) != 1)
644  {
645  clib_warning ("set region size");
646  close (svm_fd);
647  return (0);
648  }
649 
650  rp = mmap (uword_to_pointer (a->baseva, void *), a->size,
651  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0);
652 
653  if (rp == (svm_region_t *) MAP_FAILED)
654  {
655  clib_unix_warning ("mmap create");
656  close (svm_fd);
657  return (0);
658  }
659  close (svm_fd);
660 
661  svm_region_init_mapped_region (a, rp);
662 
663  return ((void *) rp);
664  }
665  else
666  {
667  svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777);
668 
669  vec_free (shm_name);
670 
671  if (svm_fd < 0)
672  {
673  perror ("svm_region_map(mmap open)");
674  return (0);
675  }
676 
677  /* Reset ownership in case the client started first */
678  if (fchown (svm_fd, a->uid, a->gid) < 0)
679  clib_unix_warning ("segment chown [ok if client starts first]");
680 
681  time_left = 20;
682  while (1)
683  {
684  if (0 != fstat (svm_fd, &stat))
685  {
686  clib_warning ("fstat failed: %d", errno);
687  close (svm_fd);
688  return (0);
689  }
690  if (stat.st_size > 0)
691  {
692  break;
693  }
694  if (0 == time_left)
695  {
696  clib_warning ("waiting for resize of shm file timed out");
697  close (svm_fd);
698  return (0);
699  }
700  ts.tv_sec = 0;
701  ts.tv_nsec = 100000000;
702  while (nanosleep (&ts, &tsrem) < 0)
703  ts = tsrem;
704  time_left--;
705  }
706 
707  rp = mmap (0, MMAP_PAGESIZE,
708  PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0);
709 
710  if (rp == (svm_region_t *) MAP_FAILED)
711  {
712  close (svm_fd);
713  clib_warning ("mmap");
714  return (0);
715  }
716  /*
717  * We lost the footrace to create this region; make sure
718  * the winner has crossed the finish line.
719  */
720  while (rp->version == 0 && deadman++ < 5)
721  {
722  sleep (1);
723  }
724 
725  /*
726  * <bleep>-ed?
727  */
728  if (rp->version == 0)
729  {
730  clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION);
731  close (svm_fd);
732  munmap (rp, a->size);
733  return (0);
734  }
735  /* Remap now that the region has been placed */
736  a->baseva = rp->virtual_base;
737  a->size = rp->virtual_size;
738  munmap (rp, MMAP_PAGESIZE);
739 
740  rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size,
741  PROT_READ | PROT_WRITE,
742  MAP_SHARED | MAP_FIXED, svm_fd, 0);
743  if ((uword) rp == (uword) MAP_FAILED)
744  {
745  clib_unix_warning ("mmap");
746  close (svm_fd);
747  return (0);
748  }
749 
750  close (svm_fd);
751 
752  if ((uword) rp != rp->virtual_base)
753  {
754  clib_warning ("mmap botch");
755  }
756 
757  /*
758  * Try to fix the region mutex if it is held by
759  * a dead process
760  */
761  pid_holding_region_lock = rp->mutex_owner_pid;
762  if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
763  {
764  clib_warning
765  ("region %s mutex held by dead pid %d, tag %d, force unlock",
766  rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
767  /* owner pid is nonexistent */
768  rp->mutex.__data.__owner = 0;
769  rp->mutex.__data.__lock = 0;
770  dead_region_recovery = 1;
771  }
772 
773  if (dead_region_recovery)
774  clib_warning ("recovery: attempt to re-lock region");
775 
776  region_lock (rp, 2);
777  oldheap = svm_push_pvt_heap (rp);
778  vec_add1 (rp->client_pids, getpid ());
779 
780  if (dead_region_recovery)
781  clib_warning ("recovery: attempt svm_data_region_map");
782 
783  rv = svm_data_region_map (a, rp);
784  if (rv)
785  {
786  clib_warning ("data_region_map: %d", rv);
787  }
788 
789  if (dead_region_recovery)
790  clib_warning ("unlock and continue");
791 
792  region_unlock (rp);
793 
794  svm_pop_heap (oldheap);
795 
796  return ((void *) rp);
797 
798  }
799  return 0; /* NOTREACHED */
800 }
801 
802 static void
803 svm_mutex_cleanup (void)
804 {
805  int i;
806  for (i = 0; i < nheld; i++)
807  {
808  pthread_mutex_unlock (mutexes_held[i]);
809  }
810 }
811 
812 static int
813 svm_region_init_internal (svm_map_region_args_t * a)
814 {
815  svm_region_t *rp;
816  u64 ticks = clib_cpu_time_now ();
817  uword randomize_baseva;
818 
819  /* guard against klutz calls */
820  if (root_rp)
821  return -1;
822 
823  root_rp_refcount++;
824 
825  atexit (svm_mutex_cleanup);
826 
827  /* Randomize the shared-VM base at init time */
828  if (MMAP_PAGESIZE <= (4 << 10))
829  randomize_baseva = (ticks & 15) * MMAP_PAGESIZE;
830  else
831  randomize_baseva = (ticks & 3) * MMAP_PAGESIZE;
832 
833  a->baseva += randomize_baseva;
834 
835  rp = svm_map_region (a);
836  if (!rp)
837  return -1;
838 
839  region_lock (rp, 3);
840 
841  /* Set up the main region data structures */
842  if (rp->flags & SVM_FLAGS_NEED_DATA_INIT)
843  {
844  svm_main_region_t *mp = 0;
845  void *oldheap;
846 
847  rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT);
848 
849  oldheap = svm_push_pvt_heap (rp);
850  vec_validate (mp, 0);
851  mp->name_hash = hash_create_string (0, sizeof (uword));
852  mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0;
853  mp->uid = a->uid;
854  mp->gid = a->gid;
855  rp->data_base = mp;
856  svm_pop_heap (oldheap);
857  }
858  region_unlock (rp);
859  root_rp = rp;
860 
861  return 0;
862 }
863 
864 void
865 svm_region_init (void)
866 {
867  svm_map_region_args_t _a, *a = &_a;
868 
869  clib_memset (a, 0, sizeof (*a));
870  a->root_path = 0;
871  a->name = SVM_GLOBAL_REGION_NAME;
872  a->baseva = svm_get_global_region_base_va ();
873  a->size = SVM_GLOBAL_REGION_SIZE;
874  a->flags = SVM_FLAGS_NODATA;
875  a->uid = 0;
876  a->gid = 0;
877 
878  svm_region_init_internal (a);
879 }
880 
881 int
882 svm_region_init_chroot (const char *root_path)
883 {
884  svm_map_region_args_t _a, *a = &_a;
885 
886  clib_memset (a, 0, sizeof (*a));
887  a->root_path = root_path;
888  a->name = SVM_GLOBAL_REGION_NAME;
889  a->baseva = svm_get_global_region_base_va ();
890  a->size = SVM_GLOBAL_REGION_SIZE;
891  a->flags = SVM_FLAGS_NODATA;
892  a->uid = 0;
893  a->gid = 0;
894 
895  return svm_region_init_internal (a);
896 }
897 
898 void
899 svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid)
900 {
901  svm_map_region_args_t _a, *a = &_a;
902 
903  clib_memset (a, 0, sizeof (*a));
904  a->root_path = root_path;
905  a->name = SVM_GLOBAL_REGION_NAME;
906  a->baseva = svm_get_global_region_base_va ();
907  a->size = SVM_GLOBAL_REGION_SIZE;
908  a->flags = SVM_FLAGS_NODATA;
909  a->uid = uid;
910  a->gid = gid;
911 
912  svm_region_init_internal (a);
913 }
914 
915 void
916 svm_region_init_args (svm_map_region_args_t * a)
917 {
918  svm_region_init_internal (a);
919 }
920 
921 void *
922 svm_region_find_or_create (svm_map_region_args_t * a)
923 {
924  svm_main_region_t *mp;
925  svm_region_t *rp;
926  uword need_nbits;
927  int index, i;
928  void *oldheap;
929  uword *p;
930  u8 *name;
931  svm_subregion_t *subp;
932 
933  ASSERT (root_rp);
934 
935  a->size += MMAP_PAGESIZE +
936  ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
937  a->size = rnd_pagesize (a->size);
938 
939  region_lock (root_rp, 4);
940  oldheap = svm_push_pvt_heap (root_rp);
941  mp = root_rp->data_base;
942 
943  ASSERT (mp);
944 
945  /* Map the named region from the correct chroot environment */
946  if (a->root_path == NULL)
947  a->root_path = (char *) mp->root_path;
948 
949  /*
950  * See if this region is already known. If it is, we're
951  * almost done...
952  */
953  p = hash_get_mem (mp->name_hash, a->name);
954 
955  if (p)
956  {
957  rp = svm_map_region (a);
958  region_unlock (root_rp);
959  svm_pop_heap (oldheap);
960  return rp;
961  }
962 
963  /* Create the region. */
964  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
965 
966  need_nbits = a->size / MMAP_PAGESIZE;
967 
968  index = 1; /* $$$ fixme, figure out how many bits to really skip */
969 
970  /*
971  * Scan the virtual space allocation bitmap, looking for a large
972  * enough chunk
973  */
974  do
975  {
976  if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0)
977  {
978  for (i = 0; i < (need_nbits - 1); i++)
979  {
980  if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1)
981  {
982  index = index + i;
983  goto next;
984  }
985  }
986  break;
987  }
988  index++;
989  next:;
990  }
991  while (index < root_rp->bitmap_size);
992 
993  /* Completely out of VM? */
994  if (index >= root_rp->bitmap_size)
995  {
996  clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)",
997  root_rp->region_name, a->size, a->size);
998  svm_pop_heap (oldheap);
999  region_unlock (root_rp);
1000  return 0;
1001  }
1002 
1003  /*
1004  * Mark virtual space allocated
1005  */
1006 #if CLIB_DEBUG > 1
1007  clib_warning ("set %d bits at index %d", need_nbits, index);
1008 #endif
1009 
1010  for (i = 0; i < need_nbits; i++)
1011  {
1012  clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1);
1013  }
1014 
1015  /* Place this region where it goes... */
1016  a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;
1017 
1018  rp = svm_map_region (a);
1019 
1020  pool_get (mp->subregions, subp);
1021  name = format (0, "%s%c", a->name, 0);
1022  subp->subregion_name = name;
1023 
1024  hash_set_mem (mp->name_hash, name, subp - mp->subregions);
1025 
1026  svm_pop_heap (oldheap);
1027 
1028  region_unlock (root_rp);
1029 
1030  return (rp);
1031 }
1032 
1033 void
1034 svm_region_unlink (svm_region_t * rp)
1035 {
1036  svm_map_region_args_t _a, *a = &_a;
1037  svm_main_region_t *mp;
1038  u8 *shm_name;
1039 
1040  ASSERT (root_rp);
1041  ASSERT (rp);
1042  ASSERT (vec_c_string_is_terminated (rp->region_name));
1043 
1044  mp = root_rp->data_base;
1045  ASSERT (mp);
1046 
1047  a->root_path = (char *) mp->root_path;
1048  a->name = rp->region_name;
1049  shm_name = shm_name_from_svm_map_region_args (a);
1050  if (CLIB_DEBUG > 1)
1051  clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name);
1052  shm_unlink ((const char *) shm_name);
1053  vec_free (shm_name);
1054 }
1055 
1056 /*
1057  * svm_region_unmap
1058  *
1059  * Let go of the indicated region. If the calling process
1060  * is the last customer, throw it away completely.
1061  * The root region mutex guarantees atomicity with respect to
1062  * a new region client showing up at the wrong moment.
1063  */
1064 void
1065 svm_region_unmap_internal (void *rp_arg, u8 is_client)
1066 {
1067  int i, mypid = getpid ();
1068  int nclients_left;
1069  void *oldheap;
1070  uword virtual_base, virtual_size;
1071  svm_region_t *rp = rp_arg;
1072  char *name;
1073 
1074  /*
1075  * If we take a signal while holding one or more shared-memory
1076  * mutexes, we may end up back here from an otherwise
1077  * benign exit handler. Bail out to avoid a recursive
1078  * mutex screw-up.
1079  */
1080  if (nheld)
1081  return;
1082 
1083  ASSERT (rp);
1084  ASSERT (root_rp);
1085 
1086  if (CLIB_DEBUG > 1)
1087  clib_warning ("[%d] unmap region %s", getpid (), rp->region_name);
1088 
1089  region_lock (root_rp, 5);
1090  region_lock (rp, 6);
1091 
1092  oldheap = svm_push_pvt_heap (rp); /* nb vec_delete() in the loop */
1093 
1094  /* Remove the caller from the list of mappers */
1095  for (i = 0; i < vec_len (rp->client_pids); i++)
1096  {
1097  if (rp->client_pids[i] == mypid)
1098  {
1099  vec_delete (rp->client_pids, 1, i);
1100  goto found;
1101  }
1102  }
1103  clib_warning ("pid %d AWOL", mypid);
1104 
1105 found:
1106 
1107  svm_pop_heap (oldheap);
1108 
1109  nclients_left = vec_len (rp->client_pids);
1110  virtual_base = rp->virtual_base;
1111  virtual_size = rp->virtual_size;
1112 
1113  if (nclients_left == 0)
1114  {
1115  int index, nbits, i;
1116  svm_main_region_t *mp;
1117  uword *p;
1118  svm_subregion_t *subp;
1119 
1120  /* Kill the region, last guy on his way out */
1121 
1122  oldheap = svm_push_pvt_heap (root_rp);
1123  name = vec_dup (rp->region_name);
1124 
1125  virtual_base = rp->virtual_base;
1126  virtual_size = rp->virtual_size;
1127 
1128  /* Figure out which bits to clear in the root region bitmap */
1129  index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE;
1130 
1131  nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE;
1132 
1133 #if CLIB_DEBUG > 1
1134  clib_warning ("clear %d bits at index %d", nbits, index);
1135 #endif
1136  /* Give back the allocated VM */
1137  for (i = 0; i < nbits; i++)
1138  {
1139  clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0);
1140  }
1141 
1142  mp = root_rp->data_base;
1143 
1144  p = hash_get_mem (mp->name_hash, name);
1145 
1146  /* Better never happen ... */
1147  if (p == NULL)
1148  {
1149  region_unlock (rp);
1150  region_unlock (root_rp);
1151  svm_pop_heap (oldheap);
1152  clib_warning ("Region name '%s' not found?", name);
1153  return;
1154  }
1155 
1156  /* Remove from the root region subregion pool */
1157  subp = mp->subregions + p[0];
1158  pool_put (mp->subregions, subp);
1159 
1160  hash_unset_mem (mp->name_hash, name);
1161 
1162  vec_free (name);
1163 
1164  region_unlock (rp);
1165 
1166  /* If a client asks for the cleanup, don't unlink the backing
1167  * file since we can't tell if it has been recreated. */
1168  if (!is_client)
1169  svm_region_unlink (rp);
1170 
1171  munmap ((void *) virtual_base, virtual_size);
1172  region_unlock (root_rp);
1173  svm_pop_heap (oldheap);
1174  return;
1175  }
1176 
1177  region_unlock (rp);
1178  region_unlock (root_rp);
1179 
1180  munmap ((void *) virtual_base, virtual_size);
1181 }
1182 
1183 void
1184 svm_region_unmap (void *rp_arg)
1185 {
1186  svm_region_unmap_internal (rp_arg, 0 /* is_client */ );
1187 }
1188 
1189 void
1190 svm_region_unmap_client (void *rp_arg)
1191 {
1192  svm_region_unmap_internal (rp_arg, 1 /* is_client */ );
1193 }
1194 
1195 /*
1196  * svm_region_exit
1197  */
1198 static void
1199 svm_region_exit_internal (u8 is_client)
1200 {
1201  void *oldheap;
1202  int i, mypid = getpid ();
1203  uword virtual_base, virtual_size;
1204 
1205  /* It felt so nice we did it twice... */
1206  if (root_rp == 0)
1207  return;
1208 
1209  if (--root_rp_refcount > 0)
1210  return;
1211 
1212  /*
1213  * If we take a signal while holding one or more shared-memory
1214  * mutexes, we may end up back here from an otherwise
1215  * benign exit handler. Bail out to avoid a recursive
1216  * mutex screw-up.
1217  */
1218  if (nheld)
1219  return;
1220 
1221  region_lock (root_rp, 7);
1222  oldheap = svm_push_pvt_heap (root_rp);
1223 
1224  virtual_base = root_rp->virtual_base;
1225  virtual_size = root_rp->virtual_size;
1226 
1227  for (i = 0; i < vec_len (root_rp->client_pids); i++)
1228  {
1229  if (root_rp->client_pids[i] == mypid)
1230  {
1231  vec_delete (root_rp->client_pids, 1, i);
1232  goto found;
1233  }
1234  }
1235  clib_warning ("pid %d AWOL", mypid);
1236 
1237 found:
1238 
1239  if (!is_client && vec_len (root_rp->client_pids) == 0)
1240  svm_region_unlink (root_rp);
1241 
1242  region_unlock (root_rp);
1243  svm_pop_heap (oldheap);
1244 
1245  root_rp = 0;
1246  munmap ((void *) virtual_base, virtual_size);
1247 }
1248 
1249 void
1250 svm_region_exit (void)
1251 {
1252  svm_region_exit_internal (0 /* is_client */ );
1253 }
1254 
1255 void
1256 svm_region_exit_client (void)
1257 {
1258  svm_region_exit_internal (1 /* is_client */ );
1259 }
1260 
1261 void
1262 svm_client_scan_this_region_nolock (svm_region_t * rp)
1263 {
1264  int j;
1265  int mypid = getpid ();
1266  void *oldheap;
1267 
1268  for (j = 0; j < vec_len (rp->client_pids); j++)
1269  {
1270  if (mypid == rp->client_pids[j])
1271  continue;
1272  if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0))
1273  {
1274  clib_warning ("%s: cleanup ghost pid %d",
1275  rp->region_name, rp->client_pids[j]);
1276  /* nb: client vec in rp->region_heap */
1277  oldheap = svm_push_pvt_heap (rp);
1278  vec_delete (rp->client_pids, 1, j);
1279  j--;
1280  svm_pop_heap (oldheap);
1281  }
1282  }
1283 }
1284 
1285 
1286 /*
1287  * Scan svm regions for dead clients
1288  */
1289 void
1290 svm_client_scan (const char *root_path)
1291 {
1292  int i, j;
1293  svm_main_region_t *mp;
1294  svm_map_region_args_t *a = 0;
1295  svm_region_t *root_rp;
1296  svm_region_t *rp;
1297  svm_subregion_t *subp;
1298  u8 *name = 0;
1299  u8 **svm_names = 0;
1300  void *oldheap;
1301  int mypid = getpid ();
1302 
1303  vec_validate (a, 0);
1304 
1305  svm_region_init_chroot (root_path);
1306 
1307  root_rp = svm_get_root_rp ();
1308 
1309  pthread_mutex_lock (&root_rp->mutex);
1310 
1311  mp = root_rp->data_base;
1312 
1313  for (j = 0; j < vec_len (root_rp->client_pids); j++)
1314  {
1315  if (mypid == root_rp->client_pids[j])
1316  continue;
1317  if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0))
1318  {
1319  clib_warning ("%s: cleanup ghost pid %d",
1320  root_rp->region_name, root_rp->client_pids[j]);
1321  /* nb: client vec in root_rp->region_heap */
1322  oldheap = svm_push_pvt_heap (root_rp);
1323  vec_delete (root_rp->client_pids, 1, j);
1324  j--;
1325  svm_pop_heap (oldheap);
1326  }
1327  }
1328 
1329  /*
1330  * Snapshot names, can't hold root rp mutex across
1331  * find_or_create.
1332  */
1333  /* *INDENT-OFF* */
1334  pool_foreach (subp, mp->subregions, ({
1335  name = vec_dup (subp->subregion_name);
1336  vec_add1(svm_names, name);
1337  }));
1338  /* *INDENT-ON* */
1339 
1340  pthread_mutex_unlock (&root_rp->mutex);
1341 
1342  for (i = 0; i < vec_len (svm_names); i++)
1343  {
1344  vec_validate (a, 0);
1345  a->root_path = root_path;
1346  a->name = (char *) svm_names[i];
1347  rp = svm_region_find_or_create (a);
1348  if (rp)
1349  {
1350  pthread_mutex_lock (&rp->mutex);
1351 
1352  svm_client_scan_this_region_nolock (rp);
1353 
1354  pthread_mutex_unlock (&rp->mutex);
1355  svm_region_unmap (rp);
1356  vec_free (svm_names[i]);
1357  }
1358  vec_free (a);
1359  }
1360  vec_free (svm_names);
1361 
1362  svm_region_exit ();
1363 
1364  vec_free (a);
1365 }
1366 
1367 /*
1368  * fd.io coding-style-patch-verification: ON
1369  *
1370  * Local Variables:
1371  * eval: (c-set-style "gnu")
1372  * End:
1373  */
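
Usage sketch (not part of svm.c): a minimal example of how a process might drive this API, assuming a standard VPP build with <svm/svm.h> on the include path; the root path, region name, and size below are illustrative only.

#include <svm/svm.h>

int
example_attach (void)
{
  svm_map_region_args_t _a, *a = &_a;
  svm_region_t *rp;

  /* Map (or create) the root region under /dev/shm */
  if (svm_region_init_chroot ("example-root") < 0)
    return -1;

  /* Carve a named subregion out of the root region's VA space */
  clib_memset (a, 0, sizeof (*a));
  a->name = "example-region";
  a->size = 64 << 20;           /* rounded up to a page multiple internally */
  a->flags = SVM_FLAGS_MHEAP;   /* give the region a shared data heap */

  rp = svm_region_find_or_create (a);
  if (rp == 0)
    return -1;

  /* ... allocate shared data from rp->data_heap / rp->data_base ... */

  svm_region_unmap (rp);        /* last unmapper deletes the region */
  svm_region_exit ();           /* detach from the root region at shutdown */
  return 0;
}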