FD.io VPP  v17.01-9-ge7dcee4
Vector Packet Processing
smp.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include <vppinfra/longjmp.h>
#include <vppinfra/mheap.h>
#include <vppinfra/os.h>

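/* SMP support: a per-CPU virtual memory region (heap below, stack at the
   top) plus a shared global heap, and a FIFO-based lock with reader,
   writer and spin variants (lock allocation is elided when n_cpus < 2). */
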
void
clib_smp_free (clib_smp_main_t * m)
{
  clib_mem_vm_free (m->vm_base,
                    (uword) ((1 + m->n_cpus) << m->log2_n_per_cpu_vm_bytes));
}

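/* Runs on the target CPU's own stack (see the clib_calljmp call in
   clib_smp_init below).  Builds this CPU's mheap in its VM region, leaving
   the top of the region for the stack; CPU 0 additionally sizes the
   per-CPU mains vector and creates the shared, thread-safe global heap. */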
static uword
allocate_per_cpu_mheap (uword cpu)
{
  clib_smp_main_t *m = &clib_smp_main;
  void *heap;
  uword vm_size, stack_size, mheap_flags;

  ASSERT (os_get_cpu_number () == cpu);

  vm_size = (uword) 1 << m->log2_n_per_cpu_vm_bytes;
  stack_size = (uword) 1 << m->log2_n_per_cpu_stack_bytes;

  mheap_flags = MHEAP_FLAG_SMALL_OBJECT_CACHE;

  /* Heap extends up to start of stack. */
  heap = mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu),
                                 vm_size - stack_size, mheap_flags);
  clib_mem_set_heap (heap);

  if (cpu == 0)
    {
      /* Now that we have a heap, allocate main structure on cpu 0. */
      vec_resize (m->per_cpu_mains, m->n_cpus);

      /* Allocate shared global heap (thread safe). */
      m->global_heap =
        mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu + m->n_cpus),
                                vm_size,
                                mheap_flags | MHEAP_FLAG_THREAD_SAFE);
    }

  m->per_cpu_mains[cpu].heap = heap;
  return 0;
}

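/* Reserve a contiguous arena of (n_cpus + 1) regions of
   2^log2_n_per_cpu_vm_bytes bytes each: one region per CPU and one extra
   for the shared global heap, then set up each CPU's heap while running on
   that CPU's stack. */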
void
clib_smp_init (void)
{
  clib_smp_main_t *m = &clib_smp_main;
  uword cpu;

  m->vm_base =
    clib_mem_vm_alloc ((uword) (m->n_cpus + 1) << m->log2_n_per_cpu_vm_bytes);
  if (!m->vm_base)
    clib_error ("error allocating virtual memory");

  for (cpu = 0; cpu < m->n_cpus; cpu++)
    clib_calljmp (allocate_per_cpu_mheap, cpu,
                  clib_smp_stack_top_for_cpu (m, cpu));
}

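/* A lock is a single cache-line-aligned allocation: a header updated
   atomically via clib_smp_lock_set_header (apparently compare-and-swap
   style, since callers compare the returned header against the value they
   expected), followed by a waiting FIFO with one slot per potential
   waiter. */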
void
clib_smp_lock_init (clib_smp_lock_t ** pl)
{
  clib_smp_lock_t *l;
  uword i, n_bytes, n_fifo_elts;

  /* No locking necessary if n_cpus <= 1.
     Null means no locking is necessary. */
  if (clib_smp_main.n_cpus < 2)
    {
      *pl = 0;
      return;
    }

  /* Need n_cpus - 1 elts in waiting fifo.  One CPU holds lock
     and others could potentially be waiting. */
  n_fifo_elts = clib_smp_main.n_cpus - 1;

  n_bytes = sizeof (l[0]) + n_fifo_elts * sizeof (l->waiting_fifo[0]);
  ASSERT_AND_PANIC (n_bytes % CLIB_CACHE_LINE_BYTES == 0);

  l = clib_mem_alloc_aligned (n_bytes, CLIB_CACHE_LINE_BYTES);

  memset (l, 0, n_bytes);
  l->n_waiting_fifo_elts = n_fifo_elts;

  for (i = 0; i < l->n_waiting_fifo_elts; i++)
    l->waiting_fifo[i].wait_type = CLIB_SMP_LOCK_WAIT_EMPTY;

  *pl = l;
}

void
clib_smp_lock_free (clib_smp_lock_t ** pl)
{
  if (*pl)
    clib_mem_free (*pl);
  *pl = 0;
}

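/* Contended-acquire path: atomically claim the next tail slot of the
   waiting FIFO, publish the wait type (reader or writer), then spin with
   clib_smp_pause () until the current holder marks the slot
   CLIB_SMP_LOCK_WAIT_DONE.  Spin locks may instead take the lock directly
   if the holder releases it while the header update is being retried. */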
void
clib_smp_lock_slow_path (clib_smp_lock_t * l,
                         uword my_cpu,
                         clib_smp_lock_header_t h0, clib_smp_lock_type_t type)
{
  clib_smp_lock_header_t h1, h2, h3;
  uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
  uword n_fifo_elts = l->n_waiting_fifo_elts;
  uword my_tail;

  /* Atomically advance waiting FIFO tail pointer; my_tail will point
     to entry where we can insert ourselves to wait for lock to be granted. */
  while (1)
    {
      h1 = h0;
      my_tail = h1.waiting_fifo.head_index + h1.waiting_fifo.n_elts;
      my_tail = my_tail >= n_fifo_elts ? my_tail - n_fifo_elts : my_tail;
      h1.waiting_fifo.n_elts += 1;
      h1.request_cpu = my_cpu;

      ASSERT_AND_PANIC (h1.waiting_fifo.n_elts <= n_fifo_elts);
      ASSERT_AND_PANIC (my_tail >= 0 && my_tail < n_fifo_elts);

      h2 = clib_smp_lock_set_header (l, h1, h0);

      /* Tail successfully advanced? */
      if (clib_smp_lock_header_is_equal (h0, h2))
        break;

      /* If head and tail are both zero, the CPU holding the lock may have
         released it in the meantime. */
      else if (type == CLIB_SMP_LOCK_TYPE_SPIN)
        {
          while (!h2.writer_has_lock)
            {
              ASSERT_AND_PANIC (h2.waiting_fifo.n_elts == 0);
              h1 = h2;
              h1.request_cpu = my_cpu;
              h1.writer_has_lock = 1;

              h3 = clib_smp_lock_set_header (l, h1, h2);

              /* Got it? */
              if (clib_smp_lock_header_is_equal (h2, h3))
                return;

              h2 = h3;
            }
        }

      /* Try to advance tail again. */
      h0 = h2;
    }

  {
    clib_smp_lock_waiting_fifo_elt_t *w;

    w = l->waiting_fifo + my_tail;

    while (w->wait_type != CLIB_SMP_LOCK_WAIT_EMPTY)
      clib_smp_pause ();

    w->wait_type = (is_reader
                    ? CLIB_SMP_LOCK_WAIT_READER : CLIB_SMP_LOCK_WAIT_WRITER);

    /* Wait until CPU holding the lock grants us the lock. */
    while (w->wait_type != CLIB_SMP_LOCK_WAIT_DONE)
      clib_smp_pause ();

    w->wait_type = CLIB_SMP_LOCK_WAIT_EMPTY;
  }
}

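/* Contended-release path: pop the head of the waiting FIFO and hand the
   lock over by setting that slot to CLIB_SMP_LOCK_WAIT_DONE.  A waiting
   writer is not granted the lock until every reader has released it, and
   consecutive waiting readers are woken one after another so they hold the
   lock concurrently. */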
void
clib_smp_unlock_slow_path (clib_smp_lock_t * l,
                           uword my_cpu,
                           clib_smp_lock_header_t h0,
                           clib_smp_lock_type_t type)
{
  clib_smp_lock_header_t h1, h2;
  clib_smp_lock_waiting_fifo_elt_t *head;
  clib_smp_lock_wait_type_t head_wait_type;
  uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
  uword n_fifo_elts = l->n_waiting_fifo_elts;
  uword head_index, must_wait_for_readers;

  while (1)
    {
      /* Advance waiting fifo giving lock to first waiter. */
      while (1)
        {
          ASSERT_AND_PANIC (h0.waiting_fifo.n_elts != 0);

          h1 = h0;

          head_index = h1.waiting_fifo.head_index;
          head = l->waiting_fifo + head_index;
          if (is_reader)
            {
              ASSERT_AND_PANIC (h1.n_readers_with_lock > 0);
              h1.n_readers_with_lock -= 1;
            }
          else
            {
              /* Writer will already have lock. */
              ASSERT_AND_PANIC (h1.writer_has_lock);
            }

          while ((head_wait_type = head->wait_type) == CLIB_SMP_LOCK_WAIT_EMPTY)
            clib_smp_pause ();

          /* Don't advance FIFO to writer unless all readers have unlocked. */
          must_wait_for_readers =
            (type != CLIB_SMP_LOCK_TYPE_SPIN
             && head_wait_type == CLIB_SMP_LOCK_WAIT_WRITER
             && h1.n_readers_with_lock != 0);

          if (!must_wait_for_readers)
            {
              head_index += 1;
              h1.waiting_fifo.n_elts -= 1;
              if (type != CLIB_SMP_LOCK_TYPE_SPIN)
                {
                  if (head_wait_type == CLIB_SMP_LOCK_WAIT_WRITER)
                    h1.writer_has_lock = h1.n_readers_with_lock == 0;
                  else
                    {
                      h1.writer_has_lock = 0;
                      h1.n_readers_with_lock += 1;
                    }
                }
            }

          h1.waiting_fifo.head_index =
            head_index == n_fifo_elts ? 0 : head_index;
          h1.request_cpu = my_cpu;

          ASSERT_AND_PANIC (h1.waiting_fifo.head_index >= 0
                            && h1.waiting_fifo.head_index < n_fifo_elts);
          ASSERT_AND_PANIC (h1.waiting_fifo.n_elts >= 0
                            && h1.waiting_fifo.n_elts <= n_fifo_elts);

          h2 = clib_smp_lock_set_header (l, h1, h0);

          if (clib_smp_lock_header_is_equal (h2, h0))
            break;

          h0 = h2;

          if (h0.waiting_fifo.n_elts == 0)
            return clib_smp_unlock_inline (l, type);
        }

      if (must_wait_for_readers)
        return;

      /* Wake up head of waiting fifo. */
      {
        uword done_waking;

        /* Shift lock to first thread waiting in fifo. */
        head->wait_type = CLIB_SMP_LOCK_WAIT_DONE;

        /* For read locks we may be able to wake multiple readers. */
        done_waking = 1;
        if (head_wait_type == CLIB_SMP_LOCK_WAIT_READER)
          {
            uword hi = h0.waiting_fifo.head_index;
            if (h0.waiting_fifo.n_elts != 0
                && l->waiting_fifo[hi].wait_type == CLIB_SMP_LOCK_WAIT_READER)
              done_waking = 0;
          }

        if (done_waking)
          break;
      }
    }
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */