FD.io VPP  v19.08-27-gf4dcae4
Vector Packet Processing
lock.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #ifndef included_clib_lock_h
17 #define included_clib_lock_h
18 
19 #include <vppinfra/clib.h>
20 #include <vppinfra/atomics.h>
21 
#if __x86_64__
/* Busy-wait hint: emit the SSE2 PAUSE instruction so the CPU relaxes
 * while we spin (reduces power and memory-order pipeline flushes). */
#define CLIB_PAUSE() __builtin_ia32_pause ()
#else
/* No spin-wait hint available on this architecture; expands to nothing. */
#define CLIB_PAUSE()
#endif
27 
#if CLIB_DEBUG > 1
/* Record the current lock holder (frame, pid, thread) in the lock's
 * debug fields; _p is a pointer to a heap-allocated lock object. */
#define CLIB_LOCK_DBG(_p) \
do { \
  (*_p)->frame_address = __builtin_frame_address (0); \
  (*_p)->pid = getpid (); \
  (*_p)->thread_index = os_get_thread_index (); \
} while (0)
/* Erase holder information when the lock is released. */
#define CLIB_LOCK_DBG_CLEAR(_p) \
do { \
  (*_p)->frame_address = 0; \
  (*_p)->pid = 0; \
  (*_p)->thread_index = 0; \
} while (0)
#else
/* Debug tracking compiled out. */
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif
45 
/* Non-atomic peek at the lock word — for assertions only, not synchronization. */
#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
48 
49 typedef struct
50 {
51  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
53 #if CLIB_DEBUG > 0
54  pid_t pid;
57 #endif
59 
60 static inline void
62 {
64  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
65 }
66 
67 static inline void
69 {
70  if (*p)
71  {
72  clib_mem_free ((void *) *p);
73  *p = 0;
74  }
75 }
76 
79 {
80  u32 free = 0;
81  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
82  {
83  /* atomic load limits number of compare_exchange executions */
84  while (clib_atomic_load_relax_n (&(*p)->lock))
85  CLIB_PAUSE ();
86  /* on failure, compare_exchange writes (*p)->lock into free */
87  free = 0;
88  }
89  CLIB_LOCK_DBG (p);
90 }
91 
94 {
95  if (PREDICT_FALSE (*p != 0))
97 }
98 
101 {
103  /* Make sure all reads/writes are complete before releasing the lock */
104  clib_atomic_release (&(*p)->lock);
105 }
106 
109 {
110  if (PREDICT_FALSE (*p != 0))
112 }
113 
114 /*
115  * Readers-Writer Lock
116  */
117 
118 typedef struct clib_rw_lock_
119 {
120  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
121  /* -1 when W lock held, > 0 when R lock held */
122  volatile i32 rw_cnt;
123 #if CLIB_DEBUG > 0
124  pid_t pid;
127 #endif
128 } *clib_rwlock_t;
129 
130 always_inline void
132 {
134  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
135 }
136 
137 always_inline void
139 {
140  if (*p)
141  {
142  clib_mem_free ((void *) *p);
143  *p = 0;
144  }
145 }
146 
147 always_inline void
149 {
150  i32 cnt;
151  do
152  {
153  /* rwlock held by a writer */
154  while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) < 0)
155  CLIB_PAUSE ();
156  }
158  (&(*p)->rw_cnt, &cnt, cnt + 1, 1));
159  CLIB_LOCK_DBG (p);
160 }
161 
162 always_inline void
164 {
165  ASSERT ((*p)->rw_cnt > 0);
167  clib_atomic_fetch_sub_rel (&(*p)->rw_cnt, 1);
168 }
169 
170 always_inline void
172 {
173  i32 cnt = 0;
174  do
175  {
176  /* rwlock held by writer or reader(s) */
177  while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) != 0)
178  CLIB_PAUSE ();
179  }
180  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->rw_cnt, &cnt, -1, 1));
181  CLIB_LOCK_DBG (p);
182 }
183 
184 always_inline void
186 {
188  clib_atomic_release (&(*p)->rw_cnt);
189 }
190 
191 #endif
192 
193 /*
194  * fd.io coding-style-patch-verification: ON
195  *
196  * Local Variables:
197  * eval: (c-set-style "gnu")
198  * End:
199  */
static void clib_rwlock_reader_lock(clib_rwlock_t *p)
Definition: lock.h:148
uword thread_index
Definition: lock.h:55
#define CLIB_PAUSE()
Definition: lock.h:23
#define CLIB_CACHE_LINE_ALIGN_MARK(mark)
Definition: cache.h:60
static_always_inline void clib_spinlock_unlock(clib_spinlock_t *p)
Definition: lock.h:100
static_always_inline void clib_spinlock_lock(clib_spinlock_t *p)
Definition: lock.h:78
static void clib_rwlock_writer_lock(clib_rwlock_t *p)
Definition: lock.h:171
CLIB_CACHE_LINE_ALIGN_MARK(cacheline0)
static void clib_rwlock_free(clib_rwlock_t *p)
Definition: lock.h:138
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
Definition: lock.h:108
pid_t pid
Definition: lock.h:124
clib_memset(h->entries, 0, sizeof(h->entries[0])*entries)
void * frame_address
Definition: lock.h:126
static void clib_spinlock_free(clib_spinlock_t *p)
Definition: lock.h:68
#define CLIB_LOCK_DBG_CLEAR(_p)
Definition: lock.h:43
#define static_always_inline
Definition: clib.h:99
#define always_inline
Definition: clib.h:98
unsigned int u32
Definition: types.h:88
uword thread_index
Definition: lock.h:125
u32 lock
Definition: lock.h:52
static void clib_spinlock_init(clib_spinlock_t *p)
Definition: lock.h:61
static void clib_rwlock_init(clib_rwlock_t *p)
Definition: lock.h:131
#define clib_atomic_release(a)
Definition: atomics.h:43
static void clib_rwlock_reader_unlock(clib_rwlock_t *p)
Definition: lock.h:163
#define PREDICT_FALSE(x)
Definition: clib.h:111
static void clib_rwlock_writer_unlock(clib_rwlock_t *p)
Definition: lock.h:185
pid_t pid
Definition: lock.h:54
struct clib_rw_lock_ * clib_rwlock_t
#define clib_atomic_load_relax_n(a)
Definition: atomics.h:47
signed int i32
Definition: types.h:77
#define ASSERT(truth)
static void clib_mem_free(void *p)
Definition: mem.h:226
volatile i32 rw_cnt
Definition: lock.h:122
#define clib_atomic_fetch_sub_rel(a, b)
Definition: atomics.h:55
u64 uword
Definition: types.h:112
#define clib_atomic_cmp_and_swap_acq_relax_n(addr, exp, new, weak)
Definition: atomics.h:40
static void * clib_mem_alloc_aligned(uword size, uword align)
Definition: mem.h:161
void * frame_address
Definition: lock.h:56
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
Definition: lock.h:93
#define CLIB_LOCK_DBG(_p)
Definition: lock.h:42