FD.io VPP  v18.04-17-g3a0d853
Vector Packet Processing
threads.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #ifndef included_vlib_threads_h
16 #define included_vlib_threads_h
17 
18 #include <vlib/main.h>
19 #include <linux/sched.h>
20 
21 /*
22  * To enable detailed tracing of barrier usage, including call stacks and
23  * timings, define BARRIER_TRACING here or in relevant TAGS. If also used
24  * with CLIB_DEBUG, timing will _not_ be representative of normal code
25  * execution.
26  *
27  */
28 
29 // #define BARRIER_TRACING 1
30 
31 /*
32  * Two options for barrier tracing output: syslog & elog.
33  */
34 
35 // #define BARRIER_TRACING_ELOG 1
36 
37 extern vlib_main_t **vlib_mains;
38 
39 void vlib_set_thread_name (char *name);
40 
41 /* arg is actually a vlib__thread_t * */
42 typedef void (vlib_thread_function_t) (void *arg);
43 
45 {
46  /* constructor generated list of thread registrations */
48 
49  /* config parameters */
50  char *name;
51  char *short_name;
58 
59  /* All threads of this type run on pthreads */
64 
65 /*
66  * Frames have their cpu / vlib_main_t index in the low-order N bits
67  * Make VLIB_MAX_CPUS a power-of-two, please...
68  */
69 
70 #ifndef VLIB_MAX_CPUS
71 #define VLIB_MAX_CPUS 256
72 #endif
73 
74 #if VLIB_MAX_CPUS > CLIB_MAX_MHEAPS
75 #error Please increase number of per-cpu mheaps
76 #endif
77 
78 #define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1) /* 0xff with the default VLIB_MAX_CPUS of 256; must be a power of two minus one */
79 #define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)
80 
/* Per-thread stack size: 1 << 21 = 2 MB */
81 #define VLIB_LOG2_THREAD_STACK_SIZE (21)
82 #define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)
83 
84 typedef enum
85 {
88 
89 typedef struct
90 {
91  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
92  volatile u32 valid;
96 
97  /* 256 * 4 = 1024 bytes, even mult of cache line size */
98  u32 buffer_index[VLIB_FRAME_SIZE];
99 }
101 
102 typedef struct
103 {
104  /* First cache line */
105  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
106  volatile u32 *wait_at_barrier;
108 
109  /* Second Cache Line */
110  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
113  void (*thread_function) (void *);
121 #ifdef BARRIER_TRACING
122  const char *barrier_caller;
123  const char *barrier_context;
124 #endif
126 
127  long lwp;
128  int lcore_id;
129  pthread_t thread_id;
131 
133 
134 typedef struct
135 {
136  /* enqueue side */
137  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
138  volatile u64 tail;
143 
144  /* dequeue side */
145  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
146  volatile u64 head;
152 
153  /* dequeue hint to enqueue side */
154  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
155  volatile u64 head_hint;
156 
157  /* read-only, constant, shared */
158  CLIB_CACHE_LINE_ALIGN_MARK (cacheline3);
161 }
163 
164 typedef struct
165 {
168 
169  /* for frame queue tracing */
173 
174 typedef struct
175 {
180 
181 /* Called early, in thread 0's context */
183 
184 int vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
185  u32 frame_queue_index, vlib_frame_t * frame,
186  vlib_frame_queue_msg_type_t type);
187 
188 int
190 
192 
193 void vlib_create_worker_threads (vlib_main_t * vm, int n,
194  void (*thread_function) (void *));
195 
198 
199 /* Check for a barrier sync request every 30ms */
200 #define BARRIER_SYNC_DELAY (0.030000)
201 
202 #if CLIB_DEBUG > 0
203 /* long barrier timeout, for gdb... */
204 #define BARRIER_SYNC_TIMEOUT (600.1)
205 #else
206 #define BARRIER_SYNC_TIMEOUT (1.0)
207 #endif
208 
/*
 * Barrier-sync entry point. With BARRIER_TRACING enabled, record the
 * name of the requesting function first so the trace output can
 * attribute each sync to its caller; otherwise call straight through
 * to vlib_worker_thread_barrier_sync_int().
 */
209 #ifdef BARRIER_TRACING
210 #define vlib_worker_thread_barrier_sync(X) {vlib_worker_threads[0].barrier_caller=__FUNCTION__;vlib_worker_thread_barrier_sync_int(X);}
211 #else
212 #define vlib_worker_thread_barrier_sync(X) vlib_worker_thread_barrier_sync_int(X)
213 #endif
214 
215 
219 
222 {
223  return __os_thread_index;
224 }
225 
226 always_inline void
228 {
229  if (CLIB_DEBUG > 0)
230  {
231  if (vlib_get_thread_index ())
232  fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__);
233  }
234 }
235 
236 typedef enum
237 {
241 
242 void vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which);
243 
/*
 * Iterate over every vlib_main_t (main thread plus workers), binding
 * each in turn to this_vlib_main and evaluating 'body'. A private
 * snapshot vector (__vlib_mains) is built first so 'body' may safely
 * touch per-thread state; all workers other than thread 0 are expected
 * to be parked at the barrier while this runs.
 *
 * NOTE(review): the ASSERT dereferences this_vlib_main->parked_at_barrier
 * before the NULL check on the following line; if a NULL entry in
 * vlib_mains is actually possible the check should precede the ASSERT --
 * confirm against callers.
 */
244 #define foreach_vlib_main(body) \
245 do { \
246  vlib_main_t ** __vlib_mains = 0, *this_vlib_main; \
247  int ii; \
248  \
249  for (ii = 0; ii < vec_len (vlib_mains); ii++) \
250  { \
251  this_vlib_main = vlib_mains[ii]; \
252  ASSERT (ii == 0 || \
253  this_vlib_main->parked_at_barrier == 1); \
254  if (this_vlib_main) \
255  vec_add1 (__vlib_mains, this_vlib_main); \
256  } \
257  \
258  for (ii = 0; ii < vec_len (__vlib_mains); ii++) \
259  { \
260  this_vlib_main = __vlib_mains[ii]; \
261  /* body uses this_vlib_main... */ \
262  (body); \
263  } \
264  vec_free (__vlib_mains); \
265 } while (0);
266 
/*
 * X-macro list of supported scheduling policies:
 * _(POSIX policy constant, enum tag suffix, CLI name string).
 * Expanded to build the sched_policy_t enum and its name table.
 */
267 #define foreach_sched_policy \
268  _(SCHED_OTHER, OTHER, "other") \
269  _(SCHED_BATCH, BATCH, "batch") \
270  _(SCHED_IDLE, IDLE, "idle") \
271  _(SCHED_FIFO, FIFO, "fifo") \
272  _(SCHED_RR, RR, "rr")
273 
274 typedef enum
275 {
276 #define _(v,f,s) SCHED_POLICY_##f = v,
278 #undef _
281 
/*
 * Callbacks allowing an external thread-management layer to override
 * how worker threads are launched and pinned; registered via
 * vlib_thread_cb_register(). Each returns a clib_error_t* (0 on success).
 */
282 typedef struct
283 {
 /* Launch worker 'w' running entry point 'fp' on the given lcore */
284  clib_error_t *(*vlib_launch_thread_cb) (void *fp, vlib_worker_thread_t * w,
285  unsigned lcore_id);
 /* Pin an already-created thread to the given lcore */
286  clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 lcore);
287 } vlib_thread_callbacks_t;
288 
289 typedef struct
290 {
291  /* Link list of registrations, built by constructors */
293 
294  /* Vector of registrations, w/ non-data-structure clones at the top */
296 
298 
300 
301  /*
302  * Launch all threads as pthreads,
303  * not eal_rte_launch (strict affinity) threads
304  */
306 
307  /* Number of vlib_main / vnet_main clones */
309 
310  /* Number of thread stacks to create */
312 
313  /* Number of pthreads */
315 
316  /* Number of threads */
318 
319  /* Number of cores to skip, must match the core mask */
321 
322  /* Thread prefix name */
324 
325  /* main thread lcore */
327 
328  /* Bitmap of available CPU cores */
330 
331  /* Bitmap of available CPU sockets (NUMA nodes) */
333 
334  /* Worker handoff queues */
336 
337  /* worker thread initialization barrier */
339 
340  /* scheduling policy */
342 
343  /* scheduling policy priority */
345 
346  /* callbacks */
347  vlib_thread_callbacks_t cb;
350 
352 
353 #include <vlib/global_funcs.h>
354 
/*
 * Declare a thread-type registration 'x'. A GCC/Clang
 * __attribute__((__constructor__)) function links the registration onto
 * the vlib_thread_main.next list before main() runs, so all thread
 * types are discoverable at startup without explicit registration calls.
 */
355 #define VLIB_REGISTER_THREAD(x,...) \
356  __VA_ARGS__ vlib_thread_registration_t x; \
357 static void __vlib_add_thread_registration_##x (void) \
358  __attribute__((__constructor__)) ; \
359 static void __vlib_add_thread_registration_##x (void) \
360 { \
361  vlib_thread_main_t * tm = &vlib_thread_main; \
362  x.next = tm->next; \
363  tm->next = &x; \
364 } \
365 __VA_ARGS__ vlib_thread_registration_t x
366 
369 {
370  return vlib_thread_main.n_vlib_mains - 1;
371 }
372 
375 {
376  return worker_index + 1;
377 }
378 
381 {
382  return thread_index - 1;
383 }
384 
387 {
388  return vlib_get_thread_index () - 1;
389 }
390 
391 static inline void
393 {
394  if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
395  {
396  vlib_main_t *vm;
397  clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
398  if (CLIB_DEBUG > 0)
399  {
400  vm = vlib_get_main ();
401  vm->parked_at_barrier = 1;
402  }
403  while (*vlib_worker_threads->wait_at_barrier)
404  ;
405  if (CLIB_DEBUG > 0)
406  vm->parked_at_barrier = 0;
407  clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
408 
409  if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
410  {
412  clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
413  -1);
414  while (*vlib_worker_threads->node_reforks_required)
415  ;
416  }
417  }
418 }
419 
422 {
423  vlib_main_t *vm;
425  ASSERT (worker_index < tm->n_vlib_mains - 1);
426  vm = vlib_mains[worker_index + 1];
427  ASSERT (vm);
428  return vm;
429 }
430 
431 static inline void
433 {
435  hf->valid = 1;
436 }
437 
438 static inline vlib_frame_queue_elt_t *
439 vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
440 {
441  vlib_frame_queue_t *fq;
445  vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
446  u64 new_tail;
447 
448  fq = fqm->vlib_frame_queues[index];
449  ASSERT (fq);
450 
451  new_tail = __sync_add_and_fetch (&fq->tail, 1);
452 
453  /* Wait until a ring slot is available */
454  while (new_tail >= fq->head_hint + fq->nelts)
456 
457  elt = fq->elts + (new_tail & (fq->nelts - 1));
458 
459  /* this would be very bad... */
460  while (elt->valid)
461  ;
462 
464  elt->last_n_vectors = elt->n_vectors = 0;
465 
466  return elt;
467 }
468 
469 static inline vlib_frame_queue_t *
471  u32 index,
472  u32 queue_hi_thresh,
474  handoff_queue_by_worker_index)
475 {
476  vlib_frame_queue_t *fq;
479  vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
480 
481  fq = handoff_queue_by_worker_index[index];
482  if (fq != (vlib_frame_queue_t *) (~0))
483  return fq;
484 
485  fq = fqm->vlib_frame_queues[index];
486  ASSERT (fq);
487 
488  if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
489  {
490  /* a valid entry in the array will indicate the queue has reached
491  * the specified threshold and is congested
492  */
493  handoff_queue_by_worker_index[index] = fq;
494  fq->enqueue_full_events++;
495  return fq;
496  }
497 
498  return NULL;
499 }
500 
501 static inline vlib_frame_queue_elt_t *
503  u32 vlib_worker_index,
505  handoff_queue_elt_by_worker_index)
506 {
508 
509  if (handoff_queue_elt_by_worker_index[vlib_worker_index])
510  return handoff_queue_elt_by_worker_index[vlib_worker_index];
511 
512  elt = vlib_get_frame_queue_elt (frame_queue_index, vlib_worker_index);
513 
514  handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;
515 
516  return elt;
517 }
518 
519 u8 *vlib_thread_stack_init (uword thread_index);
520 int vlib_thread_cb_register (struct vlib_main_t *vm,
521  vlib_thread_callbacks_t * cb);
522 extern void *rpc_call_main_thread_cb_fn;
523 
524 void
526  args);
527 void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
528 
529 #endif /* included_vlib_threads_h */
530 
531 /*
532  * fd.io coding-style-patch-verification: ON
533  *
534  * Local Variables:
535  * eval: (c-set-style "gnu")
536  * End:
537  */
#define CLIB_CACHE_LINE_ALIGN_MARK(mark)
Definition: cache.h:60
void vlib_worker_thread_init(vlib_worker_thread_t *w)
Definition: threads.c:633
int vlib_frame_queue_enqueue(vlib_main_t *vm, u32 node_runtime_index, u32 frame_queue_index, vlib_frame_t *frame, vlib_frame_queue_msg_type_t type)
#define NULL
Definition: clib.h:55
void vlib_set_thread_name(char *name)
Definition: threads.c:267
void * thread_function_arg
Definition: threads.h:114
frame_queue_trace_t * frame_queue_traces
Definition: threads.h:170
elog_track_t elog_track
Definition: threads.h:116
vlib_worker_thread_t * vlib_worker_threads
Definition: threads.c:35
void vlib_rpc_call_main_thread(void *function, u8 *args, u32 size)
Definition: threads.c:1804
struct vlib_thread_registration_ * next
Definition: threads.h:47
int vlib_thread_cb_register(struct vlib_main_t *vm, vlib_thread_callbacks_t *cb)
Definition: threads.c:1780
volatile u32 valid
Definition: threads.h:92
static vlib_frame_queue_t * is_vlib_frame_queue_congested(u32 frame_queue_index, u32 index, u32 queue_hi_thresh, vlib_frame_queue_t **handoff_queue_by_worker_index)
Definition: threads.h:470
static void vlib_smp_unsafe_warning(void)
Definition: threads.h:227
pthread_t thread[MAX_CONNS]
Definition: main.c:125
volatile int parked_at_barrier
Definition: main.h:196
static void vlib_worker_thread_barrier_check(void)
Definition: threads.h:392
vlib_thread_registration_t * next
Definition: threads.h:292
static u32 vlib_get_worker_index(u32 thread_index)
Definition: threads.h:380
#define static_always_inline
Definition: clib.h:93
#define clib_smp_atomic_add(addr, increment)
Definition: smp.h:46
#define always_inline
Definition: clib.h:92
vlib_frame_queue_msg_type_t
Definition: threads.h:84
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
uword * cpu_core_bitmap
Definition: threads.h:329
unsigned long u64
Definition: types.h:89
int vlib_frame_queue_dequeue(vlib_main_t *vm, vlib_frame_queue_main_t *fqm)
Definition: threads.c:1592
vlib_frame_queue_elt_t * elts
Definition: threads.h:159
static u32 vlib_get_current_worker_index()
Definition: threads.h:386
#define VLIB_FRAME_SIZE
Definition: node.h:328
vlib_fork_fixup_t
Definition: threads.h:236
void vlib_process_signal_event_mt_helper(vlib_process_signal_event_mt_args_t *args)
Definition: threads.c:1793
int extern_thread_mgmt
Definition: threads.h:348
sched_policy_t
Definition: threads.h:274
volatile u64 head
Definition: threads.h:146
u8 * vlib_thread_stack_init(uword thread_index)
Definition: main.c:575
vlib_thread_callbacks_t cb
Definition: threads.h:347
void vlib_worker_thread_barrier_release(vlib_main_t *vm)
Definition: threads.c:1497
#define PREDICT_FALSE(x)
Definition: clib.h:105
static vlib_frame_queue_elt_t * vlib_get_worker_handoff_queue_elt(u32 frame_queue_index, u32 vlib_worker_index, vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index)
Definition: threads.h:502
long i64
Definition: types.h:82
word fformat(FILE *f, char *fmt,...)
Definition: format.c:453
void vlib_worker_thread_node_runtime_update(void)
Definition: threads.c:1227
void vlib_worker_thread_fork_fixup(vlib_fork_fixup_t which)
Definition: threads.c:1402
volatile u64 tail
Definition: threads.h:138
vlib_frame_queue_t ** vlib_frame_queues
Definition: threads.h:167
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:221
vlib_main_t * vm
Definition: buffer.c:294
frame_queue_nelt_counter_t * frame_queue_histogram
Definition: threads.h:171
void * rpc_call_main_thread_cb_fn
Definition: threads.c:1801
vlib_worker_thread_t * worker_threads
Definition: threads.h:299
static vlib_frame_queue_elt_t * vlib_get_frame_queue_elt(u32 frame_queue_index, u32 index)
Definition: threads.h:439
volatile u32 * wait_at_barrier
Definition: threads.h:106
#define ASSERT(truth)
unsigned int u32
Definition: types.h:88
vlib_frame_queue_main_t * frame_queue_mains
Definition: threads.h:335
struct vlib_thread_registration_ vlib_thread_registration_t
uword * thread_registrations_by_name
Definition: threads.h:297
u32 vlib_frame_queue_main_init(u32 node_index, u32 frame_queue_nelts)
Definition: threads.c:1754
volatile u32 * node_reforks_required
Definition: threads.h:125
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
clib_error_t * vlib_thread_init(vlib_main_t *vm)
Definition: threads.c:322
u64 uword
Definition: types.h:112
u32 enqueue_full_events
Definition: threads.h:142
unsigned short u16
Definition: types.h:57
unsigned char u8
Definition: types.h:56
vlib_main_t ** vlib_mains
Definition: buffer.c:303
void vlib_create_worker_threads(vlib_main_t *vm, int n, void(*thread_function)(void *))
volatile u64 head_hint
Definition: threads.h:155
static void vlib_put_frame_queue_elt(vlib_frame_queue_elt_t *hf)
Definition: threads.h:432
volatile u32 * workers_at_barrier
Definition: threads.h:107
static u32 vlib_num_workers()
Definition: threads.h:368
void vlib_worker_thread_node_refork(void)
Definition: threads.c:1072
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:109
uword * cpu_socket_bitmap
Definition: threads.h:332
#define foreach_sched_policy
Definition: threads.h:267
void( vlib_thread_function_t)(void *arg)
Definition: threads.h:42
vlib_thread_registration_t ** registrations
Definition: threads.h:295
vlib_thread_main_t vlib_thread_main
Definition: threads.c:36
pthread_t thread_id
Definition: threads.h:129
vlib_thread_registration_t * registration
Definition: threads.h:118
volatile u32 worker_thread_release
Definition: threads.h:338
void vlib_worker_thread_barrier_sync_int(vlib_main_t *vm)
Definition: threads.c:1440
static vlib_main_t * vlib_get_worker_vlib_main(u32 worker_index)
Definition: threads.h:421
static u32 vlib_get_worker_thread_index(u32 worker_index)
Definition: threads.h:374