#ifndef included_vlib_threads_h
#define included_vlib_threads_h

#include <linux/sched.h>

#define VLIB_MAX_CPUS 256

#if VLIB_MAX_CPUS > CLIB_MAX_MHEAPS
#error Please increase number of per-cpu mheaps
#endif

#define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1)
#define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)

#define VLIB_LOG2_THREAD_STACK_SIZE (20)
#define VLIB_THREAD_STACK_SIZE (1 << VLIB_LOG2_THREAD_STACK_SIZE)

/* thread entry point, a member of vlib_thread_registration_t */
void (*thread_function) (void *);
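Since VLIB_MAX_CPUS is a power of two, the two masks split a word into a CPU part and an offset part. A minimal sketch of the arithmetic; the variable names are illustrative, not from the header:

u32 word   = 3 | (0x1000 & VLIB_OFFSET_MASK); /* cpu 3, offset 0x1000 */
u32 cpu    = word & VLIB_CPU_MASK;            /* == 3 */
u32 offset = word & VLIB_OFFSET_MASK;         /* == 0x1000 */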
int vlib_frame_queue_enqueue (vlib_main_t *vm, u32 node_runtime_index,
                              u32 frame_queue_index, vlib_frame_t *frame,
                              vlib_frame_queue_msg_type_t type);
void vlib_create_worker_threads (vlib_main_t *vm, int n,
                                 void (*thread_function) (void *));
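A minimal sketch of a thread function one might hand to vlib_create_worker_threads(); my_worker_fn and its loop body are hypothetical, only the vlib_* calls come from this header:

static void
my_worker_fn (void *arg)
{
  vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;

  vlib_worker_thread_init (w);  /* per-thread setup, declared below */
  while (1)
    {
      /* poll handoff queues, dispatch graph nodes, check the barrier */
    }
}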
#define BARRIER_SYNC_DELAY (0.030000) /* 30 ms */

#if CLIB_DEBUG > 0
/* long barrier timeout, for gdb... */
#define BARRIER_SYNC_TIMEOUT (600.1)
#else
#define BARRIER_SYNC_TIMEOUT (1.0)
#endif

static inline void
vlib_smp_unsafe_warning (void)
{
  if (CLIB_DEBUG > 0)
    {
      if (os_get_cpu_number ())
        fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__);
    }
}
#define foreach_vlib_main(body)                         \
do {                                                    \
  vlib_main_t ** __vlib_mains = 0, *this_vlib_main;     \
  int ii;                                               \
                                                        \
  if (vec_len (vlib_mains) == 0)                        \
    vec_add1 (__vlib_mains, &vlib_global_main);         \
  else                                                  \
    {                                                   \
      for (ii = 0; ii < vec_len (vlib_mains); ii++)     \
        {                                               \
          this_vlib_main = vlib_mains[ii];              \
          if (this_vlib_main)                           \
            vec_add1 (__vlib_mains, this_vlib_main);    \
        }                                               \
    }                                                   \
                                                        \
  for (ii = 0; ii < vec_len (__vlib_mains); ii++)       \
    {                                                   \
      this_vlib_main = __vlib_mains[ii];                \
      body;                                             \
    }                                                   \
  vec_free (__vlib_mains);                              \
} while (0);

#define foreach_sched_policy            \
  _(SCHED_OTHER, OTHER, "other")        \
  _(SCHED_BATCH, BATCH, "batch")        \
  _(SCHED_IDLE, IDLE, "idle")           \
  _(SCHED_FIFO, FIFO, "fifo")           \
  _(SCHED_RR, RR, "rr")

typedef enum
{
#define _(v,f,s) SCHED_POLICY_##f = v,
  foreach_sched_policy
#undef _
} sched_policy_t;

#define VLIB_REGISTER_THREAD(x,...)                     \
  __VA_ARGS__ vlib_thread_registration_t x;             \
static void __vlib_add_thread_registration_##x (void)   \
  __attribute__((__constructor__)) ;                    \
static void __vlib_add_thread_registration_##x (void)   \
{                                                       \
  vlib_thread_main_t * tm = &vlib_thread_main;          \
  x.next = tm->next;                                    \
  tm->next = &x;                                        \
}                                                       \
__VA_ARGS__ vlib_thread_registration_t x

/* thread/worker index mapping: thread 0 is the main thread,
   workers occupy thread indices 1..n */
static inline u32
vlib_get_worker_cpu_index (u32 worker_index)
{
  return worker_index + 1;
}
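Typical use of the two macros above, as a hedged sketch: the registration fields shown (.name, .short_name, .function) follow the pattern used elsewhere in vlib, and my_worker_fn is the hypothetical function from the earlier sketch.

VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
  .name = "workers",
  .short_name = "wk",
  .function = my_worker_fn,
};

/* run a statement block once per vlib_main_t, bound to this_vlib_main */
foreach_vlib_main (({
  fformat (stderr, "vlib_main_t at %p\n", this_vlib_main);
}));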
static inline u32
vlib_get_worker_index (u32 cpu_index)
{
  return cpu_index - 1;
}
static inline vlib_main_t *
vlib_get_worker_vlib_main (u32 worker_index)
{
  vlib_main_t *vm;
  vlib_thread_main_t *tm = &vlib_thread_main;
  ASSERT (worker_index < tm->n_vlib_mains - 1);
  vm = vlib_mains[worker_index + 1];
  ASSERT (vm);
  return vm;
}
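Putting the three helpers together; the values follow directly from the definitions above:

u32 cpu = vlib_get_worker_cpu_index (0);          /* worker 0 -> cpu 1    */
u32 w   = vlib_get_worker_index (cpu);            /* cpu 1    -> worker 0 */
vlib_main_t *wvm = vlib_get_worker_vlib_main (0); /* == vlib_mains[1]     */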
  /* inside vlib_get_frame_queue_elt (): atomically claim the next
     ring slot, then map it onto the power-of-two element array */
  new_tail = __sync_add_and_fetch (&fq->tail, 1);

  /* wait until a ring slot is available */
  while (new_tail >= fq->head_hint + fq->nelts)
    vlib_worker_thread_barrier_check ();

  elt = fq->elts + (new_tail & (fq->nelts - 1));
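The masking works because nelts is a power of two; a worked example with a hypothetical 64-slot ring:

u64 new_tail = 67;
u32 slot = new_tail & (64 - 1);   /* 67 % 64 == 3, counter 67 lands in slot 3 */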
static inline vlib_frame_queue_t *
is_vlib_frame_queue_congested (u32 frame_queue_index, u32 index,
                               u32 queue_hi_thresh,
                               vlib_frame_queue_t **
                               handoff_queue_by_worker_index)
{
  vlib_frame_queue_t *fq;
  vlib_frame_queue_main_t *fqm;
  vlib_thread_main_t *tm = &vlib_thread_main;

  /* fast path: this queue is already known to be congested */
  fq = handoff_queue_by_worker_index[index];
  if (fq != (vlib_frame_queue_t *) (~0))
    return fq;

  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);

  if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
    {
      /* a valid entry in the array marks the queue congested */
      handoff_queue_by_worker_index[index] = fq;
      fq->enqueue_full_events++;
      return fq;
    }

  return NULL;
}
static inline vlib_frame_queue_elt_t *
vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
                                   u32 vlib_worker_index,
                                   vlib_frame_queue_elt_t **
                                   handoff_queue_elt_by_worker_index)
{
  vlib_frame_queue_elt_t *elt;

  /* reuse the element already cached for this worker, if any */
  if (handoff_queue_elt_by_worker_index[vlib_worker_index])
    return handoff_queue_elt_by_worker_index[vlib_worker_index];

  elt = vlib_get_frame_queue_elt (frame_queue_index, vlib_worker_index);

  handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;

  return elt;
}
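How a handoff node might combine the two helpers above. This is a sketch under assumptions: get_elt_or_drop, the threshold value, and the two per-worker cache arrays are hypothetical; only the two vlib functions come from this header.

static vlib_frame_queue_elt_t *
get_elt_or_drop (u32 fq_index, u32 worker,
                 vlib_frame_queue_t **congested_by_worker,
                 vlib_frame_queue_elt_t **elt_by_worker)
{
  if (is_vlib_frame_queue_congested (fq_index, worker, 30 /* hi thresh */,
                                     congested_by_worker))
    return 0;   /* congested: caller drops or defers the packet */

  return vlib_get_worker_handoff_queue_elt (fq_index, worker, elt_by_worker);
}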
#define CLIB_CACHE_LINE_ALIGN_MARK(mark)
void vlib_worker_thread_init(vlib_worker_thread_t *w)
int vlib_frame_queue_enqueue(vlib_main_t *vm, u32 node_runtime_index, u32 frame_queue_index, vlib_frame_t *frame, vlib_frame_queue_msg_type_t type)
vlib_worker_thread_t * vlib_alloc_thread(vlib_main_t *vm)
void vlib_set_thread_name(char *name)
void * thread_function_arg
frame_queue_trace_t * frame_queue_traces
vlib_worker_thread_t * vlib_worker_threads
struct vlib_thread_registration_ * next
static vlib_frame_queue_t * is_vlib_frame_queue_congested(u32 frame_queue_index, u32 index, u32 queue_hi_thresh, vlib_frame_queue_t **handoff_queue_by_worker_index)
static void vlib_smp_unsafe_warning(void)
static void vlib_worker_thread_barrier_check(void)
vlib_thread_registration_t * next
u64 dispatch_node(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_node_type_t type, vlib_node_state_t dispatch_state, vlib_frame_t *frame, u64 last_time_stamp)
#define clib_smp_atomic_add(addr, increment)
vlib_frame_queue_msg_type_t
#define vec_elt_at_index(v, i)
Get the vector element at index i, checking that i is in bounds.
vlib_frame_queue_elt_t * elts
static u32 vlib_get_current_worker_index()
uword os_get_cpu_number(void)
void vlib_worker_thread_barrier_release(vlib_main_t *vm)
static vlib_frame_queue_elt_t * vlib_get_worker_handoff_queue_elt(u32 frame_queue_index, u32 vlib_worker_index, vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index)
void vlib_worker_thread_node_runtime_update(void)
void vlib_worker_thread_fork_fixup(vlib_fork_fixup_t which)
vlib_frame_queue_t ** vlib_frame_queues
static u32 vlib_get_worker_index(u32 cpu_index)
frame_queue_nelt_counter_t * frame_queue_histogram
vlib_worker_thread_t * worker_threads
static vlib_frame_queue_elt_t * vlib_get_frame_queue_elt(u32 frame_queue_index, u32 index)
int vlib_frame_queue_dequeue(int thread_id, vlib_main_t *vm, vlib_node_main_t *nm)
volatile u32 * wait_at_barrier
vlib_frame_queue_main_t * frame_queue_mains
typedef struct vlib_thread_registration_ vlib_thread_registration_t
uword * thread_registrations_by_name
u32 vlib_frame_queue_main_init(u32 node_index, u32 frame_queue_nelts)
clib_error_t * vlib_thread_init(vlib_main_t *vm)
int no_data_structure_clone
vlib_main_t ** vlib_mains
void vlib_create_worker_threads(vlib_main_t *vm, int n, void(*thread_function)(void *))
static void vlib_put_frame_queue_elt(vlib_frame_queue_elt_t *hf)
static u32 vlib_get_worker_cpu_index(u32 worker_index)
void vlib_worker_thread_barrier_sync(vlib_main_t *vm)
Pairs with vlib_worker_thread_barrier_release(); see the usage sketch after this list.
volatile u32 * workers_at_barrier
static u32 vlib_num_workers()
u64 dispatch_pending_node(vlib_main_t *vm, vlib_pending_frame_t *p, u64 last_time_stamp)
#define CLIB_MEMORY_BARRIER()
uword * cpu_socket_bitmap
#define foreach_sched_policy
typedef void (vlib_thread_function_t) (void *arg)
vlib_thread_registration_t ** registrations
vlib_thread_main_t vlib_thread_main
vlib_thread_registration_t * registration
volatile u32 worker_thread_release
static vlib_main_t * vlib_get_worker_vlib_main(u32 worker_index)
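A hedged sketch of the barrier pair named in the list above: the main thread brackets updates to shared state with sync/release, while workers park at wait_at_barrier via vlib_worker_thread_barrier_check(). The comment placeholder stands for whatever main-thread-only mutation the caller needs.

vlib_worker_thread_barrier_sync (vm);
/* ... mutate data structures no worker may touch concurrently ... */
vlib_worker_thread_barrier_release (vm);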