Searched defs:wq (Results 1 - 4 of 4) sorted by relevance

/kernel/sched/
wait.c
321 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q, argument
327 prepare_to_wait(wq, &q->wait, mode);
331 finish_wait(wq, &q->wait);
339 wait_queue_head_t *wq = bit_waitqueue(word, bit); local
342 return __wait_on_bit(wq, &wait, action, mode);
350 wait_queue_head_t *wq = bit_waitqueue(word, bit); local
354 return __wait_on_bit(wq, &wait, action, mode);
359 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, argument
365 prepare_to_wait_exclusive(wq, &q->wait, mode);
371 abort_exclusive_wait(wq,
382 wait_queue_head_t *wq = bit_waitqueue(word, bit); local
389 __wake_up_bit(wait_queue_head_t *wq, void *word, int bit) argument
465 __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q, int (*action)(atomic_t *), unsigned mode) argument
496 wait_queue_head_t *wq = atomic_t_waitqueue(p); local
[all...]
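
All of the wait.c hits above sit on the same prepare_to_wait()/finish_wait() loop around a wait_queue_head_t. Below is a minimal sketch of that loop, written against the wait_queue_head_t API as it appears in this tree; the waitqueue, flag word and bit number are invented for illustration, and the full smp_mb() is a conservative stand-in for the lighter barrier the real bit-wait code uses.

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);        /* hypothetical waitqueue */
static unsigned long demo_flags;                /* hypothetical flag word */
#define DEMO_BUSY_BIT 0

/* Waiter side: the same loop __wait_on_bit() (line 321 above) runs. */
static void demo_wait_for_bit(void)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
                if (!test_bit(DEMO_BUSY_BIT, &demo_flags))
                        break;
                schedule();             /* woken by demo_clear_bit() below */
        }
        finish_wait(&demo_wq, &wait);
}

/* Waker side: clear the bit, order the store, then wake the queue. */
static void demo_clear_bit(void)
{
        clear_bit(DEMO_BUSY_BIT, &demo_flags);
        smp_mb();                       /* conservative; see note above */
        wake_up(&demo_wq);
}
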
/kernel/
padata.c
143 queue_work_on(target_cpu, pinst->wq, &queue->work);
266 queue_work_on(cb_cpu, pinst->wq, &squeue->work);
1025 * @wq: workqueue to use for the allocated padata instance
1027 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) argument
1029 return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
1037 * @wq: workqueue to use for the allocated padata instance
1041 struct padata_instance *padata_alloc(struct workqueue_struct *wq, argument
1069 pinst->wq = wq;
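
padata_alloc_possible() at line 1027 takes a caller-supplied workqueue, which padata then drives through the queue_work_on() calls at lines 143 and 266. The following setup/teardown sketch assumes the padata API of the kernel version this listing comes from (later kernels replaced the workqueue parameter with a name string); the workqueue flags and all demo_* names are illustrative, not taken from the source.

#include <linux/errno.h>
#include <linux/padata.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct padata_instance *demo_pinst;

static int demo_padata_setup(void)
{
        /* Illustrative flags: per-CPU queue, one item per CPU at a time. */
        demo_wq = alloc_workqueue("demo_padata",
                                  WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
        if (!demo_wq)
                return -ENOMEM;

        /* Instance may use every possible CPU, backed by demo_wq. */
        demo_pinst = padata_alloc_possible(demo_wq);
        if (!demo_pinst) {
                destroy_workqueue(demo_wq);
                return -ENOMEM;
        }

        return padata_start(demo_pinst);
}

static void demo_padata_teardown(void)
{
        padata_stop(demo_pinst);
        padata_free(demo_pinst);
        destroy_workqueue(demo_wq);
}
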
workqueue.c
130 * WQ: wq->mutex protected.
132 * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
194 struct workqueue_struct *wq; /* I: the owning workqueue */ member in struct:pool_workqueue
203 struct list_head pwqs_node; /* WR: node on wq->pwqs */
204 struct list_head mayday_node; /* MD: node on wq->maydays */
210 * determined without grabbing wq->mutex.
232 struct list_head pwqs; /* WR: all pwqs of this wq */
235 struct mutex mutex; /* protects this wq */
289 static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
336 #define assert_rcu_or_wq_mutex(wq) \
551 unbound_pwq_by_node(struct workqueue_struct *wq, int node) argument
1269 is_chained_work(struct workqueue_struct *wq) argument
1281 __queue_work(int cpu, struct workqueue_struct *wq, struct work_struct *work) argument
1393 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) argument
1420 __queue_delayed_work(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
1465 queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
1503 mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) argument
1782 struct workqueue_struct *wq = pwq->wq; local
2204 struct workqueue_struct *wq = rescuer->rescue_wq; local
2397 flush_workqueue_prep_pwqs(struct workqueue_struct *wq, int flush_color, int work_color) argument
2444 flush_workqueue(struct workqueue_struct *wq) argument
2601 drain_workqueue(struct workqueue_struct *wq) argument
2951 struct workqueue_struct *wq; member in struct:wq_device
2965 struct workqueue_struct *wq = dev_to_wq(dev); local
2974 struct workqueue_struct *wq = dev_to_wq(dev); local
2983 struct workqueue_struct *wq = dev_to_wq(dev); local
3004 struct workqueue_struct *wq = dev_to_wq(dev); local
3024 struct workqueue_struct *wq = dev_to_wq(dev); local
3035 wq_sysfs_prep_attrs(struct workqueue_struct *wq) argument
3052 struct workqueue_struct *wq = dev_to_wq(dev); local
3073 struct workqueue_struct *wq = dev_to_wq(dev); local
3088 struct workqueue_struct *wq = dev_to_wq(dev); local
3107 struct workqueue_struct *wq = dev_to_wq(dev); local
3121 struct workqueue_struct *wq = dev_to_wq(dev); local
3180 workqueue_sysfs_register(struct workqueue_struct *wq) argument
3239 workqueue_sysfs_unregister(struct workqueue_struct *wq) argument
3250 workqueue_sysfs_unregister(struct workqueue_struct *wq) argument
3528 struct workqueue_struct *wq = pwq->wq; local
3566 struct workqueue_struct *wq = pwq->wq; local
3603 init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) argument
3623 struct workqueue_struct *wq = pwq->wq; local
3642 alloc_unbound_pwq(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
3721 numa_pwq_tbl_install(struct workqueue_struct *wq, int node, struct pool_workqueue *pwq) argument
3753 apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) argument
3879 wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, bool online) argument
3952 alloc_and_link_pwqs(struct workqueue_struct *wq) argument
4007 struct workqueue_struct *wq; local
4112 destroy_workqueue(struct workqueue_struct *wq) argument
4197 workqueue_set_max_active(struct workqueue_struct *wq, int max_active) argument
4251 workqueue_congested(int cpu, struct workqueue_struct *wq) argument
4349 struct workqueue_struct *wq = NULL; local
4559 struct workqueue_struct *wq; local
4607 struct workqueue_struct *wq; local
4683 struct workqueue_struct *wq; local
4717 struct workqueue_struct *wq; local
4758 struct workqueue_struct *wq; local
[all...]
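
The workqueue.c hits span the queueing path (__queue_work, queue_work_on, queue_delayed_work_on), the flush/drain machinery and the unbound/NUMA attribute code. From a caller's side that boils down to a small public API; the module below is a minimal, hedged sketch of that API, with every demo_* name invented for illustration.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
        pr_info("demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);
static DECLARE_DELAYED_WORK(demo_dwork, demo_work_fn);

static int __init demo_init(void)
{
        /* WQ_UNBOUND: execution is not tied to the submitting CPU. */
        demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
        if (!demo_wq)
                return -ENOMEM;

        queue_work(demo_wq, &demo_work);              /* ends up in __queue_work(), line 1281 */
        queue_delayed_work(demo_wq, &demo_dwork, HZ); /* via __queue_delayed_work(), line 1420 */
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_delayed_work_sync(&demo_dwork);
        flush_workqueue(demo_wq);       /* cf. flush_workqueue(), line 2444 */
        destroy_workqueue(demo_wq);     /* cf. destroy_workqueue(), line 4112 */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
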
/kernel/events/
uprobes.c
108 wait_queue_head_t wq; /* if all slots are busy */ member in struct:xol_area
1183 init_waitqueue_head(&area->wq);
1273 wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1336 if (waitqueue_active(&area->wq))
1337 wake_up(&area->wq);
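
struct xol_area pairs the wait_queue_head_t above with an atomic slot count: takers sleep in wait_event() at line 1273 until a slot frees up, and the release path at lines 1336-1337 only calls wake_up() when waitqueue_active() says somebody is actually sleeping. Below is a hedged sketch of the same pattern, with an invented demo_pool in place of xol_area and a cmpxchg loop standing in for the bitmap claim the real code performs.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>

#define DEMO_MAX_SLOTS 32               /* stand-in for UINSNS_PER_PAGE */

struct demo_pool {
        wait_queue_head_t wq;           /* waiters, if all slots are busy */
        atomic_t slot_count;            /* slots currently in use */
};

static void demo_pool_init(struct demo_pool *pool)
{
        init_waitqueue_head(&pool->wq);
        atomic_set(&pool->slot_count, 0);
}

/* Claim a slot atomically; a plain read-then-increment would race. */
static int demo_try_take_slot(struct demo_pool *pool)
{
        int old = atomic_read(&pool->slot_count);

        while (old < DEMO_MAX_SLOTS) {
                int prev = atomic_cmpxchg(&pool->slot_count, old, old + 1);
                if (prev == old)
                        return 0;
                old = prev;
        }
        return -EBUSY;
}

static void demo_take_slot(struct demo_pool *pool)
{
        /* Re-evaluates the condition after every wakeup, like line 1273. */
        wait_event(pool->wq, demo_try_take_slot(pool) == 0);
}

static void demo_put_slot(struct demo_pool *pool)
{
        atomic_dec(&pool->slot_count);
        /* Cheap check before waking, as lines 1336-1337 do. */
        if (waitqueue_active(&pool->wq))
                wake_up(&pool->wq);
}
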
