/drivers/net/wireless/ath/ar5523/ar5523.h
    94: struct workqueue_struct *wq;  (member in struct:ar5523)

/drivers/scsi/esas2r/esas2r_init.c
    569: struct workqueue_struct *wq;  (local)
    601: wq = a->fw_event_q;
    604: if (wq)
    605: destroy_workqueue(wq);

/drivers/target/tcm_fc/tfc_conf.c
    303: struct workqueue_struct *wq;  (local)
    334: wq = alloc_workqueue("tcm_fc", 0, 1);
    335: if (!wq) {
    343: destroy_workqueue(wq);
    347: tpg->workqueue = wq;

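The tfc_conf.c hits show the usual allocate, check, store lifecycle for an ordered workqueue (max_active of 1, no flags). A minimal sketch of that pattern, assuming an illustrative container object; only the alloc_workqueue()/destroy_workqueue() calls are taken from the listing:

#include <linux/workqueue.h>
#include <linux/errno.h>

/* Illustrative owner of the queue; not the tcm_fc structure. */
struct my_obj {
        struct workqueue_struct *workqueue;
};

static int my_obj_init(struct my_obj *obj)
{
        struct workqueue_struct *wq;

        /* max_active = 1 and no flags: a single-slot, strictly ordered queue */
        wq = alloc_workqueue("tcm_fc", 0, 1);
        if (!wq)
                return -ENOMEM;

        obj->workqueue = wq;
        return 0;
}

static void my_obj_fini(struct my_obj *obj)
{
        /* Flushes any pending work items before freeing the queue. */
        destroy_workqueue(obj->workqueue);
}
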
/drivers/thunderbolt/tb.h
    106: struct workqueue_struct *wq; /* ordered workqueue for plug events */  (member in struct:tb)
    113:  * wq after cfg has been paused.

/drivers/video/fbdev/msm/mdp.c
    152: static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)  (argument)
    157: wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);

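mdp_wait() receives the wait queue head by pointer and sleeps on it with wait_event_timeout() for at most HZ jiffies (one second). A hedged sketch of the same idiom; the device structure and done flag are illustrative, not from the driver:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Illustrative device; only the wait_event_timeout() usage matters. */
struct my_dev {
        wait_queue_head_t waitq;
        bool done;
};

static int my_wait_for_done(struct my_dev *dev)
{
        long left;

        /* Sleeps until dev->done becomes true or HZ jiffies elapse;
         * returns 0 on timeout, remaining jiffies otherwise. */
        left = wait_event_timeout(dev->waitq, dev->done, HZ);
        return left ? 0 : -ETIMEDOUT;
}
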
/drivers/gpu/drm/i915/intel_sprite.c
    56: wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);  (local)
    85: prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
    104: finish_wait(wq, &wait);

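The intel_sprite.c hits use the open-coded prepare_to_wait()/finish_wait() pair instead of wait_event(), which lets the caller do other work between queueing itself and sleeping. A hedged sketch of that canonical loop; the my_done flag stands in for the driver's own condition:

#include <linux/wait.h>
#include <linux/sched.h>

static bool my_done;    /* hypothetical condition the waiter blocks on */

static void my_wait(wait_queue_head_t *wq)
{
        DEFINE_WAIT(wait);

        for (;;) {
                /* Queue ourselves and set the task state before the final
                 * condition check, so a wake_up() that lands between the
                 * check and schedule() is not lost. */
                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                if (my_done)
                        break;
                schedule();
        }
        finish_wait(wq, &wait);
}
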
/drivers/infiniband/core/mad_priv.h
    208: struct workqueue_struct *wq;  (member in struct:ib_mad_port_private)

/drivers/infiniband/hw/qib/qib_qp.c
    410: if (qp->r_rq.wq) {
    411: qp->r_rq.wq->head = 0;
    412: qp->r_rq.wq->tail = 0;
    528: if (qp->r_rq.wq) {
    529: struct qib_rwq *wq;  (local)
    536: wq = qp->r_rq.wq;
    537: head = wq->head;
    540: tail = wq->tail;
    549: wq
    923: struct qib_rwq *wq = qp->r_rq.wq;  (local)
    ...
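These hits are the shared receive work queue ring (struct qib_rwq), not a kernel workqueue: the reset path zeroes head and tail, and later code derives the number of posted entries from the two indices. A hedged sketch of that head/tail arithmetic with wraparound; the field and function names are illustrative:

/* Illustrative ring with producer (head) and consumer (tail) indices,
 * modelled on the qib_rwq usage above; not the driver's actual layout. */
struct my_rwq {
        unsigned int size;      /* number of slots in the ring */
        unsigned int head;      /* next slot the producer will fill */
        unsigned int tail;      /* next slot the consumer will drain */
};

/* Entries currently queued, accounting for index wraparound. */
static unsigned int my_rwq_count(const struct my_rwq *wq)
{
        if (wq->head >= wq->tail)
                return wq->head - wq->tail;
        return wq->size - (wq->tail - wq->head);
}
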
/drivers/iommu/amd_iommu_v2.c
    59: wait_queue_head_t wq; /* To wait for count == 0 */  (member in struct:pasid_state)
    74: wait_queue_head_t wq;  (member in struct:device_state)
    158: wake_up(&dev_state->wq);
    165: prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
    168: finish_wait(&dev_state->wq, &wait);
    284: wake_up(&pasid_state->wq);
    292: prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
    299: finish_wait(&pasid_state->wq, &wait);
    662: init_waitqueue_head(&pasid_state->wq);
    786: init_waitqueue_head(&dev_state->wq);
    ...
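amd_iommu_v2.c embeds a wait_queue_head_t in each state object so one path can sleep until a reference count reaches zero while the release path calls wake_up(). A hedged sketch of that drain pattern, using wait_event() rather than the driver's open-coded loop; the structure and helpers are illustrative:

#include <linux/wait.h>
#include <linux/atomic.h>

/* Illustrative state object with a count and a waitqueue used to drain it. */
struct my_state {
        atomic_t count;
        wait_queue_head_t wq;   /* to wait for count == 0 */
};

static void my_state_init(struct my_state *st)
{
        atomic_set(&st->count, 1);
        init_waitqueue_head(&st->wq);
}

static void my_state_put(struct my_state *st)
{
        /* Waker side: last reference gone, release any drain waiter. */
        if (atomic_dec_and_test(&st->count))
                wake_up(&st->wq);
}

static void my_state_drain(struct my_state *st)
{
        /* Sleeper side: wait until all outstanding references are dropped. */
        wait_event(st->wq, atomic_read(&st->count) == 0);
}
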
/drivers/net/ethernet/cisco/enic/enic.h
    167: ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];  (member in struct:enic)
    202: static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)  (argument)
    204: return enic->rq_count + wq;
    228: enic_msix_wq_intr(struct enic *enic, unsigned int wq)  (argument)
    229: unsigned int wq)
    231: return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;

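enic lays out its completion queues with the receive queues first and the work (transmit) queues after them, so enic_cq_wq() is just an offset into that array. A small hedged illustration of the mapping; the helper name here is made up:

/* Illustrative layout: with rq_count RQs, the CQ array is
 *   cq[0 .. rq_count-1]                -> receive queues
 *   cq[rq_count .. rq_count+wq_max-1]  -> work (transmit) queues
 * so WQ index 2 with rq_count == 4 uses cq[6], matching
 * enic_cq_wq(enic, 2) == enic->rq_count + 2 in the listing above. */
static inline unsigned int example_cq_wq(unsigned int rq_count, unsigned int wq)
{
        return rq_count + wq;
}
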
/drivers/net/xen-netback/common.h
    172: wait_queue_head_t wq;  (member in struct:xenvif_queue)

/drivers/scsi/bfa/bfad_im.c
    161: wait_queue_head_t *wq;  (local)
    165: wq = (wait_queue_head_t *) cmnd->SCp.ptr;
    168: if (wq)
    169: wake_up(wq);
    298: DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
    326: cmnd->SCp.ptr = (char *)&wq;
    334: wait_event(wq, test_bit(IO_DONE_BIT,
    361: DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
    368: cmnd->SCp.ptr = (char *)&wq;
    377: wait_event(wq, test_bi
    ...
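bfad_im.c declares the wait queue head on the caller's stack, stashes a pointer to it in the command, and the completion path wakes whoever is parked there once a done bit is set. A hedged sketch of that on-stack handshake; struct my_cmd, the bit name, and the helpers are illustrative:

#include <linux/wait.h>
#include <linux/bitops.h>

#define MY_IO_DONE_BIT  0

/* Illustrative command carrying completion state and a waiter pointer. */
struct my_cmd {
        unsigned long flags;
        wait_queue_head_t *waiter;      /* set by submitter, used by IRQ path */
};

/* Submitter: wait on a stack-allocated wait queue until the bit is set. */
static void my_submit_and_wait(struct my_cmd *cmd)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

        cmd->waiter = &wq;
        /* ... kick off the hardware request here ... */
        wait_event(wq, test_bit(MY_IO_DONE_BIT, &cmd->flags));
        cmd->waiter = NULL;
}

/* Completion path: mark the command done and wake the on-stack waiter. */
static void my_complete(struct my_cmd *cmd)
{
        set_bit(MY_IO_DONE_BIT, &cmd->flags);
        if (cmd->waiter)
                wake_up(cmd->waiter);
}
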
/drivers/scsi/fnic/fnic_scsi.c
    143: static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)  (argument)
    153: if (wq->to_clean_index <= fnic->fw_ack_index[0])
    154: wq->ring.desc_avail += (fnic->fw_ack_index[0]
    155: - wq->to_clean_index + 1);
    157: wq->ring.desc_avail += (wq->ring.desc_count
    158: - wq->to_clean_index
    166: wq->to_clean_index =
    167: (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
    208: struct vnic_wq_copy *wq  (local)
    261: struct vnic_wq_copy *wq = &fnic->wq_copy[0];  (local)
    317: fnic_queue_wq_copy_desc(struct fnic *fnic, struct vnic_wq_copy *wq, struct fnic_io_req *io_req, struct scsi_cmnd *sc, int sg_count)  (argument)
    427: struct vnic_wq_copy *wq;  (local)
    707: is_ack_index_in_range(struct vnic_wq_copy *wq, u16 request_out)  (argument)
    736: struct vnic_wq_copy *wq;  (local)
    1340: fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, struct fcpio_host_req *desc)  (argument)
    1405: struct vnic_wq_copy *wq = &fnic->wq_copy[0];  (local)
    1913: struct vnic_wq_copy *wq = &fnic->wq_copy[0];  (local)
    ...
/drivers/usb/chipidea/ci.h
    146:  * @wq: workqueue thread
    183: struct workqueue_struct *wq;  (member in struct:ci_hdrc)

/drivers/block/xen-blkback/common.h
    277: wait_queue_head_t wq;  (member in struct:xen_blkif)

/drivers/infiniband/hw/cxgb3/cxio_hal.c
    274: cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, struct t3_wq *wq, struct cxio_ucontext *uctx)  (argument)
    275: struct t3_wq *wq, struct cxio_ucontext *uctx)
    277: int depth = 1UL << wq->size_log2;
    278: int rqsize = 1UL << wq->rq_size_log2;
    280: wq->qpid = get_qpid(rdev_p, uctx);
    281: if (!wq->qpid)
    284: wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL);
    285: if (!wq->rq)
    288: wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
    289: if (!wq->rq_addr)
    292: wq
    336: cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq, struct cxio_ucontext *uctx)  (argument)
    350: insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)  (argument)
    368: cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)  (argument)
    386: insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, struct t3_swsq *sqp)  (argument)
    407: cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)  (argument)
    446: cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)  (argument)
    464: cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)  (argument)
    482: cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)  (argument)
    1073: flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)  (argument)
    1102: create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe, struct t3_cqe *read_cqe)  (argument)
    1116: advance_oldest_read(struct t3_wq *wq)  (argument)
    1147: cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit)  (argument)
    ...
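cxio_create_qp() allocates the QP id, the software receive queue, and the RQT entry in sequence and bails out when any step fails. A hedged sketch of the goto-based unwind such multi-step setup usually implies; the types and helper stubs below are illustrative, not the driver's:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_swrq { u64 wr_id; };

struct my_wq {
        u32 qpid;
        struct my_swrq *rq;
        u32 rq_addr;
};

/* Stand-ins for the driver's allocators; trivially succeed here. */
static u32 my_alloc_qpid(void) { return 1; }
static void my_free_qpid(u32 qpid) { }
static u32 my_rqtpool_alloc(unsigned int rqsize) { return 0x1000; }

static int my_create_wq(struct my_wq *wq, unsigned int depth, unsigned int rqsize)
{
        wq->qpid = my_alloc_qpid();
        if (!wq->qpid)
                return -ENOMEM;

        wq->rq = kcalloc(depth, sizeof(struct my_swrq), GFP_KERNEL);
        if (!wq->rq)
                goto err_qpid;

        wq->rq_addr = my_rqtpool_alloc(rqsize);
        if (!wq->rq_addr)
                goto err_rq;

        return 0;

err_rq:
        kfree(wq->rq);
err_qpid:
        my_free_qpid(wq->qpid);
        return -ENOMEM;
}
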
/drivers/infiniband/hw/cxgb3/iwch_provider.h
    164: struct t3_wq wq;  (member in struct:iwch_qp)

/drivers/infiniband/hw/cxgb4/device.c
    116: void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)  (argument)
    121: if (!wq->rdev->wr_log)
    124: idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
    125: (wq->rdev->wr_log_size - 1);
    126: le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
    131: le.qid = wq->sq.qid;
    133: le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
    134: le.post_sge_ts = wq->sq.sw_sq[wq
    ...
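c4iw_log_wr_stats() claims a slot in a circular log with an atomic increment masked by a power-of-two size, so concurrent writers never need a lock. A hedged sketch of that indexing trick; the log structure and size are illustrative:

#include <linux/types.h>
#include <linux/atomic.h>

/* Illustrative fixed-size circular log; the size must be a power of two
 * for the mask to work, mirroring the wr_log indexing above. */
#define MY_LOG_SIZE 256

struct my_log_entry { u64 ts; u32 qid; };

static struct my_log_entry my_log[MY_LOG_SIZE];
static atomic_t my_log_idx = ATOMIC_INIT(0);

static void my_log_event(u64 ts, u32 qid)
{
        /* atomic_inc_return() hands each caller a unique sequence number;
         * masking by size-1 wraps it into the array. */
        unsigned int idx = (atomic_inc_return(&my_log_idx) - 1) &
                           (MY_LOG_SIZE - 1);

        my_log[idx].ts = ts;
        my_log[idx].qid = qid;
}
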
/drivers/infiniband/hw/cxgb4/qp.c
    149: static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,  (argument)
    157: wq->rq.memsize, wq->rq.queue,
    158: dma_unmap_addr(&wq->rq, mapping));
    159: dealloc_sq(rdev, &wq->sq);
    160: c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
    161: kfree(wq->rq.sw_rq);
    162: kfree(wq->sq.sw_sq);
    163: c4iw_put_qpid(rdev, wq
    168: create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx)  (argument)
    ...
/drivers/infiniband/hw/mlx5/cq.c
    102: static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)  (argument)
    104: switch (wq->wr_data[idx]) {
    120: handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_ib_wq *wq, int idx)  (argument)
    121: struct mlx5_ib_wq *wq, int idx)
    160: wc->opcode = get_umr_comp(wq, idx);
    175: struct mlx5_ib_wq *wq;  (local)
    197: wq = &qp->rq;
    198: wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
    199: ++wq
    410: struct mlx5_ib_wq *wq;  (local)
    ...
/drivers/infiniband/hw/mthca/mthca_cq.c
    489: struct mthca_wq *wq;  (local)
    539: wq = &(*cur_qp)->sq;
    541: >> wq->wqe_shift);
    547: wq = NULL;
    553: wq = &(*cur_qp)->rq;
    555: wqe_index = wqe >> wq->wqe_shift;
    562: wqe_index = wq->max - 1;
    566: if (wq) {
    567: if (wq->last_comp < wqe_index)
    568: wq
    ...
/drivers/media/usb/au0828/au0828.h
    191: wait_queue_head_t wq;  (member in struct:au0828_dmaqueue)

/drivers/media/usb/gspca/gspca.h
    195: wait_queue_head_t wq; /* wait queue */  (member in struct:gspca_dev)

/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
    421: struct workqueue_struct *wq;  (local)
    423: wq = create_singlethread_workqueue("qlcnic-flr");
    424: if (wq == NULL) {
    429: bc->bc_flr_wq = wq;

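qlcnic allocates a dedicated single-threaded queue for FLR handling and then queues work items to it. A hedged sketch of defining and queuing a work item on such a queue; the handler, structure, and names are illustrative, not from the driver:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative work item queued on a dedicated single-threaded queue. */
struct my_flr_work {
        struct work_struct work;
        int vf_id;
};

static void my_flr_handler(struct work_struct *work)
{
        struct my_flr_work *fw = container_of(work, struct my_flr_work, work);

        pr_info("handling FLR for VF %d\n", fw->vf_id);
}

static bool my_schedule_flr(struct workqueue_struct *wq,
                            struct my_flr_work *fw, int vf_id)
{
        fw->vf_id = vf_id;
        INIT_WORK(&fw->work, my_flr_handler);
        /* Returns false if the item was already pending on a queue. */
        return queue_work(wq, &fw->work);
}
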
/drivers/net/macvtap.c
    38: struct socket_wq wq;  (member in struct:macvtap_queue)
    469: RCU_INIT_POINTER(q->sock.wq, &q->wq);
    470: init_waitqueue_head(&q->wq.wait);
    519: poll_wait(file, &q->wq.wait, wait);

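macvtap embeds a struct socket_wq in each queue and registers its wait head with poll_wait() from the file's poll handler, so select/poll callers sleep there until a later wake_up() on the same head. A hedged sketch of the poll-handler side; the queue structure and readiness flags are illustrative:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

/* Illustrative per-queue state; only the poll_wait() usage matters. */
struct my_queue {
        wait_queue_head_t wait;
        bool readable;
        bool writable;
};

/* Poll handler sketch: poll_wait() never blocks, it only adds the caller
 * to the wait queue so a later wake_up() on 'wait' re-runs the poll. */
static unsigned int my_poll(struct file *file, struct my_queue *q,
                            poll_table *pt)
{
        unsigned int mask = 0;

        poll_wait(file, &q->wait, pt);

        if (q->readable)
                mask |= POLLIN | POLLRDNORM;
        if (q->writable)
                mask |= POLLOUT | POLLWRNORM;

        return mask;
}
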