/drivers/scsi/fnic/
vnic_wq_copy.h
    36  vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) - returns wq->ring.desc_avail
    41  vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) - returns wq->ring.desc_count - 1 - wq->ring.desc_avail
    46  vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) - returns &((struct fcpio_host_req *)wq->ring.descs)[wq->to_use_index]
    52  vnic_wq_copy_post(struct vnic_wq_copy *wq)
    69  vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
    83  vnic_wq_copy_service(struct vnic_wq_copy *wq, u16 completed_index, void (*q_service)(struct vnic_wq_copy *wq, struct fcpio_host_req *wq_desc))
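The desc_avail / desc_in_use pair above is plain ring-buffer accounting: one descriptor is held back so a full ring is never mistaken for an empty one, which is where the desc_count - 1 - desc_avail formula comes from. A minimal userspace sketch of just that invariant (the struct here is a reduced stand-in, not the driver's type):

```c
/* Minimal userspace sketch of the vNIC work-queue descriptor
 * accounting in vnic_wq_copy.h; 'struct ring' is a stand-in. */
#include <assert.h>
#include <stdio.h>

struct ring {
	unsigned int desc_count;  /* total descriptors in the ring */
	unsigned int desc_avail;  /* descriptors free for posting */
};

/* One slot is kept unused so a full ring never looks empty:
 * avail starts at desc_count - 1, and used + avail == count - 1. */
static unsigned int desc_in_use(const struct ring *r)
{
	return r->desc_count - 1 - r->desc_avail;
}

int main(void)
{
	struct ring r = { .desc_count = 8, .desc_avail = 7 };

	r.desc_avail--;                 /* post one descriptor */
	assert(desc_in_use(&r) == 1);

	r.desc_avail++;                 /* completion returns it */
	assert(desc_in_use(&r) == 0);

	printf("used=%u avail=%u\n", desc_in_use(&r), r.desc_avail);
	return 0;
}
```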
vnic_wq.c
    27  vnic_wq_alloc_bufs(struct vnic_wq *wq) - kzalloc()s VNIC_WQ_BUF_BLK_SZ blocks for wq->bufs[], points each buf->desc into wq->ring.descs by desc_size * buf->index, and links the bufs into a ring ending back at wq->bufs[0]
    67  vnic_wq_free(struct vnic_wq *wq)
    85  vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size)
   114  vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset)
   131  vnic_wq_error_status(struct vnic_wq *wq)
   136  vnic_wq_enable(struct vnic_wq *wq)
   141  vnic_wq_disable(struct vnic_wq *wq)
   159  vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
vnic_wq_copy.c
    25  vnic_wq_copy_enable(struct vnic_wq_copy *wq) - iowrite32(1, &wq->ctrl->enable)
    30  vnic_wq_copy_disable(struct vnic_wq_copy *wq) - clears the enable register, then polls wq->ctrl->running; on timeout logs wq->index plus the fetch_index and posted_index registers
    51  vnic_wq_copy_clean(struct vnic_wq_copy *wq, void (*q_clean)(struct vnic_wq_copy *wq, struct fcpio_host_req *wq_desc))
    69  vnic_wq_copy_free(struct vnic_wq_copy *wq)
    78  vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size)
   102  vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset)
vnic_wq.h
    96  vnic_wq_desc_avail(struct vnic_wq *wq) - returns wq->ring.desc_avail
   102  vnic_wq_desc_used(struct vnic_wq *wq) - returns wq->ring.desc_count - wq->ring.desc_avail - 1
   108  vnic_wq_next_desc(struct vnic_wq *wq) - returns wq->to_use->desc
   113  vnic_wq_post(struct vnic_wq *wq, ...) - fills struct vnic_wq_buf *buf = wq->to_use, advances it, and posts buf->index to the hardware with iowrite32()
   139  vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc, u16 completed_index, void (*buf_service)(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), void *opaque)
fnic_res.h
    30  fnic_queue_wq_desc(struct vnic_wq *wq, ...) - fills a struct wq_enet_desc from vnic_wq_next_desc(wq), then vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop)
    54  fnic_queue_wq_eth_desc(struct vnic_wq *wq, ...) - as above, posting with sop = eop = 1
    79  fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, ...) - fills a struct fcpio_host_req from vnic_wq_copy_next_desc(wq), then vnic_wq_copy_post(wq)
   124  fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, ...)
   150  fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, u32 req_id, u8 format, u32 s_id, u8 *gw_mac)
   169  fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, u32 req_id, u32 s_id, u8 *fcf_mac, u8 *ha_mac, u32 r_a_tov, u32 e_d_tov)
   193  fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, u32 req_id)
   206  fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq, u32 req_id, u64 lunmap_addr, u32 lunmap_len)
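Each of these helpers follows the same two-step idiom: take the descriptor at the producer index with vnic_wq_next_desc() or vnic_wq_copy_next_desc(), fill it in place, then post it. A hedged userspace sketch of that flow, with toy types and the hardware doorbell write reduced to a comment:

```c
/* Userspace sketch of the fill-then-post idiom in fnic_res.h.
 * Types and field names are illustrative stand-ins. */
#include <stdio.h>

#define DESC_COUNT 8

struct wq_desc { unsigned long addr; unsigned int len; };

struct wq {
	struct wq_desc descs[DESC_COUNT];
	unsigned int to_use;      /* producer index */
	unsigned int desc_avail;  /* caller checks this before queueing */
};

static struct wq_desc *next_desc(struct wq *wq)
{
	return &wq->descs[wq->to_use];   /* like vnic_wq_next_desc() */
}

static void post(struct wq *wq)
{
	wq->to_use = (wq->to_use + 1) % DESC_COUNT;
	wq->desc_avail--;
	/* real hardware: iowrite32(posted index into wq->ctrl) */
}

static void queue_desc(struct wq *wq, unsigned long addr, unsigned int len)
{
	struct wq_desc *d = next_desc(wq);   /* 1. grab next free slot */
	d->addr = addr;                      /* 2. fill it in place   */
	d->len = len;
	post(wq);                            /* 3. publish to the HW  */
}

int main(void)
{
	struct wq wq = { .to_use = 0, .desc_avail = DESC_COUNT - 1 };

	queue_desc(&wq, 0x1000, 64);
	printf("to_use=%u avail=%u\n", wq.to_use, wq.desc_avail);
	return 0;
}
```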
fnic_scsi.c
   136  free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) - reclaims descriptors up to fnic->fw_ack_index[0], adding to wq->ring.desc_avail with wraparound handling, then advances wq->to_clean_index to (fw_ack_index[0] + 1) % wq->ring.desc_count
   174  local struct vnic_wq_copy *wq
   209  local struct vnic_wq_copy *wq = &fnic->wq_copy[0]
   259  fnic_queue_wq_copy_desc(struct fnic *fnic, struct vnic_wq_copy *wq, struct fnic_io_req *io_req, struct scsi_cmnd *sc, int sg_count)
   358  local struct vnic_wq_copy *wq
   593  is_ack_index_in_range(struct vnic_wq_copy *wq, u16 request_out)
   622  local struct vnic_wq_copy *wq
   992  fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, struct fcpio_host_req *desc)
  1046  local struct vnic_wq_copy *wq = &fnic->wq_copy[0]
  1377  local struct vnic_wq_copy *wq = &fnic->wq_copy[0]
fnic_fcs.c
   471  local struct vnic_wq *wq = &fnic->wq[0] - checks vnic_wq_desc_avail(wq) before fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, ...)
   507  local struct vnic_wq *wq = &fnic->wq[0] - checks vnic_wq_desc_avail(wq) before fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), ...)
   664  fnic_wq_complete_frame_send(struct vnic_wq *wq, ...) - recovers the fnic with vnic_dev_priv(wq->vdev)
   710  fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
/drivers/net/ethernet/cisco/enic/
vnic_wq.c
    30  vnic_wq_alloc_bufs(struct vnic_wq *wq) - same buffer-ring setup as the fnic copy, but with VNIC_WQ_BUF_BLK_SZ(count)-sized blocks
    68  vnic_wq_free(struct vnic_wq *wq)
    87  vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size)
   116  vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset)
   139  vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset)
   148  vnic_wq_error_status(struct vnic_wq *wq)
   153  vnic_wq_enable(struct vnic_wq *wq)
   158  vnic_wq_disable(struct vnic_wq *wq)
   176  vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
vnic_wq.h
    86  vnic_wq_desc_avail(struct vnic_wq *wq) - returns wq->ring.desc_avail
    92  vnic_wq_desc_used(struct vnic_wq *wq) - returns wq->ring.desc_count - wq->ring.desc_avail - 1
    98  vnic_wq_next_desc(struct vnic_wq *wq) - returns wq->to_use->desc
   103  vnic_wq_post(struct vnic_wq *wq, ...) - fills struct vnic_wq_buf *buf = wq->to_use and posts buf->index with iowrite32()
   129  vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc, u16 completed_index, void (*buf_service)(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), void *opaque)
enic_res.h
    43  enic_queue_wq_desc_ex(struct vnic_wq *wq, ...) - fills a struct wq_enet_desc from vnic_wq_next_desc(wq), then vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop)
    65  enic_queue_wq_desc_cont(struct vnic_wq *wq, ...) - wrapper around enic_queue_wq_desc_ex()
    74  enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, ...) - wrapper around enic_queue_wq_desc_ex()
    84  enic_queue_wq_desc_csum(struct vnic_wq *wq, ...) - wrapper around enic_queue_wq_desc_ex()
    96  enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, ...) - wrapper around enic_queue_wq_desc_ex()
   107  enic_queue_wq_desc_tso(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
/drivers/infiniband/hw/cxgb4/
t4.h
   340  t4_rqes_posted(struct t4_wq *wq) - returns wq->rq.in_use
   345  t4_rq_empty(struct t4_wq *wq) - wq->rq.in_use == 0
   350  t4_rq_full(struct t4_wq *wq) - wq->rq.in_use == (wq->rq.size - 1)
   355  t4_rq_avail(struct t4_wq *wq) - wq->rq.size - 1 - wq->rq.in_use
   360  t4_rq_produce(struct t4_wq *wq, u8 len16)
   370  t4_rq_consume(struct t4_wq *wq)
   383  t4_sq_empty(struct t4_wq *wq)
   388  t4_sq_full(struct t4_wq *wq)
   393  t4_sq_avail(struct t4_wq *wq)
   398  t4_sq_produce(struct t4_wq *wq, u8 len16)
   408  t4_sq_consume(struct t4_wq *wq)
   415  t4_ring_sq_db(struct t4_wq *wq, u16 inc)
   421  t4_ring_rq_db(struct t4_wq *wq, u16 inc)
   427  t4_wq_in_error(struct t4_wq *wq)
   432  t4_set_wq_in_error(struct t4_wq *wq)
   437  t4_disable_wq_db(struct t4_wq *wq)
   442  t4_enable_wq_db(struct t4_wq *wq)
   447  t4_wq_db_enabled(struct t4_wq *wq)
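The t4_rq_*/t4_sq_* helpers are the cidx/pidx variant of the same ring: produce bumps in_use and wraps the producer index at size, consume does the reverse with the consumer index, and "full" is in_use == size - 1 so a completely full queue stays distinguishable from an empty one. A runnable sketch of that arithmetic, using simplified fields rather than the real struct t4_wq:

```c
/* Userspace sketch of the cidx/pidx occupancy accounting in t4.h. */
#include <assert.h>

struct t4_q {
	unsigned int size;
	unsigned int in_use;
	unsigned int cidx, pidx;   /* consumer / producer indices */
};

static unsigned int q_avail(const struct t4_q *q)
{
	return q->size - 1 - q->in_use;    /* like t4_rq_avail() */
}

static void q_produce(struct t4_q *q)
{
	q->in_use++;
	if (++q->pidx == q->size)          /* wrap at size */
		q->pidx = 0;
}

static void q_consume(struct t4_q *q)
{
	q->in_use--;
	if (++q->cidx == q->size)
		q->cidx = 0;
}

int main(void)
{
	struct t4_q q = { .size = 4 };

	while (q_avail(&q))
		q_produce(&q);
	assert(q.in_use == q.size - 1);    /* "full", like t4_rq_full() */

	q_consume(&q);
	assert(q_avail(&q) == 1);
	return 0;
}
```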
cq.c
   177  insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) - builds a flush CQE stamped with V_CQE_QPID(wq->sq.qid) at cq->sw_pidx
   194  c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) - inserts a recv flush CQE for each of wq->rq.in_use - count pending RQEs
   209  insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, struct t4_swsqe *swcqe)
   228  c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
   268  cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
   284  c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
   303  c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
   322  flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
   353  create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, struct t4_cqe *read_cqe)
   368  advance_oldest_read(struct t4_wq *wq)
   402  poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit)
   569  local struct t4_wq *wq
qp.c
    94  destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, ...) - frees wq->rq.queue (dma_unmap_addr(&wq->rq, mapping)), calls dealloc_sq(rdev, &wq->sq), returns the RQT entries via c4iw_rqtpool_free(), kfree()s wq->rq.sw_rq and wq->sq.sw_sq, and releases the qpid
   113  create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx)
/drivers/infiniband/hw/ipath/
ipath_srq.c
    52  local struct ipath_rwq *wq - post-receive path: next = wq->head + 1, fails if next == wq->tail (ring full), otherwise fills the WQE from get_rwqe_ptr(&srq->rq, wq->head) and stores wq->head = next; the ring itself is allocated with vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz) so it can be mapped into userspace
   220  local struct ipath_rwq *wq
ipath_qp.c
   360  reset path: if qp->r_rq.wq is set, zeroes its head and tail
   411  local struct ipath_rwq *wq - reads wq->head and wq->tail from qp->r_rq.wq to walk the receive queue
   690  local struct ipath_rwq *wq = qp->r_rq.wq
/drivers/infiniband/hw/qib/
qib_srq.c
    52  local struct qib_rwq *wq - same head/tail post-receive ring as ipath_srq.c, allocated with vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz)
   216  local struct qib_rwq *wq
/drivers/infiniband/hw/cxgb3/
cxio_hal.c
   274  cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, struct t3_wq *wq, struct cxio_ucontext *uctx) - sizes the queue as 1UL << wq->size_log2 (RQ as 1UL << wq->rq_size_log2), gets wq->qpid, kzalloc()s wq->rq, and allocates wq->rq_addr from the RQT pool
   336  cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq, struct cxio_ucontext *uctx)
   350  insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
   368  cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
   386  insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, struct t3_swsq *sqp)
   407  cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
   446  cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
   464  cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
   482  cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
  1075  flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
  1104  create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe, struct t3_cqe *read_cqe)
  1118  advance_oldest_read(struct t3_wq *wq)
  1149  cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit)
iwch_qp.c
   149  build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) - indexes the queue with Q_PTR2IDX(wq->wptr + 1, wq->size_log2) and stamps the generation bit with Q_GENBIT(wq->wptr + 1, wq->size_log2); the post-receive path records wr->wr_id in qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)]
iwch_cq.c
    49  local struct t3_wq *wq - NULL when the CQE's QP is gone, otherwise &qhp->wq, passed to cxio_poll_cq(wq, &chp->cq, &cqe, &cqe_flushed, &cookie, ...); the NULL case is rechecked before touching the WQ at line 190
cxio_hal.h
   165  int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq, ...)
   167  int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq, ...)
   190  int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
   191  int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
   192  void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
   193  void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
   195  int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, ...)
/drivers/i2c/busses/
i2c-taos-evm.c
    42  static DECLARE_WAIT_QUEUE_HEAD(wq)
   116  wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, ...)
   167  wake_up_interruptible(&wq)
   172  wake_up_interruptible(&wq)
   179  wake_up_interruptible(&wq)
   232  wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, ...)
   254  wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, ...)
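This is the classic kernel sleep/wake handshake: the transfer path sleeps in wait_event_interruptible_timeout() until the interrupt path sets taos->state and calls wake_up_interruptible(). A rough userspace analogue using a pthread condition variable; the names, the BUSY/IDLE states, and the 1 s timeout are illustrative, not taken from the driver:

```c
/* Userspace analogue of wait_event_interruptible_timeout() +
 * wake_up_interruptible() as used in i2c-taos-evm.c. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

enum { BUSY, IDLE };
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int state = BUSY;

static void *completer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	state = IDLE;               /* like the IRQ handler setting state */
	pthread_cond_signal(&cond); /* like wake_up_interruptible(&wq) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;       /* ~1 s timeout, like a HZ jiffy count */

	pthread_create(&t, NULL, completer, NULL);

	pthread_mutex_lock(&lock);
	while (state != IDLE) {     /* condition is rechecked after wakeup */
		if (pthread_cond_timedwait(&cond, &lock, &deadline))
			break;      /* timed out */
	}
	printf("state=%s\n", state == IDLE ? "IDLE" : "BUSY");
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}
```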
/drivers/mtd/chips/
cfi_cmdset_0020.c
   160  init_waitqueue_head(&(cfi->chips[i].wq))
   300  wake_up(&chip->wq)
   355  add_wait_queue(&chip->wq, &wait)
   358  remove_wait_queue(&chip->wq, &wait)
   380  wake_up(&chip->wq)
   488  add_wait_queue(&chip->wq, &wait)
   491  remove_wait_queue(&chip->wq, &wait)
   545  add_wait_queue(&chip->wq, &wait)
   548  remove_wait_queue(&chip->wq, &wait)
   598  wake_up(&chip->wq)
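Unlike the wait_event() users above, this driver open-codes the sleep: queue the task, mark it sleeping, drop the chip lock, schedule(), then dequeue and recheck. A kernel-style sketch of that skeleton; struct chip, FL_READY, and the locking shown here are simplified placeholders, not the real CFI code:

```c
/* Kernel-style sketch of the open-coded wait loop used in
 * cfi_cmdset_0020.c.  Placeholder types, not driver code. */
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#define FL_READY 0

struct chip {
	spinlock_t lock;
	wait_queue_head_t wq;
	int state;                 /* FL_READY when the chip is free */
};

static void wait_for_chip(struct chip *chip)
{
	DECLARE_WAITQUEUE(wait, current);

	spin_lock(&chip->lock);
	while (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(&chip->lock);    /* never sleep holding the lock */
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(&chip->lock);      /* recheck state after waking */
	}
	spin_unlock(&chip->lock);
}

/* the release path sets chip->state = FL_READY, then wake_up(&chip->wq) */
```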
/drivers/media/dvb/ddbridge/
ddbridge.h
    85  wait_queue_head_t wq - member in struct ddb_input
   113  wait_queue_head_t wq - member in struct ddb_output
   132  wait_queue_head_t wq - member in struct ddb_i2c
/drivers/usb/misc/
appledisplay.c
    86  static struct workqueue_struct *wq
   122  queue_delayed_work(wq, &pdata->work, 0)
   360  wq = create_singlethread_workqueue("appledisplay")
   361  if (!wq) { ... }
   371  flush_workqueue(wq)
   372  destroy_workqueue(wq)
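Here wq is a private workqueue rather than a wait queue, and the matches trace its full lifecycle: create at init, queue delayed work from the completion path, flush and destroy on unload. A hedged module-skeleton sketch of that lifecycle; the "demo" names are placeholders, and the cancel before destroy is an extra safety step, not something the listing shows:

```c
/* Kernel-module sketch of the private-workqueue lifecycle
 * seen in appledisplay.c.  "demo" names are illustrative. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;

static void demo_fn(struct work_struct *work)
{
	/* deferred work runs here, in the "demo" kernel thread */
}
static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	wq = create_singlethread_workqueue("demo");
	if (!wq)
		return -ENOMEM;
	queue_delayed_work(wq, &demo_work, HZ);  /* run demo_fn in ~1 s */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);    /* stop pending work */
	flush_workqueue(wq);                     /* drain anything running */
	destroy_workqueue(wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```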
/drivers/message/i2o/
exec-osm.c
    49  wait_queue_head_t *wq - member in struct i2o_exec_wait, a pointer to the waiter's queue
   125  DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq)
   152  wait->wq = &wq
   165  wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ)
   169  wait->wq = NULL
   240  if (wait->wq) ...
   257  wake_up_interruptible(wait->wq)
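The wait queue head here lives on the waiter's stack, and its address is published through wait->wq; clearing the pointer after the timeout is what keeps a late reply from waking a stack frame that no longer exists. A kernel-style sketch of that pattern; struct pending is illustrative, and the real driver additionally guards the pointer check with a lock:

```c
/* Kernel-style sketch of the on-stack wait-queue pattern in
 * exec-osm.c.  struct pending is a simplified stand-in. */
#include <linux/wait.h>

struct pending {
	wait_queue_head_t *wq;  /* points at the waiter's stack, or NULL */
	int complete;
};

static int wait_reply(struct pending *p, long timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

	p->wq = &wq;            /* publish the stack wait queue */
	wait_event_interruptible_timeout(wq, p->complete, timeout);
	p->wq = NULL;           /* a late reply must not wake dead stack */
	return p->complete;
}

/* reply path: only wake if the waiter is still around */
static void reply_arrived(struct pending *p)
{
	p->complete = 1;
	if (p->wq)
		wake_up_interruptible(p->wq);
}
```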