Searched refs:wq (Results 1 - 25 of 265) sorted by relevance


/drivers/scsi/fnic/
vnic_wq_copy.h
36 static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) argument
38 return wq->ring.desc_avail;
41 static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) argument
43 return wq->ring.desc_count - 1 - wq->ring.desc_avail;
46 static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) argument
48 struct fcpio_host_req *desc = wq->ring.descs;
49 return &desc[wq->to_use_index];
52 static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) argument
55 ((wq
69 vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index) argument
83 vnic_wq_copy_service(struct vnic_wq_copy *wq, u16 completed_index, void (*q_service)(struct vnic_wq_copy *wq, struct fcpio_host_req *wq_desc)) argument
[all...]
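
The vnic_wq_copy helpers above keep one descriptor slot permanently unused, which is why occupancy is computed as desc_count - 1 - desc_avail: it lets a full ring be told apart from an empty one. A minimal user-space sketch of that accounting (structure and names are illustrative, not the fnic layout; the real post path additionally writes the new index to the hardware posted_index register):

    /* Illustrative ring accounting: in_use == desc_count - 1 - desc_avail,
     * one slot always kept free so "full" and "empty" are distinguishable. */
    #include <stdio.h>

    struct ring {
        unsigned int desc_count;   /* total slots */
        unsigned int desc_avail;   /* free slots, never reaches desc_count */
        unsigned int to_use_index; /* next slot handed out */
    };

    static unsigned int ring_in_use(const struct ring *r)
    {
        return r->desc_count - 1 - r->desc_avail;
    }

    static void ring_post(struct ring *r)
    {
        r->to_use_index = (r->to_use_index + 1) % r->desc_count; /* wrap */
        r->desc_avail--;
    }

    int main(void)
    {
        struct ring r = { .desc_count = 8, .desc_avail = 7, .to_use_index = 0 };
        ring_post(&r);
        printf("in use: %u, next index: %u\n", ring_in_use(&r), r.to_use_index);
        return 0;
    }
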
vnic_wq.c
27 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) argument
31 unsigned int i, j, count = wq->ring.desc_count;
34 vdev = wq->vdev;
37 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
38 if (!wq->bufs[i]) {
45 buf = wq->bufs[i];
48 buf->desc = (u8 *)wq->ring.descs +
49 wq->ring.desc_size * buf->index;
51 buf->next = wq->bufs[0];
54 buf->next = wq
67 vnic_wq_free(struct vnic_wq *wq) argument
85 vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) argument
114 vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
131 vnic_wq_error_status(struct vnic_wq *wq) argument
136 vnic_wq_enable(struct vnic_wq *wq) argument
141 vnic_wq_disable(struct vnic_wq *wq) argument
159 vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) argument
[all...]
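
vnic_wq_alloc_bufs() above allocates per-descriptor bookkeeping entries in blocks and chains them into a circle, with each entry's desc pointer aimed into the DMA descriptor ring and the last entry linking back to bufs[0]. A simplified sketch of that linking, using one flat allocation instead of the driver's block scheme:

    /* Build a circular list of software buf entries, each pointing at its
     * slot in the descriptor ring, so the driver can walk buf = buf->next
     * around the ring indefinitely.  Simplified, illustrative layout. */
    #include <stdlib.h>

    struct wq_buf {
        unsigned int index;
        void *desc;              /* points into the descriptor ring */
        struct wq_buf *next;
    };

    static struct wq_buf *link_bufs(void *descs, size_t desc_size,
                                    unsigned int count)
    {
        struct wq_buf *bufs = calloc(count, sizeof(*bufs));

        if (!bufs)
            return NULL;
        for (unsigned int i = 0; i < count; i++) {
            bufs[i].index = i;
            bufs[i].desc = (char *)descs + desc_size * i;
            bufs[i].next = (i == count - 1) ? &bufs[0]       /* close the circle */
                                            : &bufs[i + 1];
        }
        return bufs;
    }
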
vnic_wq_copy.c
25 void vnic_wq_copy_enable(struct vnic_wq_copy *wq) argument
27 iowrite32(1, &wq->ctrl->enable);
30 int vnic_wq_copy_disable(struct vnic_wq_copy *wq) argument
34 iowrite32(0, &wq->ctrl->enable);
38 if (!(ioread32(&wq->ctrl->running)))
45 wq->index, ioread32(&wq->ctrl->fetch_index),
46 ioread32(&wq->ctrl->posted_index));
51 void vnic_wq_copy_clean(struct vnic_wq_copy *wq, argument
52 void (*q_clean)(struct vnic_wq_copy *wq,
69 vnic_wq_copy_free(struct vnic_wq_copy *wq) argument
78 vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) argument
102 vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
[all...]
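
vnic_wq_copy_disable() above clears the enable register and then polls the running register before giving up and dumping the fetch/posted indices. A hedged kernel-style sketch of that disable-then-poll shape (register layout, poll count, and delay are illustrative, not the fnic values):

    /* Clear the enable register, then give the hardware a bounded amount
     * of time to report that it has stopped fetching descriptors. */
    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    struct hw_wq_ctrl {
        u32 __iomem *enable;
        u32 __iomem *running;
    };

    static int hw_wq_disable(struct hw_wq_ctrl *ctrl)
    {
        int i;

        iowrite32(0, ctrl->enable);

        for (i = 0; i < 100; i++) {          /* bounded poll */
            if (!ioread32(ctrl->running))
                return 0;                     /* queue quiesced */
            udelay(10);
        }
        return -ETIMEDOUT;                    /* still running, report failure */
    }
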
vnic_wq.h
96 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) argument
99 return wq->ring.desc_avail;
102 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) argument
105 return wq->ring.desc_count - wq->ring.desc_avail - 1;
108 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) argument
110 return wq->to_use->desc;
113 static inline void vnic_wq_post(struct vnic_wq *wq, argument
117 struct vnic_wq_buf *buf = wq->to_use;
132 iowrite32(buf->index, &wq
139 vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc, u16 completed_index, void (*buf_service)(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), void *opaque) argument
[all...]
fnic_res.h
30 static inline void fnic_queue_wq_desc(struct vnic_wq *wq, argument
37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
54 static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, argument
61 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
76 vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
79 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, argument
91 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
121 vnic_wq_copy_post(wq);
124 static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, argument
150 fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, u32 req_id, u8 format, u32 s_id, u8 *gw_mac) argument
169 fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, u32 req_id, u32 s_id, u8 *fcf_mac, u8 *ha_mac, u32 r_a_tov, u32 e_d_tov) argument
193 fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, u32 req_id) argument
206 fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq, u32 req_id, u64 lunmap_addr, u32 lunmap_len) argument
[all...]
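
All of the fnic_queue_wq_* helpers above follow the same three steps: claim the next descriptor with vnic_wq_next_desc(), fill it, and hand it to vnic_wq_post() (or vnic_wq_copy_post()) to advance the ring. A hypothetical caller sketch of that shape, assuming the fnic vnic_wq definitions are in scope; the descriptor field encoding is omitted:

    /* Hypothetical caller: 1. claim a descriptor, 2. fill it, 3. post it.
     * The fill step is elided because the field encoding is driver-specific. */
    static void example_queue_frame(struct vnic_wq *wq, void *os_buf,
                                    dma_addr_t dma_addr, unsigned int len)
    {
        struct wq_enet_desc *desc = vnic_wq_next_desc(wq);   /* 1. claim */

        (void)desc;   /* 2. fill: encode len, offloads, sop/eop bits here */

        /* 3. post: bookkeeping, ring advance, doorbell; sop = eop = 1 */
        vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
    }
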
/drivers/net/ethernet/cisco/enic/
vnic_wq.c
30 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) argument
33 unsigned int i, j, count = wq->ring.desc_count;
37 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
38 if (!wq->bufs[i])
43 buf = wq->bufs[i];
46 buf->desc = (u8 *)wq->ring.descs +
47 wq->ring.desc_size * buf->index;
49 buf->next = wq->bufs[0];
52 buf->next = wq->bufs[i + 1];
60 wq
65 vnic_wq_free(struct vnic_wq *wq) argument
84 vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) argument
113 vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
136 vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) argument
145 vnic_wq_error_status(struct vnic_wq *wq) argument
150 vnic_wq_enable(struct vnic_wq *wq) argument
155 vnic_wq_disable(struct vnic_wq *wq) argument
173 vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) argument
[all...]
vnic_wq.h
90 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) argument
93 return wq->ring.desc_avail;
96 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) argument
99 return wq->ring.desc_count - wq->ring.desc_avail - 1;
102 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) argument
104 return wq->to_use->desc;
107 static inline void vnic_wq_post(struct vnic_wq *wq, argument
113 struct vnic_wq_buf *buf = wq->to_use;
132 iowrite32(buf->index, &wq
139 vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc, u16 completed_index, void (*buf_service)(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), void *opaque) argument
[all...]
enic_res.h
43 static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, argument
49 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
65 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
69 static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, argument
73 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
78 static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, argument
82 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
88 static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, argument
93 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
100 static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, argument
111 enic_queue_wq_desc_tso(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) argument
[all...]
enic.h
167 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; member in struct:enic
202 static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) argument
204 return enic->rq_count + wq;
229 unsigned int wq)
231 return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
228 enic_msix_wq_intr(struct enic *enic, unsigned int wq) argument
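
enic_cq_wq() above returns rq_count + wq, i.e. completion queues are laid out with one CQ per receive queue first, followed by one per work queue. A tiny runnable illustration of that index mapping (the queue counts are made up):

    /* CQ layout: [0 .. rq_count-1] serve RQs, [rq_count ..] serve WQs. */
    #include <assert.h>

    static unsigned int cq_index_for_wq(unsigned int rq_count, unsigned int wq)
    {
        return rq_count + wq;
    }

    int main(void)
    {
        /* e.g. 8 RQs and 8 WQs: WQ 0 uses CQ 8, WQ 7 uses CQ 15 */
        assert(cq_index_for_wq(8, 0) == 8);
        assert(cq_index_for_wq(8, 7) == 15);
        return 0;
    }
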
/drivers/infiniband/hw/cxgb4/
t4.h
340 static inline int t4_rqes_posted(struct t4_wq *wq) argument
342 return wq->rq.in_use;
345 static inline int t4_rq_empty(struct t4_wq *wq) argument
347 return wq->rq.in_use == 0;
350 static inline int t4_rq_full(struct t4_wq *wq) argument
352 return wq->rq.in_use == (wq->rq.size - 1);
355 static inline u32 t4_rq_avail(struct t4_wq *wq) argument
357 return wq->rq.size - 1 - wq
360 t4_rq_produce(struct t4_wq *wq, u8 len16) argument
370 t4_rq_consume(struct t4_wq *wq) argument
378 t4_rq_host_wq_pidx(struct t4_wq *wq) argument
383 t4_rq_wq_size(struct t4_wq *wq) argument
393 t4_sq_empty(struct t4_wq *wq) argument
398 t4_sq_full(struct t4_wq *wq) argument
403 t4_sq_avail(struct t4_wq *wq) argument
408 t4_sq_produce(struct t4_wq *wq, u8 len16) argument
418 t4_sq_consume(struct t4_wq *wq) argument
428 t4_sq_host_wq_pidx(struct t4_wq *wq) argument
433 t4_sq_wq_size(struct t4_wq *wq) argument
454 t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, union t4_wr *wqe) argument
478 t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, union t4_recv_wr *wqe) argument
502 t4_wq_in_error(struct t4_wq *wq) argument
507 t4_set_wq_in_error(struct t4_wq *wq) argument
512 t4_disable_wq_db(struct t4_wq *wq) argument
517 t4_enable_wq_db(struct t4_wq *wq) argument
522 t4_wq_db_enabled(struct t4_wq *wq) argument
[all...]
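
The t4_rq_*/t4_sq_* helpers above use the same one-slot-reserved convention: full means in_use == size - 1, and producer/consumer indices wrap at the ring size. A standalone C sketch of that accounting (field names are illustrative; the real code also tracks len16 units and a separate hardware pidx):

    /* in_use counter plus producer/consumer indices that wrap at size;
     * the ring is considered full at size - 1 entries. */
    struct sw_ring {
        unsigned int size;    /* number of slots */
        unsigned int in_use;
        unsigned int pidx;    /* producer index */
        unsigned int cidx;    /* consumer index */
    };

    static int ring_full(const struct sw_ring *q)  { return q->in_use == q->size - 1; }
    static int ring_empty(const struct sw_ring *q) { return q->in_use == 0; }
    static unsigned int ring_avail(const struct sw_ring *q)
    {
        return q->size - 1 - q->in_use;
    }

    static void ring_produce(struct sw_ring *q)
    {
        q->in_use++;
        if (++q->pidx == q->size)
            q->pidx = 0;          /* wrap */
    }

    static void ring_consume(struct sw_ring *q)
    {
        q->in_use--;
        if (++q->cidx == q->size)
            q->cidx = 0;          /* wrap */
    }
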
cq.c
178 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) argument
182 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
183 wq, cq, cq->sw_cidx, cq->sw_pidx);
189 V_CQE_QPID(wq->sq.qid));
195 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) argument
198 int in_use = wq->rq.in_use - count;
201 PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
202 wq, cq, wq->rq.in_use, count);
204 insert_recv_cqe(wq, c
210 insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, struct t4_swsqe *swcqe) argument
234 struct t4_wq *wq = &qhp->wq; local
263 flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) argument
299 create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, struct t4_cqe *read_cqe) argument
311 advance_oldest_read(struct t4_wq *wq) argument
412 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
428 c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) argument
463 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
681 struct t4_wq *wq; local
[all...]
qp.c
149 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, argument
157 wq->rq.memsize, wq->rq.queue,
158 dma_unmap_addr(&wq->rq, mapping));
159 dealloc_sq(rdev, &wq->sq);
160 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
161 kfree(wq->rq.sw_rq);
162 kfree(wq->sq.sw_sq);
163 c4iw_put_qpid(rdev, wq
168 create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx) argument
[all...]
/drivers/infiniband/hw/ipath/
ipath_srq.c
52 struct ipath_rwq *wq; local
68 wq = srq->rq.wq;
69 next = wq->head + 1;
72 if (next == wq->tail) {
79 wqe = get_rwqe_ptr(&srq->rq, wq->head);
86 wq->head = next;
139 srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
140 if (!srq->rq.wq) {
156 srq->rq.wq);
220 struct ipath_rwq *wq; local
[all...]
/drivers/infiniband/hw/qib/
qib_srq.c
52 struct qib_rwq *wq; local
68 wq = srq->rq.wq;
69 next = wq->head + 1;
72 if (next == wq->tail) {
79 wqe = get_rwqe_ptr(&srq->rq, wq->head);
86 wq->head = next;
136 srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
137 if (!srq->rq.wq) {
152 srq->rq.wq);
216 struct qib_rwq *wq; local
[all...]
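
Both SRQ implementations above post receives into a head/tail ring: advancing head would collide with tail when the ring is full, and the wrap is a compare-and-reset rather than a modulo. A simplified sketch of that post path (names are illustrative):

    /* Post advances head; the consumer advances tail; one slot is always
     * left empty so that head == tail means empty, not full. */
    struct rwq {
        unsigned int size;
        unsigned int head;   /* next slot to post into */
        unsigned int tail;   /* next slot the consumer will take */
    };

    /* returns 0 on success, -1 if the ring is full */
    static int rwq_post(struct rwq *wq, unsigned int *slot)
    {
        unsigned int next = wq->head + 1;

        if (next >= wq->size)
            next = 0;             /* wrap */
        if (next == wq->tail)
            return -1;            /* full */

        *slot = wq->head;         /* caller fills the WQE at this index */
        wq->head = next;
        return 0;
    }
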
/drivers/infiniband/hw/cxgb3/
cxio_hal.c
275 struct t3_wq *wq, struct cxio_ucontext *uctx)
277 int depth = 1UL << wq->size_log2;
278 int rqsize = 1UL << wq->rq_size_log2;
280 wq->qpid = get_qpid(rdev_p, uctx);
281 if (!wq->qpid)
284 wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL);
285 if (!wq->rq)
288 wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
289 if (!wq->rq_addr)
292 wq
274 cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, struct t3_wq *wq, struct cxio_ucontext *uctx) argument
336 cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq, struct cxio_ucontext *uctx) argument
350 insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq) argument
368 cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count) argument
386 insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, struct t3_swsq *sqp) argument
407 cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) argument
446 cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) argument
464 cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count) argument
482 cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count) argument
1073 flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq) argument
1102 create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe, struct t3_cqe *read_cqe) argument
1116 advance_oldest_read(struct t3_wq *wq) argument
1147 cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
[all...]
iwch_qp.c
150 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
174 wqe = (union t3_wr *)(wq->queue +
175 Q_PTR2IDX((wq->wptr+1), wq->size_log2));
177 Q_GENBIT(wq->wptr + 1, wq->size_log2),
280 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
281 qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
282 qhp->wq
149 build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) argument
[all...]
iwch_cq.c
49 struct t3_wq *wq; local
62 wq = NULL;
65 wq = &(qhp->wq);
67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
190 if (wq)
/drivers/md/bcache/
closure.h
151 struct workqueue_struct *wq; member in struct:closure::__anon1970::__anon1971
239 struct workqueue_struct *wq)
244 cl->wq = wq;
251 struct workqueue_struct *wq = cl->wq; local
252 if (wq) {
254 BUG_ON(!queue_work(wq, &cl->work));
311 * of @wq (or, if @wq i
238 set_closure_fn(struct closure *cl, closure_fn *fn, struct workqueue_struct *wq) argument
378 closure_call(struct closure *cl, closure_fn fn, struct workqueue_struct *wq, struct closure *parent) argument
[all...]
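
set_closure_fn() above records a continuation and an optional workqueue; when the closure fires, the work is queued on that workqueue if one was given and called directly otherwise. A standalone kernel-style sketch of that pattern, not the bcache closure machinery itself:

    /* Run the continuation on a workqueue if one was supplied, otherwise
     * call it synchronously in the caller's context. */
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct continuation {
        struct work_struct work;
        void (*fn)(struct continuation *c);
        struct workqueue_struct *wq;   /* NULL means run synchronously */
    };

    static void continuation_work_fn(struct work_struct *work)
    {
        struct continuation *c = container_of(work, struct continuation, work);

        c->fn(c);
    }

    static void continuation_fire(struct continuation *c)
    {
        if (c->wq) {
            INIT_WORK(&c->work, continuation_work_fn);
            BUG_ON(!queue_work(c->wq, &c->work));  /* must not already be queued */
        } else {
            c->fn(c);                              /* caller's context */
        }
    }
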
request.h
8 struct workqueue_struct *wq; member in struct:data_insert_op
/drivers/usb/chipidea/
otg.c
119 ci->wq = create_singlethread_workqueue("ci_otg");
120 if (!ci->wq) {
137 if (ci->wq) {
138 flush_workqueue(ci->wq);
139 destroy_workqueue(ci->wq);
otg.h
23 queue_work(ci->wq, &ci->work);
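
The chipidea OTG code above creates a private single-threaded workqueue at init, queues its work item from event context, and flushes it before destroying the queue on teardown. A hedged sketch of that lifecycle with illustrative names:

    /* create at init, queue from event context, flush then destroy on exit */
    #include <linux/workqueue.h>
    #include <linux/errno.h>

    struct my_ctrl {
        struct workqueue_struct *wq;
        struct work_struct work;
    };

    static int my_ctrl_init(struct my_ctrl *ci, work_func_t fn)
    {
        ci->wq = create_singlethread_workqueue("my_otg");
        if (!ci->wq)
            return -ENOMEM;
        INIT_WORK(&ci->work, fn);
        return 0;
    }

    static void my_ctrl_kick(struct my_ctrl *ci)
    {
        queue_work(ci->wq, &ci->work);   /* defer handling to process context */
    }

    static void my_ctrl_fini(struct my_ctrl *ci)
    {
        if (ci->wq) {
            flush_workqueue(ci->wq);     /* wait for pending work */
            destroy_workqueue(ci->wq);
        }
    }
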
/drivers/gpu/drm/radeon/
radeon_sa.c
56 init_waitqueue_head(&sa_manager->wq);
333 spin_lock(&sa_manager->wq.lock);
345 spin_unlock(&sa_manager->wq.lock);
352 spin_unlock(&sa_manager->wq.lock);
354 spin_lock(&sa_manager->wq.lock);
358 sa_manager->wq,
365 spin_unlock(&sa_manager->wq.lock);
381 spin_lock(&sa_manager->wq.lock);
389 wake_up_all_locked(&sa_manager->wq);
390 spin_unlock(&sa_manager->wq
[all...]
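
radeon_sa.c above reuses the wait queue head's internal spinlock (wq.lock) as the allocator lock, so waiters can block with the locked wait_event variant and the free path can wake them with wake_up_all_locked() without taking a second lock. A simplified sketch of that locking pattern (the pool here is just a counter):

    /* The waitqueue's own wq.lock doubles as the data-structure lock. */
    #include <linux/wait.h>
    #include <linux/spinlock.h>

    struct simple_pool {
        wait_queue_head_t wq;
        unsigned int free;
    };

    static void simple_pool_init(struct simple_pool *p, unsigned int n)
    {
        init_waitqueue_head(&p->wq);
        p->free = n;
    }

    static int simple_pool_get(struct simple_pool *p)
    {
        int r;

        spin_lock(&p->wq.lock);
        /* sleeps with wq.lock dropped, retakes it before re-checking */
        r = wait_event_interruptible_locked(p->wq, p->free > 0);
        if (!r)
            p->free--;
        spin_unlock(&p->wq.lock);
        return r;
    }

    static void simple_pool_put(struct simple_pool *p)
    {
        spin_lock(&p->wq.lock);
        p->free++;
        wake_up_all_locked(&p->wq);   /* wake every waiter to re-check */
        spin_unlock(&p->wq.lock);
    }
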
/drivers/staging/lustre/lustre/include/
lustre_lib.h
449 * l_wait_event(&obj->wq, ....); (1)
451 * wake_up(&obj->wq): (2)
531 #define __l_wait_event(wq, condition, info, ret, l_add_wait) \
543 l_add_wait(&wq, &__wait); \
609 remove_wait_queue(&wq, &__wait); \
614 #define l_wait_event(wq, condition, info) \
619 __l_wait_event(wq, condition, __info, \
624 #define l_wait_event_exclusive(wq, condition, info) \
629 __l_wait_event(wq, condition, __info, \
634 #define l_wait_event_exclusive_head(wq, conditio
[all...]
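
The __l_wait_event()/l_wait_event() macros above open-code a wait loop rather than using wait_event(), so Lustre can layer its own signal and timeout policy on top. A minimal sketch of the underlying loop using the standard prepare_to_wait()/finish_wait() idiom; the Lustre-specific signal and timeout handling is omitted:

    /* Register on the wait queue, re-check the condition, sleep until
     * woken, and unregister afterwards. */
    #include <linux/wait.h>
    #include <linux/sched.h>

    static void wait_for_condition(wait_queue_head_t *wq,
                                   bool (*cond)(void *arg), void *arg)
    {
        DEFINE_WAIT(wait);

        for (;;) {
            /* puts us on wq and marks the task as sleeping */
            prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
            if (cond(arg))
                break;
            schedule();           /* actually sleep until a wake_up() */
        }
        finish_wait(wq, &wait);   /* back to TASK_RUNNING, off the queue */
    }
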
/drivers/gpu/drm/
drm_flip_work.c
49 * @wq: the work-queue to run the queued work on
57 struct workqueue_struct *wq)
62 queue_work(wq, &work->worker);
56 drm_flip_work_commit(struct drm_flip_work *work, struct workqueue_struct *wq) argument
/drivers/hid/
hid-elo.c
35 static struct workqueue_struct *wq; variable in typeref:struct:workqueue_struct
175 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL);
248 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL);
262 flush_workqueue(wq);
286 wq = create_singlethread_workqueue("elousb");
287 if (!wq)
292 destroy_workqueue(wq);
301 destroy_workqueue(wq);
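
hid-elo above uses a module-wide single-threaded workqueue and a delayed work item that re-queues itself after every run, giving a periodic poll without a timer. A sketch of that self-rearming pattern; the interval and names are illustrative, and teardown here uses cancel_delayed_work_sync() rather than the driver's flush-then-destroy sequence:

    /* Delayed work that rearms itself each time it runs. */
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    #define POLL_INTERVAL (HZ / 2)   /* hypothetical period */

    static struct workqueue_struct *poll_wq;
    static struct delayed_work poll_work;

    static void poll_fn(struct work_struct *work)
    {
        /* ... read the device ... */

        queue_delayed_work(poll_wq, &poll_work, POLL_INTERVAL);  /* rearm */
    }

    static int poll_start(void)
    {
        poll_wq = create_singlethread_workqueue("example_poll");
        if (!poll_wq)
            return -ENOMEM;
        INIT_DELAYED_WORK(&poll_work, poll_fn);
        queue_delayed_work(poll_wq, &poll_work, POLL_INTERVAL);
        return 0;
    }

    static void poll_stop(void)
    {
        cancel_delayed_work_sync(&poll_work);   /* stop the rearm loop */
        destroy_workqueue(poll_wq);
    }
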

Completed in 4317 milliseconds
