Lines Matching refs:rq (Cisco VNIC receive-queue API, vnic_rq.h)

105 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
108 return rq->ring.desc_avail;
111 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
114 return rq->ring.desc_count - rq->ring.desc_avail - 1;
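
Together these two accessors account for every slot in the ring: vnic_rq_desc_avail() counts descriptors software still owns, while vnic_rq_desc_used() counts those currently handed to hardware, with one slot held back (the "- 1"), the usual ring-buffer trick for telling a full ring from an empty one. A minimal sanity-check sketch, assuming nothing beyond the two accessors and the ring fields shown above (the helper name is hypothetical):

/* Hypothetical sanity check: software-owned plus hardware-owned
 * descriptors must always add up to desc_count - 1, the remaining
 * slot being the reserved gap.
 */
static void example_rq_check_accounting(struct vnic_rq *rq)
{
        unsigned int avail = vnic_rq_desc_avail(rq);
        unsigned int used = vnic_rq_desc_used(rq);

        WARN_ON_ONCE(avail + used != rq->ring.desc_count - 1);
}
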
117 static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
119 return rq->to_use->desc;
122 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
124 return rq->to_use->index;
127 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
129 return rq->buf_index++;
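
These three helpers read the software side of the ring cursor: vnic_rq_next_desc() and vnic_rq_next_index() expose the hardware descriptor and ring position that the next post will consume, while vnic_rq_next_buf_index() hands out a running per-buffer index and increments it as a side effect, so it should be called exactly once per posted buffer. A small sketch of peeking at the cursor (the helper name is hypothetical):

/* Hypothetical debug hook: report which slot the next vnic_rq_post()
 * will consume.  vnic_rq_next_buf_index() is deliberately not called
 * here, since it advances rq->buf_index.
 */
static void example_rq_peek(struct vnic_rq *rq)
{
        void *desc = vnic_rq_next_desc(rq);
        unsigned int index = vnic_rq_next_index(rq);

        pr_debug("rq: next descriptor %p at ring index %u\n", desc, index);
}
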
132 static inline void vnic_rq_post(struct vnic_rq *rq,
136 struct vnic_rq_buf *buf = rq->to_use;
144 rq->to_use = buf;
145 rq->ring.desc_avail--;
161 iowrite32(buf->index, &rq->ctrl->posted_index);
165 static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
167 return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
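
vnic_rq_post() records the buffer in the to_use entry, advances to_use, and takes one descriptor out of the available pool; the iowrite32() to posted_index is reached only when the index lands on a VNIC_RQ_RETURN_RATE boundary (the same test vnic_rq_posting_soon() exposes), so doorbell writes are batched. Below is a sketch of an allocation callback built on these helpers, assuming the elided vnic_rq_post() parameters are the OS buffer pointer, buffer index, DMA address and length, and using the hypothetical example_netdev_from_rq() and rq_desc_encode() in place of driver-private code:

/* Sketch of a buf_fill()-style callback for vnic_rq_fill() below.
 * example_netdev_from_rq() and rq_desc_encode() are hypothetical
 * stand-ins for driver-private helpers.
 */
static int example_rq_alloc_buf(struct vnic_rq *rq)
{
        struct net_device *netdev = example_netdev_from_rq(rq);
        unsigned int len = netdev->mtu + ETH_HLEN;
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        skb = netdev_alloc_skb_ip_align(netdev, len);
        if (!skb)
                return -ENOMEM;

        dma_addr = dma_map_single(netdev->dev.parent, skb->data, len,
                                  DMA_FROM_DEVICE);
        if (dma_mapping_error(netdev->dev.parent, dma_addr)) {
                dev_kfree_skb_any(skb);
                return -ENOMEM;
        }

        rq_desc_encode(vnic_rq_next_desc(rq), dma_addr, len);
        vnic_rq_post(rq, skb, vnic_rq_next_buf_index(rq), dma_addr, len);

        return 0;
}
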
170 static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
172 rq->ring.desc_avail += count;
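
vnic_rq_return_descs() is the bulk counterpart to per-descriptor accounting: it hands count descriptors back to the software-owned pool in one step, which appears intended for callers that run vnic_rq_service() below with desc_return cleared and settle the accounting themselves. A small hypothetical helper to illustrate the pairing with vnic_rq_posting_soon():

/* Hypothetical batch return after processing 'count' completions with
 * per-descriptor return disabled; also note whether the next refill
 * will write the posted_index doorbell.
 */
static void example_rq_return_batch(struct vnic_rq *rq, unsigned int count)
{
        vnic_rq_return_descs(rq, count);

        if (vnic_rq_posting_soon(rq))
                pr_debug("rq: next post will write posted_index\n");
}
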
180 static inline void vnic_rq_service(struct vnic_rq *rq,
182 int desc_return, void (*buf_service)(struct vnic_rq *rq,
189 buf = rq->to_clean;
194 (*buf_service)(rq, cq_desc, buf, skipped, opaque);
197 rq->ring.desc_avail++;
199 rq->to_clean = buf->next;
204 buf = rq->to_clean;
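
vnic_rq_service() walks the buffer list starting at to_clean, calling buf_service for every entry up to the completion reported by the CQ (with skipped set for slots the hardware passed over), optionally returning each descriptor to the available pool, and leaving to_clean at the next unprocessed buffer. Below is a sketch of a matching buf_service callback; the vnic_rq_buf fields used here (os_buf, dma_addr, len) and the device-lookup helper are assumptions:

/* Sketch of a buf_service() callback: unmap the buffer and either drop
 * it (hardware skipped the slot) or hand it to the stack.
 * example_dev_from_rq() is a hypothetical driver-private helper.
 */
static void example_rq_indicate_buf(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
{
        struct sk_buff *skb = buf->os_buf;

        dma_unmap_single(example_dev_from_rq(rq), buf->dma_addr, buf->len,
                         DMA_FROM_DEVICE);

        if (skipped) {
                dev_kfree_skb_any(skb);
                return;
        }

        /* A real driver would now read the frame length and offload
         * results out of cq_desc, skb_put() the data, and pass the skb
         * to netif_receive_skb().
         */
}

A completion handler would then call vnic_rq_service() with this callback, the cq_desc and completed index reported by the completion queue, and a nonzero desc_return so that desc_avail is bumped for each cleaned buffer, as line 197 above does.
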
208 static inline int vnic_rq_fill(struct vnic_rq *rq,
209 int (*buf_fill)(struct vnic_rq *rq))
213 while (vnic_rq_desc_avail(rq) > 1) {
215 err = (*buf_fill)(rq);
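
vnic_rq_fill() simply loops the supplied callback until the callback fails or fewer than two descriptors remain available; the > 1 guard stops while one descriptor is still software-owned rather than filling every slot. Usage is a one-liner, reusing the example_rq_alloc_buf() sketch above:

/* Refill after servicing completions; a failed allocation just leaves
 * the remaining slots empty until the next refill attempt.
 */
static void example_rq_refill(struct vnic_rq *rq)
{
        if (vnic_rq_fill(rq, example_rq_alloc_buf))
                pr_debug("rq: refill stopped early, %u descriptors available\n",
                         vnic_rq_desc_avail(rq));
}
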
223 void vnic_rq_free(struct vnic_rq *rq);
224 int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
226 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
229 unsigned int vnic_rq_error_status(struct vnic_rq *rq);
230 void vnic_rq_enable(struct vnic_rq *rq);
231 int vnic_rq_disable(struct vnic_rq *rq);
232 void vnic_rq_clean(struct vnic_rq *rq,
233 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
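
The out-of-line entry points cover the queue's lifecycle: vnic_rq_alloc() sizes the ring against a vnic_dev, vnic_rq_init() binds it to a completion queue, vnic_rq_enable() and vnic_rq_disable() start and stop the hardware, vnic_rq_clean() hands every outstanding buffer to buf_clean, and vnic_rq_free() releases the ring. A lifecycle sketch follows; the elided vnic_rq_alloc()/vnic_rq_init() arguments are filled with placeholder values, and example_rq_free_buf() is a hypothetical buf_clean callback that unmaps and frees one buffer:

/* Lifecycle sketch.  The trailing vnic_rq_alloc()/vnic_rq_init()
 * arguments are elided in the listing above; the values below are
 * placeholders only.
 */
static int example_rq_start(struct vnic_dev *vdev, struct vnic_rq *rq)
{
        int err;

        err = vnic_rq_alloc(vdev, rq, 0 /* index */,
                            256 /* desc count, placeholder */,
                            16 /* desc size, placeholder */);
        if (err)
                return err;

        vnic_rq_init(rq, 0 /* cq_index */, 1, 0 /* placeholders */);

        err = vnic_rq_fill(rq, example_rq_alloc_buf);
        if (err) {
                vnic_rq_clean(rq, example_rq_free_buf);
                vnic_rq_free(rq);
                return err;
        }

        vnic_rq_enable(rq);
        return 0;
}

static void example_rq_stop(struct vnic_rq *rq)
{
        if (vnic_rq_disable(rq))        /* returns a status; check it */
                pr_err("rq: disable failed\n");

        vnic_rq_clean(rq, example_rq_free_buf);
        vnic_rq_free(rq);
}
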