Lines matching refs: rq (drivers/net/ethernet/cisco/enic/vnic_rq.h)

105 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
108 return rq->ring.desc_avail;
111 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
114 return rq->ring.desc_count - rq->ring.desc_avail - 1;
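Two accounting accessors open the listing: desc_avail counts the descriptors software still owns, while desc_used subtracts one extra from the total. That reserved slot is the classic ring-buffer trick: keeping one descriptor permanently unposted means the posted index can never catch up to the hardware fetch index, so a full ring stays distinguishable from an empty one. A minimal self-checking model of that arithmetic (field names shortened from rq->ring; not driver code):

	#include <assert.h>

	struct ring { unsigned int desc_count, desc_avail; };

	/* Mirrors vnic_rq_desc_used(): total minus available minus the
	 * one descriptor held back as the full/empty sentinel. */
	static unsigned int desc_used(const struct ring *r)
	{
		return r->desc_count - r->desc_avail - 1;
	}

	int main(void)
	{
		struct ring r = { .desc_count = 64, .desc_avail = 63 };

		assert(desc_used(&r) == 0);	/* empty: 63 available, 0 in flight */
		r.desc_avail = 0;		/* fully posted */
		assert(desc_used(&r) == 63);	/* 63 in flight, 1 reserved */
		return 0;
	}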
117 static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
119 return rq->to_use->desc;
122 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
124 return rq->to_use->index;
127 static inline void vnic_rq_post(struct vnic_rq *rq,
132 struct vnic_rq_buf *buf = rq->to_use;
141 rq->to_use = buf;
142 rq->ring.desc_avail--;
158 iowrite32(buf->index, &rq->ctrl->posted_index);
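vnic_rq_post() hands one buffer to hardware: it takes the buffer at rq->to_use, records the caller's buffer and DMA address in it (on lines the search did not match), advances to_use along the circular buffer list, and decrements desc_avail. The posted_index doorbell write is deliberately rate-limited so a single MMIO write covers a batch of buffers. A user-space sketch of that flow; RETURN_RATE and the struct layout are assumptions standing in for the driver's internals:

	#include <stdio.h>

	#define RETURN_RATE 0xf	/* doorbell batching mask (assumed value) */

	struct buf { unsigned int index; struct buf *next; };

	struct rq_model {
		struct buf *to_use;
		unsigned int desc_avail;
		unsigned int posted_index;	/* stands in for the device register */
	};

	static void post(struct rq_model *rq)
	{
		struct buf *buf = rq->to_use;

		/* The real vnic_rq_post() also stores os_buf/dma_addr/len here. */
		rq->to_use = buf->next;
		rq->desc_avail--;

		/* Batch doorbells: only every 16th buffer hits the device. */
		if ((buf->index & RETURN_RATE) == 0)
			rq->posted_index = buf->index;	/* iowrite32() in the driver */
	}

	int main(void)
	{
		struct buf bufs[32];
		struct rq_model rq = { .to_use = &bufs[0], .desc_avail = 31 };
		unsigned int i;

		for (i = 0; i < 32; i++) {
			bufs[i].index = i;
			bufs[i].next = &bufs[(i + 1) % 32];	/* circular list */
		}
		for (i = 0; i < 20; i++)
			post(&rq);
		printf("posted_index=%u desc_avail=%u\n",
		       rq.posted_index, rq.desc_avail);	/* 16 and 11 */
		return 0;
	}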
162 static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
164 rq->ring.desc_avail += count;
172 static inline void vnic_rq_service(struct vnic_rq *rq,
174 int desc_return, void (*buf_service)(struct vnic_rq *rq,
181 buf = rq->to_clean;
186 (*buf_service)(rq, cq_desc, buf, skipped, opaque);
189 rq->ring.desc_avail++;
191 rq->to_clean = buf->next;
196 buf = rq->to_clean;
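vnic_rq_service() walks buffers from rq->to_clean, invoking the caller-supplied buf_service callback on each and returning its descriptor to the ring, until it reaches the buffer whose index matches the completion's completed_index; earlier buffers in the run are flagged as skipped. A user-space model of that loop, assuming the stop condition is exactly the index comparison visible in the fragments above:

	#include <stdio.h>

	struct buf { unsigned int index; struct buf *next; };

	struct rq_model { struct buf *to_clean; unsigned int desc_avail; };

	static void service(struct rq_model *rq, unsigned int completed_index,
			    void (*buf_service)(struct rq_model *, struct buf *, int))
	{
		struct buf *buf = rq->to_clean;

		for (;;) {
			int skipped = (buf->index != completed_index);

			buf_service(rq, buf, skipped);	/* hand the buffer back */
			rq->desc_avail++;		/* descriptor owned by SW again */
			rq->to_clean = buf->next;

			if (!skipped)			/* stop at the completed buffer */
				break;
			buf = rq->to_clean;
		}
	}

	/* Toy buf_service callback; the driver's real one builds and
	 * delivers an skb from the buffer. */
	static void print_buf(struct rq_model *rq, struct buf *buf, int skipped)
	{
		(void)rq;
		printf("buf %u %s\n", buf->index,
		       skipped ? "(skipped)" : "(completed)");
	}

	int main(void)
	{
		struct buf bufs[4];
		struct rq_model rq = { .to_clean = &bufs[0], .desc_avail = 0 };
		unsigned int i;

		for (i = 0; i < 4; i++) {
			bufs[i].index = i;
			bufs[i].next = &bufs[(i + 1) % 4];
		}
		service(&rq, 2, print_buf);	/* cleans buffers 0, 1, 2 */
		return 0;
	}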
200 static inline int vnic_rq_fill(struct vnic_rq *rq,
201 int (*buf_fill)(struct vnic_rq *rq))
205 while (vnic_rq_desc_avail(rq) > 0) {
207 err = (*buf_fill)(rq);
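vnic_rq_fill() simply keeps invoking the caller's buf_fill allocator while the ring has free descriptors, stopping early on the first error. In the driver, the callback is expected to allocate and DMA-map a receive buffer and hand it to vnic_rq_post(); the toy allocator below only mimics the descriptor bookkeeping. A hedged user-space model:

	#include <stdio.h>

	struct rq_model { unsigned int desc_avail; };

	static int fill(struct rq_model *rq, int (*buf_fill)(struct rq_model *rq))
	{
		int err;

		while (rq->desc_avail > 0) {
			err = buf_fill(rq);
			if (err)
				return err;	/* e.g. allocator out of memory */
		}
		return 0;
	}

	/* Toy allocator: pretend to post one buffer per call; in the driver,
	 * vnic_rq_post() performs this decrement. */
	static int toy_fill(struct rq_model *rq)
	{
		rq->desc_avail--;
		return 0;
	}

	int main(void)
	{
		struct rq_model rq = { .desc_avail = 4 };

		printf("fill -> %d, desc_avail=%u\n",
		       fill(&rq, toy_fill), rq.desc_avail);	/* 0 and 0 */
		return 0;
	}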
216 static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
218 spin_lock_init(&rq->bpoll_lock);
219 rq->bpoll_state = ENIC_POLL_STATE_IDLE;
222 static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
226 spin_lock(&rq->bpoll_lock);
227 if (rq->bpoll_state & ENIC_POLL_LOCKED) {
228 WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
229 rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
232 rq->bpoll_state = ENIC_POLL_STATE_NAPI;
234 spin_unlock(&rq->bpoll_lock);
239 static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
243 spin_lock(&rq->bpoll_lock);
244 WARN_ON(rq->bpoll_state &
246 if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
248 rq->bpoll_state = ENIC_POLL_STATE_IDLE;
249 spin_unlock(&rq->bpoll_lock);
254 static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
258 spin_lock_bh(&rq->bpoll_lock);
259 if (rq->bpoll_state & ENIC_POLL_LOCKED) {
260 rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
263 rq->bpoll_state |= ENIC_POLL_STATE_POLL;
265 spin_unlock_bh(&rq->bpoll_lock);
270 static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
274 spin_lock_bh(&rq->bpoll_lock);
275 WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
276 if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
278 rq->bpoll_state = ENIC_POLL_STATE_IDLE;
279 spin_unlock_bh(&rq->bpoll_lock);
284 static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
286 WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
287 return rq->bpoll_state & ENIC_POLL_USER_PEND;
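The enic_poll_* helpers above implement a small busy-poll ownership state machine over rq->bpoll_state, guarded by rq->bpoll_lock: NAPI context and socket busy-poll context contend for the ring, the loser records a *_YIELD flag, and the unlock paths report whether anyone yielded so the owner can reschedule the other side. A user-space model of those transitions; the flag values are assumptions that mirror the bit tests visible above, and the spinlock is elided since this model is single-threaded:

	#include <assert.h>
	#include <stdbool.h>

	enum {
		POLL_STATE_IDLE       = 0,
		POLL_STATE_NAPI       = 1 << 0,	/* NAPI context owns the ring */
		POLL_STATE_POLL       = 1 << 1,	/* busy-poll context owns the ring */
		POLL_STATE_NAPI_YIELD = 1 << 2,	/* NAPI was turned away */
		POLL_STATE_POLL_YIELD = 1 << 3,	/* busy-poll was turned away */
	};
	#define POLL_LOCKED (POLL_STATE_NAPI | POLL_STATE_POLL)

	/* Models enic_poll_lock_napi(): take ownership or leave a yield mark. */
	static bool lock_napi(unsigned int *state)
	{
		if (*state & POLL_LOCKED) {
			*state |= POLL_STATE_NAPI_YIELD;
			return false;
		}
		*state = POLL_STATE_NAPI;
		return true;
	}

	/* Models enic_poll_lock_poll(): same dance from the busy-poll side. */
	static bool lock_poll(unsigned int *state)
	{
		if (*state & POLL_LOCKED) {
			*state |= POLL_STATE_POLL_YIELD;
			return false;
		}
		*state |= POLL_STATE_POLL;
		return true;
	}

	/* Models enic_poll_unlock_napi(): drop to IDLE, reporting contention. */
	static bool unlock_napi(unsigned int *state)
	{
		bool contended = *state & POLL_STATE_POLL_YIELD;

		*state = POLL_STATE_IDLE;
		return contended;
	}

	int main(void)
	{
		unsigned int state = POLL_STATE_IDLE;

		assert(lock_napi(&state));		/* NAPI wins the idle ring */
		assert(!lock_poll(&state));		/* busy-poll must back off */
		assert(state & POLL_STATE_POLL_YIELD);	/* and leaves its mark */
		assert(unlock_napi(&state));		/* owner learns of the yield */
		assert(state == POLL_STATE_IDLE);
		return 0;
	}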
292 static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
296 static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
301 static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
306 static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
311 static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
316 static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
322 void vnic_rq_free(struct vnic_rq *rq);
323 int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
325 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
328 unsigned int vnic_rq_error_status(struct vnic_rq *rq);
329 void vnic_rq_enable(struct vnic_rq *rq);
330 int vnic_rq_disable(struct vnic_rq *rq);
331 void vnic_rq_clean(struct vnic_rq *rq,
332 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
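Taken together, the declarations at the end imply the receive-queue lifecycle: allocate the ring, bind it to a completion queue, pre-fill it with buffers, and enable it; teardown disables the queue, drains posted buffers through a buf_clean callback, and frees the ring. A hedged sketch of the bring-up half as it might appear in a driver; the example_* names are hypothetical, and the trailing vnic_rq_init() arguments follow the upstream error-interrupt enable/offset signature:

	/* Hypothetical buf_fill callback; a real driver supplies its own
	 * allocator that builds an skb, maps it for DMA, and calls
	 * vnic_rq_post(). */
	static int example_rq_alloc_buf(struct vnic_rq *rq);

	static int example_rq_start(struct vnic_dev *vdev, struct vnic_rq *rq,
				    unsigned int index, unsigned int desc_count,
				    unsigned int desc_size, unsigned int cq_index)
	{
		int err;

		err = vnic_rq_alloc(vdev, rq, index, desc_count, desc_size);
		if (err)
			return err;

		/* Bind the RQ to its completion queue and arm error interrupts. */
		vnic_rq_init(rq, cq_index, 1, 0);

		/* Pre-post receive buffers, then let the NIC start fetching. */
		err = vnic_rq_fill(rq, example_rq_alloc_buf);
		if (err) {
			vnic_rq_free(rq);
			return err;
		}

		vnic_rq_enable(rq);
		return 0;
	}

Teardown reverses the order: vnic_rq_disable() returns an error if the queue fails to stop, vnic_rq_clean() hands every still-posted buffer to the supplied buf_clean callback for unmapping and freeing, and vnic_rq_free() releases the ring memory.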