Searched refs:rq (Results 26 - 50 of 315) sorted by relevance

/drivers/scsi/device_handler/
scsi_dh_emc.c
273 struct request *rq; local
276 rq = blk_get_request(sdev->request_queue,
278 if (IS_ERR(rq)) {
283 blk_rq_set_block_pc(rq);
284 rq->cmd_len = COMMAND_SIZE(cmd);
285 rq->cmd[0] = cmd;
290 rq->cmd[1] = 0x10;
291 rq->cmd[4] = len;
295 rq->cmd[1] = 0x10;
296 rq
324 struct request *rq = get_req(sdev, INQUIRY, csdev->buffer); local
357 struct request *rq; local
[all...]
scsi_dh_alua.c
113 struct request *rq; local
116 rq = blk_get_request(q, rw, GFP_NOIO);
118 if (IS_ERR(rq)) {
123 blk_rq_set_block_pc(rq);
125 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
126 blk_put_request(rq);
132 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
134 rq->retries = ALUA_FAILOVER_RETRIES;
135 rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ;
137 return rq;
146 struct request *rq; local
184 struct request *rq; local
287 struct request *rq; local
[all...]
scsi_dh_rdac.c
272 struct request *rq; local
275 rq = blk_get_request(q, rw, GFP_NOIO);
277 if (IS_ERR(rq)) {
282 blk_rq_set_block_pc(rq);
284 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
285 blk_put_request(rq);
291 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
293 rq->retries = RDAC_RETRIES;
294 rq->timeout = RDAC_TIMEOUT;
296 return rq;
302 struct request *rq; local
409 struct request *rq; local
591 struct request *rq; local
[all...]
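All three device-handler hits above (scsi_dh_emc, scsi_dh_alua, scsi_dh_rdac) build their SCSI passthrough requests the same pre-blk-mq way: allocate with blk_get_request(), mark the request as a block-PC command, optionally map a kernel buffer, then set fail-fast flags, retry count and timeout. A condensed sketch of that pattern, reconstructed from the fragments above; EXAMPLE_RETRIES and EXAMPLE_TIMEOUT stand in for the per-driver constants, and the error paths elided by the search output stay elided:

/* Sketch of the request-setup pattern visible in scsi_dh_alua.c and
 * scsi_dh_rdac.c above (legacy request_fn API, not current blk-mq). */
static struct request *example_get_req(struct request_queue *q, int rw,
				       void *buffer, unsigned int buflen)
{
	struct request *rq;

	rq = blk_get_request(q, rw, GFP_NOIO);
	if (IS_ERR(rq))
		return NULL;

	blk_rq_set_block_pc(rq);	/* passthrough command, not REQ_TYPE_FS */

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		return NULL;
	}

	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = EXAMPLE_RETRIES;		/* e.g. ALUA_FAILOVER_RETRIES */
	rq->timeout = EXAMPLE_TIMEOUT * HZ;	/* e.g. ALUA_FAILOVER_TIMEOUT */

	return rq;
}

The caller then fills rq->cmd/rq->cmd_len with the CDB, as the scsi_dh_emc.c fragment shows, and drops the request with blk_put_request() when the command completes.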
/drivers/infiniband/hw/cxgb4/
t4.h
333 struct t4_rq rq; member in struct:t4_wq
342 return wq->rq.in_use;
347 return wq->rq.in_use == 0;
352 return wq->rq.in_use == (wq->rq.size - 1);
357 return wq->rq.size - 1 - wq->rq.in_use;
362 wq->rq.in_use++;
363 if (++wq->rq.pidx == wq->rq
[all...]
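The t4.h hits are the receive-queue occupancy helpers of the cxgb4 work queue: the ring keeps an in_use count and a producer index pidx, reserves one slot so that "full" means in_use == size - 1, and wraps pidx at the end of the ring. A minimal self-contained model of that bookkeeping (names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Toy model of the t4_rq accounting above: one slot stays free so
 * that a full ring can be told apart from an empty one. */
struct toy_rq {
	uint16_t size;		/* total slots in the ring */
	uint16_t in_use;	/* slots currently posted */
	uint16_t pidx;		/* producer index, wraps at size */
};

static bool toy_rq_empty(const struct toy_rq *rq)
{
	return rq->in_use == 0;
}

static bool toy_rq_full(const struct toy_rq *rq)
{
	return rq->in_use == rq->size - 1;
}

static uint16_t toy_rq_avail(const struct toy_rq *rq)
{
	return rq->size - 1 - rq->in_use;
}

/* Post one entry: bump the in-use count and advance the producer
 * index, wrapping back to slot 0 at the end of the ring. */
static void toy_rq_produce(struct toy_rq *rq)
{
	rq->in_use++;
	if (++rq->pidx == rq->size)
		rq->pidx = 0;
}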
qp.c
157 wq->rq.memsize, wq->rq.queue,
158 dma_unmap_addr(&wq->rq, mapping));
160 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
161 kfree(wq->rq.sw_rq);
163 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
185 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
186 if (!wq->rq.qid) {
199 wq->rq
[all...]
/drivers/usb/misc/
uss720.c
95 struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count); local
96 struct parport_uss720_private *priv = rq->priv;
99 if (likely(rq->urb))
100 usb_free_urb(rq->urb);
101 kfree(rq->dr);
103 list_del_init(&rq->asynclist);
105 kfree(rq);
113 struct uss720_async_request *rq; local
118 rq = urb->context;
119 priv = rq
147 struct uss720_async_request *rq; local
200 struct uss720_async_request *rq; local
218 struct uss720_async_request *rq; local
254 struct uss720_async_request *rq; local
[all...]
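The uss720.c hits show a kref-managed asynchronous request: the release callback recovers the outer structure with container_of(), frees the URB and the control-request buffer, unlinks the entry from the driver's async list, and frees the request itself. A hedged reconstruction of that release path from the fragments (the locking the driver takes around the list manipulation is not visible in the search output and is elided here):

/* Sketch of the kref release seen in uss720.c: runs when the last
 * kref_put() drops rq->ref_count to zero. */
static void example_async_release(struct kref *kref)
{
	struct uss720_async_request *rq =
		container_of(kref, struct uss720_async_request, ref_count);

	if (likely(rq->urb))
		usb_free_urb(rq->urb);		/* drop our URB reference */
	kfree(rq->dr);				/* the usb_ctrlrequest setup packet */
	list_del_init(&rq->asynclist);		/* unlink from the async list */
	kfree(rq);
}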
/drivers/scsi/esas2r/
esas2r_flash.c
134 struct esas2r_request *rq)
136 struct atto_vda_flash_req *vrq = &rq->vrq->flash;
138 (struct esas2r_flash_context *)rq->interrupt_cx;
140 if (rq->req_stat == RS_SUCCESS) {
148 rq->req_stat = RS_PENDING;
154 rq->req_stat = RS_PENDING;
155 rq->interrupt_cb = fc->interrupt_cb;
163 if (rq->req_stat != RS_PENDING)
169 (*fc->interrupt_cb)(a, rq);
177 struct esas2r_request *rq)
133 esas2r_fmapi_callback(struct esas2r_adapter *a, struct esas2r_request *rq) argument
176 build_flash_msg(struct esas2r_adapter *a, struct esas2r_request *rq) argument
227 load_image(struct esas2r_adapter *a, struct esas2r_request *rq) argument
302 complete_fmapi_req(struct esas2r_adapter *a, struct esas2r_request *rq, u8 fi_stat) argument
323 fw_download_proc(struct esas2r_adapter *a, struct esas2r_request *rq) argument
827 esas2r_complete_fs_ioctl(struct esas2r_adapter *a, struct esas2r_request *rq) argument
845 esas2r_process_fs_ioctl(struct esas2r_adapter *a, struct esas2r_ioctl_fs *fs, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
1211 esas2r_nvram_callback(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1257 esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sas_nvram *nvram) argument
1389 esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
[all...]
esas2r_main.c
145 struct esas2r_request *rq; local
148 rq = esas2r_alloc_request(a);
149 if (rq == NULL)
152 if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
155 esas2r_free_request(a, rq);
885 struct esas2r_request *rq; local
898 rq = esas2r_alloc_request(a);
899 if (unlikely(rq == NULL)) {
904 rq->cmd = cmd;
909 rq
952 complete_task_management_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
976 struct esas2r_request *rq; local
1175 struct esas2r_request *rq; local
1316 esas2r_log_request_failure(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1354 esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1602 esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1614 struct esas2r_request *rq; local
1634 esas2r_complete_request_cb(struct esas2r_adapter *a, struct esas2r_request *rq) argument
[all...]
esas2r.h
406 struct esas2r_request *rq);
967 int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
1010 bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
1015 struct esas2r_request *rq);
1021 void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1028 void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1042 struct esas2r_request *rq,
1048 struct esas2r_request *rq,
1054 void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
1056 struct esas2r_request *rq,
1173 esas2r_sgc_init(struct esas2r_sg_context *sgc, struct esas2r_adapter *a, struct esas2r_request *rq, struct atto_vda_sge *first) argument
1206 esas2r_rq_init_request(struct esas2r_request *rq, struct esas2r_adapter *a) argument
1269 esas2r_rq_free_sg_lists(struct esas2r_request *rq, struct esas2r_adapter *a) argument
1282 esas2r_rq_destroy_request(struct esas2r_request *rq, struct esas2r_adapter *a) argument
1308 esas2r_build_sg_list(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sg_context *sgc) argument
1398 esas2r_start_ae_request(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1413 struct esas2r_request *rq; local
[all...]
esas2r_init.c
104 struct esas2r_request *rq)
126 rq->vrq_md = memdesc;
127 rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
128 rq->vrq->scsi.handle = a->num_vrqs;
839 struct esas2r_request *rq; local
989 for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
991 INIT_LIST_HEAD(&rq->req_list);
992 if (!alloc_vda_req(a, rq)) {
998 esas2r_rq_init_request(rq,
103 alloc_vda_req(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1217 esas2r_format_init_msg(struct esas2r_adapter *a, struct esas2r_request *rq) argument
1318 struct esas2r_request *rq = &a->general_req; local
1359 struct esas2r_request *rq; local
[all...]
/drivers/gpu/drm/nouveau/core/subdev/i2c/
nve0.c
28 nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) argument
32 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
35 if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
/drivers/block/
osdblk.c
97 struct request *rq; /* blk layer request */ member in struct:osdblk_request
106 struct gendisk *disk; /* blkdev's gendisk and rq */
249 __blk_end_request_all(orq->rq, ret);
300 struct request *rq; local
307 rq = blk_fetch_request(q);
308 if (!rq)
312 if (rq->cmd_type != REQ_TYPE_FS) {
313 blk_end_request_all(rq, 0);
324 do_flush = rq->cmd_flags & REQ_FLUSH;
325 do_write = (rq_data_dir(rq)
[all...]
null_blk.c
16 struct request *rq; member in struct:nullb_cmd
82 MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
180 blk_mq_end_request(cmd->rq, 0);
183 INIT_LIST_HEAD(&cmd->rq->queuelist);
184 blk_end_request_all(cmd->rq, 0);
228 static void null_softirq_done_fn(struct request *rq) argument
231 end_cmd(blk_mq_rq_to_pdu(rq));
233 end_cmd(rq->special);
243 blk_mq_complete_request(cmd->rq);
246 blk_complete_request(cmd->rq);
305 struct request *rq; local
316 null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq, bool last) argument
[all...]
/drivers/s390/block/
scm_blk.c
177 blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
185 blk_requeue_request(bdev->rq, scmrq->request);
201 static void scm_blk_request(struct request_queue *rq) argument
203 struct scm_device *scmdev = rq->queuedata;
209 while ((req = blk_peek_request(rq))) {
347 blk_run_queue(bdev->rq);
356 struct request_queue *rq; local
377 rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
378 if (!rq)
381 bdev->rq
[all...]
/drivers/scsi/
hpsa.h
397 struct reply_queue_buffer *rq = &h->reply_queue[q]; local
413 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
414 register_value = rq->head[rq->current_entry];
415 rq->current_entry++;
423 if (rq->current_entry == h->max_commands) {
424 rq->current_entry = 0;
425 rq
512 struct reply_queue_buffer *rq = &h->reply_queue[q]; local
[all...]
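The hpsa.h hits are the controller's reply (completion) queue consumer: each entry carries a phase bit in its least significant bit, the host compares it with its wraparound expectation to tell fresh completions from stale ones, and it flips that expectation every time current_entry wraps past the ring size. A small self-contained model of one consume step (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct toy_reply_queue {
	uint64_t *head;			/* the completion ring */
	unsigned int current_entry;	/* next slot to inspect */
	unsigned int max_entries;	/* ring size */
	uint8_t wraparound;		/* phase bit expected this pass */
};

/* Consume one entry if its phase bit matches the expected wraparound
 * value; flip the expectation when the consumer wraps the ring. */
static bool toy_consume_reply(struct toy_reply_queue *rq, uint64_t *out)
{
	if ((rq->head[rq->current_entry] & 1) != rq->wraparound)
		return false;		/* nothing new in this slot yet */

	*out = rq->head[rq->current_entry];
	if (++rq->current_entry == rq->max_entries) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;	/* next pass expects the other phase */
	}
	return true;
}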
/drivers/mtd/
mtd_blkdevs.c
47 blk_cleanup_queue(dev->rq);
133 struct request_queue *rq = dev->rq; local
137 spin_lock_irq(rq->queue_lock);
143 if (!req && !(req = blk_fetch_request(rq))) {
145 spin_unlock_irq(rq->queue_lock);
149 spin_lock_irq(rq->queue_lock);
160 spin_unlock_irq(rq->queue_lock);
166 spin_lock_irq(rq->queue_lock);
177 spin_unlock_irq(rq
180 mtd_blktrans_request(struct request_queue *rq) argument
[all...]
/drivers/mtd/ubi/
block.c
84 struct request_queue *rq; member in struct:ubiblock
267 struct request_queue *rq = dev->rq; local
271 spin_lock_irq(rq->queue_lock);
273 req = blk_fetch_request(rq);
276 spin_unlock_irq(rq->queue_lock);
278 spin_lock_irq(rq->queue_lock);
285 req = blk_fetch_request(rq);
288 spin_unlock_irq(rq->queue_lock);
291 static void ubiblock_request(struct request_queue *rq) argument
[all...]
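Both mtd_blkdevs.c and ubi/block.c above run the classic single-queue service loop: the worker takes rq->queue_lock, pulls requests with blk_fetch_request(), drops the lock around the actual transfer, and completes each request under the lock again. A hedged sketch of that loop reduced from the fragments; example_do_transfer() is a placeholder for the real MTD/UBI I/O:

/* Sketch of the pre-blk-mq service loop seen in mtd_blkdevs.c and
 * ubi/block.c: runs from a worker, not directly as the request_fn. */
static void example_do_requests(struct request_queue *rq)
{
	struct request *req;

	spin_lock_irq(rq->queue_lock);

	while ((req = blk_fetch_request(rq)) != NULL) {
		int err;

		spin_unlock_irq(rq->queue_lock);
		err = example_do_transfer(req);		/* placeholder */
		spin_lock_irq(rq->queue_lock);

		__blk_end_request_all(req, err);	/* lock-held completion */
	}

	spin_unlock_irq(rq->queue_lock);
}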
/drivers/ide/
ide-ioctls.c
126 struct request *rq; local
128 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
129 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
130 err = blk_execute_rq(drive->queue, NULL, rq, 0);
131 blk_put_request(rq);
221 struct request *rq; local
224 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
225 rq->cmd_type = REQ_TYPE_SPECIAL;
226 rq->cmd_len = 1;
227 rq
[all...]
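The ide-ioctls.c hits show the legacy IDE layer issuing a synchronous command through the block layer: allocate a request, tag its cmd_type so the IDE core knows how to interpret it, run it with blk_execute_rq(), and release it. Condensed from the fragments above (the taskfile payload setup and error handling are omitted, as they are in the search output):

/* Sketch of the synchronous taskfile path seen in ide-ioctls.c. */
static int example_issue_taskfile(ide_drive_t *drive)
{
	struct request *rq;
	int err;

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;	/* routed to the taskfile handler */
	/* ... attach the taskfile data to the request here ... */
	err = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	return err;
}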
ide-tape.c
272 struct request *rq = drive->hwif->rq; local
273 u8 *sense = bio_data(rq->bio);
281 rq->cmd[0], tape->sense_key, tape->asc, tape->ascq);
285 rq->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
319 (blk_rq_bytes(rq) - rq->resid_len))
330 struct request *rq = drive->hwif->rq; local
334 ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, dsc: %d, err: %d", rq
444 struct request *rq = drive->hwif->rq; local
543 ide_tape_create_rw_cmd(idetape_tape_t *tape, struct ide_atapi_pc *pc, struct request *rq, u8 opcode) argument
566 idetape_do_request(ide_drive_t *drive, struct request *rq, sector_t block) argument
847 struct request *rq; local
[all...]
/drivers/isdn/mISDN/
stack.c
429 struct channel_req rq; local
445 rq.protocol = protocol;
446 rq.adr.channel = adr->channel;
447 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
466 struct channel_req rq, rq2; local
478 rq.protocol = protocol;
479 rq.adr = *adr;
480 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
483 ch->recv = rq.ch->send;
484 ch->peer = rq
523 struct channel_req rq; local
[all...]
/drivers/net/vmxnet3/
vmxnet3_drv.c
202 "%s: rq[%d] error 0x%x\n",
561 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, argument
565 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
566 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
582 rq->stats.rx_buf_alloc_failure++;
601 rq->stats.rx_buf_alloc_failure++;
1126 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, argument
1129 rq->stats.drop_err++;
1131 rq->stats.drop_fcs++;
1133 rq
1153 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter, int quota) argument
1350 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
1399 vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
1443 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
1514 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) argument
1624 struct vmxnet3_rx_queue *rq = container_of(napi, local
1688 struct vmxnet3_rx_queue *rq = data; local
1879 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
2181 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
2487 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0]; local
2554 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; local
[all...]
/drivers/staging/octeon/
ethernet-mdio.h
43 int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/drivers/infiniband/hw/mthca/
mthca_qp.c
210 return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
213 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
499 qp_attr->cap.max_recv_wr = qp->rq.max;
501 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
598 if (qp->rq.max)
599 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
600 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
761 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);
827 mthca_wq_reset(&qp->rq);
[all...]
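The mthca_qp.c hits compute the address of receive WQE n: entries are a power-of-two size, so the byte offset is n << wqe_shift; for a queue that is not physically contiguous, that offset is split into a page index and an offset within the page. A self-contained model of the arithmetic (names are illustrative):

#include <stddef.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT	12
#define TOY_PAGE_SIZE	(1UL << TOY_PAGE_SHIFT)

/* Entry n of a contiguous queue whose WQEs are (1 << wqe_shift) bytes. */
static void *toy_get_wqe_direct(void *buf, unsigned int n, unsigned int wqe_shift)
{
	return (uint8_t *)buf + ((size_t)n << wqe_shift);
}

/* Same entry when the queue is built from a page list: split the byte
 * offset into a page index and an offset inside that page. */
static void *toy_get_wqe_paged(void **page_list, unsigned int n, unsigned int wqe_shift)
{
	size_t off = (size_t)n << wqe_shift;

	return (uint8_t *)page_list[off >> TOY_PAGE_SHIFT] +
	       (off & (TOY_PAGE_SIZE - 1));
}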
/drivers/net/ethernet/cisco/enic/
enic.h
106 * @rq_id: desired rq index
174 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; member in struct:enic
197 static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) argument
199 return rq;
223 unsigned int rq)
225 return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
222 enic_msix_rq_intr(struct enic *enic, unsigned int rq) argument
/drivers/net/
ifb.c
46 struct sk_buff_head rq; member in struct:ifb_private
73 skb_queue_splice_tail_init(&dp->rq, &dp->tq);
115 if ((skb = skb_peek(&dp->rq)) == NULL) {
209 if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
213 __skb_queue_tail(&dp->rq, skb);
228 __skb_queue_purge(&dp->rq);
238 __skb_queue_head_init(&dp->rq);
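The ifb.c hits use rq as an sk_buff_head backlog: the transmit path appends skbs with __skb_queue_tail() and drops them once the queue reaches tx_queue_len, the tasklet drains the whole backlog in one go with skb_queue_splice_tail_init(), and teardown purges it. A hedged sketch of the enqueue step; the private-struct and tasklet names are hypothetical, and synchronization with the draining tasklet is assumed to be handled as in the original driver:

/* Sketch of the ifb-style enqueue visible above; struct example_private
 * and its tasklet field are stand-ins, not the driver's names. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_private *dp = netdev_priv(dev);

	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
		dev->stats.tx_dropped++;	/* backlog full: drop the packet */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	__skb_queue_tail(&dp->rq, skb);
	tasklet_schedule(&dp->example_tasklet);	/* kick the drain tasklet */
	return NETDEV_TX_OK;
}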

Completed in 1232 milliseconds