/drivers/ptp/ptp_chardev.c
    31   struct ptp_clock_request rq;    (local)
    34   memset(&rq, 0, sizeof(rq));
    40   rq.type = PTP_CLK_REQ_EXTTS;
    41   rq.extts.index = chan;
    42   err = ops->enable(ops, &rq, 0);
    45   rq.type = PTP_CLK_REQ_PEROUT;
    46   rq.perout.index = chan;
    47   err = ops->enable(ops, &rq, 0);

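The ptp_chardev.c lines above show the PTP character-device layer zeroing a struct ptp_clock_request and handing it to the driver's ->enable() callback with on == 0 to shut a channel down. A minimal sketch of that pattern follows; the function name disable_ptp_channel and the ops/chan parameters are placeholders for illustration, not names taken from the excerpt.

    #include <linux/ptp_clock_kernel.h>
    #include <linux/string.h>

    /* Sketch: disable the external-timestamp and periodic-output
     * functions on one channel, mirroring the ptp_chardev.c excerpt. */
    static int disable_ptp_channel(struct ptp_clock_info *ops, unsigned int chan)
    {
            struct ptp_clock_request rq;
            int err;

            memset(&rq, 0, sizeof(rq));

            rq.type = PTP_CLK_REQ_EXTTS;            /* external timestamp channel */
            rq.extts.index = chan;
            err = ops->enable(ops, &rq, 0);         /* on == 0: disable */
            if (err)
                    return err;

            rq.type = PTP_CLK_REQ_PEROUT;           /* periodic output channel */
            rq.perout.index = chan;
            return ops->enable(ops, &rq, 0);
    }
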
/drivers/block/aoe/aoedev.c
   163   struct request *rq;    (local)
   169   rq = d->ip.rq;
   170   if (rq == NULL)
   175   n = (unsigned long) rq->special;
   176   rq->special = (void *) --n;
   178   if ((unsigned long) rq->special == 0)
   179   aoe_end_request(d, rq, 0);
   201   struct request *rq;    (local)
   229   while ((rq ...
         [more matches not shown]

/drivers/block/aoe/aoecmd.c
   896   bufinit(struct buf *buf, struct request *rq, struct bio *bio)    (argument)
   899   buf->rq = rq;
   908   struct request *rq;    (local)
   918   rq = d->ip.rq;
   919   if (rq == NULL) {
   920   rq = blk_peek_request(q);
   921   if (rq == NULL)
   923   blk_start_request(rq);
  1102   aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)    (argument)
  1124   struct request *rq;    (local)
         [more matches not shown]

/drivers/s390/block/scm_blk.h
    18   struct request_queue *rq;    (member in struct scm_blk_dev)
    48   #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)

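scm_blk.h hands callers the data[] area of an aob_rq_header and later recovers the enclosing header with container_of(), as line 48 shows. Below is a minimal self-contained sketch of that recovery idiom; the struct layout here is a simplified stand-in for illustration, not the real s390 definition.

    #include <linux/kernel.h>       /* container_of() */

    /* Simplified stand-in for the s390 header; the real layout differs. */
    struct aob_rq_header {
            void *owner;
            char data[];            /* per-request payload handed out to users */
    };

    #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)

    /* Given a pointer that was handed out as ->data, get the header back. */
    static struct aob_rq_header *header_from_data(void *request_data)
    {
            return to_aobrq(request_data);
    }
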
/drivers/infiniband/hw/ipath/ipath_ud.c
    56   struct ipath_rq *rq;    (local)
   110   rq = &srq->rq;
   114   rq = &qp->r_rq;
   119   * Note that it is safe to drop the lock after changing rq->tail
   122   spin_lock_irqsave(&rq->lock, flags);
   123   wq = rq->wq;
   126   if (tail >= rq->size)
   129   spin_unlock_irqrestore(&rq->lock, flags);
   133   wqe = get_rwqe_ptr(rq, tai ...
         [more matches not shown]

/drivers/infiniband/hw/ipath/ipath_ruc.c
   169   struct ipath_rq *rq;    (local)
   180   rq = &srq->rq;
   184   rq = &qp->r_rq;
   187   spin_lock_irqsave(&rq->lock, flags);
   193   wq = rq->wq;
   196   if (tail >= rq->size)
   205   wqe = get_rwqe_ptr(rq, tail);
   206   if (++tail >= rq->size)
   225   if (n >= rq ...
         [more matches not shown]

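Both ipath files consume the receive queue (rq) the same way: take rq->lock, pick up the shared work-queue page, validate and wrap the tail index against rq->size, fetch the WQE, then advance the tail. A minimal sketch of that ring-consume step follows, using simplified stand-in types; the real ipath_rq/ipath_rwq layouts and get_rwqe_ptr() live in the ipath headers.

    #include <linux/types.h>
    #include <linux/spinlock.h>

    /* Simplified stand-ins for the ipath receive-queue types. */
    struct rwqe { u32 wr_id; };

    struct rwq {
            u32 head;               /* advanced by the producer */
            u32 tail;               /* advanced by the consumer */
            struct rwqe wq[];       /* ring of receive WQEs */
    };

    struct rq {
            spinlock_t lock;
            struct rwq *wq;
            u32 size;               /* number of entries in the ring */
    };

    /* Pop the next receive WQE, or return NULL if the ring is empty. */
    static struct rwqe *get_next_rwqe(struct rq *rq)
    {
            struct rwqe *wqe = NULL;
            unsigned long flags;
            struct rwq *wq;
            u32 tail;

            spin_lock_irqsave(&rq->lock, flags);
            wq = rq->wq;
            tail = wq->tail;
            if (tail >= rq->size)           /* validate/wrap a stale index */
                    tail = 0;
            if (wq->head != tail) {
                    wqe = &wq->wq[tail];    /* the real code uses get_rwqe_ptr() */
                    if (++tail >= rq->size)
                            tail = 0;
                    wq->tail = tail;
            }
            spin_unlock_irqrestore(&rq->lock, flags);
            return wqe;
    }
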
/drivers/ide/ide-disk.c
    81   static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,    (argument)
    85   u16 nsectors = (u16)blk_rq_sectors(rq);
    93   if (block + blk_rq_sectors(rq) > 1ULL << 28)
   151   if (rq_data_dir(rq))
   155   cmd.rq = rq;
   181   static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,    (argument)
   187   BUG_ON(rq->cmd_type != REQ_TYPE_FS);
   192   drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
   193   (unsigned long long)block, blk_rq_sectors(rq));
   429   idedisk_prep_fn(struct request_queue *q, struct request *rq)    (argument)
   471   struct request *rq;    (local)
         [more matches not shown]

/drivers/ide/ide-lib.c
    94   struct request *rq = drive->hwif->rq;    (local)
    98   if (rq)
   100   (unsigned long long)blk_rq_pos(rq));

/drivers/ide/ide-taskfile.c
   187   struct request *rq = hwif->rq;    (local)
   189   if (blk_pm_request(rq))
   190   ide_complete_pm_rq(drive, rq);
   289   cmd->rq->errors = 0;
   326   struct request *rq = drive->hwif->rq;    (local)
   331   rq->errors = err;
   338   ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
   396   ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << ...
   429   struct request *rq;    (local)
         [more matches not shown]

/drivers/ide/ide-cd_ioctl.c
   303   struct request *rq;    (local)
   306   rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
   307   rq->cmd_type = REQ_TYPE_SPECIAL;
   308   rq->cmd_flags = REQ_QUIET;
   309   ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
   310   blk_put_request(rq);

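ide-cd_ioctl.c uses the old (pre-blk-mq) pattern for pushing a driver-private request through the block layer: allocate it with blk_get_request(), mark it REQ_TYPE_SPECIAL, execute it synchronously, then drop the reference. A minimal sketch of that sequence on a kernel of this era; send_special_request() is an invented wrapper around the calls shown in the excerpt.

    #include <linux/blkdev.h>
    #include <linux/gfp.h>

    /* Send an empty "special" request through the queue and wait for it.
     * Sketch of the legacy single-queue API shown in ide-cd_ioctl.c. */
    static int send_special_request(struct request_queue *q, struct gendisk *disk)
    {
            struct request *rq;
            int ret;

            rq = blk_get_request(q, READ, __GFP_WAIT);
            rq->cmd_type = REQ_TYPE_SPECIAL;        /* driver-private request */
            rq->cmd_flags = REQ_QUIET;              /* don't log failures */

            /* at_head == 0: queue at the tail; blocks until completion. */
            ret = blk_execute_rq(q, disk, rq, 0);

            blk_put_request(rq);
            return ret;
    }
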
/drivers/net/ethernet/cisco/enic/enic_clsf.h
     9   int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);

/drivers/net/ethernet/cisco/enic/enic_main.c
   193   error_status = vnic_rq_error_status(&enic->rq[i]);
   919   static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)    (argument)
   921   struct enic *enic = vnic_dev_priv(rq->vdev);
   932   static int enic_rq_alloc_buf(struct vnic_rq *rq)    (argument)
   934   struct enic *enic = vnic_dev_priv(rq->vdev);
   940   struct vnic_rq_buf *buf = rq->to_use;
   943   enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
   955   enic_queue_rq_desc(rq, skb, os_buf_index,
   989   static void enic_rq_indicate_buf(struct vnic_rq *rq,    (argument)
   993   struct enic *enic = vnic_dev_priv(rq ...
  1167   enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)    (argument)
  1179   enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)    (argument)
  1268   unsigned int rq = (napi - &enic->napi[0]);    (local)
  1324   unsigned int rq = (napi - &enic->napi[0]);    (local)
         [more matches not shown]

/drivers/net/ethernet/cisco/enic/enic_res.h
   122   static inline void enic_queue_rq_desc(struct vnic_rq *rq,    (argument)
   126   struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
   135   vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);

/drivers/staging/rtl8188eu/include/osdep_intf.h
    38   int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

/drivers/block/xen-blkfront.c
   120   struct request_queue *rq;    (member in struct blkfront_info)
   597   static void do_blkif_request(struct request_queue *rq)    (argument)
   607   while ((req = blk_peek_request(rq)) != NULL) {
   627   blk_requeue_request(rq, req);
   630   blk_stop_queue(rq);
   645   struct request_queue *rq;    (local)
   648   rq = blk_init_queue(do_blkif_request, &info->io_lock);
   649   if (rq == NULL)
   652   queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
   655   queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
  1163   struct request_queue *rq = info->rq;    (local)
         [more matches not shown]

/drivers/block/sx8.c
   262   struct request *rq;    (member in struct carm_request)
   554   struct request *rq;    (local)
   570   rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
   571   if (IS_ERR(rq)) {
   578   crq->rq = rq;
   623   crq->rq->cmd_type = REQ_TYPE_SPECIAL;
   624   crq->rq->special = crq;
   625   blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
   664   crq->rq ...
   812   struct request *rq;    (local)
   843   struct request *rq;    (local)
         [more matches not shown]

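xen-blkfront's do_blkif_request() (and the aoecmd.c excerpt earlier) follow the classic single-queue request_fn loop: peek the next request, start it, and on a temporary resource shortage requeue it and stop the queue until the completion path restarts it. A compact sketch of that loop; example_request_fn() and try_to_issue() are invented stand-ins for the driver-specific parts.

    #include <linux/blkdev.h>

    /* Hypothetical driver hook: returns 0 on success, nonzero when the
     * hardware ring is temporarily full. */
    static int try_to_issue(struct request *req)
    {
            /* Driver-specific submission would go here. */
            return 0;
    }

    /* Legacy request_fn: called by the block layer with the queue lock held. */
    static void example_request_fn(struct request_queue *q)
    {
            struct request *req;

            while ((req = blk_peek_request(q)) != NULL) {
                    blk_start_request(req);         /* dequeue + start timeout */

                    if (try_to_issue(req)) {
                            /* Out of resources: put the request back and pause
                             * the queue until blk_start_queue() is called. */
                            blk_requeue_request(q, req);
                            blk_stop_queue(q);
                            break;
                    }
            }
    }
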
/drivers/md/dm.c
    99   union map_info *dm_get_rq_mapinfo(struct request *rq)    (argument)
   101   if (rq && rq->end_io_data)
   102   return &((struct dm_rq_target_io *)rq->end_io_data)->info;
   973   * So the completing bio should always be rq->bio.
  1032   struct request *rq = tio->orig;    (local)
  1034   if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
  1035   rq->errors = clone->errors;
  1036   rq->resid_len = clone->resid_len;
  1038   if (rq ...
  1052   dm_unprep_request(struct request *rq)    (argument)
  1070   struct request *rq = tio->orig;    (local)
  1148   dm_softirq_done(struct request *rq)    (argument)
  1167   struct request *rq = tio->orig;    (local)
  1183   struct request *rq = tio->orig;    (local)
  1693   dm_dispatch_request(struct request *rq)    (argument)
  1721   setup_clone(struct request *clone, struct request *rq, struct dm_rq_target_io *tio)    (argument)
  1740   clone_rq(struct request *rq, struct mapped_device *md, gfp_t gfp_mask)    (argument)
  1769   dm_prep_fn(struct request_queue *q, struct request *rq)    (argument)
  1861   struct request *rq, *clone;    (local)
         [more matches not shown]

/drivers/scsi/sd.c
   704   * @rq: Request to prepare
   711   struct request *rq = cmd->request;    (local)
   713   struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
   714   sector_t sector = blk_rq_pos(rq);
   715   unsigned int nr_sectors = blk_rq_sectors(rq);
   716   unsigned int nr_bytes = blk_rq_bytes(rq);
   772   rq->completion_data = page;
   773   rq->timeout = SD_TIMEOUT;
   786   blk_add_request_payload(rq, page, len);
   788   rq ...
   836   struct request *rq = cmd->request;    (local)
   888   struct request *rq = cmd->request;    (local)
   904   struct request *rq = SCpnt->request;    (local)
  1142   struct request *rq = cmd->request;    (local)
  1156   struct request *rq = SCpnt->request;    (local)
         [more matches not shown]

/drivers/char/agp/isoch.c
    75   u32 rq;    (member in struct isoch_data)
   126   target.rq = (tstatus >> 24) & 0xff;
   216   master[cdev].rq = master[cdev].n;
   218   master[cdev].rq *= (1 << (master[cdev].y - 1));
   220   tot_rq += master[cdev].rq;
   227   rq_async = target.rq - rq_isoch;
   254   master[cdev].rq += (cdev == ndevs - 1)
   266   mcmd |= master[cdev].rq << 24;

/drivers/s390/char/con3270.c
   192   con3270_write_callback(struct raw3270_request *rq, void *data)    (argument)
   194   raw3270_request_reset(rq);
   195   xchg(&((struct con3270 *) rq->view)->write, rq);
   350   con3270_read_callback(struct raw3270_request *rq, void *data)    (argument)
   352   raw3270_get_view(rq->view);
   354   tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
   404   con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)    (argument)
   410   if (rq) {
   412   rq ...
         [more matches not shown]

/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
   167   #define queue_max_phys_segments(rq) queue_max_segments(rq)
   168   #define queue_max_hw_segments(rq) queue_max_segments(rq)

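lustre_compat25.h is a compatibility shim: newer kernels merged the separate "phys" and "hw" segment limits into a single queue_max_segments(), and the two macros above alias the old names to it so older Lustre call sites keep compiling. A small sketch of how such a call site resolves under the shim; max_segs_for() is an invented example, not Lustre code.

    #include <linux/blkdev.h>

    /* Compat aliases, as in lustre_compat25.h: both old helpers now
     * resolve to the merged queue_max_segments(). */
    #define queue_max_phys_segments(rq) queue_max_segments(rq)
    #define queue_max_hw_segments(rq)   queue_max_segments(rq)

    /* An old call site keeps compiling unmodified. */
    static unsigned int max_segs_for(struct request_queue *q)
    {
            return queue_max_phys_segments(q);
    }
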
/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
   325   struct ptp_clock_request *rq, int on)
   327   struct ptp_clock_time *t = &rq->perout.period;
   334   if (rq->type != PTP_CLK_REQ_PEROUT)
   338   if (rq->perout.index >= ptp->n_per_out)
   369   fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index),
   324   fm10k_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on)    (argument)

/drivers/isdn/mISDN/l1oip_core.c
   994   open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)    (argument)
   999   if (rq->protocol == ISDN_P_NONE)
  1002   (dch->dev.D.protocol != rq->protocol)) {
  1005   __func__, dch->dev.D.protocol, rq->protocol);
  1007   if (dch->dev.D.protocol != rq->protocol)
  1008   dch->dev.D.protocol = rq->protocol;
  1014   rq->ch = &dch->dev.D;
  1021   open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)    (argument)
  1026   if (!test_channelmap(rq->adr.channel, dch->dev.channelmap))
  1028   if (rq ...
  1052   struct channel_req *rq;    (local)
         [more matches not shown]

/drivers/infiniband/hw/mlx5/qp.c
   105   return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
   171   qp->rq.max_gs = 0;
   172   qp->rq.wqe_cnt = 0;
   173   qp->rq.wqe_shift = 0;
   176   qp->rq.wqe_cnt = ucmd->rq_wqe_count;
   177   qp->rq.wqe_shift = ucmd->rq_wqe_shift;
   178   qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
   179   qp->rq ...
         [more matches not shown]

/drivers/infiniband/hw/mlx4/qp.c
   195   return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
   394   qp->rq.wqe_cnt = qp->rq.max_gs = 0;
   400   qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr));
   401   qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
   402   qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
   407   cap->max_recv_wr = qp->rq.max_post = qp->rq ...
         [more matches not shown]

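The mlx4 excerpt derives the receive-queue geometry from the caller's capabilities: the WQE count and the scatter/gather entries per WQE are rounded up to powers of two, and wqe_shift is the log2 of the WQE stride in bytes. A standalone sketch of just that arithmetic; struct rq_geom and the 16-byte segment constant are illustrative stand-ins, and the real code also clamps against device limits and handles the SRQ and wq_sig cases.

    #include <linux/kernel.h>       /* max() */
    #include <linux/log2.h>         /* roundup_pow_of_two(), ilog2() */

    #define DATA_SEG_SIZE 16        /* stand-in for sizeof(struct mlx4_wqe_data_seg) */

    struct rq_geom {
            int wqe_cnt;            /* receive WQEs in the ring (power of two) */
            int max_gs;             /* scatter entries per WQE (power of two) */
            int wqe_shift;          /* log2 of the WQE stride in bytes */
    };

    static void set_rq_size(struct rq_geom *rq,
                            unsigned int max_recv_wr, unsigned int max_recv_sge)
    {
            rq->wqe_cnt   = roundup_pow_of_two(max(1U, max_recv_wr));
            rq->max_gs    = roundup_pow_of_two(max(1U, max_recv_sge));
            rq->wqe_shift = ilog2(rq->max_gs * DATA_SEG_SIZE);
    }
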