Lines matching references to nvmeq in the Linux NVMe block driver. The leading number on each line is the line number within the driver source file; only lines that contain the identifier nvmeq are listed, so surrounding context is omitted.

150 static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
152 return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
162 * @nvmeq: The queue that will be used for this command
175 static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
178 int depth = nvmeq->q_depth - 1;
179 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
183 cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
186 } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
195 static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
199 wait_event_killable(nvmeq->sq_full,
200 (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
211 static void special_completion(struct nvme_queue *nvmeq, void *ctx,
217 ++nvmeq->dev->abort_limit;
221 dev_warn(nvmeq->q_dmadev,
227 dev_warn(nvmeq->q_dmadev,
233 dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
236 static void async_completion(struct nvme_queue *nvmeq, void *ctx,
248 static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
252 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
254 if (cmdid >= nvmeq->q_depth || !info[cmdid].fn) {
264 clear_bit(cmdid, nvmeq->cmdid_data);
265 wake_up(&nvmeq->sq_full);
269 static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
273 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
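The alloc_cmdid()/free_cmdid()/cancel_cmdid() lines above describe the per-queue command-identifier scheme: a bitmap (cmdid_data) sized to the queue depth, with the per-command context array placed directly behind it (which is what nvme_cmd_info() computes), allocation by finding and setting a clear bit, and freeing by clearing the bit and waking sleepers on sq_full. Below is a minimal, self-contained userspace model of that bitmap technique; the type names, the fixed depth and the plain non-atomic bit operations are illustrative only, not the driver's code.

/* Userspace model of the per-queue command-id bitmap (illustrative only). */
#include <stdio.h>
#include <limits.h>

#define DEPTH 16				/* q_depth in the driver; small here for the demo */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct demo_queue {
	unsigned long cmdid_data[BITS_TO_LONGS(DEPTH)];	/* one bit per outstanding command */
	void *ctx[DEPTH];				/* stands in for struct nvme_cmd_info */
};

/* Find a clear bit, claim it and remember the caller's context; -1 means queue full. */
static int alloc_id(struct demo_queue *q, void *ctx)
{
	for (int id = 0; id < DEPTH - 1; id++) {	/* depth - 1 usable ids, as in the driver */
		unsigned long *word = &q->cmdid_data[id / BITS_PER_LONG];
		unsigned long mask = 1UL << (id % BITS_PER_LONG);

		if (!(*word & mask)) {
			*word |= mask;			/* the driver uses test_and_set_bit() here */
			q->ctx[id] = ctx;
			return id;
		}
	}
	return -1;			/* alloc_cmdid_killable() would sleep on sq_full instead */
}

/* Release an id: hand back the context and clear the bit so it can be reused. */
static void *free_id(struct demo_queue *q, int id)
{
	void *ctx = q->ctx[id];

	q->ctx[id] = NULL;
	q->cmdid_data[id / BITS_PER_LONG] &= ~(1UL << (id % BITS_PER_LONG));
	return ctx;			/* the driver also does wake_up(&nvmeq->sq_full) here */
}

int main(void)
{
	struct demo_queue q = { { 0 } };
	char first[] = "first", second[] = "second";
	int a = alloc_id(&q, first), b = alloc_id(&q, second);

	printf("allocated ids %d and %d; freed ctx \"%s\"\n", a, b, (char *)free_id(&q, a));
	return 0;
}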
289 struct nvme_queue *nvmeq;
293 nvmeq = rcu_dereference(dev->queues[queue_id]);
294 if (nvmeq)
295 return nvmeq;
302 static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
305 put_cpu_var(nvmeq->dev->io_queue);
311 struct nvme_queue *nvmeq;
314 nvmeq = rcu_dereference(dev->queues[q_idx]);
315 if (nvmeq)
316 return nvmeq;
322 static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
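get_nvmeq()/put_nvmeq() and lock_nvmeq()/unlock_nvmeq() show only their nvmeq-bearing lines above; the surrounding pattern is a per-CPU queue index (dev->io_queue) combined with an RCU-protected queue array (dev->queues[]). A hedged kernel-style reconstruction of the lookup half of that pattern follows; the per-CPU accessor expressions and the failure path are assumptions filled in around the lines shown, not a verbatim copy of the driver.

/* Sketch, not verbatim: pick the calling CPU's I/O queue under RCU. */
static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
{
	struct nvme_queue *nvmeq;
	unsigned queue_id = get_cpu_var(*dev->io_queue);	/* pin this CPU, read its queue index */

	rcu_read_lock();
	nvmeq = rcu_dereference(dev->queues[queue_id]);
	if (nvmeq)
		return nvmeq;			/* success: RCU read lock stays held for the caller */

	rcu_read_unlock();			/* queue being torn down: undo and report failure */
	put_cpu_var(*dev->io_queue);
	return NULL;
}

static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
{
	rcu_read_unlock();
	put_cpu_var(nvmeq->dev->io_queue);
}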
329 * @nvmeq: The queue to use
334 static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
338 spin_lock_irqsave(&nvmeq->q_lock, flags);
339 if (nvmeq->q_suspended) {
340 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
343 tail = nvmeq->sq_tail;
344 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
345 if (++tail == nvmeq->q_depth)
347 writel(tail, nvmeq->q_db);
348 nvmeq->sq_tail = tail;
349 spin_unlock_irqrestore(&nvmeq->q_lock, flags);
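nvme_submit_cmd() is the producer side of the submission ring: under q_lock it refuses suspended queues, copies the command into sq_cmds[] at sq_tail, wraps the tail at q_depth, and writes the new tail to the queue's doorbell register (q_db). The snippet below is a self-contained userspace model of that tail-and-doorbell step; the structure layout and the demo command are made up for illustration.

/* Userspace model of the submission step in nvme_submit_cmd() (illustrative only). */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define Q_DEPTH 8

struct demo_cmd { uint8_t opcode; uint16_t command_id; };

struct demo_sq {
	struct demo_cmd sq_cmds[Q_DEPTH];	/* the submission ring (DMA memory in the driver) */
	uint16_t sq_tail;			/* next free slot */
	volatile uint32_t doorbell;		/* stands in for the mapped q_db register */
};

static void submit_cmd(struct demo_sq *q, const struct demo_cmd *cmd)
{
	uint16_t tail = q->sq_tail;

	memcpy(&q->sq_cmds[tail], cmd, sizeof(*cmd));	/* place the command in the ring */
	if (++tail == Q_DEPTH)				/* the tail wraps at q_depth */
		tail = 0;
	q->doorbell = tail;		/* the driver does writel(tail, nvmeq->q_db) instead */
	q->sq_tail = tail;
}

int main(void)
{
	struct demo_sq q = { .sq_tail = 0 };
	struct demo_cmd c = { .opcode = 0x02, .command_id = 1 };	/* arbitrary demo values */

	for (int i = 0; i < 10; i++)
		submit_cmd(&q, &c);
	printf("tail after 10 submissions on a depth-%d ring: %u\n",
	       Q_DEPTH, (unsigned)q.sq_tail);
	return 0;
}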
436 static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
448 if (!waitqueue_active(&nvmeq->sq_full))
449 add_wait_queue(&nvmeq->sq_full,
450 &nvmeq->sq_cong_wait);
451 list_add_tail(&iod->node, &nvmeq->iod_bio);
452 wake_up(&nvmeq->sq_full);
458 dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
462 nvme_free_iod(nvmeq->dev, iod);
547 static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
558 if (!waitqueue_active(&nvmeq->sq_full))
559 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
560 bio_list_add(&nvmeq->sq_cong, split);
561 bio_list_add(&nvmeq->sq_cong, bio);
562 wake_up(&nvmeq->sq_full);
571 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
580 if (nvmeq->dev->stripe_size)
581 split_len = nvmeq->dev->stripe_size -
583 (nvmeq->dev->stripe_size - 1));
591 return nvme_split_and_submit(bio, nvmeq,
601 return nvme_split_and_submit(bio, nvmeq, split_len);
608 if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
615 static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
620 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
634 if (++nvmeq->sq_tail == nvmeq->q_depth)
635 nvmeq->sq_tail = 0;
636 writel(nvmeq->sq_tail, nvmeq->q_db);
641 static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
644 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
651 if (++nvmeq->sq_tail == nvmeq->q_depth)
652 nvmeq->sq_tail = 0;
653 writel(nvmeq->sq_tail, nvmeq->q_db);
658 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
667 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
672 return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
674 return nvme_submit_flush(nvmeq, ns, cmdid);
686 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
700 if (++nvmeq->sq_tail == nvmeq->q_depth)
701 nvmeq->sq_tail = 0;
702 writel(nvmeq->sq_tail, nvmeq->q_db);
707 static int nvme_split_flush_data(struct nvme_queue *nvmeq, struct bio *bio)
718 if (!waitqueue_active(&nvmeq->sq_full))
719 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
720 bio_list_add(&nvmeq->sq_cong, split);
721 bio_list_add(&nvmeq->sq_cong, bio);
730 static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
738 return nvme_split_flush_data(nvmeq, bio);
752 range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
762 result = nvme_map_bio(nvmeq, iod, bio,
767 if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
774 if (unlikely(nvme_submit_iod(nvmeq, iod))) {
775 if (!waitqueue_active(&nvmeq->sq_full))
776 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
777 list_add_tail(&iod->node, &nvmeq->iod_bio);
782 nvme_free_iod(nvmeq->dev, iod);
786 static int nvme_process_cq(struct nvme_queue *nvmeq)
790 head = nvmeq->cq_head;
791 phase = nvmeq->cq_phase;
796 struct nvme_completion cqe = nvmeq->cqes[head];
799 nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
800 if (++head == nvmeq->q_depth) {
805 ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
806 fn(nvmeq, ctx, &cqe);
815 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
818 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
819 nvmeq->cq_head = head;
820 nvmeq->cq_phase = phase;
822 nvmeq->cqe_seen = 1;
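nvme_process_cq() consumes completions using the NVMe phase tag: the entry at cq_head is new only if bit 0 of its status field matches cq_phase; the head wraps at q_depth and the expected phase flips on every wrap, and the final head value is written to the CQ doorbell (q_db + db_stride). A self-contained userspace model of that scan, with made-up types and sizes:

/* Userspace model of the phase-bit scan in nvme_process_cq() (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH 4

struct demo_cqe { uint16_t command_id; uint16_t status; };	/* bit 0 of status is the phase tag */

struct demo_cq {
	struct demo_cqe cqes[Q_DEPTH];
	uint16_t cq_head;
	uint8_t cq_phase;		/* starts at 1 and flips every time the head wraps */
};

/* Returns the number of completions consumed. */
static int process_cq(struct demo_cq *q)
{
	uint16_t head = q->cq_head;
	uint8_t phase = q->cq_phase;
	int seen = 0;

	for (;;) {
		struct demo_cqe cqe = q->cqes[head];

		if ((cqe.status & 1) != phase)	/* phase mismatch: no new entry at head */
			break;
		if (++head == Q_DEPTH) {	/* wrap the head and flip the expected phase */
			head = 0;
			phase = !phase;
		}
		/* The driver looks up the context with free_cmdid() and calls its handler here. */
		printf("completed command id %u\n", (unsigned)cqe.command_id);
		seen++;
	}

	/* The driver returns early when nothing was consumed, then writes the new head to the
	 * CQ doorbell: writel(head, nvmeq->q_db + nvmeq->dev->db_stride). */
	q->cq_head = head;
	q->cq_phase = phase;
	return seen;
}

int main(void)
{
	struct demo_cq q = { .cq_phase = 1 };
	q.cqes[0] = (struct demo_cqe){ .command_id = 7, .status = 1 };	/* one new entry */

	printf("consumed %d completion(s)\n", process_cq(&q));
	return 0;
}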
829 struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
832 if (!nvmeq) {
837 spin_lock_irq(&nvmeq->q_lock);
838 if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
839 result = nvme_submit_bio_queue(nvmeq, ns, bio);
841 if (!waitqueue_active(&nvmeq->sq_full))
842 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
843 bio_list_add(&nvmeq->sq_cong, bio);
846 nvme_process_cq(nvmeq);
847 spin_unlock_irq(&nvmeq->q_lock);
848 put_nvmeq(nvmeq);
854 struct nvme_queue *nvmeq = data;
855 spin_lock(&nvmeq->q_lock);
856 nvme_process_cq(nvmeq);
857 result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
858 nvmeq->cqe_seen = 0;
859 spin_unlock(&nvmeq->q_lock);
865 struct nvme_queue *nvmeq = data;
866 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
867 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
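nvme_irq()/nvme_irq_check() split interrupt handling into a cheap check and the real work: the check half peeks at the entry at cq_head and compares its phase bit without taking q_lock, so a spurious shared interrupt can be dismissed immediately. A sketch of that check half, reconstructed around the lines shown (the IRQ_WAKE_THREAD return is an assumption based on the threaded-IRQ setup visible in queue_request_irq()):

/* Sketch, not verbatim: the lockless "is there anything new?" half of the IRQ path. */
static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];

	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;		/* phase mismatch: no new completion, not ours */
	return IRQ_WAKE_THREAD;			/* let the threaded handler take q_lock and drain */
}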
872 static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
874 spin_lock_irq(&nvmeq->q_lock);
875 cancel_cmdid(nvmeq, cmdid, NULL);
876 spin_unlock_irq(&nvmeq->q_lock);
885 static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
904 struct nvme_queue *nvmeq;
906 nvmeq = lock_nvmeq(dev, q_idx);
907 if (!nvmeq)
913 cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
915 unlock_nvmeq(nvmeq);
921 ret = nvme_submit_cmd(nvmeq, cmd);
923 free_cmdid(nvmeq, cmdid, NULL);
924 unlock_nvmeq(nvmeq);
928 unlock_nvmeq(nvmeq);
932 nvmeq = lock_nvmeq(dev, q_idx);
933 if (nvmeq) {
934 nvme_abort_command(nvmeq, cmdid);
935 unlock_nvmeq(nvmeq);
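Only the nvmeq-bearing lines of the synchronous submit path appear above: lock the target queue, allocate a command id bound to sync_completion(), submit, release the queue while the caller sleeps, and on a timeout re-take the queue and cancel the id through nvme_abort_command(). The reconstruction below fills in the rest of that control flow under assumptions; the sync_cmd_info bookkeeping and the sleep/wake details are inferred, not shown in the matching lines.

/* Sketch of the synchronous submit path (reconstruction, not verbatim driver code). */
static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
				struct nvme_command *cmd, u32 *result,
				unsigned timeout)
{
	struct sync_cmd_info cmdinfo;	/* status/result filled in by sync_completion() */
	struct nvme_queue *nvmeq;
	int cmdid, ret;

	nvmeq = lock_nvmeq(dev, q_idx);
	if (!nvmeq)
		return -ENODEV;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;	/* still -EINTR after the sleep => nobody completed us */

	cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
	if (cmdid < 0) {
		unlock_nvmeq(nvmeq);
		return cmdid;
	}
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	ret = nvme_submit_cmd(nvmeq, cmd);
	if (ret) {
		free_cmdid(nvmeq, cmdid, NULL);	/* never reached the device: release the id */
		unlock_nvmeq(nvmeq);
		set_current_state(TASK_RUNNING);
		return ret;
	}
	unlock_nvmeq(nvmeq);
	schedule_timeout(timeout);		/* sync_completion() wakes us when the CQE arrives */

	if (cmdinfo.status == -EINTR) {		/* timed out or killed: cancel the outstanding id */
		nvmeq = lock_nvmeq(dev, q_idx);
		if (nvmeq) {
			nvme_abort_command(nvmeq, cmdid);
			unlock_nvmeq(nvmeq);
		}
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;
	return cmdinfo.status;
}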
946 static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
952 cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
957 return nvme_submit_cmd(nvmeq, cmd);
996 struct nvme_queue *nvmeq)
1004 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1006 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1008 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
1017 struct nvme_queue *nvmeq)
1025 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
1027 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
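adapter_alloc_cq()/adapter_alloc_sq() wrap the Create I/O Completion/Submission Queue admin commands: PRP1 carries the queue's DMA base address, qsize is the zero-based depth (hence q_depth - 1), and the CQ additionally names its interrupt vector. A hedged sketch of the CQ variant follows; the field and flag names follow the kernel's NVMe command definitions of this era and should be treated as assumptions rather than a verbatim copy.

/* Sketch of the Create I/O Completion Queue admin command setup (not verbatim). */
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);	/* physical base of the CQ ring */
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);	/* NVMe queue sizes are 0-based */
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);	/* MSI-X vector for this CQ */

	return nvme_submit_admin_cmd(dev, &c, NULL);
}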
1097 static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
1101 struct nvme_dev *dev = nvmeq->dev;
1102 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
1105 if (!nvmeq->qid || info[cmdid].aborted) {
1111 nvmeq->qid);
1129 cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1136 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
1137 nvmeq->qid);
1146 static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
1148 int depth = nvmeq->q_depth - 1;
1149 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
1153 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
1164 if (timeout && nvmeq->dev->initialized) {
1165 nvme_abort_cmd(cmdid, nvmeq);
1168 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
1169 nvmeq->qid);
1170 ctx = cancel_cmdid(nvmeq, cmdid, &fn);
1171 fn(nvmeq, ctx, &cqe);
1177 struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
1179 spin_lock_irq(&nvmeq->q_lock);
1180 while (bio_list_peek(&nvmeq->sq_cong)) {
1181 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1184 while (!list_empty(&nvmeq->iod_bio)) {
1189 struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
1193 bio_completion(nvmeq, iod, &cqe);
1195 spin_unlock_irq(&nvmeq->q_lock);
1197 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
1198 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1199 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
1200 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1201 if (nvmeq->qid)
1202 free_cpumask_var(nvmeq->cpu_mask);
1203 kfree(nvmeq);
1211 struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
1213 call_rcu(&nvmeq->r_head, nvme_free_queue);
1220 * @nvmeq - queue to suspend
1224 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1226 int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
1228 spin_lock_irq(&nvmeq->q_lock);
1229 if (nvmeq->q_suspended) {
1230 spin_unlock_irq(&nvmeq->q_lock);
1233 nvmeq->q_suspended = 1;
1234 nvmeq->dev->online_queues--;
1235 spin_unlock_irq(&nvmeq->q_lock);
1238 free_irq(vector, nvmeq);
1243 static void nvme_clear_queue(struct nvme_queue *nvmeq)
1245 spin_lock_irq(&nvmeq->q_lock);
1246 nvme_process_cq(nvmeq);
1247 nvme_cancel_ios(nvmeq, false);
1248 spin_unlock_irq(&nvmeq->q_lock);
1253 struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
1255 if (!nvmeq)
1257 if (nvme_suspend_queue(nvmeq))
1266 nvme_clear_queue(nvmeq);
1274 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
1275 if (!nvmeq)
1278 nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
1279 &nvmeq->cq_dma_addr, GFP_KERNEL);
1280 if (!nvmeq->cqes)
1282 memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
1284 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
1285 &nvmeq->sq_dma_addr, GFP_KERNEL);
1286 if (!nvmeq->sq_cmds)
1289 if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
1292 nvmeq->q_dmadev = dmadev;
1293 nvmeq->dev = dev;
1294 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
1296 spin_lock_init(&nvmeq->q_lock);
1297 nvmeq->cq_head = 0;
1298 nvmeq->cq_phase = 1;
1299 init_waitqueue_head(&nvmeq->sq_full);
1300 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
1301 bio_list_init(&nvmeq->sq_cong);
1302 INIT_LIST_HEAD(&nvmeq->iod_bio);
1303 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1304 nvmeq->q_depth = depth;
1305 nvmeq->cq_vector = vector;
1306 nvmeq->qid = qid;
1307 nvmeq->q_suspended = 1;
1309 rcu_assign_pointer(dev->queues[qid], nvmeq);
1311 return nvmeq;
1314 dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
1315 nvmeq->sq_dma_addr);
1317 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
1318 nvmeq->cq_dma_addr);
1320 kfree(nvmeq);
1324 static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1328 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
1330 name, nvmeq);
1331 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
1332 IRQF_SHARED, name, nvmeq);
1335 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1337 struct nvme_dev *dev = nvmeq->dev;
1338 unsigned extra = nvme_queue_extra(nvmeq->q_depth);
1340 nvmeq->sq_tail = 0;
1341 nvmeq->cq_head = 0;
1342 nvmeq->cq_phase = 1;
1343 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1344 memset(nvmeq->cmdid_data, 0, extra);
1345 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
1346 nvme_cancel_ios(nvmeq, false);
1347 nvmeq->q_suspended = 0;
1351 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1353 struct nvme_dev *dev = nvmeq->dev;
1356 result = adapter_alloc_cq(dev, qid, nvmeq);
1360 result = adapter_alloc_sq(dev, qid, nvmeq);
1364 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1368 spin_lock_irq(&nvmeq->q_lock);
1369 nvme_init_queue(nvmeq, qid);
1370 spin_unlock_irq(&nvmeq->q_lock);
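nvme_create_queue() has a strict ordering: the completion queue must exist before the submission queue that points at it, the interrupt is requested only once both hardware queues exist, and the software state is reinitialized under q_lock last; failures unwind the hardware queues in reverse. A reconstruction of that ordering, with the delete helpers assumed:

/* Sketch of the creation/unwind ordering in nvme_create_queue() (reconstruction). */
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	result = adapter_alloc_cq(dev, qid, nvmeq);	/* CQ first: the SQ refers to it */
	if (result < 0)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
	if (result < 0)
		goto release_sq;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_init_queue(nvmeq, qid);		/* resets head/tail/phase, clears q_suspended */
	spin_unlock_irq(&nvmeq->q_lock);
	return result;

 release_sq:
	adapter_delete_sq(dev, qid);		/* unwind in reverse order */
 release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}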
1452 struct nvme_queue *nvmeq;
1458 nvmeq = raw_nvmeq(dev, 0);
1459 if (!nvmeq) {
1460 nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
1461 if (!nvmeq)
1465 aqa = nvmeq->q_depth - 1;
1474 writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
1475 writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
1482 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1486 spin_lock_irq(&nvmeq->q_lock);
1487 nvme_init_queue(nvmeq, 0);
1488 spin_unlock_irq(&nvmeq->q_lock);
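nvme_configure_admin_queue() programs the admin queue registers before enabling the controller: the zero-based admin submission and completion queue sizes share the AQA register (low and high halves respectively), and the two DMA base addresses go into ASQ and ACQ. A sketch of just that register setup, with the controller-configuration and readiness handling omitted:

/* Sketch of the admin-queue register programming (illustrative fragment).
 * Per the NVMe spec, AQA bits 11:0 hold the 0-based admin SQ size and
 * bits 27:16 hold the 0-based admin CQ size. */
	u32 aqa = nvmeq->q_depth - 1;		/* same 0-based size used for both queues here */
	aqa |= aqa << 16;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);	/* admin submission queue base address */
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);	/* admin completion queue base address */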
1811 static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
1815 list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
1816 if (unlikely(nvme_submit_iod(nvmeq, iod)))
1819 if (bio_list_empty(&nvmeq->sq_cong) &&
1820 list_empty(&nvmeq->iod_bio))
1821 remove_wait_queue(&nvmeq->sq_full,
1822 &nvmeq->sq_cong_wait);
1826 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
1828 while (bio_list_peek(&nvmeq->sq_cong)) {
1829 struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
1832 if (bio_list_empty(&nvmeq->sq_cong) &&
1833 list_empty(&nvmeq->iod_bio))
1834 remove_wait_queue(&nvmeq->sq_full,
1835 &nvmeq->sq_cong_wait);
1836 if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
1837 if (!waitqueue_active(&nvmeq->sq_full))
1838 add_wait_queue(&nvmeq->sq_full,
1839 &nvmeq->sq_cong_wait);
1840 bio_list_add_head(&nvmeq->sq_cong, bio);
1868 struct nvme_queue *nvmeq =
1870 if (!nvmeq)
1872 spin_lock_irq(&nvmeq->q_lock);
1873 if (nvmeq->q_suspended)
1875 nvme_process_cq(nvmeq);
1876 nvme_cancel_ios(nvmeq, true);
1877 nvme_resubmit_bios(nvmeq);
1878 nvme_resubmit_iods(nvmeq);
1880 spin_unlock_irq(&nvmeq->q_lock);
1976 static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
1981 if (cpumask_weight(nvmeq->cpu_mask) >= count)
1983 if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
1984 *per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
1989 const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
1996 nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
2041 struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
2044 cpumask_clear(nvmeq->cpu_mask);
2046 unlock_nvmeq(nvmeq);
2051 nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
2055 nvmeq, cpus_per_queue);
2059 nvmeq, cpus_per_queue);
2063 nvmeq, cpus_per_queue);
2069 nvmeq, cpus_per_queue);
2073 nvmeq, cpus_per_queue);
2075 WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
2079 irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
2080 nvmeq->cpu_mask);
2082 nvmeq->cpu_mask);
2086 unlock_nvmeq(nvmeq);
2387 static void nvme_del_queue_end(struct nvme_queue *nvmeq)
2389 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
2391 nvme_clear_queue(nvmeq);
2395 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
2402 c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
2404 init_kthread_work(&nvmeq->cmdinfo.work, fn);
2405 return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
2410 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2412 nvme_del_queue_end(nvmeq);
2415 static int nvme_delete_cq(struct nvme_queue *nvmeq)
2417 return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
2423 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2425 int status = nvmeq->cmdinfo.status;
2428 status = nvme_delete_cq(nvmeq);
2430 nvme_del_queue_end(nvmeq);
2433 static int nvme_delete_sq(struct nvme_queue *nvmeq)
2435 return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
2441 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
2444 if (nvme_delete_sq(nvmeq))
2445 nvme_del_queue_end(nvmeq);
2468 struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
2470 if (nvme_suspend_queue(nvmeq))
2472 nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
2473 nvmeq->cmdinfo.worker = dq.worker;
2474 init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
2475 queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
2510 struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
2511 nvme_suspend_queue(nvmeq);
2512 nvme_clear_queue(nvmeq);