nvme-core.c revision f8ebf8409abfdaeeb8c847381629a2a8b8e3d816
/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}
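
/*
 * Note: cmdid_data is a single trailing allocation, sized in
 * nvme_alloc_queue() to hold a bitmap of q_depth bits (one per in-flight
 * command ID) followed by an array of q_depth struct nvme_cmd_info
 * entries. nvme_cmd_info() above simply skips past the bitmap to reach
 * that array, so info[cmdid] holds the completion handler, its context
 * and the timeout for command ID 'cmdid'.
 */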
/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 * @timeout: Timeout in jiffies
 *
 * Allocate a Command ID for a queue. The data passed in will
 * be passed to the completion handler. The handler and context are
 * stored in the nvme_cmd_info array that follows the command ID bitmap,
 * indexed by the returned Command ID.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held. May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}
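
/*
 * Note: an nvme_iod is a single allocation: the struct (whose trailing
 * member is the scatterlist array, sized for nseg segments) followed by
 * an array of pointers to the PRP list pages, one slot per page that
 * nvme_npages() below estimates might be needed. iod->offset records
 * where that pointer array starts so iod_list() can find it again.
 */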
/*
 * Will slightly overestimate the number of pages needed. This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
		iod->nents = 0;
	}

	return iod;
}

static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
{
	struct nvme_queue *nvmeq = get_nvmeq(dev);
	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, bio);
	put_nvmeq(nvmeq);
	wake_up_process(nvme_thread);
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	if (iod->nents)
		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	nvme_free_iod(dev, iod);
	if (status) {
		bio_endio(bio, -EIO);
	} else if (bio->bi_vcnt > bio->bi_idx) {
		requeue_bio(dev, bio);
	} else {
		bio_endio(bio, 0);
	}
}
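
/*
 * Note on PRPs: an NVMe data pointer consists of two Physical Region Page
 * entries. PRP1 points at the first chunk of the buffer (it may start at
 * an offset into the page); PRP2 is either the second page, when the
 * transfer fits in two, or the address of a PRP list page full of further
 * page-aligned entries. When one list page is not enough, its last slot
 * holds the address of the next list page, which is how nvme_setup_prps()
 * below chains pages and how nvme_free_iod() above walks them to free.
 */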
/* length is in bytes. gfp flags indicate whether we may sleep */
static int nvme_setup_prps(struct nvme_dev *dev,
			struct nvme_common_command *cmd, struct nvme_iod *iod,
			int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, old_idx, length = 0, nsegs = 0;

	sg_init_table(iod->sg, psegs);
	old_idx = bio->bi_idx;
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				break;
			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		length += bvec->bv_len;
		bvprv = bvec;
	}
	bio->bi_idx = i;
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
		bio->bi_idx = old_idx;
		return -ENOMEM;
	}
	return length;
}
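
/*
 * Note: a REQ_DISCARD bio is translated into a Dataset Management command
 * with the "deallocate" attribute and a single 16-byte range descriptor.
 * The descriptor is carved out of the small PRP pool (as the comment below
 * explains) and PRP1 points at it, so freeing the iod frees it too.
 */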
/*
 * We reuse the small pool to allocate the 16-byte range here as it is not
 * worth having a special pool for these or additional cases to handle freeing
 * the iod.
 */
static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct bio *bio, struct nvme_iod *iod, int cmdid)
{
	struct nvme_dsm_range *range;
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
							&iod->first_dma);
	if (!range)
		return -ENOMEM;

	iod_list(iod)[0] = (__le64 *)range;
	iod->npages = 0;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
	range->slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.command_id = cmdid;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}
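
/*
 * Note: a bio that carries both REQ_FLUSH and data is handled as two NVMe
 * commands: nvme_submit_bio_queue() below first sends a flush through
 * nvme_submit_flush_data(), whose command ID is tied to CMD_CTX_FLUSH so
 * that special_completion() silently drops its completion, and then sends
 * the data as an ordinary read/write. A flush with no data reuses the
 * bio's own command ID and completes the bio from bio_completion().
 */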
/*
 * Called with local interrupts disabled and the q_lock held. May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if (bio->bi_rw & REQ_DISCARD) {
		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
		if (result)
			goto free_cmdid;
		return result;
	}
	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
	if (result < 0)
		goto free_cmdid;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	bio->bi_sector += length >> 9;

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_cmdid:
	free_cmdid(nvmeq, cmdid, NULL);
 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
 nomem:
	return result;
}

static void nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);
}
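
/*
 * Note on completion processing: each completion queue entry carries a
 * phase tag bit which the controller inverts on every pass through the
 * queue. An entry is new only while its phase bit matches cq_phase, so
 * nvme_process_cq() below consumes entries until the phase stops matching
 * and flips cq_phase whenever the head index wraps. The doorbell layout
 * follows the register stride from CAP.DSTRD: q_db points at a queue's
 * submission-tail doorbell and the matching completion-head doorbell sits
 * (1 << db_stride) 32-bit registers later, which is why the CQ doorbell
 * write below uses q_db + (1 << dev->db_stride).
 */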
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE. Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

/*
 * Returns 0 on success. If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}
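
/*
 * Note: the two helpers below create the hardware queue pair for an I/O
 * queue. The completion queue has to exist before the submission queue
 * that will post to it, because Create I/O SQ names its completion queue
 * in the cqid field; nvme_create_queue() therefore allocates the CQ first
 * and tears things down in the reverse order on failure.
 */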
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}

static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
			unsigned dword11, dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

/**
 * nvme_cancel_ios - Cancel outstanding I/Os
 * @nvmeq: The queue to cancel I/Os on
 * @timeout: True to only cancel I/Os which have timed out
 */
static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
		};

		if (timeout && !time_after(now, info[cmdid].timeout))
			continue;
		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}
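
/*
 * Note on queue teardown: nvme_free_queue() below first cancels anything
 * still outstanding and fails any bios waiting on the congestion list,
 * then releases the interrupt, asks the controller to delete the SQ/CQ
 * pair (skipped for the admin queue, which is not deleted with admin
 * commands), and finally frees the DMA memory via nvme_free_queue_mem().
 */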
static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_cancel_ios(nvmeq, false);
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		bio_endio(bio, -EIO);
	}
	spin_unlock_irq(&nvmeq->q_lock);

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	nvme_free_queue_mem(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
						sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
					int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}
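
/*
 * Note on admin queue bring-up: nvme_configure_admin_queue() below writes
 * the queue sizes into AQA (submission queue size in the low 16 bits,
 * completion queue size in the high 16 bits, both zero-based), the queue
 * base addresses into ASQ and ACQ, and then sets CC to enable the
 * controller with the NVM command set, the kernel's page size and the
 * standard 64-byte/16-byte queue entry sizes. It then polls CSTS.RDY
 * until the controller reports ready, giving up after the worst-case
 * time advertised in CAP.TO (units of 500ms).
 */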
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result = 0;
	u32 aqa;
	u64 cap;
	unsigned long timeout;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	cap = readq(&dev->bar->cap);
	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	dev->db_stride = NVME_CAP_STRIDE(cap);

	while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			result = -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			result = -ENODEV;
		}
	}

	if (result) {
		nvme_free_queue_mem(nvmeq);
		return result;
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
				min_t(int, length, PAGE_SIZE - offset), offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}
	sg_mark_end(&sg[i - 1]);
	iod->nents = count;

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}
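
/*
 * Note: in NVMe, bit 0 of an I/O opcode indicates a host-to-controller
 * data transfer (write is 0x01, read is 0x02), which is why the ioctl
 * paths below pass (opcode & 1) as the "write" flag when mapping and
 * unmapping the user buffer.
 */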
static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod)
{
	int i;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	int status;
	struct nvme_iod *iod;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(iod))
		return PTR_ERR(iod);

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = io.reftag;
	c.rw.apptag = io.apptag;
	c.rw.appmask = io.appmask;
	/* XXX: metadata */
	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);

	nvmeq = get_nvmeq(dev);
	/*
	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled. We may be preempted at any point, and be rescheduled
	 * to a different CPU. That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
	nvme_free_iod(dev, iod);
	return status;
}

static int nvme_user_admin_cmd(struct nvme_dev *dev,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length;
	struct nvme_iod *uninitialized_var(iod);

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
								length);
		if (IS_ERR(iod))
			return PTR_ERR(iod);
		length = nvme_setup_prps(dev, &c.common, iod, length,
								GFP_KERNEL);
	}

	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_admin_cmd(dev, &c, &cmd.result);

	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
		nvme_free_iod(dev, iod);
	}

	if (!status && copy_to_user(&ucmd->result, &cmd.result,
							sizeof(cmd.result)))
		status = -EFAULT;

	return status;
}
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
	}
}

static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_cancel_ios(nvmeq, true);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static DEFINE_IDA(nvme_index_ida);

static int nvme_get_ns_idx(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
			return -1;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_index_ida, &index);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		index = -1;
	return index;
}

static void nvme_put_ns_idx(int index)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_index_ida, index);
	spin_unlock(&dev_list_lock);
}

static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);
	ns->queue->limits.discard_zeroes_data = 0;
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	ns->queue->limits.max_discard_sectors = 0xffffffff;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
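
/*
 * Note: each active namespace reported by Identify becomes its own gendisk,
 * named nvme<instance>n<nsid> and given NVME_MINORS minor numbers. The
 * block size comes from the currently formatted LBA format
 * (id->lbaf[flbas & 0xf].ds is log2 of the LBA size) and the capacity is
 * nsze converted to 512-byte sectors, hence the << (lba_shift - 9) below.
 */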
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;
	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	if (dev->max_hw_sectors)
		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (dev->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	int index = ns->disk->first_minor / NVME_MINORS;
	put_disk(ns->disk);
	nvme_put_ns_idx(index);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
	if (db_bar_size > 8192) {
		iounmap(dev->bar);
		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
								db_bar_size);
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
		dev->queues[0]->q_db = dev->dbs;
	}

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
								NVME_Q_DEPTH);
	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
		if (IS_ERR(dev->queues[i + 1]))
			return PTR_ERR(dev->queues[i + 1]);
		dev->queue_count++;
	}

	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}
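
/*
 * Note: the number of I/O queues is negotiated with Set Features (Number
 * of Queues); the controller returns how many submission and completion
 * queues it actually allocated, and nvme_setup_io_queues() above uses the
 * smaller of that and the online CPU count. Queue depth is capped by
 * CAP.MQES. CPUs beyond the number of created queues are pointed at
 * existing queues by the final sharing loop, so get_nvmeq() always finds
 * a queue for the current CPU.
 */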
static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

static int nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	struct nvme_id_ns *id_ns;
	void *mem;
	dma_addr_t dma_addr;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);

	res = nvme_identify(dev, 0, 1, dma_addr);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = mem;
	nn = le32_to_cpup(&ctrl->nn);
	dev->oncs = le16_to_cpup(&ctrl->oncs);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts) {
		int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
	}

	id_ns = mem;
	for (i = 1; i <= nn; i++) {
		res = nvme_identify(dev, i, 0, dma_addr);
		if (res)
			continue;

		if (id_ns->ncap == 0)
			continue;

		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
							dma_addr + 4096, NULL);
		if (res)
			memset(mem + 4096, 0, 4096);

		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	goto out;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

 out:
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_dev *dev)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	dev->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, dev->instance);
	spin_unlock(&dev_list_lock);
}
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	result = nvme_set_instance(dev);
	if (result)
		goto disable;

	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	return 0;

 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);
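
/*
 * Note: rather than listing vendor/device IDs, the driver binds to any PCI
 * function whose class code is 0x010802 (mass storage, non-volatile memory
 * controller, NVM Express programming interface), so conforming devices
 * from any vendor are picked up automatically.
 */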
static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		goto kill_kthread;
	else if (result > 0)
		nvme_major = result;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.8");
module_init(nvme_init);
module_exit(nvme_exit);