nvme-core.c revision 08df1e05657fc6712e520e7c09cc6c86160ceb35
1/* 2 * NVM Express device driver 3 * Copyright (c) 2011, Intel Corporation. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program; if not, write to the Free Software Foundation, Inc., 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 */ 18 19#include <linux/nvme.h> 20#include <linux/bio.h> 21#include <linux/bitops.h> 22#include <linux/blkdev.h> 23#include <linux/delay.h> 24#include <linux/errno.h> 25#include <linux/fs.h> 26#include <linux/genhd.h> 27#include <linux/idr.h> 28#include <linux/init.h> 29#include <linux/interrupt.h> 30#include <linux/io.h> 31#include <linux/kdev_t.h> 32#include <linux/kthread.h> 33#include <linux/kernel.h> 34#include <linux/mm.h> 35#include <linux/module.h> 36#include <linux/moduleparam.h> 37#include <linux/pci.h> 38#include <linux/poison.h> 39#include <linux/sched.h> 40#include <linux/slab.h> 41#include <linux/types.h> 42 43#include <asm-generic/io-64-nonatomic-lo-hi.h> 44 45#define NVME_Q_DEPTH 1024 46#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 47#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 48#define NVME_MINORS 64 49#define NVME_IO_TIMEOUT (5 * HZ) 50#define ADMIN_TIMEOUT (60 * HZ) 51 52static int nvme_major; 53module_param(nvme_major, int, 0); 54 55static int use_threaded_interrupts; 56module_param(use_threaded_interrupts, int, 0); 57 58static DEFINE_SPINLOCK(dev_list_lock); 59static LIST_HEAD(dev_list); 60static struct task_struct *nvme_thread; 61 62/* 63 * Represents an NVM Express device. Each nvme_dev is a PCI function. 64 */ 65struct nvme_dev { 66 struct list_head node; 67 struct nvme_queue **queues; 68 u32 __iomem *dbs; 69 struct pci_dev *pci_dev; 70 struct dma_pool *prp_page_pool; 71 struct dma_pool *prp_small_pool; 72 int instance; 73 int queue_count; 74 int db_stride; 75 u32 ctrl_config; 76 struct msix_entry *entry; 77 struct nvme_bar __iomem *bar; 78 struct list_head namespaces; 79 char serial[20]; 80 char model[40]; 81 char firmware_rev[8]; 82 u32 max_hw_sectors; 83}; 84 85/* 86 * An NVM Express namespace is equivalent to a SCSI LUN 87 */ 88struct nvme_ns { 89 struct list_head list; 90 91 struct nvme_dev *dev; 92 struct request_queue *queue; 93 struct gendisk *disk; 94 95 int ns_id; 96 int lba_shift; 97}; 98 99/* 100 * An NVM Express queue. Each device has at least two (one for admin 101 * commands and one for I/O commands). 
102 */ 103struct nvme_queue { 104 struct device *q_dmadev; 105 struct nvme_dev *dev; 106 spinlock_t q_lock; 107 struct nvme_command *sq_cmds; 108 volatile struct nvme_completion *cqes; 109 dma_addr_t sq_dma_addr; 110 dma_addr_t cq_dma_addr; 111 wait_queue_head_t sq_full; 112 wait_queue_t sq_cong_wait; 113 struct bio_list sq_cong; 114 u32 __iomem *q_db; 115 u16 q_depth; 116 u16 cq_vector; 117 u16 sq_head; 118 u16 sq_tail; 119 u16 cq_head; 120 u16 cq_phase; 121 unsigned long cmdid_data[]; 122}; 123 124/* 125 * Check we didn't inadvertently grow the command struct 126 */ 127static inline void _nvme_check_size(void) 128{ 129 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); 130 BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); 131 BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); 132 BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); 133 BUILD_BUG_ON(sizeof(struct nvme_features) != 64); 134 BUILD_BUG_ON(sizeof(struct nvme_command) != 64); 135 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); 136 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); 137 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); 138} 139 140typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, 141 struct nvme_completion *); 142 143struct nvme_cmd_info { 144 nvme_completion_fn fn; 145 void *ctx; 146 unsigned long timeout; 147}; 148 149static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq) 150{ 151 return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)]; 152} 153 154/** 155 * alloc_cmdid() - Allocate a Command ID 156 * @nvmeq: The queue that will be used for this command 157 * @ctx: A pointer that will be passed to the handler 158 * @handler: The function to call on completion 159 * 160 * Allocate a Command ID for a queue. The data passed in will 161 * be passed to the completion handler. The handler and context are 162 * stored in the nvme_cmd_info array that follows the cmdid bitmap, 163 * together with an absolute timeout (in jiffies) that nvme_cancel_ios() 164 * uses to detect commands which have timed out. 165 * 166 * May be called with local interrupts disabled and the q_lock held, 167 * or with interrupts enabled and no locks held. 168 */ 169static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, 170 nvme_completion_fn handler, unsigned timeout) 171{ 172 int depth = nvmeq->q_depth - 1; 173 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); 174 int cmdid; 175 176 do { 177 cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth); 178 if (cmdid >= depth) 179 return -EBUSY; 180 } while (test_and_set_bit(cmdid, nvmeq->cmdid_data)); 181 182 info[cmdid].fn = handler; 183 info[cmdid].ctx = ctx; 184 info[cmdid].timeout = jiffies + timeout; 185 return cmdid; 186} 187 188static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx, 189 nvme_completion_fn handler, unsigned timeout) 190{ 191 int cmdid; 192 wait_event_killable(nvmeq->sq_full, 193 (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0); 194 return (cmdid < 0) ?
-EINTR : cmdid; 195} 196 197/* Special values must be less than 0x1000 */ 198#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA) 199#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE) 200#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE) 201#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE) 202#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) 203 204static void special_completion(struct nvme_dev *dev, void *ctx, 205 struct nvme_completion *cqe) 206{ 207 if (ctx == CMD_CTX_CANCELLED) 208 return; 209 if (ctx == CMD_CTX_FLUSH) 210 return; 211 if (ctx == CMD_CTX_COMPLETED) { 212 dev_warn(&dev->pci_dev->dev, 213 "completed id %d twice on queue %d\n", 214 cqe->command_id, le16_to_cpup(&cqe->sq_id)); 215 return; 216 } 217 if (ctx == CMD_CTX_INVALID) { 218 dev_warn(&dev->pci_dev->dev, 219 "invalid id %d completed on queue %d\n", 220 cqe->command_id, le16_to_cpup(&cqe->sq_id)); 221 return; 222 } 223 224 dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); 225} 226 227/* 228 * Called with local interrupts disabled and the q_lock held. May not sleep. 229 */ 230static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid, 231 nvme_completion_fn *fn) 232{ 233 void *ctx; 234 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); 235 236 if (cmdid >= nvmeq->q_depth) { 237 *fn = special_completion; 238 return CMD_CTX_INVALID; 239 } 240 if (fn) 241 *fn = info[cmdid].fn; 242 ctx = info[cmdid].ctx; 243 info[cmdid].fn = special_completion; 244 info[cmdid].ctx = CMD_CTX_COMPLETED; 245 clear_bit(cmdid, nvmeq->cmdid_data); 246 wake_up(&nvmeq->sq_full); 247 return ctx; 248} 249 250static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid, 251 nvme_completion_fn *fn) 252{ 253 void *ctx; 254 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); 255 if (fn) 256 *fn = info[cmdid].fn; 257 ctx = info[cmdid].ctx; 258 info[cmdid].fn = special_completion; 259 info[cmdid].ctx = CMD_CTX_CANCELLED; 260 return ctx; 261} 262 263static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) 264{ 265 return dev->queues[get_cpu() + 1]; 266} 267 268static void put_nvmeq(struct nvme_queue *nvmeq) 269{ 270 put_cpu(); 271} 272 273/** 274 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell 275 * @nvmeq: The queue to use 276 * @cmd: The command to send 277 * 278 * Safe to use from interrupt context 279 */ 280static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) 281{ 282 unsigned long flags; 283 u16 tail; 284 spin_lock_irqsave(&nvmeq->q_lock, flags); 285 tail = nvmeq->sq_tail; 286 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); 287 if (++tail == nvmeq->q_depth) 288 tail = 0; 289 writel(tail, nvmeq->q_db); 290 nvmeq->sq_tail = tail; 291 spin_unlock_irqrestore(&nvmeq->q_lock, flags); 292 293 return 0; 294} 295 296/* 297 * The nvme_iod describes the data in an I/O, including the list of PRP 298 * entries. You can't see it in this data structure because C doesn't let 299 * me express that. Use nvme_alloc_iod to ensure there's enough space 300 * allocated to store the PRP list. 301 */ 302struct nvme_iod { 303 void *private; /* For the use of the submitter of the I/O */ 304 int npages; /* In the PRP list. 0 means small pool in use */ 305 int offset; /* Of PRP list */ 306 int nents; /* Used in scatterlist */ 307 int length; /* Of data, in bytes */ 308 dma_addr_t first_dma; 309 struct scatterlist sg[0]; 310}; 311 312static __le64 **iod_list(struct nvme_iod *iod) 313{ 314 return ((void *)iod) + iod->offset; 315} 316 317/* 318 * Will slightly overestimate the number of pages needed. 
This is OK 319 * as it only leads to a small amount of wasted memory for the lifetime of 320 * the I/O. 321 */ 322static int nvme_npages(unsigned size) 323{ 324 unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE); 325 return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); 326} 327 328static struct nvme_iod * 329nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp) 330{ 331 struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) + 332 sizeof(__le64 *) * nvme_npages(nbytes) + 333 sizeof(struct scatterlist) * nseg, gfp); 334 335 if (iod) { 336 iod->offset = offsetof(struct nvme_iod, sg[nseg]); 337 iod->npages = -1; 338 iod->length = nbytes; 339 } 340 341 return iod; 342} 343 344static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) 345{ 346 const int last_prp = PAGE_SIZE / 8 - 1; 347 int i; 348 __le64 **list = iod_list(iod); 349 dma_addr_t prp_dma = iod->first_dma; 350 351 if (iod->npages == 0) 352 dma_pool_free(dev->prp_small_pool, list[0], prp_dma); 353 for (i = 0; i < iod->npages; i++) { 354 __le64 *prp_list = list[i]; 355 dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]); 356 dma_pool_free(dev->prp_page_pool, prp_list, prp_dma); 357 prp_dma = next_prp_dma; 358 } 359 kfree(iod); 360} 361 362static void requeue_bio(struct nvme_dev *dev, struct bio *bio) 363{ 364 struct nvme_queue *nvmeq = get_nvmeq(dev); 365 if (bio_list_empty(&nvmeq->sq_cong)) 366 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 367 bio_list_add(&nvmeq->sq_cong, bio); 368 put_nvmeq(nvmeq); 369 wake_up_process(nvme_thread); 370} 371 372static void bio_completion(struct nvme_dev *dev, void *ctx, 373 struct nvme_completion *cqe) 374{ 375 struct nvme_iod *iod = ctx; 376 struct bio *bio = iod->private; 377 u16 status = le16_to_cpup(&cqe->status) >> 1; 378 379 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, 380 bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 381 nvme_free_iod(dev, iod); 382 if (status) { 383 bio_endio(bio, -EIO); 384 } else if (bio->bi_vcnt > bio->bi_idx) { 385 requeue_bio(dev, bio); 386 } else { 387 bio_endio(bio, 0); 388 } 389} 390 391/* length is in bytes. gfp flags indicates whether we may sleep. 
*/ 392static int nvme_setup_prps(struct nvme_dev *dev, 393 struct nvme_common_command *cmd, struct nvme_iod *iod, 394 int total_len, gfp_t gfp) 395{ 396 struct dma_pool *pool; 397 int length = total_len; 398 struct scatterlist *sg = iod->sg; 399 int dma_len = sg_dma_len(sg); 400 u64 dma_addr = sg_dma_address(sg); 401 int offset = offset_in_page(dma_addr); 402 __le64 *prp_list; 403 __le64 **list = iod_list(iod); 404 dma_addr_t prp_dma; 405 int nprps, i; 406 407 cmd->prp1 = cpu_to_le64(dma_addr); 408 length -= (PAGE_SIZE - offset); 409 if (length <= 0) 410 return total_len; 411 412 dma_len -= (PAGE_SIZE - offset); 413 if (dma_len) { 414 dma_addr += (PAGE_SIZE - offset); 415 } else { 416 sg = sg_next(sg); 417 dma_addr = sg_dma_address(sg); 418 dma_len = sg_dma_len(sg); 419 } 420 421 if (length <= PAGE_SIZE) { 422 cmd->prp2 = cpu_to_le64(dma_addr); 423 return total_len; 424 } 425 426 nprps = DIV_ROUND_UP(length, PAGE_SIZE); 427 if (nprps <= (256 / 8)) { 428 pool = dev->prp_small_pool; 429 iod->npages = 0; 430 } else { 431 pool = dev->prp_page_pool; 432 iod->npages = 1; 433 } 434 435 prp_list = dma_pool_alloc(pool, gfp, &prp_dma); 436 if (!prp_list) { 437 cmd->prp2 = cpu_to_le64(dma_addr); 438 iod->npages = -1; 439 return (total_len - length) + PAGE_SIZE; 440 } 441 list[0] = prp_list; 442 iod->first_dma = prp_dma; 443 cmd->prp2 = cpu_to_le64(prp_dma); 444 i = 0; 445 for (;;) { 446 if (i == PAGE_SIZE / 8) { 447 __le64 *old_prp_list = prp_list; 448 prp_list = dma_pool_alloc(pool, gfp, &prp_dma); 449 if (!prp_list) 450 return total_len - length; 451 list[iod->npages++] = prp_list; 452 prp_list[0] = old_prp_list[i - 1]; 453 old_prp_list[i - 1] = cpu_to_le64(prp_dma); 454 i = 1; 455 } 456 prp_list[i++] = cpu_to_le64(dma_addr); 457 dma_len -= PAGE_SIZE; 458 dma_addr += PAGE_SIZE; 459 length -= PAGE_SIZE; 460 if (length <= 0) 461 break; 462 if (dma_len > 0) 463 continue; 464 BUG_ON(dma_len < 0); 465 sg = sg_next(sg); 466 dma_addr = sg_dma_address(sg); 467 dma_len = sg_dma_len(sg); 468 } 469 470 return total_len; 471} 472 473/* NVMe scatterlists require no holes in the virtual address */ 474#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \ 475 (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE)) 476 477static int nvme_map_bio(struct device *dev, struct nvme_iod *iod, 478 struct bio *bio, enum dma_data_direction dma_dir, int psegs) 479{ 480 struct bio_vec *bvec, *bvprv = NULL; 481 struct scatterlist *sg = NULL; 482 int i, old_idx, length = 0, nsegs = 0; 483 484 sg_init_table(iod->sg, psegs); 485 old_idx = bio->bi_idx; 486 bio_for_each_segment(bvec, bio, i) { 487 if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { 488 sg->length += bvec->bv_len; 489 } else { 490 if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) 491 break; 492 sg = sg ? 
sg + 1 : iod->sg; 493 sg_set_page(sg, bvec->bv_page, bvec->bv_len, 494 bvec->bv_offset); 495 nsegs++; 496 } 497 length += bvec->bv_len; 498 bvprv = bvec; 499 } 500 bio->bi_idx = i; 501 iod->nents = nsegs; 502 sg_mark_end(sg); 503 if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) { 504 bio->bi_idx = old_idx; 505 return -ENOMEM; 506 } 507 return length; 508} 509 510static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, 511 int cmdid) 512{ 513 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 514 515 memset(cmnd, 0, sizeof(*cmnd)); 516 cmnd->common.opcode = nvme_cmd_flush; 517 cmnd->common.command_id = cmdid; 518 cmnd->common.nsid = cpu_to_le32(ns->ns_id); 519 520 if (++nvmeq->sq_tail == nvmeq->q_depth) 521 nvmeq->sq_tail = 0; 522 writel(nvmeq->sq_tail, nvmeq->q_db); 523 524 return 0; 525} 526 527static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns) 528{ 529 int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH, 530 special_completion, NVME_IO_TIMEOUT); 531 if (unlikely(cmdid < 0)) 532 return cmdid; 533 534 return nvme_submit_flush(nvmeq, ns, cmdid); 535} 536 537/* 538 * Called with local interrupts disabled and the q_lock held. May not sleep. 539 */ 540static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, 541 struct bio *bio) 542{ 543 struct nvme_command *cmnd; 544 struct nvme_iod *iod; 545 enum dma_data_direction dma_dir; 546 int cmdid, length, result = -ENOMEM; 547 u16 control; 548 u32 dsmgmt; 549 int psegs = bio_phys_segments(ns->queue, bio); 550 551 if ((bio->bi_rw & REQ_FLUSH) && psegs) { 552 result = nvme_submit_flush_data(nvmeq, ns); 553 if (result) 554 return result; 555 } 556 557 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); 558 if (!iod) 559 goto nomem; 560 iod->private = bio; 561 562 result = -EBUSY; 563 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT); 564 if (unlikely(cmdid < 0)) 565 goto free_iod; 566 567 if ((bio->bi_rw & REQ_FLUSH) && !psegs) 568 return nvme_submit_flush(nvmeq, ns, cmdid); 569 570 control = 0; 571 if (bio->bi_rw & REQ_FUA) 572 control |= NVME_RW_FUA; 573 if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD)) 574 control |= NVME_RW_LR; 575 576 dsmgmt = 0; 577 if (bio->bi_rw & REQ_RAHEAD) 578 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 579 580 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 581 582 memset(cmnd, 0, sizeof(*cmnd)); 583 if (bio_data_dir(bio)) { 584 cmnd->rw.opcode = nvme_cmd_write; 585 dma_dir = DMA_TO_DEVICE; 586 } else { 587 cmnd->rw.opcode = nvme_cmd_read; 588 dma_dir = DMA_FROM_DEVICE; 589 } 590 591 result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); 592 if (result < 0) 593 goto free_cmdid; 594 length = result; 595 596 cmnd->rw.command_id = cmdid; 597 cmnd->rw.nsid = cpu_to_le32(ns->ns_id); 598 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, 599 GFP_ATOMIC); 600 cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9)); 601 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); 602 cmnd->rw.control = cpu_to_le16(control); 603 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 604 605 bio->bi_sector += length >> 9; 606 607 if (++nvmeq->sq_tail == nvmeq->q_depth) 608 nvmeq->sq_tail = 0; 609 writel(nvmeq->sq_tail, nvmeq->q_db); 610 611 return 0; 612 613 free_cmdid: 614 free_cmdid(nvmeq, cmdid, NULL); 615 free_iod: 616 nvme_free_iod(nvmeq->dev, iod); 617 nomem: 618 return result; 619} 620 621static void nvme_make_request(struct request_queue *q, struct bio *bio) 622{ 623 struct nvme_ns *ns = q->queuedata; 624 struct 
nvme_queue *nvmeq = get_nvmeq(ns->dev); 625 int result = -EBUSY; 626 627 spin_lock_irq(&nvmeq->q_lock); 628 if (bio_list_empty(&nvmeq->sq_cong)) 629 result = nvme_submit_bio_queue(nvmeq, ns, bio); 630 if (unlikely(result)) { 631 if (bio_list_empty(&nvmeq->sq_cong)) 632 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 633 bio_list_add(&nvmeq->sq_cong, bio); 634 } 635 636 spin_unlock_irq(&nvmeq->q_lock); 637 put_nvmeq(nvmeq); 638} 639 640static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq) 641{ 642 u16 head, phase; 643 644 head = nvmeq->cq_head; 645 phase = nvmeq->cq_phase; 646 647 for (;;) { 648 void *ctx; 649 nvme_completion_fn fn; 650 struct nvme_completion cqe = nvmeq->cqes[head]; 651 if ((le16_to_cpu(cqe.status) & 1) != phase) 652 break; 653 nvmeq->sq_head = le16_to_cpu(cqe.sq_head); 654 if (++head == nvmeq->q_depth) { 655 head = 0; 656 phase = !phase; 657 } 658 659 ctx = free_cmdid(nvmeq, cqe.command_id, &fn); 660 fn(nvmeq->dev, ctx, &cqe); 661 } 662 663 /* If the controller ignores the cq head doorbell and continuously 664 * writes to the queue, it is theoretically possible to wrap around 665 * the queue twice and mistakenly return IRQ_NONE. Linux only 666 * requires that 0.1% of your interrupts are handled, so this isn't 667 * a big problem. 668 */ 669 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) 670 return IRQ_NONE; 671 672 writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); 673 nvmeq->cq_head = head; 674 nvmeq->cq_phase = phase; 675 676 return IRQ_HANDLED; 677} 678 679static irqreturn_t nvme_irq(int irq, void *data) 680{ 681 irqreturn_t result; 682 struct nvme_queue *nvmeq = data; 683 spin_lock(&nvmeq->q_lock); 684 result = nvme_process_cq(nvmeq); 685 spin_unlock(&nvmeq->q_lock); 686 return result; 687} 688 689static irqreturn_t nvme_irq_check(int irq, void *data) 690{ 691 struct nvme_queue *nvmeq = data; 692 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; 693 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) 694 return IRQ_NONE; 695 return IRQ_WAKE_THREAD; 696} 697 698static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid) 699{ 700 spin_lock_irq(&nvmeq->q_lock); 701 cancel_cmdid(nvmeq, cmdid, NULL); 702 spin_unlock_irq(&nvmeq->q_lock); 703} 704 705struct sync_cmd_info { 706 struct task_struct *task; 707 u32 result; 708 int status; 709}; 710 711static void sync_completion(struct nvme_dev *dev, void *ctx, 712 struct nvme_completion *cqe) 713{ 714 struct sync_cmd_info *cmdinfo = ctx; 715 cmdinfo->result = le32_to_cpup(&cqe->result); 716 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; 717 wake_up_process(cmdinfo->task); 718} 719 720/* 721 * Returns 0 on success. 
If the result is negative, it's a Linux error code; 722 * if the result is positive, it's an NVM Express status code 723 */ 724static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, 725 struct nvme_command *cmd, u32 *result, unsigned timeout) 726{ 727 int cmdid; 728 struct sync_cmd_info cmdinfo; 729 730 cmdinfo.task = current; 731 cmdinfo.status = -EINTR; 732 733 cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion, 734 timeout); 735 if (cmdid < 0) 736 return cmdid; 737 cmd->common.command_id = cmdid; 738 739 set_current_state(TASK_KILLABLE); 740 nvme_submit_cmd(nvmeq, cmd); 741 schedule(); 742 743 if (cmdinfo.status == -EINTR) { 744 nvme_abort_command(nvmeq, cmdid); 745 return -EINTR; 746 } 747 748 if (result) 749 *result = cmdinfo.result; 750 751 return cmdinfo.status; 752} 753 754static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, 755 u32 *result) 756{ 757 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); 758} 759 760static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 761{ 762 int status; 763 struct nvme_command c; 764 765 memset(&c, 0, sizeof(c)); 766 c.delete_queue.opcode = opcode; 767 c.delete_queue.qid = cpu_to_le16(id); 768 769 status = nvme_submit_admin_cmd(dev, &c, NULL); 770 if (status) 771 return -EIO; 772 return 0; 773} 774 775static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 776 struct nvme_queue *nvmeq) 777{ 778 int status; 779 struct nvme_command c; 780 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED; 781 782 memset(&c, 0, sizeof(c)); 783 c.create_cq.opcode = nvme_admin_create_cq; 784 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 785 c.create_cq.cqid = cpu_to_le16(qid); 786 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 787 c.create_cq.cq_flags = cpu_to_le16(flags); 788 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); 789 790 status = nvme_submit_admin_cmd(dev, &c, NULL); 791 if (status) 792 return -EIO; 793 return 0; 794} 795 796static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 797 struct nvme_queue *nvmeq) 798{ 799 int status; 800 struct nvme_command c; 801 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM; 802 803 memset(&c, 0, sizeof(c)); 804 c.create_sq.opcode = nvme_admin_create_sq; 805 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 806 c.create_sq.sqid = cpu_to_le16(qid); 807 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 808 c.create_sq.sq_flags = cpu_to_le16(flags); 809 c.create_sq.cqid = cpu_to_le16(qid); 810 811 status = nvme_submit_admin_cmd(dev, &c, NULL); 812 if (status) 813 return -EIO; 814 return 0; 815} 816 817static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 818{ 819 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 820} 821 822static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 823{ 824 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 825} 826 827static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, 828 dma_addr_t dma_addr) 829{ 830 struct nvme_command c; 831 832 memset(&c, 0, sizeof(c)); 833 c.identify.opcode = nvme_admin_identify; 834 c.identify.nsid = cpu_to_le32(nsid); 835 c.identify.prp1 = cpu_to_le64(dma_addr); 836 c.identify.cns = cpu_to_le32(cns); 837 838 return nvme_submit_admin_cmd(dev, &c, NULL); 839} 840 841static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, 842 dma_addr_t dma_addr, u32 *result) 843{ 844 struct nvme_command c; 845 846 memset(&c, 0, sizeof(c)); 847 c.features.opcode = 
nvme_admin_get_features; 848 c.features.nsid = cpu_to_le32(nsid); 849 c.features.prp1 = cpu_to_le64(dma_addr); 850 c.features.fid = cpu_to_le32(fid); 851 852 return nvme_submit_admin_cmd(dev, &c, result); 853} 854 855static int nvme_set_features(struct nvme_dev *dev, unsigned fid, 856 unsigned dword11, dma_addr_t dma_addr, u32 *result) 857{ 858 struct nvme_command c; 859 860 memset(&c, 0, sizeof(c)); 861 c.features.opcode = nvme_admin_set_features; 862 c.features.prp1 = cpu_to_le64(dma_addr); 863 c.features.fid = cpu_to_le32(fid); 864 c.features.dword11 = cpu_to_le32(dword11); 865 866 return nvme_submit_admin_cmd(dev, &c, result); 867} 868 869/** 870 * nvme_cancel_ios - Cancel outstanding I/Os 871 * @queue: The queue to cancel I/Os on 872 * @timeout: True to only cancel I/Os which have timed out 873 */ 874static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) 875{ 876 int depth = nvmeq->q_depth - 1; 877 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); 878 unsigned long now = jiffies; 879 int cmdid; 880 881 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { 882 void *ctx; 883 nvme_completion_fn fn; 884 static struct nvme_completion cqe = { 885 .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, 886 }; 887 888 if (timeout && !time_after(now, info[cmdid].timeout)) 889 continue; 890 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); 891 ctx = cancel_cmdid(nvmeq, cmdid, &fn); 892 fn(nvmeq->dev, ctx, &cqe); 893 } 894} 895 896static void nvme_free_queue_mem(struct nvme_queue *nvmeq) 897{ 898 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 899 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 900 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 901 nvmeq->sq_cmds, nvmeq->sq_dma_addr); 902 kfree(nvmeq); 903} 904 905static void nvme_free_queue(struct nvme_dev *dev, int qid) 906{ 907 struct nvme_queue *nvmeq = dev->queues[qid]; 908 int vector = dev->entry[nvmeq->cq_vector].vector; 909 910 spin_lock_irq(&nvmeq->q_lock); 911 nvme_cancel_ios(nvmeq, false); 912 while (bio_list_peek(&nvmeq->sq_cong)) { 913 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 914 bio_endio(bio, -EIO); 915 } 916 spin_unlock_irq(&nvmeq->q_lock); 917 918 irq_set_affinity_hint(vector, NULL); 919 free_irq(vector, nvmeq); 920 921 /* Don't tell the adapter to delete the admin queue */ 922 if (qid) { 923 adapter_delete_sq(dev, qid); 924 adapter_delete_cq(dev, qid); 925 } 926 927 nvme_free_queue_mem(nvmeq); 928} 929 930static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 931 int depth, int vector) 932{ 933 struct device *dmadev = &dev->pci_dev->dev; 934 unsigned extra = DIV_ROUND_UP(depth, 8) + (depth * 935 sizeof(struct nvme_cmd_info)); 936 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); 937 if (!nvmeq) 938 return NULL; 939 940 nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth), 941 &nvmeq->cq_dma_addr, GFP_KERNEL); 942 if (!nvmeq->cqes) 943 goto free_nvmeq; 944 memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth)); 945 946 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth), 947 &nvmeq->sq_dma_addr, GFP_KERNEL); 948 if (!nvmeq->sq_cmds) 949 goto free_cqdma; 950 951 nvmeq->q_dmadev = dmadev; 952 nvmeq->dev = dev; 953 spin_lock_init(&nvmeq->q_lock); 954 nvmeq->cq_head = 0; 955 nvmeq->cq_phase = 1; 956 init_waitqueue_head(&nvmeq->sq_full); 957 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); 958 bio_list_init(&nvmeq->sq_cong); 959 nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; 960 nvmeq->q_depth = depth; 961 nvmeq->cq_vector = vector; 962 
963 return nvmeq; 964 965 free_cqdma: 966 dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes, 967 nvmeq->cq_dma_addr); 968 free_nvmeq: 969 kfree(nvmeq); 970 return NULL; 971} 972 973static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, 974 const char *name) 975{ 976 if (use_threaded_interrupts) 977 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, 978 nvme_irq_check, nvme_irq, 979 IRQF_DISABLED | IRQF_SHARED, 980 name, nvmeq); 981 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, 982 IRQF_DISABLED | IRQF_SHARED, name, nvmeq); 983} 984 985static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, 986 int qid, int cq_size, int vector) 987{ 988 int result; 989 struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector); 990 991 if (!nvmeq) 992 return ERR_PTR(-ENOMEM); 993 994 result = adapter_alloc_cq(dev, qid, nvmeq); 995 if (result < 0) 996 goto free_nvmeq; 997 998 result = adapter_alloc_sq(dev, qid, nvmeq); 999 if (result < 0) 1000 goto release_cq; 1001 1002 result = queue_request_irq(dev, nvmeq, "nvme"); 1003 if (result < 0) 1004 goto release_sq; 1005 1006 return nvmeq; 1007 1008 release_sq: 1009 adapter_delete_sq(dev, qid); 1010 release_cq: 1011 adapter_delete_cq(dev, qid); 1012 free_nvmeq: 1013 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 1014 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 1015 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 1016 nvmeq->sq_cmds, nvmeq->sq_dma_addr); 1017 kfree(nvmeq); 1018 return ERR_PTR(result); 1019} 1020 1021static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev) 1022{ 1023 int result = 0; 1024 u32 aqa; 1025 u64 cap; 1026 unsigned long timeout; 1027 struct nvme_queue *nvmeq; 1028 1029 dev->dbs = ((void __iomem *)dev->bar) + 4096; 1030 1031 nvmeq = nvme_alloc_queue(dev, 0, 64, 0); 1032 if (!nvmeq) 1033 return -ENOMEM; 1034 1035 aqa = nvmeq->q_depth - 1; 1036 aqa |= aqa << 16; 1037 1038 dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM; 1039 dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 1040 dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; 1041 dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 1042 1043 writel(0, &dev->bar->cc); 1044 writel(aqa, &dev->bar->aqa); 1045 writeq(nvmeq->sq_dma_addr, &dev->bar->asq); 1046 writeq(nvmeq->cq_dma_addr, &dev->bar->acq); 1047 writel(dev->ctrl_config, &dev->bar->cc); 1048 1049 cap = readq(&dev->bar->cap); 1050 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; 1051 dev->db_stride = NVME_CAP_STRIDE(cap); 1052 1053 while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { 1054 msleep(100); 1055 if (fatal_signal_pending(current)) 1056 result = -EINTR; 1057 if (time_after(jiffies, timeout)) { 1058 dev_err(&dev->pci_dev->dev, 1059 "Device not ready; aborting initialisation\n"); 1060 result = -ENODEV; 1061 } 1062 } 1063 1064 if (result) { 1065 nvme_free_queue_mem(nvmeq); 1066 return result; 1067 } 1068 1069 result = queue_request_irq(dev, nvmeq, "nvme admin"); 1070 dev->queues[0] = nvmeq; 1071 return result; 1072} 1073 1074static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, 1075 unsigned long addr, unsigned length) 1076{ 1077 int i, err, count, nents, offset; 1078 struct scatterlist *sg; 1079 struct page **pages; 1080 struct nvme_iod *iod; 1081 1082 if (addr & 3) 1083 return ERR_PTR(-EINVAL); 1084 if (!length) 1085 return ERR_PTR(-EINVAL); 1086 1087 offset = offset_in_page(addr); 1088 count = DIV_ROUND_UP(offset + length, 
PAGE_SIZE); 1089 pages = kcalloc(count, sizeof(*pages), GFP_KERNEL); 1090 if (!pages) 1091 return ERR_PTR(-ENOMEM); 1092 1093 err = get_user_pages_fast(addr, count, 1, pages); 1094 if (err < count) { 1095 count = err; 1096 err = -EFAULT; 1097 goto put_pages; 1098 } 1099 1100 iod = nvme_alloc_iod(count, length, GFP_KERNEL); 1101 sg = iod->sg; 1102 sg_init_table(sg, count); 1103 for (i = 0; i < count; i++) { 1104 sg_set_page(&sg[i], pages[i], 1105 min_t(int, length, PAGE_SIZE - offset), offset); 1106 length -= (PAGE_SIZE - offset); 1107 offset = 0; 1108 } 1109 sg_mark_end(&sg[i - 1]); 1110 iod->nents = count; 1111 1112 err = -ENOMEM; 1113 nents = dma_map_sg(&dev->pci_dev->dev, sg, count, 1114 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1115 if (!nents) 1116 goto free_iod; 1117 1118 kfree(pages); 1119 return iod; 1120 1121 free_iod: 1122 kfree(iod); 1123 put_pages: 1124 for (i = 0; i < count; i++) 1125 put_page(pages[i]); 1126 kfree(pages); 1127 return ERR_PTR(err); 1128} 1129 1130static void nvme_unmap_user_pages(struct nvme_dev *dev, int write, 1131 struct nvme_iod *iod) 1132{ 1133 int i; 1134 1135 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, 1136 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1137 1138 for (i = 0; i < iod->nents; i++) 1139 put_page(sg_page(&iod->sg[i])); 1140} 1141 1142static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) 1143{ 1144 struct nvme_dev *dev = ns->dev; 1145 struct nvme_queue *nvmeq; 1146 struct nvme_user_io io; 1147 struct nvme_command c; 1148 unsigned length; 1149 int status; 1150 struct nvme_iod *iod; 1151 1152 if (copy_from_user(&io, uio, sizeof(io))) 1153 return -EFAULT; 1154 length = (io.nblocks + 1) << ns->lba_shift; 1155 1156 switch (io.opcode) { 1157 case nvme_cmd_write: 1158 case nvme_cmd_read: 1159 case nvme_cmd_compare: 1160 iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length); 1161 break; 1162 default: 1163 return -EINVAL; 1164 } 1165 1166 if (IS_ERR(iod)) 1167 return PTR_ERR(iod); 1168 1169 memset(&c, 0, sizeof(c)); 1170 c.rw.opcode = io.opcode; 1171 c.rw.flags = io.flags; 1172 c.rw.nsid = cpu_to_le32(ns->ns_id); 1173 c.rw.slba = cpu_to_le64(io.slba); 1174 c.rw.length = cpu_to_le16(io.nblocks); 1175 c.rw.control = cpu_to_le16(io.control); 1176 c.rw.dsmgmt = cpu_to_le16(io.dsmgmt); 1177 c.rw.reftag = io.reftag; 1178 c.rw.apptag = io.apptag; 1179 c.rw.appmask = io.appmask; 1180 /* XXX: metadata */ 1181 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL); 1182 1183 nvmeq = get_nvmeq(dev); 1184 /* 1185 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption 1186 * disabled. We may be preempted at any point, and be rescheduled 1187 * to a different CPU. That will cause cacheline bouncing, but no 1188 * additional races since q_lock already protects against other CPUs. 
1189 */ 1190 put_nvmeq(nvmeq); 1191 if (length != (io.nblocks + 1) << ns->lba_shift) 1192 status = -ENOMEM; 1193 else 1194 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); 1195 1196 nvme_unmap_user_pages(dev, io.opcode & 1, iod); 1197 nvme_free_iod(dev, iod); 1198 return status; 1199} 1200 1201static int nvme_user_admin_cmd(struct nvme_dev *dev, 1202 struct nvme_admin_cmd __user *ucmd) 1203{ 1204 struct nvme_admin_cmd cmd; 1205 struct nvme_command c; 1206 int status, length; 1207 struct nvme_iod *uninitialized_var(iod); 1208 1209 if (!capable(CAP_SYS_ADMIN)) 1210 return -EACCES; 1211 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) 1212 return -EFAULT; 1213 1214 memset(&c, 0, sizeof(c)); 1215 c.common.opcode = cmd.opcode; 1216 c.common.flags = cmd.flags; 1217 c.common.nsid = cpu_to_le32(cmd.nsid); 1218 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); 1219 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); 1220 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10); 1221 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11); 1222 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12); 1223 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13); 1224 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14); 1225 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15); 1226 1227 length = cmd.data_len; 1228 if (cmd.data_len) { 1229 iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr, 1230 length); 1231 if (IS_ERR(iod)) 1232 return PTR_ERR(iod); 1233 length = nvme_setup_prps(dev, &c.common, iod, length, 1234 GFP_KERNEL); 1235 } 1236 1237 if (length != cmd.data_len) 1238 status = -ENOMEM; 1239 else 1240 status = nvme_submit_admin_cmd(dev, &c, &cmd.result); 1241 1242 if (cmd.data_len) { 1243 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); 1244 nvme_free_iod(dev, iod); 1245 } 1246 1247 if (!status && copy_to_user(&ucmd->result, &cmd.result, 1248 sizeof(cmd.result))) 1249 status = -EFAULT; 1250 1251 return status; 1252} 1253 1254static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, 1255 unsigned long arg) 1256{ 1257 struct nvme_ns *ns = bdev->bd_disk->private_data; 1258 1259 switch (cmd) { 1260 case NVME_IOCTL_ID: 1261 return ns->ns_id; 1262 case NVME_IOCTL_ADMIN_CMD: 1263 return nvme_user_admin_cmd(ns->dev, (void __user *)arg); 1264 case NVME_IOCTL_SUBMIT_IO: 1265 return nvme_submit_io(ns, (void __user *)arg); 1266 default: 1267 return -ENOTTY; 1268 } 1269} 1270 1271static const struct block_device_operations nvme_fops = { 1272 .owner = THIS_MODULE, 1273 .ioctl = nvme_ioctl, 1274 .compat_ioctl = nvme_ioctl, 1275}; 1276 1277static void nvme_resubmit_bios(struct nvme_queue *nvmeq) 1278{ 1279 while (bio_list_peek(&nvmeq->sq_cong)) { 1280 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 1281 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; 1282 if (nvme_submit_bio_queue(nvmeq, ns, bio)) { 1283 bio_list_add_head(&nvmeq->sq_cong, bio); 1284 break; 1285 } 1286 if (bio_list_empty(&nvmeq->sq_cong)) 1287 remove_wait_queue(&nvmeq->sq_full, 1288 &nvmeq->sq_cong_wait); 1289 } 1290} 1291 1292static int nvme_kthread(void *data) 1293{ 1294 struct nvme_dev *dev; 1295 1296 while (!kthread_should_stop()) { 1297 __set_current_state(TASK_RUNNING); 1298 spin_lock(&dev_list_lock); 1299 list_for_each_entry(dev, &dev_list, node) { 1300 int i; 1301 for (i = 0; i < dev->queue_count; i++) { 1302 struct nvme_queue *nvmeq = dev->queues[i]; 1303 if (!nvmeq) 1304 continue; 1305 spin_lock_irq(&nvmeq->q_lock); 1306 if (nvme_process_cq(nvmeq)) 1307 printk("process_cq did something\n"); 1308 nvme_cancel_ios(nvmeq, true); 1309 nvme_resubmit_bios(nvmeq); 1310 
spin_unlock_irq(&nvmeq->q_lock); 1311 } 1312 } 1313 spin_unlock(&dev_list_lock); 1314 set_current_state(TASK_INTERRUPTIBLE); 1315 schedule_timeout(HZ); 1316 } 1317 return 0; 1318} 1319 1320static DEFINE_IDA(nvme_index_ida); 1321 1322static int nvme_get_ns_idx(void) 1323{ 1324 int index, error; 1325 1326 do { 1327 if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL)) 1328 return -1; 1329 1330 spin_lock(&dev_list_lock); 1331 error = ida_get_new(&nvme_index_ida, &index); 1332 spin_unlock(&dev_list_lock); 1333 } while (error == -EAGAIN); 1334 1335 if (error) 1336 index = -1; 1337 return index; 1338} 1339 1340static void nvme_put_ns_idx(int index) 1341{ 1342 spin_lock(&dev_list_lock); 1343 ida_remove(&nvme_index_ida, index); 1344 spin_unlock(&dev_list_lock); 1345} 1346 1347static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, 1348 struct nvme_id_ns *id, struct nvme_lba_range_type *rt) 1349{ 1350 struct nvme_ns *ns; 1351 struct gendisk *disk; 1352 int lbaf; 1353 1354 if (rt->attributes & NVME_LBART_ATTRIB_HIDE) 1355 return NULL; 1356 1357 ns = kzalloc(sizeof(*ns), GFP_KERNEL); 1358 if (!ns) 1359 return NULL; 1360 ns->queue = blk_alloc_queue(GFP_KERNEL); 1361 if (!ns->queue) 1362 goto out_free_ns; 1363 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT; 1364 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); 1365 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 1366/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */ 1367 blk_queue_make_request(ns->queue, nvme_make_request); 1368 ns->dev = dev; 1369 ns->queue->queuedata = ns; 1370 1371 disk = alloc_disk(NVME_MINORS); 1372 if (!disk) 1373 goto out_free_queue; 1374 ns->ns_id = nsid; 1375 ns->disk = disk; 1376 lbaf = id->flbas & 0xf; 1377 ns->lba_shift = id->lbaf[lbaf].ds; 1378 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 1379 if (dev->max_hw_sectors) 1380 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 1381 1382 disk->major = nvme_major; 1383 disk->minors = NVME_MINORS; 1384 disk->first_minor = NVME_MINORS * nvme_get_ns_idx(); 1385 disk->fops = &nvme_fops; 1386 disk->private_data = ns; 1387 disk->queue = ns->queue; 1388 disk->driverfs_dev = &dev->pci_dev->dev; 1389 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); 1390 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); 1391 1392 return ns; 1393 1394 out_free_queue: 1395 blk_cleanup_queue(ns->queue); 1396 out_free_ns: 1397 kfree(ns); 1398 return NULL; 1399} 1400 1401static void nvme_ns_free(struct nvme_ns *ns) 1402{ 1403 int index = ns->disk->first_minor / NVME_MINORS; 1404 put_disk(ns->disk); 1405 nvme_put_ns_idx(index); 1406 blk_cleanup_queue(ns->queue); 1407 kfree(ns); 1408} 1409 1410static int set_queue_count(struct nvme_dev *dev, int count) 1411{ 1412 int status; 1413 u32 result; 1414 u32 q_count = (count - 1) | ((count - 1) << 16); 1415 1416 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, 1417 &result); 1418 if (status) 1419 return -EIO; 1420 return min(result & 0xffff, result >> 16) + 1; 1421} 1422 1423static int __devinit nvme_setup_io_queues(struct nvme_dev *dev) 1424{ 1425 int result, cpu, i, nr_io_queues, db_bar_size, q_depth; 1426 1427 nr_io_queues = num_online_cpus(); 1428 result = set_queue_count(dev, nr_io_queues); 1429 if (result < 0) 1430 return result; 1431 if (result < nr_io_queues) 1432 nr_io_queues = result; 1433 1434 /* Deregister the admin queue's interrupt */ 1435 free_irq(dev->entry[0].vector, dev->queues[0]); 1436 1437 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 
3)); 1438 if (db_bar_size > 8192) { 1439 iounmap(dev->bar); 1440 dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), 1441 db_bar_size); 1442 dev->dbs = ((void __iomem *)dev->bar) + 4096; 1443 dev->queues[0]->q_db = dev->dbs; 1444 } 1445 1446 for (i = 0; i < nr_io_queues; i++) 1447 dev->entry[i].entry = i; 1448 for (;;) { 1449 result = pci_enable_msix(dev->pci_dev, dev->entry, 1450 nr_io_queues); 1451 if (result == 0) { 1452 break; 1453 } else if (result > 0) { 1454 nr_io_queues = result; 1455 continue; 1456 } else { 1457 nr_io_queues = 1; 1458 break; 1459 } 1460 } 1461 1462 result = queue_request_irq(dev, dev->queues[0], "nvme admin"); 1463 /* XXX: handle failure here */ 1464 1465 cpu = cpumask_first(cpu_online_mask); 1466 for (i = 0; i < nr_io_queues; i++) { 1467 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu)); 1468 cpu = cpumask_next(cpu, cpu_online_mask); 1469 } 1470 1471 q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1, 1472 NVME_Q_DEPTH); 1473 for (i = 0; i < nr_io_queues; i++) { 1474 dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i); 1475 if (IS_ERR(dev->queues[i + 1])) 1476 return PTR_ERR(dev->queues[i + 1]); 1477 dev->queue_count++; 1478 } 1479 1480 for (; i < num_possible_cpus(); i++) { 1481 int target = i % rounddown_pow_of_two(dev->queue_count - 1); 1482 dev->queues[i + 1] = dev->queues[target + 1]; 1483 } 1484 1485 return 0; 1486} 1487 1488static void nvme_free_queues(struct nvme_dev *dev) 1489{ 1490 int i; 1491 1492 for (i = dev->queue_count - 1; i >= 0; i--) 1493 nvme_free_queue(dev, i); 1494} 1495 1496static int __devinit nvme_dev_add(struct nvme_dev *dev) 1497{ 1498 int res, nn, i; 1499 struct nvme_ns *ns, *next; 1500 struct nvme_id_ctrl *ctrl; 1501 struct nvme_id_ns *id_ns; 1502 void *mem; 1503 dma_addr_t dma_addr; 1504 1505 res = nvme_setup_io_queues(dev); 1506 if (res) 1507 return res; 1508 1509 mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, 1510 GFP_KERNEL); 1511 1512 res = nvme_identify(dev, 0, 1, dma_addr); 1513 if (res) { 1514 res = -EIO; 1515 goto out_free; 1516 } 1517 1518 ctrl = mem; 1519 nn = le32_to_cpup(&ctrl->nn); 1520 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); 1521 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); 1522 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); 1523 if (ctrl->mdts) { 1524 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; 1525 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); 1526 } 1527 1528 id_ns = mem; 1529 for (i = 1; i <= nn; i++) { 1530 res = nvme_identify(dev, i, 0, dma_addr); 1531 if (res) 1532 continue; 1533 1534 if (id_ns->ncap == 0) 1535 continue; 1536 1537 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, 1538 dma_addr + 4096, NULL); 1539 if (res) 1540 continue; 1541 1542 ns = nvme_alloc_ns(dev, i, mem, mem + 4096); 1543 if (ns) 1544 list_add_tail(&ns->list, &dev->namespaces); 1545 } 1546 list_for_each_entry(ns, &dev->namespaces, list) 1547 add_disk(ns->disk); 1548 1549 goto out; 1550 1551 out_free: 1552 list_for_each_entry_safe(ns, next, &dev->namespaces, list) { 1553 list_del(&ns->list); 1554 nvme_ns_free(ns); 1555 } 1556 1557 out: 1558 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr); 1559 return res; 1560} 1561 1562static int nvme_dev_remove(struct nvme_dev *dev) 1563{ 1564 struct nvme_ns *ns, *next; 1565 1566 spin_lock(&dev_list_lock); 1567 list_del(&dev->node); 1568 spin_unlock(&dev_list_lock); 1569 1570 list_for_each_entry_safe(ns, next, &dev->namespaces, list) { 1571 list_del(&ns->list); 1572 del_gendisk(ns->disk); 1573 
nvme_ns_free(ns); 1574 } 1575 1576 nvme_free_queues(dev); 1577 1578 return 0; 1579} 1580 1581static int nvme_setup_prp_pools(struct nvme_dev *dev) 1582{ 1583 struct device *dmadev = &dev->pci_dev->dev; 1584 dev->prp_page_pool = dma_pool_create("prp list page", dmadev, 1585 PAGE_SIZE, PAGE_SIZE, 0); 1586 if (!dev->prp_page_pool) 1587 return -ENOMEM; 1588 1589 /* Optimisation for I/Os between 4k and 128k */ 1590 dev->prp_small_pool = dma_pool_create("prp list 256", dmadev, 1591 256, 256, 0); 1592 if (!dev->prp_small_pool) { 1593 dma_pool_destroy(dev->prp_page_pool); 1594 return -ENOMEM; 1595 } 1596 return 0; 1597} 1598 1599static void nvme_release_prp_pools(struct nvme_dev *dev) 1600{ 1601 dma_pool_destroy(dev->prp_page_pool); 1602 dma_pool_destroy(dev->prp_small_pool); 1603} 1604 1605static DEFINE_IDA(nvme_instance_ida); 1606 1607static int nvme_set_instance(struct nvme_dev *dev) 1608{ 1609 int instance, error; 1610 1611 do { 1612 if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) 1613 return -ENODEV; 1614 1615 spin_lock(&dev_list_lock); 1616 error = ida_get_new(&nvme_instance_ida, &instance); 1617 spin_unlock(&dev_list_lock); 1618 } while (error == -EAGAIN); 1619 1620 if (error) 1621 return -ENODEV; 1622 1623 dev->instance = instance; 1624 return 0; 1625} 1626 1627static void nvme_release_instance(struct nvme_dev *dev) 1628{ 1629 spin_lock(&dev_list_lock); 1630 ida_remove(&nvme_instance_ida, dev->instance); 1631 spin_unlock(&dev_list_lock); 1632} 1633 1634static int __devinit nvme_probe(struct pci_dev *pdev, 1635 const struct pci_device_id *id) 1636{ 1637 int bars, result = -ENOMEM; 1638 struct nvme_dev *dev; 1639 1640 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1641 if (!dev) 1642 return -ENOMEM; 1643 dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry), 1644 GFP_KERNEL); 1645 if (!dev->entry) 1646 goto free; 1647 dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *), 1648 GFP_KERNEL); 1649 if (!dev->queues) 1650 goto free; 1651 1652 if (pci_enable_device_mem(pdev)) 1653 goto free; 1654 pci_set_master(pdev); 1655 bars = pci_select_bars(pdev, IORESOURCE_MEM); 1656 if (pci_request_selected_regions(pdev, bars, "nvme")) 1657 goto disable; 1658 1659 INIT_LIST_HEAD(&dev->namespaces); 1660 dev->pci_dev = pdev; 1661 pci_set_drvdata(pdev, dev); 1662 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 1663 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1664 result = nvme_set_instance(dev); 1665 if (result) 1666 goto disable; 1667 1668 dev->entry[0].vector = pdev->irq; 1669 1670 result = nvme_setup_prp_pools(dev); 1671 if (result) 1672 goto disable_msix; 1673 1674 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); 1675 if (!dev->bar) { 1676 result = -ENOMEM; 1677 goto disable_msix; 1678 } 1679 1680 result = nvme_configure_admin_queue(dev); 1681 if (result) 1682 goto unmap; 1683 dev->queue_count++; 1684 1685 spin_lock(&dev_list_lock); 1686 list_add(&dev->node, &dev_list); 1687 spin_unlock(&dev_list_lock); 1688 1689 result = nvme_dev_add(dev); 1690 if (result) 1691 goto delete; 1692 1693 return 0; 1694 1695 delete: 1696 spin_lock(&dev_list_lock); 1697 list_del(&dev->node); 1698 spin_unlock(&dev_list_lock); 1699 1700 nvme_free_queues(dev); 1701 unmap: 1702 iounmap(dev->bar); 1703 disable_msix: 1704 pci_disable_msix(pdev); 1705 nvme_release_instance(dev); 1706 nvme_release_prp_pools(dev); 1707 disable: 1708 pci_disable_device(pdev); 1709 pci_release_regions(pdev); 1710 free: 1711 kfree(dev->queues); 1712 kfree(dev->entry); 1713 kfree(dev); 1714 return result; 1715} 1716 1717static void 
__devexit nvme_remove(struct pci_dev *pdev) 1718{ 1719 struct nvme_dev *dev = pci_get_drvdata(pdev); 1720 nvme_dev_remove(dev); 1721 pci_disable_msix(pdev); 1722 iounmap(dev->bar); 1723 nvme_release_instance(dev); 1724 nvme_release_prp_pools(dev); 1725 pci_disable_device(pdev); 1726 pci_release_regions(pdev); 1727 kfree(dev->queues); 1728 kfree(dev->entry); 1729 kfree(dev); 1730} 1731 1732/* These functions are yet to be implemented */ 1733#define nvme_error_detected NULL 1734#define nvme_dump_registers NULL 1735#define nvme_link_reset NULL 1736#define nvme_slot_reset NULL 1737#define nvme_error_resume NULL 1738#define nvme_suspend NULL 1739#define nvme_resume NULL 1740 1741static const struct pci_error_handlers nvme_err_handler = { 1742 .error_detected = nvme_error_detected, 1743 .mmio_enabled = nvme_dump_registers, 1744 .link_reset = nvme_link_reset, 1745 .slot_reset = nvme_slot_reset, 1746 .resume = nvme_error_resume, 1747}; 1748 1749/* Move to pci_ids.h later */ 1750#define PCI_CLASS_STORAGE_EXPRESS 0x010802 1751 1752static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = { 1753 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 1754 { 0, } 1755}; 1756MODULE_DEVICE_TABLE(pci, nvme_id_table); 1757 1758static struct pci_driver nvme_driver = { 1759 .name = "nvme", 1760 .id_table = nvme_id_table, 1761 .probe = nvme_probe, 1762 .remove = __devexit_p(nvme_remove), 1763 .suspend = nvme_suspend, 1764 .resume = nvme_resume, 1765 .err_handler = &nvme_err_handler, 1766}; 1767 1768static int __init nvme_init(void) 1769{ 1770 int result; 1771 1772 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); 1773 if (IS_ERR(nvme_thread)) 1774 return PTR_ERR(nvme_thread); 1775 1776 result = register_blkdev(nvme_major, "nvme"); 1777 if (result < 0) 1778 goto kill_kthread; 1779 else if (result > 0) 1780 nvme_major = result; 1781 1782 result = pci_register_driver(&nvme_driver); 1783 if (result) 1784 goto unregister_blkdev; 1785 return 0; 1786 1787 unregister_blkdev: 1788 unregister_blkdev(nvme_major, "nvme"); 1789 kill_kthread: 1790 kthread_stop(nvme_thread); 1791 return result; 1792} 1793 1794static void __exit nvme_exit(void) 1795{ 1796 pci_unregister_driver(&nvme_driver); 1797 unregister_blkdev(nvme_major, "nvme"); 1798 kthread_stop(nvme_thread); 1799} 1800 1801MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 1802MODULE_LICENSE("GPL"); 1803MODULE_VERSION("0.8"); 1804module_init(nvme_init); 1805module_exit(nvme_exit); 1806