nvme-core.c revision 4eeb9215a0d5c9494ca8b20158cc8ee82618840c
/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define NVME_IO_TIMEOUT	(5 * HZ)
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	int db_stride;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};
/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}
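/*
 * Layout note (illustrative, not normative): cmdid_data is a flexible
 * array carrying two regions back to back.  For a queue of depth 1024
 * on a 64-bit machine:
 *
 *	cmdid_data[0..15]	bitmap of in-flight command IDs (1024 bits)
 *	cmdid_data[16..]	1024 struct nvme_cmd_info entries
 *
 * nvme_cmd_info() above simply skips past the bitmap; the matching
 * "extra" bytes are sized in nvme_alloc_queue() below as
 * depth / 8 + depth * sizeof(struct nvme_cmd_info).
 */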
/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	void *private;		/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list. 0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist sg[0];
};

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}
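/*
 * Layout sketch (illustrative): a single kmalloc from nvme_alloc_iod()
 * below carries three regions back to back:
 *
 *	struct nvme_iod			header (sg[] is its last member)
 *	struct scatterlist sg[nseg]	the mapped segments
 *	__le64 *list[nvme_npages(nbytes)]	one pointer per PRP list page
 *
 * iod->offset records where the pointer array starts, past the end of
 * the sg array, which is what iod_list() returns.
 */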
/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
	}

	return iod;
}

static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
{
	struct nvme_queue *nvmeq = get_nvmeq(dev);
	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, bio);
	put_nvmeq(nvmeq);
	wake_up_process(nvme_thread);
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	nvme_free_iod(dev, iod);
	if (status) {
		bio_endio(bio, -EIO);
	} else if (bio->bi_vcnt > bio->bi_idx) {
		requeue_bio(dev, bio);
	} else {
		bio_endio(bio, 0);
	}
}
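/*
 * PRP walk-through (illustrative, assuming 4K pages): a 16K transfer
 * whose first segment starts 512 bytes into a page covers five pages.
 * PRP1 holds the (possibly unaligned) address of the first chunk; the
 * remaining four page addresses don't fit in PRP2 alone, so PRP2
 * instead points at a list page filled in by nvme_setup_prps() below.
 * When only a single trailing page remains after PRP1, PRP2 stays a
 * plain address and no list is allocated at all.
 */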
/* length is in bytes.  gfp flags indicate whether we may sleep. */
static int nvme_setup_prps(struct nvme_dev *dev,
			struct nvme_common_command *cmd, struct nvme_iod *iod,
			int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))

static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, old_idx, length = 0, nsegs = 0;

	sg_init_table(iod->sg, psegs);
	old_idx = bio->bi_idx;
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				break;
			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		length += bvec->bv_len;
		bvprv = bvec;
	}
	bio->bi_idx = i;
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
		bio->bi_idx = old_idx;
		return -ENOMEM;
	}
	return length;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
	if (result < 0)
		goto free_iod;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	bio->bi_sector += length >> 9;

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
 nomem:
	return result;
}
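/*
 * Unit conversion above (illustrative): the block layer counts in
 * 512-byte sectors while the command counts in device LBAs, so with 4K
 * blocks (lba_shift == 12) a bio at sector 80 becomes
 * slba = 80 >> (12 - 9) = 10, and a 16K transfer becomes
 * length = (16384 >> 12) - 1 = 3, the length field being zero's based.
 */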
/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);

	return 0;
}

static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}
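/*
 * Synchronous command pattern (summary of nvme_submit_sync_cmd below):
 * the caller stashes a sync_cmd_info holding its own task_struct as the
 * command's ctx, marks itself TASK_KILLABLE, rings the doorbell and
 * schedules away; sync_completion() then runs from interrupt context,
 * copies out the result and status, and wakes the sleeping task.  A
 * fatal signal instead cancels the cmdid so a late completion is
 * quietly dropped.
 */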
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}
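/*
 * Identify usage note (illustrative): nvme_dev_add() below calls this
 * with cns == 1 to fetch the 4K Identify Controller structure, then
 * with cns == 0 once per nsid to fetch each Identify Namespace
 * structure, reusing a single DMA buffer for both.
 */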
static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
			unsigned dword11, dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}
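/*
 * Ordering note: the completion queue is created before the submission
 * queue (and torn down in the reverse order) because the Create I/O SQ
 * command names the CQ it will post completions to; here both share the
 * same qid and MSI-X vector.
 */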
static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap;
	unsigned long timeout;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	cap = readq(&dev->bar->cap);
	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	dev->db_stride = NVME_CAP_STRIDE(cap);

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
				min_t(int, length, PAGE_SIZE - offset), offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}
	sg_mark_end(&sg[i - 1]);
	iod->nents = count;

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}
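/*
 * User I/O note (illustrative): a 10000-byte buffer starting 100 bytes
 * into a page spans DIV_ROUND_UP(100 + 10000, 4096) = 3 pages with 4K
 * pages, so three page pointers are pinned by get_user_pages_fast()
 * above and become a three-entry scatterlist; only the first entry
 * carries a nonzero offset.  The pages stay pinned until
 * nvme_unmap_user_pages() drops them.
 */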
static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod)
{
	int i;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	int status;
	struct nvme_iod *iod;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(iod))
		return PTR_ERR(iod);

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = io.reftag;
	c.rw.apptag = io.apptag;
	c.rw.appmask = io.appmask;
	/* XXX: metadata */
	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);

	nvmeq = get_nvmeq(dev);
	/*
	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled.  We may be preempted at any point, and be rescheduled
	 * to a different CPU.  That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
	nvme_free_iod(dev, iod);
	return status;
}

static int nvme_user_admin_cmd(struct nvme_ns *ns,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length;
	struct nvme_iod *iod;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
								length);
		if (IS_ERR(iod))
			return PTR_ERR(iod);
		length = nvme_setup_prps(dev, &c.common, iod, length,
								GFP_KERNEL);
	}

	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_admin_cmd(dev, &c, NULL);

	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
		nvme_free_iod(dev, iod);
	}
	return status;
}
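/*
 * Userspace usage sketch (illustrative only; values are made up):
 * reading eight blocks starting at LBA 0 through the block device node
 * might look like
 *
 *	struct nvme_user_io io = {
 *		.opcode  = nvme_cmd_read,
 *		.addr    = (unsigned long)buf,	// must be 4-byte aligned
 *		.slba    = 0,
 *		.nblocks = 7,			// field is zero's based
 *	};
 *	ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
 *
 * where fd is an open /dev/nvme0n1 and buf holds nblocks + 1 blocks.
 */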
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};

static void nvme_timeout_ios(struct nvme_queue *nvmeq)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
		};

		if (!time_after(now, info[cmdid].timeout))
			continue;
		dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
	}
}

static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_timeout_ios(nvmeq);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static DEFINE_IDA(nvme_index_ida);

static int nvme_get_ns_idx(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
			return -1;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_index_ida, &index);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		index = -1;
	return index;
}

static void nvme_put_ns_idx(int index)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_index_ida, index);
	spin_unlock(&dev_list_lock);
}

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
/*	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	int index = ns->disk->first_minor / NVME_MINORS;
	put_disk(ns->disk);
	nvme_put_ns_idx(index);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_get_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues, db_bar_size;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
	if (db_bar_size > 8192) {
		iounmap(dev->bar);
		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
								db_bar_size);
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
		dev->queues[0]->q_db = dev->dbs;
	}

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (IS_ERR(dev->queues[i + 1]))
			return PTR_ERR(dev->queues[i + 1]);
		dev->queue_count++;
	}

	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}
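/*
 * Feature encoding note (illustrative): the Number of Queues feature is
 * zero's based, with the submission queue count in the low 16 bits and
 * the completion queue count in the high 16 bits.  Asking
 * set_queue_count() above for 4 queues therefore sends
 * dword11 = 0x00030003, and the controller's answer is decoded the same
 * way, taking the smaller of the two grants.
 */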
static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	struct nvme_id_ns *id_ns;
	void *mem;
	dma_addr_t dma_addr;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);

	res = nvme_identify(dev, 0, 1, dma_addr);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = mem;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	id_ns = mem;
	for (i = 1; i <= nn; i++) {
		res = nvme_identify(dev, i, 0, dma_addr);
		if (res)
			continue;

		if (id_ns->ncap == 0)
			continue;

		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
							dma_addr + 4096, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	goto out;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

 out:
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	/* TODO: wait all I/O finished or cancel them */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);
	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	return 0;

 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result = -EBUSY;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		goto kill_kthread;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}
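/*
 * Teardown mirrors nvme_init() in reverse: the PCI driver (and with it
 * every device) goes first, then the block major, then the polling
 * kthread running nvme_kthread().  Note that passing nvme_major back to
 * register_blkdev() means the default of 0 requests a dynamically
 * allocated major number.
 */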
static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.7");
module_init(nvme_init);
module_exit(nvme_exit);