nvme-core.c revision 6198221fa0df0298513b35796f63f242ea97134e
/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 * @timeout: Length of time (in jiffies) before the command times out
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}
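/*
 * Illustrative note (not in the original source): the cmdid_data[]
 * flexible array that ends struct nvme_queue is laid out as a bitmap
 * of q_depth bits followed by an array of struct nvme_cmd_info, one
 * slot per possible command ID.  nvme_cmd_info() above simply skips
 * past the bitmap words.  For example, with q_depth == 1024 on a
 * 64-bit machine, BITS_TO_LONGS(1024) == 16, so the info array starts
 * 128 bytes into cmdid_data.  alloc_cmdid() claims a free bit and
 * stamps the matching info slot with the handler, context and deadline.
 */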
static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}
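/*
 * Illustrative note (not in the original source): submission works by
 * copying the 64-byte command into the host-memory ring at sq_tail and
 * then writing the new tail index to the queue's doorbell register.
 * E.g. with q_depth == 1024 and sq_tail == 1023, the command lands in
 * slot 1023 and the doorbell write publishes tail == 0 (wrap-around).
 * The controller consumes entries up to, but not including, the tail.
 */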
static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
		iod->nents = 0;
		iod->start_time = jiffies;
	}

	return iod;
}

void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void nvme_start_io_acct(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();
	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	if (iod->nents)
		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	nvme_end_io_acct(bio, iod->start_time);
	nvme_free_iod(dev, iod);
	if (status)
		bio_endio(bio, -EIO);
	else
		bio_endio(bio, 0);
}

/* length is in bytes.  gfp flags indicate whether we may sleep. */
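/*
 * Illustrative note (not in the original source): NVMe describes data
 * buffers with PRP (Physical Region Page) entries.  prp1 points at the
 * first page (possibly at an offset); prp2 is either the second page
 * (for transfers spanning at most two pages) or the DMA address of a
 * PRP list.  For example, assuming 4KB pages, a page-aligned 16KB read
 * needs prp1 plus a list of three further page addresses, so
 * nvme_setup_prps() below takes the list from the 256-byte pool
 * (nprps <= 32) and, when more than PAGE_SIZE / 8 entries are needed,
 * chains full pages of entries through each list's last slot.
 */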
int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
			struct nvme_iod *iod, int total_len, gfp_t gfp)
{
	struct dma_pool *pool;
	int length = total_len;
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma;
	int nprps, i;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return total_len;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return total_len;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
	if (!prp_list) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		iod->npages = -1;
		return (total_len - length) + PAGE_SIZE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
			if (!prp_list)
				return total_len - length;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return total_len;
}

struct nvme_bio_pair {
	struct bio b1, b2, *parent;
	struct bio_vec *bv1, *bv2;
	int err;
	atomic_t cnt;
};

static void nvme_bio_pair_endio(struct bio *bio, int err)
{
	struct nvme_bio_pair *bp = bio->bi_private;

	if (err)
		bp->err = err;

	if (atomic_dec_and_test(&bp->cnt)) {
		bio_endio(bp->parent, bp->err);
		if (bp->bv1)
			kfree(bp->bv1);
		if (bp->bv2)
			kfree(bp->bv2);
		kfree(bp);
	}
}
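/*
 * Illustrative note (not in the original source): nvme_bio_split()
 * clones the parent bio into the two halves b1/b2 embedded in the
 * pair, duplicating the bio_vec array only when the split point falls
 * inside a vector (offset != 0).  Both halves share
 * nvme_bio_pair_endio() above; the atomic cnt initialised to 2 makes
 * sure the parent is completed exactly once, after whichever half
 * finishes last, with the first error (if any) preserved.
 */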
static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
							int len, int offset)
{
	struct nvme_bio_pair *bp;

	BUG_ON(len > bio->bi_size);
	BUG_ON(idx > bio->bi_vcnt);

	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
	if (!bp)
		return NULL;
	bp->err = 0;

	bp->b1 = *bio;
	bp->b2 = *bio;

	bp->b1.bi_size = len;
	bp->b2.bi_size -= len;
	bp->b1.bi_vcnt = idx;
	bp->b2.bi_idx = idx;
	bp->b2.bi_sector += len >> 9;

	if (offset) {
		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
								GFP_ATOMIC);
		if (!bp->bv1)
			goto split_fail_1;

		bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
								GFP_ATOMIC);
		if (!bp->bv2)
			goto split_fail_2;

		memcpy(bp->bv1, bio->bi_io_vec,
			bio->bi_max_vecs * sizeof(struct bio_vec));
		memcpy(bp->bv2, bio->bi_io_vec,
			bio->bi_max_vecs * sizeof(struct bio_vec));

		bp->b1.bi_io_vec = bp->bv1;
		bp->b2.bi_io_vec = bp->bv2;
		bp->b2.bi_io_vec[idx].bv_offset += offset;
		bp->b2.bi_io_vec[idx].bv_len -= offset;
		bp->b1.bi_io_vec[idx].bv_len = offset;
		bp->b1.bi_vcnt++;
	} else
		bp->bv1 = bp->bv2 = NULL;

	bp->b1.bi_private = bp;
	bp->b2.bi_private = bp;

	bp->b1.bi_end_io = nvme_bio_pair_endio;
	bp->b2.bi_end_io = nvme_bio_pair_endio;

	bp->parent = bio;
	atomic_set(&bp->cnt, 2);

	return bp;

 split_fail_2:
	kfree(bp->bv1);
 split_fail_1:
	kfree(bp);
	return NULL;
}

static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
						int idx, int len, int offset)
{
	struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
	if (!bp)
		return -ENOMEM;

	if (bio_list_empty(&nvmeq->sq_cong))
		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
	bio_list_add(&nvmeq->sq_cong, &bp->b1);
	bio_list_add(&nvmeq->sq_cong, &bp->b2);

	return 0;
}

/* NVMe scatterlists require no holes in the virtual address */
#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)	((vec2)->bv_offset || \
			(((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
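/*
 * Illustrative note (not in the original source): the PRP format can
 * only express a buffer that is contiguous in page-sized units, so
 * nvme_map_bio() below splits a bio whenever two adjacent vectors
 * would leave a hole (e.g. a vector ending mid-page followed by one
 * that does not start at offset 0).  It also splits at the device's
 * stripe boundary when dev->stripe_size is set, so that no single
 * command crosses a stripe.
 */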
static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, length = 0, nsegs = 0, split_len = bio->bi_size;

	if (nvmeq->dev->stripe_size)
		split_len = nvmeq->dev->stripe_size -
			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));

	sg_init_table(iod->sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
				return nvme_split_and_submit(bio, nvmeq, i,
								length, 0);

			sg = sg ? sg + 1 : iod->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}

		if (split_len - length < bvec->bv_len)
			return nvme_split_and_submit(bio, nvmeq, i, split_len,
							split_len - length);
		length += bvec->bv_len;
		bvprv = bvec;
	}
	iod->nents = nsegs;
	sg_mark_end(sg);
	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
		return -ENOMEM;

	BUG_ON(length != bio->bi_size);
	return length;
}

/*
 * We reuse the small pool to allocate the 16-byte range here as it is not
 * worth having a special pool for these or additional cases to handle freeing
 * the iod.
 */
static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
		struct bio *bio, struct nvme_iod *iod, int cmdid)
{
	struct nvme_dsm_range *range;
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
							&iod->first_dma);
	if (!range)
		return -ENOMEM;

	iod_list(iod)[0] = (__le64 *)range;
	iod->npages = 0;

	range->cattr = cpu_to_le32(0);
	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.command_id = cmdid;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
	cmnd->dsm.nr = 0;
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								int cmdid)
{
	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.command_id = cmdid;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;
}

int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
{
	int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
					special_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		return cmdid;

	return nvme_submit_flush(nvmeq, ns, cmdid);
}
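/*
 * Illustrative note (not in the original source): a REQ_FLUSH bio that
 * also carries data becomes two NVMe commands.  nvme_submit_bio_queue()
 * below first issues a bare flush via nvme_submit_flush_data(), whose
 * CMD_CTX_FLUSH completion is silently discarded by
 * special_completion(), and then submits the data portion as an
 * ordinary read or write with its own command ID.
 */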
/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_iod *iod;
	enum dma_data_direction dma_dir;
	int cmdid, length, result;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
		result = nvme_submit_flush_data(nvmeq, ns);
		if (result)
			return result;
	}

	result = -ENOMEM;
	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
	if (!iod)
		goto nomem;
	iod->private = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_iod;

	if (bio->bi_rw & REQ_DISCARD) {
		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
		if (result)
			goto free_cmdid;
		return result;
	}
	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
		return nvme_submit_flush(nvmeq, ns, cmdid);

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
	if (result <= 0)
		goto free_cmdid;
	length = result;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
								GFP_ATOMIC);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	nvme_start_io_acct(bio);
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_cmdid:
	free_cmdid(nvmeq, cmdid, NULL);
 free_iod:
	nvme_free_iod(nvmeq->dev, iod);
 nomem:
	return result;
}

static void nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);
}
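/*
 * Illustrative note (not in the original source): each completion
 * queue entry carries a phase tag in bit 0 of its status field.  The
 * controller inverts the tag on every pass through the ring, so an
 * entry is new only while (status & 1) matches nvmeq->cq_phase.
 * nvme_process_cq() below consumes entries until the tag no longer
 * matches, flipping cq_phase each time cq_head wraps from
 * q_depth - 1 back to 0.
 */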
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		nvme_completion_fn fn;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid(nvmeq, cmdid, NULL);
	spin_unlock_irq(&nvmeq->q_lock);
}
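/*
 * Illustrative note (not in the original source): synchronous commands
 * are built on the async path.  The caller stashes its task_struct in
 * a sync_cmd_info on the stack, submits the command, and sleeps in
 * TASK_KILLABLE; sync_completion() records the result from interrupt
 * context and wakes the task.  A status still equal to -EINTR after
 * schedule_timeout() means the wait was interrupted or timed out
 * before the command completed, so the cmdid is cancelled rather than
 * freed.
 */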
struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
						u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule_timeout(timeout);

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
							dma_addr_t dma_addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
					dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}
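/*
 * Illustrative note (not in the original source): several NVMe fields
 * are "0's based", i.e. the value n encodes n + 1 items.  That is why
 * adapter_alloc_cq()/adapter_alloc_sq() above program qsize as
 * q_depth - 1, why the read/write length field is set to blocks - 1,
 * and why set_queue_count() later encodes the requested queue count
 * as (count - 1) in both halves of dword11.
 */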
/**
 * nvme_cancel_ios - Cancel outstanding I/Os
 * @nvmeq: The queue to cancel I/Os on
 * @timeout: True to only cancel I/Os which have timed out
 */
static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	unsigned long now = jiffies;
	int cmdid;

	for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
		void *ctx;
		nvme_completion_fn fn;
		static struct nvme_completion cqe = {
			.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
		};

		if (timeout && !time_after(now, info[cmdid].timeout))
			continue;
		if (info[cmdid].ctx == CMD_CTX_CANCELLED)
			continue;
		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
		fn(nvmeq->dev, ctx, &cqe);
	}
}

static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];
	int vector = dev->entry[nvmeq->cq_vector].vector;

	spin_lock_irq(&nvmeq->q_lock);
	nvme_cancel_ios(nvmeq, false);
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		bio_endio(bio, -EIO);
	}
	spin_unlock_irq(&nvmeq->q_lock);

	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	nvme_free_queue_mem(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
						sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}
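/*
 * Illustrative note (not in the original source): doorbells live at
 * BAR offset 0x1000 as (submission tail, completion head) pairs, one
 * pair per queue, spaced by the capability-reported stride.  With
 * db_stride == 0 that is 4 bytes per doorbell: queue 0's SQ tail is
 * at 0x1000 and its CQ head at 0x1004, queue 1's pair at
 * 0x1008/0x100c, and so on, matching the q_db computation in
 * nvme_alloc_queue() above and the "+ (1 << db_stride)" offset used
 * when nvme_process_cq() rings the completion doorbell.
 */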
static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid,
						int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return ERR_PTR(-ENOMEM);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return ERR_PTR(result);
}

static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled)
{
	unsigned long timeout;
	u32 bit = enabled ? NVME_CSTS_RDY : 0;

	timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(&dev->pci_dev->dev,
				"Device not ready; aborting initialisation\n");
			return -ENODEV;
		}
	}

	return 0;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
	u32 cc = readl(&dev->bar->cc);

	if (cc & NVME_CC_ENABLE)
		writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc);
	return nvme_wait_ready(dev, cap, false);
}

static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap)
{
	return nvme_wait_ready(dev, cap, true);
}
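/*
 * Illustrative note (not in the original source): bringing up the
 * admin queue is pure register work, since there is no queue to send
 * commands on yet.  AQA takes the 0's-based queue depth in both
 * halves (below, depth 64 is programmed as 63 | (63 << 16)), ASQ and
 * ACQ take the DMA addresses of the rings, and only then is CC
 * written with the enable bit, page size and arbitration settings.
 */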
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = readq(&dev->bar->cap);
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;
	dev->db_stride = NVME_CAP_STRIDE(cap);

	result = nvme_disable_ctrl(dev, cap);
	if (result < 0)
		return result;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	result = nvme_enable_ctrl(dev, cap);
	if (result)
		goto free_q;

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	if (result)
		goto free_q;

	dev->queues[0] = nvmeq;
	return result;

 free_q:
	nvme_free_queue_mem(nvmeq);
	return result;
}

struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;
	struct nvme_iod *iod;

	if (addr & 3)
		return ERR_PTR(-EINVAL);
	if (!length || length > INT_MAX - PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	iod = nvme_alloc_iod(count, length, GFP_KERNEL);
	sg = iod->sg;
	sg_init_table(sg, count);
	for (i = 0; i < count; i++) {
		sg_set_page(&sg[i], pages[i],
				min_t(unsigned, length, PAGE_SIZE - offset),
				offset);
		length -= (PAGE_SIZE - offset);
		offset = 0;
	}
	sg_mark_end(&sg[i - 1]);
	iod->nents = count;

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto free_iod;

	kfree(pages);
	return iod;

 free_iod:
	kfree(iod);
 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return ERR_PTR(err);
}

void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod)
{
	int i;

	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
}
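/*
 * Illustrative note (not in the original source): nvme_submit_io()
 * pins the user's data buffer in place with get_user_pages_fast() via
 * nvme_map_user_pages() above, but bounces metadata through a
 * dma_alloc_coherent() buffer instead: for host-to-device transfers
 * it is copied in (kmap_atomic page by page) before submission, and
 * for successful reads copied back out afterwards.  The low opcode
 * bit doubles as the direction flag: odd opcodes (write, compare)
 * transfer data from host to device.
 */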
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	int status, i;
	struct nvme_iod *iod, *meta_iod = NULL;
	dma_addr_t meta_dma_addr;
	void *meta, *uninitialized_var(meta_mem);

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;

	if (meta_len && ((io.metadata & 3) || !io.metadata))
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(iod))
		return PTR_ERR(iod);

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	if (meta_len) {
		meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata,
								meta_len);
		if (IS_ERR(meta_iod)) {
			status = PTR_ERR(meta_iod);
			meta_iod = NULL;
			goto unmap;
		}

		meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
						&meta_dma_addr, GFP_KERNEL);
		if (!meta_mem) {
			status = -ENOMEM;
			goto unmap;
		}

		if (io.opcode & 1) {
			int meta_offset = 0;

			for (i = 0; i < meta_iod->nents; i++) {
				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
						meta_iod->sg[i].offset;
				memcpy(meta_mem + meta_offset, meta,
						meta_iod->sg[i].length);
				kunmap_atomic(meta);
				meta_offset += meta_iod->sg[i].length;
			}
		}

		c.rw.metadata = cpu_to_le64(meta_dma_addr);
	}

	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);

	nvmeq = get_nvmeq(dev);
	/*
	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled.  We may be preempted at any point, and be rescheduled
	 * to a different CPU.  That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	if (length != (io.nblocks + 1) << ns->lba_shift)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);

	if (meta_len) {
		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
			int meta_offset = 0;

			for (i = 0; i < meta_iod->nents; i++) {
				meta = kmap_atomic(sg_page(&meta_iod->sg[i])) +
						meta_iod->sg[i].offset;
				memcpy(meta, meta_mem + meta_offset,
						meta_iod->sg[i].length);
				kunmap_atomic(meta);
				meta_offset += meta_iod->sg[i].length;
			}
		}

		dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem,
								meta_dma_addr);
	}

 unmap:
	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
	nvme_free_iod(dev, iod);

	if (meta_iod) {
		nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod);
		nvme_free_iod(dev, meta_iod);
	}

	return status;
}
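/*
 * Illustrative note (not in the original source): the admin
 * passthrough ioctl exposes the raw command layout.  Userspace fills
 * in opcode, nsid and command dwords 10-15, and nvme_user_admin_cmd()
 * below maps cdw10..cdw15 onto the six-element cdw10[] array of the
 * common command format.  A hypothetical identify-controller call,
 * sketched only to show the field mapping (values assumed, not taken
 * from this file):
 *
 *	struct nvme_admin_cmd cmd = {
 *		.opcode = 0x06,			// Identify
 *		.addr = (unsigned long)buf,	// 4096-byte buffer
 *		.data_len = 4096,
 *		.cdw10 = 1,			// CNS=1: controller data
 *	};
 *	ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */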
static int nvme_user_admin_cmd(struct nvme_dev *dev,
					struct nvme_admin_cmd __user *ucmd)
{
	struct nvme_admin_cmd cmd;
	struct nvme_command c;
	int status, length;
	struct nvme_iod *uninitialized_var(iod);
	unsigned timeout;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	length = cmd.data_len;
	if (cmd.data_len) {
		iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
								length);
		if (IS_ERR(iod))
			return PTR_ERR(iod);
		length = nvme_setup_prps(dev, &c.common, iod, length,
								GFP_KERNEL);
	}

	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
								ADMIN_TIMEOUT;
	if (length != cmd.data_len)
		status = -ENOMEM;
	else
		status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
								timeout);

	if (cmd.data_len) {
		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
		nvme_free_iod(dev, iod);
	}

	if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
							sizeof(cmd.result)))
		status = -EFAULT;

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
};

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;

		if (bio_list_empty(&nvmeq->sq_cong))
			remove_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			if (bio_list_empty(&nvmeq->sq_cong))
				add_wait_queue(&nvmeq->sq_full,
							&nvmeq->sq_cong_wait);
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
	}
}
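/*
 * Illustrative note (not in the original source): the single "nvme"
 * kthread below wakes roughly once a second and, for every queue of
 * every registered device, (1) reaps any completions the interrupt
 * handler may have missed, (2) cancels commands whose per-command
 * timeout has expired, and (3) retries bios parked on sq_cong while
 * the submission queue was full.
 */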
static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				if (!nvmeq)
					continue;
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_cancel_ios(nvmeq, true);
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		schedule_timeout(round_jiffies_relative(HZ));
	}
	return 0;
}

static DEFINE_IDA(nvme_index_ida);

static int nvme_get_ns_idx(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
			return -1;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_index_ida, &index);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		index = -1;
	return index;
}

static void nvme_put_ns_idx(int index)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_index_ida, index);
	spin_unlock(&dev_list_lock);
}

static void nvme_config_discard(struct nvme_ns *ns)
{
	u32 logical_block_size = queue_logical_block_size(ns->queue);
	ns->queue->limits.discard_zeroes_data = 0;
	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	ns->queue->limits.max_discard_sectors = 0xffffffff;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = nsid;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	if (dev->max_hw_sectors)
		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (dev->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	int index = ns->disk->first_minor / NVME_MINORS;
	put_disk(ns->disk);
	nvme_put_ns_idx(index);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}
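/*
 * Illustrative note (not in the original source): nvme_setup_io_queues()
 * below negotiates interrupt vectors in stages.  It first asks
 * pci_enable_msix() for one vector per I/O queue, retrying with the
 * smaller count the PCI core reports as available; if MSI-X fails
 * outright it falls back to a block of up to 32 MSI vectors, and in
 * the worst case to a single vector.  The final vector count caps the
 * number of I/O queues actually created, and the remaining possible
 * CPUs are then spread across the existing queues.
 */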
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct pci_dev *pdev = dev->pci_dev;
	int result, cpu, i, vecs, nr_io_queues, db_bar_size, q_depth;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
	if (db_bar_size > 8192) {
		iounmap(dev->bar);
		dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
		dev->dbs = ((void __iomem *)dev->bar) + 4096;
		dev->queues[0]->q_db = dev->dbs;
	}

	vecs = nr_io_queues;
	for (i = 0; i < vecs; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(pdev, dev->entry, vecs);
		if (result <= 0)
			break;
		vecs = result;
	}

	if (result < 0) {
		vecs = nr_io_queues;
		if (vecs > 32)
			vecs = 32;
		for (;;) {
			result = pci_enable_msi_block(pdev, vecs);
			if (result == 0) {
				for (i = 0; i < vecs; i++)
					dev->entry[i].vector = i + pdev->irq;
				break;
			} else if (result < 0) {
				vecs = 1;
				break;
			}
			vecs = result;
		}
	}

	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */
	nr_io_queues = vecs;

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
								NVME_Q_DEPTH);
	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
		if (IS_ERR(dev->queues[i + 1]))
			return PTR_ERR(dev->queues[i + 1]);
		dev->queue_count++;
	}

	for (; i < num_possible_cpus(); i++) {
		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
		dev->queues[i + 1] = dev->queues[target + 1];
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}
/*
 * Return: error value if an error occurred setting up the queues or calling
 * Identify Device.  0 if these succeeded, even if adding some of the
 * namespaces failed.  At the moment, these failures are silent.  TBD which
 * failures should be reported.
 */
static int nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns;
	struct nvme_id_ctrl *ctrl;
	struct nvme_id_ns *id_ns;
	void *mem;
	dma_addr_t dma_addr;
	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	res = nvme_identify(dev, 0, 1, dma_addr);
	if (res) {
		res = -EIO;
		goto out;
	}

	ctrl = mem;
	nn = le32_to_cpup(&ctrl->nn);
	dev->oncs = le16_to_cpup(&ctrl->oncs);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
	if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
			(dev->pci_dev->device == 0x0953) && ctrl->vs[3])
		dev->stripe_size = 1 << (ctrl->vs[3] + shift);

	id_ns = mem;
	for (i = 1; i <= nn; i++) {
		res = nvme_identify(dev, i, 0, dma_addr);
		if (res)
			continue;

		if (id_ns->ncap == 0)
			continue;

		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
							dma_addr + 4096, NULL);
		if (res)
			memset(mem + 4096, 0, 4096);

		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);
	res = 0;

 out:
	dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}
static DEFINE_IDA(nvme_instance_ida);

static int nvme_set_instance(struct nvme_dev *dev)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	dev->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, dev->instance);
	spin_unlock(&dev_list_lock);
}

static void nvme_free_dev(struct kref *kref)
{
	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
	nvme_dev_remove(dev);
	if (dev->pci_dev->msi_enabled)
		pci_disable_msi(dev->pci_dev);
	else if (dev->pci_dev->msix_enabled)
		pci_disable_msix(dev->pci_dev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(dev->pci_dev);
	pci_release_regions(dev->pci_dev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

static int nvme_dev_open(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
								miscdev);
	kref_get(&dev->kref);
	f->private_data = dev;
	return 0;
}

static int nvme_dev_release(struct inode *inode, struct file *f)
{
	struct nvme_dev *dev = f->private_data;
	kref_put(&dev->kref, nvme_free_dev);
	return 0;
}

static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct nvme_dev *dev = f->private_data;
	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_admin_cmd(dev, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	else
		goto disable;

	result = nvme_set_instance(dev);
	if (result)
		goto disable;

	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
	dev->miscdev.minor = MISC_DYNAMIC_MINOR;
	dev->miscdev.parent = &pdev->dev;
	dev->miscdev.name = dev->name;
	dev->miscdev.fops = &nvme_dev_fops;
	result = misc_register(&dev->miscdev);
	if (result)
		goto remove;

	kref_init(&dev->kref);
	return 0;

 remove:
	nvme_dev_remove(dev);
 delete:
	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable_msix:
	if (dev->pci_dev->msi_enabled)
		pci_disable_msi(dev->pci_dev);
	else if (dev->pci_dev->msix_enabled)
		pci_disable_msix(dev->pci_dev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}
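/*
 * Illustrative note (not in the original source): nvme_probe()'s error
 * labels above unwind in the reverse order of setup, and the same
 * steps are shared with normal teardown: nvme_remove() below just
 * drops the kref taken at probe time, and nvme_free_dev() performs
 * the full remove/disable/unmap/release sequence once the last
 * reference (including any open misc-device handles) is gone.
 */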
static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	misc_deregister(&dev->miscdev);
	kref_put(&dev->kref, nvme_free_dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	result = register_blkdev(nvme_major, "nvme");
	if (result < 0)
		goto kill_kthread;
	else if (result > 0)
		nvme_major = result;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.8");
module_init(nvme_init);
module_exit(nvme_exit);