nvme-core.c revision 0877cb0d285c7f1d53d0b84b360bdea4be4f3f59
/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/sg.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
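 * Queue 0 is the admin queue; get_nvmeq() below hands out the per-cpu I/O
 * queues.  The flexible cmdid_data[] array holds a bitmap of in-use command
 * IDs and is followed in memory by one struct nvme_cmd_info per slot (see
 * nvme_cmd_info()).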
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
}

typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
						struct nvme_completion *);

struct nvme_cmd_info {
	nvme_completion_fn fn;
	void *ctx;
	unsigned long timeout;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid() - Allocate a Command ID
 * @nvmeq: The queue that will be used for this command
 * @ctx: A pointer that will be passed to the handler
 * @handler: The function to call on completion
 * @timeout: Timeout for this command, in jiffies
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 *
 * May be called with local interrupts disabled and the q_lock held,
 * or with interrupts enabled and no locks held.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int depth = nvmeq->q_depth - 1;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].fn = handler;
	info[cmdid].ctx = ctx;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
				nvme_completion_fn handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* Special values must be less than 0x1000 */
#define CMD_CTX_BASE		((void *)POISON_POINTER_DELTA)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)

static void special_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	if (ctx == CMD_CTX_CANCELLED)
		return;
	if (ctx == CMD_CTX_FLUSH)
		return;
	if (ctx == CMD_CTX_COMPLETED) {
		dev_warn(&dev->pci_dev->dev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (ctx == CMD_CTX_INVALID) {
		dev_warn(&dev->pci_dev->dev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}

	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}

/*
 * Called with local interrupts disabled and the q_lock held.  May not sleep.
 */
static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth) {
		*fn = special_completion;
		return CMD_CTX_INVALID;
	}
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return ctx;
}

static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
						nvme_completion_fn *fn)
{
	void *ctx;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	if (fn)
		*fn = info[cmdid].fn;
	ctx = info[cmdid].ctx;
	info[cmdid].fn = special_completion;
	info[cmdid].ctx = CMD_CTX_CANCELLED;
	return ctx;
}

struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
{
	return dev->queues[get_cpu() + 1];
}

void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

static __le64 **iod_list(struct nvme_iod *iod)
{
	return ((void *)iod) + iod->offset;
}

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
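 * (For example, with 4K pages a 64K request gives nprps = 17 and
 * DIV_ROUND_UP(8 * 17, 4088) = 1, so a single PRP list page is reserved.)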
 */
static int nvme_npages(unsigned size)
{
	unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static struct nvme_iod *
nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
{
	struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
				sizeof(__le64 *) * nvme_npages(nbytes) +
				sizeof(struct scatterlist) * nseg, gfp);

	if (iod) {
		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
		iod->npages = -1;
		iod->length = nbytes;
		iod->nents = 0;
		iod->start_time = jiffies;
	}

	return iod;
}

void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	__le64 **list = iod_list(iod);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(iod);
}

static void nvme_start_io_acct(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();
	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}

static void bio_completion(struct nvme_dev *dev, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_iod *iod = ctx;
	struct bio *bio = iod->private;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	if (iod->nents) {
		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		nvme_end_io_acct(bio, iod->start_time);
	}
	nvme_free_iod(dev, iod);
	if (status)
		bio_endio(bio, -EIO);
	else
		bio_endio(bio, 0);
}

/* length is in bytes.  gfp flags indicate whether we may sleep.
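 *
 * prp1 always holds the address of the first data page (which may start at
 * an offset).  If at most one more page is needed, prp2 points at it
 * directly; otherwise prp2 points at a PRP list allocated from
 * prp_small_pool (for up to 32 entries) or prp_page_pool, and a full list
 * chains to the next one through its last entry.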
*/ 358int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, 359 struct nvme_iod *iod, int total_len, gfp_t gfp) 360{ 361 struct dma_pool *pool; 362 int length = total_len; 363 struct scatterlist *sg = iod->sg; 364 int dma_len = sg_dma_len(sg); 365 u64 dma_addr = sg_dma_address(sg); 366 int offset = offset_in_page(dma_addr); 367 __le64 *prp_list; 368 __le64 **list = iod_list(iod); 369 dma_addr_t prp_dma; 370 int nprps, i; 371 372 cmd->prp1 = cpu_to_le64(dma_addr); 373 length -= (PAGE_SIZE - offset); 374 if (length <= 0) 375 return total_len; 376 377 dma_len -= (PAGE_SIZE - offset); 378 if (dma_len) { 379 dma_addr += (PAGE_SIZE - offset); 380 } else { 381 sg = sg_next(sg); 382 dma_addr = sg_dma_address(sg); 383 dma_len = sg_dma_len(sg); 384 } 385 386 if (length <= PAGE_SIZE) { 387 cmd->prp2 = cpu_to_le64(dma_addr); 388 return total_len; 389 } 390 391 nprps = DIV_ROUND_UP(length, PAGE_SIZE); 392 if (nprps <= (256 / 8)) { 393 pool = dev->prp_small_pool; 394 iod->npages = 0; 395 } else { 396 pool = dev->prp_page_pool; 397 iod->npages = 1; 398 } 399 400 prp_list = dma_pool_alloc(pool, gfp, &prp_dma); 401 if (!prp_list) { 402 cmd->prp2 = cpu_to_le64(dma_addr); 403 iod->npages = -1; 404 return (total_len - length) + PAGE_SIZE; 405 } 406 list[0] = prp_list; 407 iod->first_dma = prp_dma; 408 cmd->prp2 = cpu_to_le64(prp_dma); 409 i = 0; 410 for (;;) { 411 if (i == PAGE_SIZE / 8) { 412 __le64 *old_prp_list = prp_list; 413 prp_list = dma_pool_alloc(pool, gfp, &prp_dma); 414 if (!prp_list) 415 return total_len - length; 416 list[iod->npages++] = prp_list; 417 prp_list[0] = old_prp_list[i - 1]; 418 old_prp_list[i - 1] = cpu_to_le64(prp_dma); 419 i = 1; 420 } 421 prp_list[i++] = cpu_to_le64(dma_addr); 422 dma_len -= PAGE_SIZE; 423 dma_addr += PAGE_SIZE; 424 length -= PAGE_SIZE; 425 if (length <= 0) 426 break; 427 if (dma_len > 0) 428 continue; 429 BUG_ON(dma_len < 0); 430 sg = sg_next(sg); 431 dma_addr = sg_dma_address(sg); 432 dma_len = sg_dma_len(sg); 433 } 434 435 return total_len; 436} 437 438struct nvme_bio_pair { 439 struct bio b1, b2, *parent; 440 struct bio_vec *bv1, *bv2; 441 int err; 442 atomic_t cnt; 443}; 444 445static void nvme_bio_pair_endio(struct bio *bio, int err) 446{ 447 struct nvme_bio_pair *bp = bio->bi_private; 448 449 if (err) 450 bp->err = err; 451 452 if (atomic_dec_and_test(&bp->cnt)) { 453 bio_endio(bp->parent, bp->err); 454 kfree(bp->bv1); 455 kfree(bp->bv2); 456 kfree(bp); 457 } 458} 459 460static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx, 461 int len, int offset) 462{ 463 struct nvme_bio_pair *bp; 464 465 BUG_ON(len > bio->bi_size); 466 BUG_ON(idx > bio->bi_vcnt); 467 468 bp = kmalloc(sizeof(*bp), GFP_ATOMIC); 469 if (!bp) 470 return NULL; 471 bp->err = 0; 472 473 bp->b1 = *bio; 474 bp->b2 = *bio; 475 476 bp->b1.bi_size = len; 477 bp->b2.bi_size -= len; 478 bp->b1.bi_vcnt = idx; 479 bp->b2.bi_idx = idx; 480 bp->b2.bi_sector += len >> 9; 481 482 if (offset) { 483 bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), 484 GFP_ATOMIC); 485 if (!bp->bv1) 486 goto split_fail_1; 487 488 bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), 489 GFP_ATOMIC); 490 if (!bp->bv2) 491 goto split_fail_2; 492 493 memcpy(bp->bv1, bio->bi_io_vec, 494 bio->bi_max_vecs * sizeof(struct bio_vec)); 495 memcpy(bp->bv2, bio->bi_io_vec, 496 bio->bi_max_vecs * sizeof(struct bio_vec)); 497 498 bp->b1.bi_io_vec = bp->bv1; 499 bp->b2.bi_io_vec = bp->bv2; 500 bp->b2.bi_io_vec[idx].bv_offset += offset; 501 bp->b2.bi_io_vec[idx].bv_len -= offset; 502 
bp->b1.bi_io_vec[idx].bv_len = offset; 503 bp->b1.bi_vcnt++; 504 } else 505 bp->bv1 = bp->bv2 = NULL; 506 507 bp->b1.bi_private = bp; 508 bp->b2.bi_private = bp; 509 510 bp->b1.bi_end_io = nvme_bio_pair_endio; 511 bp->b2.bi_end_io = nvme_bio_pair_endio; 512 513 bp->parent = bio; 514 atomic_set(&bp->cnt, 2); 515 516 return bp; 517 518 split_fail_2: 519 kfree(bp->bv1); 520 split_fail_1: 521 kfree(bp); 522 return NULL; 523} 524 525static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, 526 int idx, int len, int offset) 527{ 528 struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset); 529 if (!bp) 530 return -ENOMEM; 531 532 if (bio_list_empty(&nvmeq->sq_cong)) 533 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 534 bio_list_add(&nvmeq->sq_cong, &bp->b1); 535 bio_list_add(&nvmeq->sq_cong, &bp->b2); 536 537 return 0; 538} 539 540/* NVMe scatterlists require no holes in the virtual address */ 541#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \ 542 (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE)) 543 544static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, 545 struct bio *bio, enum dma_data_direction dma_dir, int psegs) 546{ 547 struct bio_vec *bvec, *bvprv = NULL; 548 struct scatterlist *sg = NULL; 549 int i, length = 0, nsegs = 0, split_len = bio->bi_size; 550 551 if (nvmeq->dev->stripe_size) 552 split_len = nvmeq->dev->stripe_size - 553 ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1)); 554 555 sg_init_table(iod->sg, psegs); 556 bio_for_each_segment(bvec, bio, i) { 557 if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { 558 sg->length += bvec->bv_len; 559 } else { 560 if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) 561 return nvme_split_and_submit(bio, nvmeq, i, 562 length, 0); 563 564 sg = sg ? sg + 1 : iod->sg; 565 sg_set_page(sg, bvec->bv_page, bvec->bv_len, 566 bvec->bv_offset); 567 nsegs++; 568 } 569 570 if (split_len - length < bvec->bv_len) 571 return nvme_split_and_submit(bio, nvmeq, i, split_len, 572 split_len - length); 573 length += bvec->bv_len; 574 bvprv = bvec; 575 } 576 iod->nents = nsegs; 577 sg_mark_end(sg); 578 if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0) 579 return -ENOMEM; 580 581 BUG_ON(length != bio->bi_size); 582 return length; 583} 584 585/* 586 * We reuse the small pool to allocate the 16-byte range here as it is not 587 * worth having a special pool for these or additional cases to handle freeing 588 * the iod. 
589 */ 590static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, 591 struct bio *bio, struct nvme_iod *iod, int cmdid) 592{ 593 struct nvme_dsm_range *range; 594 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 595 596 range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC, 597 &iod->first_dma); 598 if (!range) 599 return -ENOMEM; 600 601 iod_list(iod)[0] = (__le64 *)range; 602 iod->npages = 0; 603 604 range->cattr = cpu_to_le32(0); 605 range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift); 606 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); 607 608 memset(cmnd, 0, sizeof(*cmnd)); 609 cmnd->dsm.opcode = nvme_cmd_dsm; 610 cmnd->dsm.command_id = cmdid; 611 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); 612 cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma); 613 cmnd->dsm.nr = 0; 614 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 615 616 if (++nvmeq->sq_tail == nvmeq->q_depth) 617 nvmeq->sq_tail = 0; 618 writel(nvmeq->sq_tail, nvmeq->q_db); 619 620 return 0; 621} 622 623static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, 624 int cmdid) 625{ 626 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 627 628 memset(cmnd, 0, sizeof(*cmnd)); 629 cmnd->common.opcode = nvme_cmd_flush; 630 cmnd->common.command_id = cmdid; 631 cmnd->common.nsid = cpu_to_le32(ns->ns_id); 632 633 if (++nvmeq->sq_tail == nvmeq->q_depth) 634 nvmeq->sq_tail = 0; 635 writel(nvmeq->sq_tail, nvmeq->q_db); 636 637 return 0; 638} 639 640int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns) 641{ 642 int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH, 643 special_completion, NVME_IO_TIMEOUT); 644 if (unlikely(cmdid < 0)) 645 return cmdid; 646 647 return nvme_submit_flush(nvmeq, ns, cmdid); 648} 649 650/* 651 * Called with local interrupts disabled and the q_lock held. May not sleep. 
652 */ 653static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, 654 struct bio *bio) 655{ 656 struct nvme_command *cmnd; 657 struct nvme_iod *iod; 658 enum dma_data_direction dma_dir; 659 int cmdid, length, result; 660 u16 control; 661 u32 dsmgmt; 662 int psegs = bio_phys_segments(ns->queue, bio); 663 664 if ((bio->bi_rw & REQ_FLUSH) && psegs) { 665 result = nvme_submit_flush_data(nvmeq, ns); 666 if (result) 667 return result; 668 } 669 670 result = -ENOMEM; 671 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); 672 if (!iod) 673 goto nomem; 674 iod->private = bio; 675 676 result = -EBUSY; 677 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT); 678 if (unlikely(cmdid < 0)) 679 goto free_iod; 680 681 if (bio->bi_rw & REQ_DISCARD) { 682 result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid); 683 if (result) 684 goto free_cmdid; 685 return result; 686 } 687 if ((bio->bi_rw & REQ_FLUSH) && !psegs) 688 return nvme_submit_flush(nvmeq, ns, cmdid); 689 690 control = 0; 691 if (bio->bi_rw & REQ_FUA) 692 control |= NVME_RW_FUA; 693 if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD)) 694 control |= NVME_RW_LR; 695 696 dsmgmt = 0; 697 if (bio->bi_rw & REQ_RAHEAD) 698 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; 699 700 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; 701 702 memset(cmnd, 0, sizeof(*cmnd)); 703 if (bio_data_dir(bio)) { 704 cmnd->rw.opcode = nvme_cmd_write; 705 dma_dir = DMA_TO_DEVICE; 706 } else { 707 cmnd->rw.opcode = nvme_cmd_read; 708 dma_dir = DMA_FROM_DEVICE; 709 } 710 711 result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs); 712 if (result <= 0) 713 goto free_cmdid; 714 length = result; 715 716 cmnd->rw.command_id = cmdid; 717 cmnd->rw.nsid = cpu_to_le32(ns->ns_id); 718 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, 719 GFP_ATOMIC); 720 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); 721 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); 722 cmnd->rw.control = cpu_to_le16(control); 723 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 724 725 nvme_start_io_acct(bio); 726 if (++nvmeq->sq_tail == nvmeq->q_depth) 727 nvmeq->sq_tail = 0; 728 writel(nvmeq->sq_tail, nvmeq->q_db); 729 730 return 0; 731 732 free_cmdid: 733 free_cmdid(nvmeq, cmdid, NULL); 734 free_iod: 735 nvme_free_iod(nvmeq->dev, iod); 736 nomem: 737 return result; 738} 739 740static int nvme_process_cq(struct nvme_queue *nvmeq) 741{ 742 u16 head, phase; 743 744 head = nvmeq->cq_head; 745 phase = nvmeq->cq_phase; 746 747 for (;;) { 748 void *ctx; 749 nvme_completion_fn fn; 750 struct nvme_completion cqe = nvmeq->cqes[head]; 751 if ((le16_to_cpu(cqe.status) & 1) != phase) 752 break; 753 nvmeq->sq_head = le16_to_cpu(cqe.sq_head); 754 if (++head == nvmeq->q_depth) { 755 head = 0; 756 phase = !phase; 757 } 758 759 ctx = free_cmdid(nvmeq, cqe.command_id, &fn); 760 fn(nvmeq->dev, ctx, &cqe); 761 } 762 763 /* If the controller ignores the cq head doorbell and continuously 764 * writes to the queue, it is theoretically possible to wrap around 765 * the queue twice and mistakenly return IRQ_NONE. Linux only 766 * requires that 0.1% of your interrupts are handled, so this isn't 767 * a big problem. 
768 */ 769 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) 770 return 0; 771 772 writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); 773 nvmeq->cq_head = head; 774 nvmeq->cq_phase = phase; 775 776 nvmeq->cqe_seen = 1; 777 return 1; 778} 779 780static void nvme_make_request(struct request_queue *q, struct bio *bio) 781{ 782 struct nvme_ns *ns = q->queuedata; 783 struct nvme_queue *nvmeq = get_nvmeq(ns->dev); 784 int result = -EBUSY; 785 786 spin_lock_irq(&nvmeq->q_lock); 787 if (bio_list_empty(&nvmeq->sq_cong)) 788 result = nvme_submit_bio_queue(nvmeq, ns, bio); 789 if (unlikely(result)) { 790 if (bio_list_empty(&nvmeq->sq_cong)) 791 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 792 bio_list_add(&nvmeq->sq_cong, bio); 793 } 794 795 nvme_process_cq(nvmeq); 796 spin_unlock_irq(&nvmeq->q_lock); 797 put_nvmeq(nvmeq); 798} 799 800static irqreturn_t nvme_irq(int irq, void *data) 801{ 802 irqreturn_t result; 803 struct nvme_queue *nvmeq = data; 804 spin_lock(&nvmeq->q_lock); 805 nvme_process_cq(nvmeq); 806 result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE; 807 nvmeq->cqe_seen = 0; 808 spin_unlock(&nvmeq->q_lock); 809 return result; 810} 811 812static irqreturn_t nvme_irq_check(int irq, void *data) 813{ 814 struct nvme_queue *nvmeq = data; 815 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; 816 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) 817 return IRQ_NONE; 818 return IRQ_WAKE_THREAD; 819} 820 821static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid) 822{ 823 spin_lock_irq(&nvmeq->q_lock); 824 cancel_cmdid(nvmeq, cmdid, NULL); 825 spin_unlock_irq(&nvmeq->q_lock); 826} 827 828struct sync_cmd_info { 829 struct task_struct *task; 830 u32 result; 831 int status; 832}; 833 834static void sync_completion(struct nvme_dev *dev, void *ctx, 835 struct nvme_completion *cqe) 836{ 837 struct sync_cmd_info *cmdinfo = ctx; 838 cmdinfo->result = le32_to_cpup(&cqe->result); 839 cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; 840 wake_up_process(cmdinfo->task); 841} 842 843/* 844 * Returns 0 on success. 
If the result is negative, it's a Linux error code; 845 * if the result is positive, it's an NVM Express status code 846 */ 847int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, 848 u32 *result, unsigned timeout) 849{ 850 int cmdid; 851 struct sync_cmd_info cmdinfo; 852 853 cmdinfo.task = current; 854 cmdinfo.status = -EINTR; 855 856 cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion, 857 timeout); 858 if (cmdid < 0) 859 return cmdid; 860 cmd->common.command_id = cmdid; 861 862 set_current_state(TASK_KILLABLE); 863 nvme_submit_cmd(nvmeq, cmd); 864 schedule_timeout(timeout); 865 866 if (cmdinfo.status == -EINTR) { 867 nvme_abort_command(nvmeq, cmdid); 868 return -EINTR; 869 } 870 871 if (result) 872 *result = cmdinfo.result; 873 874 return cmdinfo.status; 875} 876 877int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, 878 u32 *result) 879{ 880 return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); 881} 882 883static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 884{ 885 int status; 886 struct nvme_command c; 887 888 memset(&c, 0, sizeof(c)); 889 c.delete_queue.opcode = opcode; 890 c.delete_queue.qid = cpu_to_le16(id); 891 892 status = nvme_submit_admin_cmd(dev, &c, NULL); 893 if (status) 894 return -EIO; 895 return 0; 896} 897 898static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 899 struct nvme_queue *nvmeq) 900{ 901 int status; 902 struct nvme_command c; 903 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED; 904 905 memset(&c, 0, sizeof(c)); 906 c.create_cq.opcode = nvme_admin_create_cq; 907 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 908 c.create_cq.cqid = cpu_to_le16(qid); 909 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 910 c.create_cq.cq_flags = cpu_to_le16(flags); 911 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); 912 913 status = nvme_submit_admin_cmd(dev, &c, NULL); 914 if (status) 915 return -EIO; 916 return 0; 917} 918 919static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 920 struct nvme_queue *nvmeq) 921{ 922 int status; 923 struct nvme_command c; 924 int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM; 925 926 memset(&c, 0, sizeof(c)); 927 c.create_sq.opcode = nvme_admin_create_sq; 928 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 929 c.create_sq.sqid = cpu_to_le16(qid); 930 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 931 c.create_sq.sq_flags = cpu_to_le16(flags); 932 c.create_sq.cqid = cpu_to_le16(qid); 933 934 status = nvme_submit_admin_cmd(dev, &c, NULL); 935 if (status) 936 return -EIO; 937 return 0; 938} 939 940static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 941{ 942 return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 943} 944 945static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 946{ 947 return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 948} 949 950int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, 951 dma_addr_t dma_addr) 952{ 953 struct nvme_command c; 954 955 memset(&c, 0, sizeof(c)); 956 c.identify.opcode = nvme_admin_identify; 957 c.identify.nsid = cpu_to_le32(nsid); 958 c.identify.prp1 = cpu_to_le64(dma_addr); 959 c.identify.cns = cpu_to_le32(cns); 960 961 return nvme_submit_admin_cmd(dev, &c, NULL); 962} 963 964int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, 965 dma_addr_t dma_addr, u32 *result) 966{ 967 struct nvme_command c; 968 969 memset(&c, 0, sizeof(c)); 970 c.features.opcode = nvme_admin_get_features; 971 
c.features.nsid = cpu_to_le32(nsid); 972 c.features.prp1 = cpu_to_le64(dma_addr); 973 c.features.fid = cpu_to_le32(fid); 974 975 return nvme_submit_admin_cmd(dev, &c, result); 976} 977 978int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, 979 dma_addr_t dma_addr, u32 *result) 980{ 981 struct nvme_command c; 982 983 memset(&c, 0, sizeof(c)); 984 c.features.opcode = nvme_admin_set_features; 985 c.features.prp1 = cpu_to_le64(dma_addr); 986 c.features.fid = cpu_to_le32(fid); 987 c.features.dword11 = cpu_to_le32(dword11); 988 989 return nvme_submit_admin_cmd(dev, &c, result); 990} 991 992/** 993 * nvme_cancel_ios - Cancel outstanding I/Os 994 * @queue: The queue to cancel I/Os on 995 * @timeout: True to only cancel I/Os which have timed out 996 */ 997static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) 998{ 999 int depth = nvmeq->q_depth - 1; 1000 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); 1001 unsigned long now = jiffies; 1002 int cmdid; 1003 1004 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { 1005 void *ctx; 1006 nvme_completion_fn fn; 1007 static struct nvme_completion cqe = { 1008 .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), 1009 }; 1010 1011 if (timeout && !time_after(now, info[cmdid].timeout)) 1012 continue; 1013 if (info[cmdid].ctx == CMD_CTX_CANCELLED) 1014 continue; 1015 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); 1016 ctx = cancel_cmdid(nvmeq, cmdid, &fn); 1017 fn(nvmeq->dev, ctx, &cqe); 1018 } 1019} 1020 1021static void nvme_free_queue_mem(struct nvme_queue *nvmeq) 1022{ 1023 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 1024 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 1025 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 1026 nvmeq->sq_cmds, nvmeq->sq_dma_addr); 1027 kfree(nvmeq); 1028} 1029 1030static void nvme_free_queue(struct nvme_dev *dev, int qid) 1031{ 1032 struct nvme_queue *nvmeq = dev->queues[qid]; 1033 int vector = dev->entry[nvmeq->cq_vector].vector; 1034 1035 spin_lock_irq(&nvmeq->q_lock); 1036 nvme_cancel_ios(nvmeq, false); 1037 while (bio_list_peek(&nvmeq->sq_cong)) { 1038 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 1039 bio_endio(bio, -EIO); 1040 } 1041 spin_unlock_irq(&nvmeq->q_lock); 1042 1043 irq_set_affinity_hint(vector, NULL); 1044 free_irq(vector, nvmeq); 1045 1046 /* Don't tell the adapter to delete the admin queue */ 1047 if (qid) { 1048 adapter_delete_sq(dev, qid); 1049 adapter_delete_cq(dev, qid); 1050 } 1051 1052 nvme_free_queue_mem(nvmeq); 1053} 1054 1055static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1056 int depth, int vector) 1057{ 1058 struct device *dmadev = &dev->pci_dev->dev; 1059 unsigned extra = DIV_ROUND_UP(depth, 8) + (depth * 1060 sizeof(struct nvme_cmd_info)); 1061 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); 1062 if (!nvmeq) 1063 return NULL; 1064 1065 nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth), 1066 &nvmeq->cq_dma_addr, GFP_KERNEL); 1067 if (!nvmeq->cqes) 1068 goto free_nvmeq; 1069 memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth)); 1070 1071 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth), 1072 &nvmeq->sq_dma_addr, GFP_KERNEL); 1073 if (!nvmeq->sq_cmds) 1074 goto free_cqdma; 1075 1076 nvmeq->q_dmadev = dmadev; 1077 nvmeq->dev = dev; 1078 spin_lock_init(&nvmeq->q_lock); 1079 nvmeq->cq_head = 0; 1080 nvmeq->cq_phase = 1; 1081 init_waitqueue_head(&nvmeq->sq_full); 1082 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); 1083 bio_list_init(&nvmeq->sq_cong); 1084 
nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; 1085 nvmeq->q_depth = depth; 1086 nvmeq->cq_vector = vector; 1087 1088 return nvmeq; 1089 1090 free_cqdma: 1091 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes, 1092 nvmeq->cq_dma_addr); 1093 free_nvmeq: 1094 kfree(nvmeq); 1095 return NULL; 1096} 1097 1098static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, 1099 const char *name) 1100{ 1101 if (use_threaded_interrupts) 1102 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, 1103 nvme_irq_check, nvme_irq, 1104 IRQF_DISABLED | IRQF_SHARED, 1105 name, nvmeq); 1106 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, 1107 IRQF_DISABLED | IRQF_SHARED, name, nvmeq); 1108} 1109 1110static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid, 1111 int cq_size, int vector) 1112{ 1113 int result; 1114 struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector); 1115 1116 if (!nvmeq) 1117 return ERR_PTR(-ENOMEM); 1118 1119 result = adapter_alloc_cq(dev, qid, nvmeq); 1120 if (result < 0) 1121 goto free_nvmeq; 1122 1123 result = adapter_alloc_sq(dev, qid, nvmeq); 1124 if (result < 0) 1125 goto release_cq; 1126 1127 result = queue_request_irq(dev, nvmeq, "nvme"); 1128 if (result < 0) 1129 goto release_sq; 1130 1131 return nvmeq; 1132 1133 release_sq: 1134 adapter_delete_sq(dev, qid); 1135 release_cq: 1136 adapter_delete_cq(dev, qid); 1137 free_nvmeq: 1138 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), 1139 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 1140 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), 1141 nvmeq->sq_cmds, nvmeq->sq_dma_addr); 1142 kfree(nvmeq); 1143 return ERR_PTR(result); 1144} 1145 1146static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled) 1147{ 1148 unsigned long timeout; 1149 u32 bit = enabled ? NVME_CSTS_RDY : 0; 1150 1151 timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; 1152 1153 while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) { 1154 msleep(100); 1155 if (fatal_signal_pending(current)) 1156 return -EINTR; 1157 if (time_after(jiffies, timeout)) { 1158 dev_err(&dev->pci_dev->dev, 1159 "Device not ready; aborting initialisation\n"); 1160 return -ENODEV; 1161 } 1162 } 1163 1164 return 0; 1165} 1166 1167/* 1168 * If the device has been passed off to us in an enabled state, just clear 1169 * the enabled bit. The spec says we should set the 'shutdown notification 1170 * bits', but doing so may cause the device to complete commands to the 1171 * admin queue ... and we don't know what memory that might be pointing at! 
1172 */ 1173static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap) 1174{ 1175 u32 cc = readl(&dev->bar->cc); 1176 1177 if (cc & NVME_CC_ENABLE) 1178 writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc); 1179 return nvme_wait_ready(dev, cap, false); 1180} 1181 1182static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap) 1183{ 1184 return nvme_wait_ready(dev, cap, true); 1185} 1186 1187static int nvme_configure_admin_queue(struct nvme_dev *dev) 1188{ 1189 int result; 1190 u32 aqa; 1191 u64 cap = readq(&dev->bar->cap); 1192 struct nvme_queue *nvmeq; 1193 1194 result = nvme_disable_ctrl(dev, cap); 1195 if (result < 0) 1196 return result; 1197 1198 nvmeq = nvme_alloc_queue(dev, 0, 64, 0); 1199 if (!nvmeq) 1200 return -ENOMEM; 1201 1202 aqa = nvmeq->q_depth - 1; 1203 aqa |= aqa << 16; 1204 1205 dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM; 1206 dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 1207 dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; 1208 dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 1209 1210 writel(aqa, &dev->bar->aqa); 1211 writeq(nvmeq->sq_dma_addr, &dev->bar->asq); 1212 writeq(nvmeq->cq_dma_addr, &dev->bar->acq); 1213 writel(dev->ctrl_config, &dev->bar->cc); 1214 1215 result = nvme_enable_ctrl(dev, cap); 1216 if (result) 1217 goto free_q; 1218 1219 result = queue_request_irq(dev, nvmeq, "nvme admin"); 1220 if (result) 1221 goto free_q; 1222 1223 dev->queues[0] = nvmeq; 1224 return result; 1225 1226 free_q: 1227 nvme_free_queue_mem(nvmeq); 1228 return result; 1229} 1230 1231struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, 1232 unsigned long addr, unsigned length) 1233{ 1234 int i, err, count, nents, offset; 1235 struct scatterlist *sg; 1236 struct page **pages; 1237 struct nvme_iod *iod; 1238 1239 if (addr & 3) 1240 return ERR_PTR(-EINVAL); 1241 if (!length || length > INT_MAX - PAGE_SIZE) 1242 return ERR_PTR(-EINVAL); 1243 1244 offset = offset_in_page(addr); 1245 count = DIV_ROUND_UP(offset + length, PAGE_SIZE); 1246 pages = kcalloc(count, sizeof(*pages), GFP_KERNEL); 1247 if (!pages) 1248 return ERR_PTR(-ENOMEM); 1249 1250 err = get_user_pages_fast(addr, count, 1, pages); 1251 if (err < count) { 1252 count = err; 1253 err = -EFAULT; 1254 goto put_pages; 1255 } 1256 1257 iod = nvme_alloc_iod(count, length, GFP_KERNEL); 1258 sg = iod->sg; 1259 sg_init_table(sg, count); 1260 for (i = 0; i < count; i++) { 1261 sg_set_page(&sg[i], pages[i], 1262 min_t(unsigned, length, PAGE_SIZE - offset), 1263 offset); 1264 length -= (PAGE_SIZE - offset); 1265 offset = 0; 1266 } 1267 sg_mark_end(&sg[i - 1]); 1268 iod->nents = count; 1269 1270 err = -ENOMEM; 1271 nents = dma_map_sg(&dev->pci_dev->dev, sg, count, 1272 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1273 if (!nents) 1274 goto free_iod; 1275 1276 kfree(pages); 1277 return iod; 1278 1279 free_iod: 1280 kfree(iod); 1281 put_pages: 1282 for (i = 0; i < count; i++) 1283 put_page(pages[i]); 1284 kfree(pages); 1285 return ERR_PTR(err); 1286} 1287 1288void nvme_unmap_user_pages(struct nvme_dev *dev, int write, 1289 struct nvme_iod *iod) 1290{ 1291 int i; 1292 1293 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, 1294 write ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); 1295 1296 for (i = 0; i < iod->nents; i++) 1297 put_page(sg_page(&iod->sg[i])); 1298} 1299 1300static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) 1301{ 1302 struct nvme_dev *dev = ns->dev; 1303 struct nvme_queue *nvmeq; 1304 struct nvme_user_io io; 1305 struct nvme_command c; 1306 unsigned length, meta_len; 1307 int status, i; 1308 struct nvme_iod *iod, *meta_iod = NULL; 1309 dma_addr_t meta_dma_addr; 1310 void *meta, *uninitialized_var(meta_mem); 1311 1312 if (copy_from_user(&io, uio, sizeof(io))) 1313 return -EFAULT; 1314 length = (io.nblocks + 1) << ns->lba_shift; 1315 meta_len = (io.nblocks + 1) * ns->ms; 1316 1317 if (meta_len && ((io.metadata & 3) || !io.metadata)) 1318 return -EINVAL; 1319 1320 switch (io.opcode) { 1321 case nvme_cmd_write: 1322 case nvme_cmd_read: 1323 case nvme_cmd_compare: 1324 iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length); 1325 break; 1326 default: 1327 return -EINVAL; 1328 } 1329 1330 if (IS_ERR(iod)) 1331 return PTR_ERR(iod); 1332 1333 memset(&c, 0, sizeof(c)); 1334 c.rw.opcode = io.opcode; 1335 c.rw.flags = io.flags; 1336 c.rw.nsid = cpu_to_le32(ns->ns_id); 1337 c.rw.slba = cpu_to_le64(io.slba); 1338 c.rw.length = cpu_to_le16(io.nblocks); 1339 c.rw.control = cpu_to_le16(io.control); 1340 c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); 1341 c.rw.reftag = cpu_to_le32(io.reftag); 1342 c.rw.apptag = cpu_to_le16(io.apptag); 1343 c.rw.appmask = cpu_to_le16(io.appmask); 1344 1345 if (meta_len) { 1346 meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, 1347 meta_len); 1348 if (IS_ERR(meta_iod)) { 1349 status = PTR_ERR(meta_iod); 1350 meta_iod = NULL; 1351 goto unmap; 1352 } 1353 1354 meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, 1355 &meta_dma_addr, GFP_KERNEL); 1356 if (!meta_mem) { 1357 status = -ENOMEM; 1358 goto unmap; 1359 } 1360 1361 if (io.opcode & 1) { 1362 int meta_offset = 0; 1363 1364 for (i = 0; i < meta_iod->nents; i++) { 1365 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) + 1366 meta_iod->sg[i].offset; 1367 memcpy(meta_mem + meta_offset, meta, 1368 meta_iod->sg[i].length); 1369 kunmap_atomic(meta); 1370 meta_offset += meta_iod->sg[i].length; 1371 } 1372 } 1373 1374 c.rw.metadata = cpu_to_le64(meta_dma_addr); 1375 } 1376 1377 length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL); 1378 1379 nvmeq = get_nvmeq(dev); 1380 /* 1381 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption 1382 * disabled. We may be preempted at any point, and be rescheduled 1383 * to a different CPU. That will cause cacheline bouncing, but no 1384 * additional races since q_lock already protects against other CPUs. 
1385 */ 1386 put_nvmeq(nvmeq); 1387 if (length != (io.nblocks + 1) << ns->lba_shift) 1388 status = -ENOMEM; 1389 else 1390 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); 1391 1392 if (meta_len) { 1393 if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) { 1394 int meta_offset = 0; 1395 1396 for (i = 0; i < meta_iod->nents; i++) { 1397 meta = kmap_atomic(sg_page(&meta_iod->sg[i])) + 1398 meta_iod->sg[i].offset; 1399 memcpy(meta, meta_mem + meta_offset, 1400 meta_iod->sg[i].length); 1401 kunmap_atomic(meta); 1402 meta_offset += meta_iod->sg[i].length; 1403 } 1404 } 1405 1406 dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem, 1407 meta_dma_addr); 1408 } 1409 1410 unmap: 1411 nvme_unmap_user_pages(dev, io.opcode & 1, iod); 1412 nvme_free_iod(dev, iod); 1413 1414 if (meta_iod) { 1415 nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod); 1416 nvme_free_iod(dev, meta_iod); 1417 } 1418 1419 return status; 1420} 1421 1422static int nvme_user_admin_cmd(struct nvme_dev *dev, 1423 struct nvme_admin_cmd __user *ucmd) 1424{ 1425 struct nvme_admin_cmd cmd; 1426 struct nvme_command c; 1427 int status, length; 1428 struct nvme_iod *uninitialized_var(iod); 1429 unsigned timeout; 1430 1431 if (!capable(CAP_SYS_ADMIN)) 1432 return -EACCES; 1433 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) 1434 return -EFAULT; 1435 1436 memset(&c, 0, sizeof(c)); 1437 c.common.opcode = cmd.opcode; 1438 c.common.flags = cmd.flags; 1439 c.common.nsid = cpu_to_le32(cmd.nsid); 1440 c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); 1441 c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); 1442 c.common.cdw10[0] = cpu_to_le32(cmd.cdw10); 1443 c.common.cdw10[1] = cpu_to_le32(cmd.cdw11); 1444 c.common.cdw10[2] = cpu_to_le32(cmd.cdw12); 1445 c.common.cdw10[3] = cpu_to_le32(cmd.cdw13); 1446 c.common.cdw10[4] = cpu_to_le32(cmd.cdw14); 1447 c.common.cdw10[5] = cpu_to_le32(cmd.cdw15); 1448 1449 length = cmd.data_len; 1450 if (cmd.data_len) { 1451 iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr, 1452 length); 1453 if (IS_ERR(iod)) 1454 return PTR_ERR(iod); 1455 length = nvme_setup_prps(dev, &c.common, iod, length, 1456 GFP_KERNEL); 1457 } 1458 1459 timeout = cmd.timeout_ms ? 
msecs_to_jiffies(cmd.timeout_ms) : 1460 ADMIN_TIMEOUT; 1461 if (length != cmd.data_len) 1462 status = -ENOMEM; 1463 else 1464 status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result, 1465 timeout); 1466 1467 if (cmd.data_len) { 1468 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); 1469 nvme_free_iod(dev, iod); 1470 } 1471 1472 if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result, 1473 sizeof(cmd.result))) 1474 status = -EFAULT; 1475 1476 return status; 1477} 1478 1479static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, 1480 unsigned long arg) 1481{ 1482 struct nvme_ns *ns = bdev->bd_disk->private_data; 1483 1484 switch (cmd) { 1485 case NVME_IOCTL_ID: 1486 force_successful_syscall_return(); 1487 return ns->ns_id; 1488 case NVME_IOCTL_ADMIN_CMD: 1489 return nvme_user_admin_cmd(ns->dev, (void __user *)arg); 1490 case NVME_IOCTL_SUBMIT_IO: 1491 return nvme_submit_io(ns, (void __user *)arg); 1492 case SG_GET_VERSION_NUM: 1493 return nvme_sg_get_version_num((void __user *)arg); 1494 case SG_IO: 1495 return nvme_sg_io(ns, (void __user *)arg); 1496 default: 1497 return -ENOTTY; 1498 } 1499} 1500 1501static const struct block_device_operations nvme_fops = { 1502 .owner = THIS_MODULE, 1503 .ioctl = nvme_ioctl, 1504 .compat_ioctl = nvme_ioctl, 1505}; 1506 1507static void nvme_resubmit_bios(struct nvme_queue *nvmeq) 1508{ 1509 while (bio_list_peek(&nvmeq->sq_cong)) { 1510 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); 1511 struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; 1512 1513 if (bio_list_empty(&nvmeq->sq_cong)) 1514 remove_wait_queue(&nvmeq->sq_full, 1515 &nvmeq->sq_cong_wait); 1516 if (nvme_submit_bio_queue(nvmeq, ns, bio)) { 1517 if (bio_list_empty(&nvmeq->sq_cong)) 1518 add_wait_queue(&nvmeq->sq_full, 1519 &nvmeq->sq_cong_wait); 1520 bio_list_add_head(&nvmeq->sq_cong, bio); 1521 break; 1522 } 1523 } 1524} 1525 1526static int nvme_kthread(void *data) 1527{ 1528 struct nvme_dev *dev; 1529 1530 while (!kthread_should_stop()) { 1531 set_current_state(TASK_INTERRUPTIBLE); 1532 spin_lock(&dev_list_lock); 1533 list_for_each_entry(dev, &dev_list, node) { 1534 int i; 1535 for (i = 0; i < dev->queue_count; i++) { 1536 struct nvme_queue *nvmeq = dev->queues[i]; 1537 if (!nvmeq) 1538 continue; 1539 spin_lock_irq(&nvmeq->q_lock); 1540 nvme_process_cq(nvmeq); 1541 nvme_cancel_ios(nvmeq, true); 1542 nvme_resubmit_bios(nvmeq); 1543 spin_unlock_irq(&nvmeq->q_lock); 1544 } 1545 } 1546 spin_unlock(&dev_list_lock); 1547 schedule_timeout(round_jiffies_relative(HZ)); 1548 } 1549 return 0; 1550} 1551 1552static DEFINE_IDA(nvme_index_ida); 1553 1554static int nvme_get_ns_idx(void) 1555{ 1556 int index, error; 1557 1558 do { 1559 if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL)) 1560 return -1; 1561 1562 spin_lock(&dev_list_lock); 1563 error = ida_get_new(&nvme_index_ida, &index); 1564 spin_unlock(&dev_list_lock); 1565 } while (error == -EAGAIN); 1566 1567 if (error) 1568 index = -1; 1569 return index; 1570} 1571 1572static void nvme_put_ns_idx(int index) 1573{ 1574 spin_lock(&dev_list_lock); 1575 ida_remove(&nvme_index_ida, index); 1576 spin_unlock(&dev_list_lock); 1577} 1578 1579static void nvme_config_discard(struct nvme_ns *ns) 1580{ 1581 u32 logical_block_size = queue_logical_block_size(ns->queue); 1582 ns->queue->limits.discard_zeroes_data = 0; 1583 ns->queue->limits.discard_alignment = logical_block_size; 1584 ns->queue->limits.discard_granularity = logical_block_size; 1585 ns->queue->limits.max_discard_sectors = 0xffffffff; 1586 
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); 1587} 1588 1589static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid, 1590 struct nvme_id_ns *id, struct nvme_lba_range_type *rt) 1591{ 1592 struct nvme_ns *ns; 1593 struct gendisk *disk; 1594 int lbaf; 1595 1596 if (rt->attributes & NVME_LBART_ATTRIB_HIDE) 1597 return NULL; 1598 1599 ns = kzalloc(sizeof(*ns), GFP_KERNEL); 1600 if (!ns) 1601 return NULL; 1602 ns->queue = blk_alloc_queue(GFP_KERNEL); 1603 if (!ns->queue) 1604 goto out_free_ns; 1605 ns->queue->queue_flags = QUEUE_FLAG_DEFAULT; 1606 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); 1607 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 1608 blk_queue_make_request(ns->queue, nvme_make_request); 1609 ns->dev = dev; 1610 ns->queue->queuedata = ns; 1611 1612 disk = alloc_disk(NVME_MINORS); 1613 if (!disk) 1614 goto out_free_queue; 1615 ns->ns_id = nsid; 1616 ns->disk = disk; 1617 lbaf = id->flbas & 0xf; 1618 ns->lba_shift = id->lbaf[lbaf].ds; 1619 ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); 1620 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 1621 if (dev->max_hw_sectors) 1622 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 1623 1624 disk->major = nvme_major; 1625 disk->minors = NVME_MINORS; 1626 disk->first_minor = NVME_MINORS * nvme_get_ns_idx(); 1627 disk->fops = &nvme_fops; 1628 disk->private_data = ns; 1629 disk->queue = ns->queue; 1630 disk->driverfs_dev = &dev->pci_dev->dev; 1631 sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); 1632 set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); 1633 1634 if (dev->oncs & NVME_CTRL_ONCS_DSM) 1635 nvme_config_discard(ns); 1636 1637 return ns; 1638 1639 out_free_queue: 1640 blk_cleanup_queue(ns->queue); 1641 out_free_ns: 1642 kfree(ns); 1643 return NULL; 1644} 1645 1646static void nvme_ns_free(struct nvme_ns *ns) 1647{ 1648 int index = ns->disk->first_minor / NVME_MINORS; 1649 put_disk(ns->disk); 1650 nvme_put_ns_idx(index); 1651 blk_cleanup_queue(ns->queue); 1652 kfree(ns); 1653} 1654 1655static int set_queue_count(struct nvme_dev *dev, int count) 1656{ 1657 int status; 1658 u32 result; 1659 u32 q_count = (count - 1) | ((count - 1) << 16); 1660 1661 status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, 1662 &result); 1663 if (status) 1664 return status < 0 ? 
-EIO : -EBUSY; 1665 return min(result & 0xffff, result >> 16) + 1; 1666} 1667 1668static int nvme_setup_io_queues(struct nvme_dev *dev) 1669{ 1670 struct pci_dev *pdev = dev->pci_dev; 1671 int result, cpu, i, vecs, nr_io_queues, db_bar_size, q_depth; 1672 1673 nr_io_queues = num_online_cpus(); 1674 result = set_queue_count(dev, nr_io_queues); 1675 if (result < 0) 1676 return result; 1677 if (result < nr_io_queues) 1678 nr_io_queues = result; 1679 1680 /* Deregister the admin queue's interrupt */ 1681 free_irq(dev->entry[0].vector, dev->queues[0]); 1682 1683 db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); 1684 if (db_bar_size > 8192) { 1685 iounmap(dev->bar); 1686 dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size); 1687 dev->dbs = ((void __iomem *)dev->bar) + 4096; 1688 dev->queues[0]->q_db = dev->dbs; 1689 } 1690 1691 vecs = nr_io_queues; 1692 for (i = 0; i < vecs; i++) 1693 dev->entry[i].entry = i; 1694 for (;;) { 1695 result = pci_enable_msix(pdev, dev->entry, vecs); 1696 if (result <= 0) 1697 break; 1698 vecs = result; 1699 } 1700 1701 if (result < 0) { 1702 vecs = nr_io_queues; 1703 if (vecs > 32) 1704 vecs = 32; 1705 for (;;) { 1706 result = pci_enable_msi_block(pdev, vecs); 1707 if (result == 0) { 1708 for (i = 0; i < vecs; i++) 1709 dev->entry[i].vector = i + pdev->irq; 1710 break; 1711 } else if (result < 0) { 1712 vecs = 1; 1713 break; 1714 } 1715 vecs = result; 1716 } 1717 } 1718 1719 /* 1720 * Should investigate if there's a performance win from allocating 1721 * more queues than interrupt vectors; it might allow the submission 1722 * path to scale better, even if the receive path is limited by the 1723 * number of interrupts. 1724 */ 1725 nr_io_queues = vecs; 1726 1727 result = queue_request_irq(dev, dev->queues[0], "nvme admin"); 1728 /* XXX: handle failure here */ 1729 1730 cpu = cpumask_first(cpu_online_mask); 1731 for (i = 0; i < nr_io_queues; i++) { 1732 irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu)); 1733 cpu = cpumask_next(cpu, cpu_online_mask); 1734 } 1735 1736 q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1, 1737 NVME_Q_DEPTH); 1738 for (i = 0; i < nr_io_queues; i++) { 1739 dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i); 1740 if (IS_ERR(dev->queues[i + 1])) 1741 return PTR_ERR(dev->queues[i + 1]); 1742 dev->queue_count++; 1743 } 1744 1745 for (; i < num_possible_cpus(); i++) { 1746 int target = i % rounddown_pow_of_two(dev->queue_count - 1); 1747 dev->queues[i + 1] = dev->queues[target + 1]; 1748 } 1749 1750 return 0; 1751} 1752 1753static void nvme_free_queues(struct nvme_dev *dev) 1754{ 1755 int i; 1756 1757 for (i = dev->queue_count - 1; i >= 0; i--) 1758 nvme_free_queue(dev, i); 1759} 1760 1761/* 1762 * Return: error value if an error occurred setting up the queues or calling 1763 * Identify Device. 0 if these succeeded, even if adding some of the 1764 * namespaces failed. At the moment, these failures are silent. TBD which 1765 * failures should be reported. 
1766 */ 1767static int nvme_dev_add(struct nvme_dev *dev) 1768{ 1769 int res; 1770 unsigned nn, i; 1771 struct nvme_ns *ns; 1772 struct nvme_id_ctrl *ctrl; 1773 struct nvme_id_ns *id_ns; 1774 void *mem; 1775 dma_addr_t dma_addr; 1776 int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; 1777 1778 res = nvme_setup_io_queues(dev); 1779 if (res) 1780 return res; 1781 1782 mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, 1783 GFP_KERNEL); 1784 if (!mem) 1785 return -ENOMEM; 1786 1787 res = nvme_identify(dev, 0, 1, dma_addr); 1788 if (res) { 1789 res = -EIO; 1790 goto out; 1791 } 1792 1793 ctrl = mem; 1794 nn = le32_to_cpup(&ctrl->nn); 1795 dev->oncs = le16_to_cpup(&ctrl->oncs); 1796 memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); 1797 memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); 1798 memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); 1799 if (ctrl->mdts) 1800 dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); 1801 if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) && 1802 (dev->pci_dev->device == 0x0953) && ctrl->vs[3]) 1803 dev->stripe_size = 1 << (ctrl->vs[3] + shift); 1804 1805 id_ns = mem; 1806 for (i = 1; i <= nn; i++) { 1807 res = nvme_identify(dev, i, 0, dma_addr); 1808 if (res) 1809 continue; 1810 1811 if (id_ns->ncap == 0) 1812 continue; 1813 1814 res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, 1815 dma_addr + 4096, NULL); 1816 if (res) 1817 memset(mem + 4096, 0, 4096); 1818 1819 ns = nvme_alloc_ns(dev, i, mem, mem + 4096); 1820 if (ns) 1821 list_add_tail(&ns->list, &dev->namespaces); 1822 } 1823 list_for_each_entry(ns, &dev->namespaces, list) 1824 add_disk(ns->disk); 1825 res = 0; 1826 1827 out: 1828 dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr); 1829 return res; 1830} 1831 1832static int nvme_dev_map(struct nvme_dev *dev) 1833{ 1834 int bars, result = -ENOMEM; 1835 struct pci_dev *pdev = dev->pci_dev; 1836 1837 if (pci_enable_device_mem(pdev)) 1838 return result; 1839 1840 dev->entry[0].vector = pdev->irq; 1841 pci_set_master(pdev); 1842 bars = pci_select_bars(pdev, IORESOURCE_MEM); 1843 if (pci_request_selected_regions(pdev, bars, "nvme")) 1844 goto disable_pci; 1845 1846 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) 1847 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 1848 else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) 1849 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 1850 else 1851 goto disable_pci; 1852 1853 pci_set_drvdata(pdev, dev); 1854 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); 1855 if (!dev->bar) 1856 goto disable; 1857 1858 dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap)); 1859 dev->dbs = ((void __iomem *)dev->bar) + 4096; 1860 1861 return 0; 1862 1863 disable: 1864 pci_release_regions(pdev); 1865 disable_pci: 1866 pci_disable_device(pdev); 1867 return result; 1868} 1869 1870static void nvme_dev_unmap(struct nvme_dev *dev) 1871{ 1872 if (dev->pci_dev->msi_enabled) 1873 pci_disable_msi(dev->pci_dev); 1874 else if (dev->pci_dev->msix_enabled) 1875 pci_disable_msix(dev->pci_dev); 1876 1877 if (dev->bar) { 1878 iounmap(dev->bar); 1879 dev->bar = NULL; 1880 } 1881 1882 pci_release_regions(dev->pci_dev); 1883 if (pci_is_enabled(dev->pci_dev)) 1884 pci_disable_device(dev->pci_dev); 1885} 1886 1887static int nvme_dev_remove(struct nvme_dev *dev) 1888{ 1889 struct nvme_ns *ns, *next; 1890 1891 spin_lock(&dev_list_lock); 1892 list_del(&dev->node); 1893 spin_unlock(&dev_list_lock); 1894 1895 list_for_each_entry_safe(ns, next, &dev->namespaces, list) { 1896 list_del(&ns->list); 1897 
del_gendisk(ns->disk); 1898 nvme_ns_free(ns); 1899 } 1900 1901 nvme_free_queues(dev); 1902 1903 return 0; 1904} 1905 1906static int nvme_setup_prp_pools(struct nvme_dev *dev) 1907{ 1908 struct device *dmadev = &dev->pci_dev->dev; 1909 dev->prp_page_pool = dma_pool_create("prp list page", dmadev, 1910 PAGE_SIZE, PAGE_SIZE, 0); 1911 if (!dev->prp_page_pool) 1912 return -ENOMEM; 1913 1914 /* Optimisation for I/Os between 4k and 128k */ 1915 dev->prp_small_pool = dma_pool_create("prp list 256", dmadev, 1916 256, 256, 0); 1917 if (!dev->prp_small_pool) { 1918 dma_pool_destroy(dev->prp_page_pool); 1919 return -ENOMEM; 1920 } 1921 return 0; 1922} 1923 1924static void nvme_release_prp_pools(struct nvme_dev *dev) 1925{ 1926 dma_pool_destroy(dev->prp_page_pool); 1927 dma_pool_destroy(dev->prp_small_pool); 1928} 1929 1930static DEFINE_IDA(nvme_instance_ida); 1931 1932static int nvme_set_instance(struct nvme_dev *dev) 1933{ 1934 int instance, error; 1935 1936 do { 1937 if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) 1938 return -ENODEV; 1939 1940 spin_lock(&dev_list_lock); 1941 error = ida_get_new(&nvme_instance_ida, &instance); 1942 spin_unlock(&dev_list_lock); 1943 } while (error == -EAGAIN); 1944 1945 if (error) 1946 return -ENODEV; 1947 1948 dev->instance = instance; 1949 return 0; 1950} 1951 1952static void nvme_release_instance(struct nvme_dev *dev) 1953{ 1954 spin_lock(&dev_list_lock); 1955 ida_remove(&nvme_instance_ida, dev->instance); 1956 spin_unlock(&dev_list_lock); 1957} 1958 1959static void nvme_free_dev(struct kref *kref) 1960{ 1961 struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); 1962 nvme_dev_remove(dev); 1963 nvme_dev_unmap(dev); 1964 nvme_release_instance(dev); 1965 nvme_release_prp_pools(dev); 1966 kfree(dev->queues); 1967 kfree(dev->entry); 1968 kfree(dev); 1969} 1970 1971static int nvme_dev_open(struct inode *inode, struct file *f) 1972{ 1973 struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev, 1974 miscdev); 1975 kref_get(&dev->kref); 1976 f->private_data = dev; 1977 return 0; 1978} 1979 1980static int nvme_dev_release(struct inode *inode, struct file *f) 1981{ 1982 struct nvme_dev *dev = f->private_data; 1983 kref_put(&dev->kref, nvme_free_dev); 1984 return 0; 1985} 1986 1987static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) 1988{ 1989 struct nvme_dev *dev = f->private_data; 1990 switch (cmd) { 1991 case NVME_IOCTL_ADMIN_CMD: 1992 return nvme_user_admin_cmd(dev, (void __user *)arg); 1993 default: 1994 return -ENOTTY; 1995 } 1996} 1997 1998static const struct file_operations nvme_dev_fops = { 1999 .owner = THIS_MODULE, 2000 .open = nvme_dev_open, 2001 .release = nvme_dev_release, 2002 .unlocked_ioctl = nvme_dev_ioctl, 2003 .compat_ioctl = nvme_dev_ioctl, 2004}; 2005 2006static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2007{ 2008 int result = -ENOMEM; 2009 struct nvme_dev *dev; 2010 2011 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2012 if (!dev) 2013 return -ENOMEM; 2014 dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry), 2015 GFP_KERNEL); 2016 if (!dev->entry) 2017 goto free; 2018 dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *), 2019 GFP_KERNEL); 2020 if (!dev->queues) 2021 goto free; 2022 2023 INIT_LIST_HEAD(&dev->namespaces); 2024 dev->pci_dev = pdev; 2025 result = nvme_set_instance(dev); 2026 if (result) 2027 goto free; 2028 2029 result = nvme_setup_prp_pools(dev); 2030 if (result) 2031 goto release; 2032 2033 result = nvme_dev_map(dev); 2034 if (result) 2035 goto 
release_pools; 2036 2037 result = nvme_configure_admin_queue(dev); 2038 if (result) 2039 goto unmap; 2040 dev->queue_count++; 2041 2042 spin_lock(&dev_list_lock); 2043 list_add(&dev->node, &dev_list); 2044 spin_unlock(&dev_list_lock); 2045 2046 result = nvme_dev_add(dev); 2047 if (result && result != -EBUSY) 2048 goto delete; 2049 2050 scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance); 2051 dev->miscdev.minor = MISC_DYNAMIC_MINOR; 2052 dev->miscdev.parent = &pdev->dev; 2053 dev->miscdev.name = dev->name; 2054 dev->miscdev.fops = &nvme_dev_fops; 2055 result = misc_register(&dev->miscdev); 2056 if (result) 2057 goto remove; 2058 2059 kref_init(&dev->kref); 2060 return 0; 2061 2062 remove: 2063 nvme_dev_remove(dev); 2064 delete: 2065 spin_lock(&dev_list_lock); 2066 list_del(&dev->node); 2067 spin_unlock(&dev_list_lock); 2068 2069 nvme_free_queues(dev); 2070 unmap: 2071 nvme_dev_unmap(dev); 2072 release_pools: 2073 nvme_release_prp_pools(dev); 2074 release: 2075 nvme_release_instance(dev); 2076 free: 2077 kfree(dev->queues); 2078 kfree(dev->entry); 2079 kfree(dev); 2080 return result; 2081} 2082 2083static void nvme_remove(struct pci_dev *pdev) 2084{ 2085 struct nvme_dev *dev = pci_get_drvdata(pdev); 2086 misc_deregister(&dev->miscdev); 2087 kref_put(&dev->kref, nvme_free_dev); 2088} 2089 2090/* These functions are yet to be implemented */ 2091#define nvme_error_detected NULL 2092#define nvme_dump_registers NULL 2093#define nvme_link_reset NULL 2094#define nvme_slot_reset NULL 2095#define nvme_error_resume NULL 2096#define nvme_suspend NULL 2097#define nvme_resume NULL 2098 2099static const struct pci_error_handlers nvme_err_handler = { 2100 .error_detected = nvme_error_detected, 2101 .mmio_enabled = nvme_dump_registers, 2102 .link_reset = nvme_link_reset, 2103 .slot_reset = nvme_slot_reset, 2104 .resume = nvme_error_resume, 2105}; 2106 2107/* Move to pci_ids.h later */ 2108#define PCI_CLASS_STORAGE_EXPRESS 0x010802 2109 2110static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = { 2111 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 2112 { 0, } 2113}; 2114MODULE_DEVICE_TABLE(pci, nvme_id_table); 2115 2116static struct pci_driver nvme_driver = { 2117 .name = "nvme", 2118 .id_table = nvme_id_table, 2119 .probe = nvme_probe, 2120 .remove = nvme_remove, 2121 .suspend = nvme_suspend, 2122 .resume = nvme_resume, 2123 .err_handler = &nvme_err_handler, 2124}; 2125 2126static int __init nvme_init(void) 2127{ 2128 int result; 2129 2130 nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); 2131 if (IS_ERR(nvme_thread)) 2132 return PTR_ERR(nvme_thread); 2133 2134 result = register_blkdev(nvme_major, "nvme"); 2135 if (result < 0) 2136 goto kill_kthread; 2137 else if (result > 0) 2138 nvme_major = result; 2139 2140 result = pci_register_driver(&nvme_driver); 2141 if (result) 2142 goto unregister_blkdev; 2143 return 0; 2144 2145 unregister_blkdev: 2146 unregister_blkdev(nvme_major, "nvme"); 2147 kill_kthread: 2148 kthread_stop(nvme_thread); 2149 return result; 2150} 2151 2152static void __exit nvme_exit(void) 2153{ 2154 pci_unregister_driver(&nvme_driver); 2155 unregister_blkdev(nvme_major, "nvme"); 2156 kthread_stop(nvme_thread); 2157} 2158 2159MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 2160MODULE_LICENSE("GPL"); 2161MODULE_VERSION("0.8"); 2162module_init(nvme_init); 2163module_exit(nvme_exit); 2164