nvme-core.c revision b348b7d54368c87811907a8e88f0d96713c43009
/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64
#define IO_TIMEOUT	(5 * HZ)
#define ADMIN_TIMEOUT	(60 * HZ)

static int nvme_major;
module_param(nvme_major, int, 0);

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	char serial[20];
	char model[40];
	char firmware_rev[8];
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
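/*
 * The cmdid_data[] flexible array at the end of this structure holds a
 * bitmap of q_depth bits used to allocate command IDs, followed
 * immediately by one struct nvme_cmd_info per command; see
 * nvme_cmd_info() and the 'extra' sizing in nvme_alloc_queue().
 */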
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	wait_queue_t sq_cong_wait;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_phase;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

struct nvme_cmd_info {
	unsigned long ctx;
	unsigned long timeout;
};

static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
{
	return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
}

/**
 * alloc_cmdid - Allocate a Command ID
 * @param nvmeq The queue that will be used for this command
 * @param ctx A pointer that will be passed to the handler
 * @param handler The ID of the handler to call
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler,
							unsigned timeout)
{
	int depth = nvmeq->q_depth;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	info[cmdid].ctx = (unsigned long)ctx | handler;
	info[cmdid].timeout = jiffies + timeout;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
						int handler, unsigned timeout)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work.  Consider using a special
 * CMD_CTX value instead, if that works for your situation.
 */
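/*
 * Handler IDs live in the bottom two bits of the ctx value stored in
 * nvme_cmd_info, so at most four completion handlers are supported.
 * The CMD_CTX_* sentinels below are offsets from POISON_POINTER_DELTA,
 * chosen so that they cannot be mistaken for a real ctx pointer while
 * still carrying sync_completion_id in their low bits.
 */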
enum {
	sync_completion_id = 0,
	bio_completion_id,
};

#define CMD_CTX_BASE		(POISON_POINTER_DELTA + sync_completion_id)
#define CMD_CTX_CANCELLED	(0x30C + CMD_CTX_BASE)
#define CMD_CTX_COMPLETED	(0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID		(0x314 + CMD_CTX_BASE)

static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);

	if (cmdid >= nvmeq->q_depth)
		return CMD_CTX_INVALID;
	data = info[cmdid].ctx;
	info[cmdid].ctx = CMD_CTX_COMPLETED;
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

static void cancel_cmdid_data(struct nvme_queue *nvmeq, int cmdid)
{
	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
	info[cmdid].ctx = CMD_CTX_CANCELLED;
}

static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	int qid, cpu = get_cpu();
	if (cpu < ns->dev->queue_count)
		qid = cpu + 1;
	else
		qid = (cpu % rounddown_pow_of_two(ns->dev->queue_count)) + 1;
	return ns->dev->queues[qid];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
	put_cpu();
}

/**
 * nvme_submit_cmd: Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	/* XXX: Need to check tail isn't going to overrun head */
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

struct nvme_prps {
	int npages;
	dma_addr_t first_dma;
	__le64 *list[0];
};

static void nvme_free_prps(struct nvme_dev *dev, struct nvme_prps *prps)
{
	const int last_prp = PAGE_SIZE / 8 - 1;
	int i;
	dma_addr_t prp_dma;

	if (!prps)
		return;

	prp_dma = prps->first_dma;

	if (prps->npages == 0)
		dma_pool_free(dev->prp_small_pool, prps->list[0], prp_dma);
	for (i = 0; i < prps->npages; i++) {
		__le64 *prp_list = prps->list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}
	kfree(prps);
}

struct nvme_bio {
	struct bio *bio;
	int nents;
	struct nvme_prps *prps;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_bio *alloc_nbio(unsigned nseg, gfp_t gfp)
{
	return kzalloc(sizeof(struct nvme_bio) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_nbio(struct nvme_queue *nvmeq, struct nvme_bio *nbio)
{
	nvme_free_prps(nvmeq->dev, nbio->prps);
	kfree(nbio);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_bio *nbio = ctx;
	struct bio *bio = nbio->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, nbio->sg, nbio->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_nbio(nvmeq, nbio);
	bio_endio(bio, status ? -EIO : 0);
}

/* length is in bytes */
static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
					struct nvme_common_command *cmd,
					struct scatterlist *sg, int length)
{
	struct dma_pool *pool;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = offset_in_page(dma_addr);
	__le64 *prp_list;
	dma_addr_t prp_dma;
	int nprps, npages, i, prp_page;
	struct nvme_prps *prps = NULL;

	cmd->prp1 = cpu_to_le64(dma_addr);
	length -= (PAGE_SIZE - offset);
	if (length <= 0)
		return prps;

	dma_len -= (PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= PAGE_SIZE) {
		cmd->prp2 = cpu_to_le64(dma_addr);
		return prps;
	}

	nprps = DIV_ROUND_UP(length, PAGE_SIZE);
	npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, GFP_ATOMIC);
	prp_page = 0;
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		prps->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		prps->npages = npages;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	prps->list[prp_page++] = prp_list;
	prps->first_dma = prp_dma;
	cmd->prp2 = cpu_to_le64(prp_dma);
	i = 0;
	for (;;) {
		if (i == PAGE_SIZE / 8 - 1) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			prps->list[prp_page++] = prp_list;
			old_prp_list[i] = cpu_to_le64(prp_dma);
			i = 0;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= PAGE_SIZE;
		dma_addr += PAGE_SIZE;
		length -= PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return prps;
}

static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i, nsegs = 0;

	sg_init_table(nbio->sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			/* Check bvprv && offset == 0 */
			sg = sg ? sg + 1 : nbio->sg;
			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
							bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	}
	nbio->nents = nsegs;
	sg_mark_end(sg);
	return dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir);
}

static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_command *cmnd;
	struct nvme_bio *nbio;
	enum dma_data_direction dma_dir;
	int cmdid, result = -ENOMEM;
	u16 control;
	u32 dsmgmt;
	int psegs = bio_phys_segments(ns->queue, bio);

	nbio = alloc_nbio(psegs, GFP_ATOMIC);
	if (!nbio)
		goto nomem;
	nbio->bio = bio;

	result = -EBUSY;
	cmdid = alloc_cmdid(nvmeq, nbio, bio_completion_id, IO_TIMEOUT);
	if (unlikely(cmdid < 0))
		goto free_nbio;

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];

	memset(cmnd, 0, sizeof(*cmnd));
	if (bio_data_dir(bio)) {
		cmnd->rw.opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->rw.opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	result = -ENOMEM;
	if (nvme_map_bio(nvmeq->q_dmadev, nbio, bio, dma_dir, psegs) == 0)
		goto free_nbio;

	cmnd->rw.command_id = cmdid;
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	nbio->prps = nvme_setup_prps(nvmeq->dev, &cmnd->common, nbio->sg,
								bio->bi_size);
	cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->rw.length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	return 0;

 free_nbio:
	free_nbio(nvmeq, nbio);
 nomem:
	return result;
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
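/*
 * Bios that cannot be submitted immediately (no free command ID, or
 * other bios already queued) are parked on the per-queue sq_cong list;
 * the nvme kthread resubmits them via nvme_resubmit_bios() once
 * free_cmdid() wakes the sq_full wait queue.
 */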
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);
	int result = -EBUSY;

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (unlikely(result)) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);
	}

	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);

	return 0;
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	if ((unsigned long)cmdinfo == CMD_CTX_CANCELLED)
		return;
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_COMPLETED)) {
		dev_warn(nvmeq->q_dmadev,
				"completed id %d twice on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	if (unlikely((unsigned long)cmdinfo == CMD_CTX_INVALID)) {
		dev_warn(nvmeq->q_dmadev,
				"invalid id %d completed on queue %d\n",
				cqe->command_id, le16_to_cpup(&cqe->sq_id));
		return;
	}
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, phase;

	static const completion_fn completions[4] = {
		[sync_completion_id] = sync_completion,
		[bio_completion_id]  = bio_completion,
	};

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	result = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
	if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
{
	spin_lock_irq(&nvmeq->q_lock);
	cancel_cmdid_data(nvmeq, cmdid);
	spin_unlock_irq(&nvmeq->q_lock);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
			struct nvme_command *cmd, u32 *result, unsigned timeout)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion_id,
								timeout);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_KILLABLE);
	nvme_submit_cmd(nvmeq, cmd);
	schedule();

	if (cmdinfo.status == -EINTR) {
		nvme_abort_command(nvmeq, cmdid);
		return -EINTR;
	}

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];

	free_irq(dev->entry[nvmeq->cq_vector].vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	if (use_threaded_interrupts)
		return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
					nvme_irq_check, nvme_irq,
					IRQF_DISABLED | IRQF_SHARED,
					name, nvmeq);
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	if (!nvmeq)
		return NULL;

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return NULL;
}

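/*
 * The admin queue is set up directly through controller registers: AQA
 * carries the zero-based submission and completion queue sizes in its
 * low and high halves, ASQ/ACQ point at the queue memory, and CC
 * enables the controller.  CSTS is then polled until the ready bit is set.
 */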
static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;

	writel(0, &dev->bar->cc);
	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}

static int nvme_map_user_pages(struct nvme_dev *dev, int write,
				unsigned long addr, unsigned length,
				struct scatterlist **sgp)
{
	int i, err, count, nents, offset;
	struct scatterlist *sg;
	struct page **pages;

	if (addr & 3)
		return -EINVAL;
	if (!length)
		return -EINVAL;

	offset = offset_in_page(addr);
	count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);

	err = get_user_pages_fast(addr, count, 1, pages);
	if (err < count) {
		count = err;
		err = -EFAULT;
		goto put_pages;
	}

	sg = kcalloc(count, sizeof(*sg), GFP_KERNEL);
	sg_init_table(sg, count);
	sg_set_page(&sg[0], pages[0], PAGE_SIZE - offset, offset);
	length -= (PAGE_SIZE - offset);
	for (i = 1; i < count; i++) {
		sg_set_page(&sg[i], pages[i], min_t(int, length, PAGE_SIZE), 0);
		length -= PAGE_SIZE;
	}

	err = -ENOMEM;
	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!nents)
		goto put_pages;

	kfree(pages);
	*sgp = sg;
	return nents;

 put_pages:
	for (i = 0; i < count; i++)
		put_page(pages[i]);
	kfree(pages);
	return err;
}

static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			unsigned long addr, int length,
			struct scatterlist *sg, int nents)
{
	int i, count;

	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
	dma_unmap_sg(&dev->pci_dev->dev, sg, nents, DMA_FROM_DEVICE);

	for (i = 0; i < count; i++)
		put_page(sg_page(&sg[i]));
}

static int nvme_submit_user_admin_command(struct nvme_dev *dev,
					unsigned long addr, unsigned length,
					struct nvme_command *cmd)
{
	int err, nents;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	nents = nvme_map_user_pages(dev, 0, addr, length, &sg);
	if (nents < 0)
		return nents;
	prps = nvme_setup_prps(dev, &cmd->common, sg, length);
	err = nvme_submit_admin_cmd(dev, cmd, NULL);
	nvme_unmap_user_pages(dev, 0, addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	return err ? -EIO : 0;
}

static int nvme_identify(struct nvme_ns *ns, unsigned long addr, int cns)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_get_range_type(struct nvme_ns *ns, unsigned long addr)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(ns->ns_id);
	c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	return nvme_submit_user_admin_command(ns->dev, addr, 4096, &c);
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_queue *nvmeq;
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	u32 result;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = io.nblocks << io.block_shift;
	nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg);
	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(io.nsid);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks - 1);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);	/* XXX: endian? */
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);
	/* XXX: metadata */
	prps = nvme_setup_prps(dev, &c.common, sg, length);

	nvmeq = get_nvmeq(ns);
	/* Since nvme_submit_sync_cmd sleeps, we can't keep preemption
	 * disabled.  We may be preempted at any point, and be rescheduled
	 * to a different CPU.  That will cause cacheline bouncing, but no
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	status = nvme_submit_sync_cmd(nvmeq, &c, &result, IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	put_user(result, &uio->result);
	return status;
}

static int nvme_download_firmware(struct nvme_ns *ns,
						struct nvme_dlfw __user *udlfw)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_dlfw dlfw;
	struct nvme_command c;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&dlfw, udlfw, sizeof(dlfw)))
		return -EFAULT;
	if (dlfw.length >= (1 << 30))
		return -EINVAL;

	nents = nvme_map_user_pages(dev, 1, dlfw.addr, dlfw.length * 4, &sg);
	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.dlfw.opcode = nvme_admin_download_fw;
	c.dlfw.numd = cpu_to_le32(dlfw.length);
	c.dlfw.offset = cpu_to_le32(dlfw.offset);
	prps = nvme_setup_prps(dev, &c.common, sg, dlfw.length * 4);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	nvme_unmap_user_pages(dev, 0, dlfw.addr, dlfw.length * 4, sg, nents);
	nvme_free_prps(dev, prps);
	return status;
}

static int nvme_activate_firmware(struct nvme_ns *ns, unsigned long arg)
{
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_activate_fw;
	c.common.rsvd10[0] = cpu_to_le32(arg);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_IDENTIFY_NS:
		return nvme_identify(ns, arg, 0);
	case NVME_IOCTL_IDENTIFY_CTRL:
		return nvme_identify(ns, arg, 1);
	case NVME_IOCTL_GET_RANGE_TYPE:
		return nvme_get_range_type(ns, arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
	case NVME_IOCTL_DOWNLOAD_FW:
		return nvme_download_firmware(ns, (void __user *)arg);
	case NVME_IOCTL_ACTIVATE_FW:
		return nvme_activate_firmware(ns, arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
};

static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
	while (bio_list_peek(&nvmeq->sq_cong)) {
		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
			bio_list_add_head(&nvmeq->sq_cong, bio);
			break;
		}
	}
}

static int nvme_kthread(void *data)
{
	struct nvme_dev *dev;

	while (!kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock(&dev_list_lock);
		list_for_each_entry(dev, &dev_list, node) {
			int i;
			for (i = 0; i < dev->queue_count; i++) {
				struct nvme_queue *nvmeq = dev->queues[i];
				spin_lock_irq(&nvmeq->q_lock);
				if (nvme_process_cq(nvmeq))
					printk("process_cq did something\n");
				nvme_resubmit_bios(nvmeq);
				spin_unlock_irq(&nvmeq->q_lock);
			}
		}
		spin_unlock(&dev_list_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int index,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
				QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = index;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * index;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->driverfs_dev = &dev->pci_dev->dev;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	put_disk(ns->disk);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	struct nvme_command c;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
	c.features.dword11 = cpu_to_le32(q_count);

	status = nvme_submit_admin_cmd(dev, &c, &result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int result, cpu, i, nr_io_queues;

	nr_io_queues = num_online_cpus();
	result = set_queue_count(dev, nr_io_queues);
	if (result < 0)
		return result;
	if (result < nr_io_queues)
		nr_io_queues = result;

	/* Deregister the admin queue's interrupt */
	free_irq(dev->entry[0].vector, dev->queues[0]);

	for (i = 0; i < nr_io_queues; i++)
		dev->entry[i].entry = i;
	for (;;) {
		result = pci_enable_msix(dev->pci_dev, dev->entry,
								nr_io_queues);
		if (result == 0) {
			break;
		} else if (result > 0) {
			nr_io_queues = result;
			continue;
		} else {
			nr_io_queues = 1;
			break;
		}
	}

	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
	/* XXX: handle failure here */

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < nr_io_queues; i++) {
		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	for (i = 0; i < nr_io_queues; i++) {
		dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
							NVME_Q_DEPTH, i);
		if (!dev->queues[i + 1])
			return -ENOMEM;
		dev->queue_count++;
	}

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	struct nvme_id_ctrl *ctrl;
	void *id;
	dma_addr_t dma_addr;
	struct nvme_command cid, crt;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/* XXX: Switch to a SG list once prp2 works */
	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);

	memset(&cid, 0, sizeof(cid));
	cid.identify.opcode = nvme_admin_identify;
	cid.identify.nsid = 0;
	cid.identify.prp1 = cpu_to_le64(dma_addr);
	cid.identify.cns = cpu_to_le32(1);

	res = nvme_submit_admin_cmd(dev, &cid, NULL);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	ctrl = id;
	nn = le32_to_cpup(&ctrl->nn);
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	cid.identify.cns = 0;
	memset(&crt, 0, sizeof(crt));
	crt.features.opcode = nvme_admin_get_features;
	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	for (i = 0; i < nn; i++) {
		cid.identify.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &cid, NULL);
		if (res)
			continue;

		if (((struct nvme_id_ns *)id)->ncap == 0)
			continue;

		crt.features.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &crt, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, id, id + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return 0;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	spin_lock(&dev_list_lock);
	list_del(&dev->node);
	spin_unlock(&dev_list_lock);

	/* TODO: wait all I/O finished or cancel them */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	struct device *dmadev = &dev->pci_dev->dev;
	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
						PAGE_SIZE, PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int bars, result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
								GFP_KERNEL);
	if (!dev->queues)
		goto free;

	if (pci_enable_device_mem(pdev))
		goto free;
	pci_set_master(pdev);
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_request_selected_regions(pdev, bars, "nvme"))
		goto disable;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);
	dev->entry[0].vector = pdev->irq;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto disable_msix;

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable_msix;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	result = nvme_dev_add(dev);
	if (result)
		goto delete;

	spin_lock(&dev_list_lock);
	list_add(&dev->node, &dev_list);
	spin_unlock(&dev_list_lock);

	return 0;

 delete:
	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable_msix:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
 disable:
	pci_disable_device(pdev);
	pci_release_regions(pdev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	nvme_release_prp_pools(dev);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result = -EBUSY;

	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
	if (IS_ERR(nvme_thread))
		return PTR_ERR(nvme_thread);

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		goto kill_kthread;

	result = pci_register_driver(&nvme_driver);
	if (result)
		goto unregister_blkdev;
	return 0;

 unregister_blkdev:
	unregister_blkdev(nvme_major, "nvme");
 kill_kthread:
	kthread_stop(nvme_thread);
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
	kthread_stop(nvme_thread);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.3");
module_init(nvme_init);
module_exit(nvme_exit);