nvme-core.c revision b3b06812e199f248561ce7824a4a8a9cd573c05a
/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define NVME_MINORS 64

static int nvme_major;
module_param(nvme_major, int, 0);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
};
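/*
 * Doorbell layout note (derived from the code below): dev->dbs points at
 * BAR offset 4096, and queue qid uses dbs[qid * 2] as its submission queue
 * tail doorbell and dbs[qid * 2 + 1] as its completion queue head doorbell.
 * This assumes one 32-bit register per doorbell (doorbell stride 0).
 */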
/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_cycle;
	unsigned long cmdid_data[];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

/**
 * alloc_cmdid - Allocate a Command ID
 * @param nvmeq The queue that will be used for this command
 * @param ctx A pointer that will be passed to the handler
 * @param handler The ID of the handler to call
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler)
{
	int depth = nvmeq->q_depth;
	unsigned long data = (unsigned long)ctx | handler;
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(depth)] = data;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
							int handler)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
		(cmdid = alloc_cmdid(nvmeq, ctx, handler)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}
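/*
 * Layout of cmdid_data (as used by alloc_cmdid() and free_cmdid()): the
 * first BITS_TO_LONGS(q_depth) longs form the allocation bitmap; the
 * following q_depth longs hold, per command ID, the ctx pointer with the
 * two-bit handler index ORed into its low bits.
 */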
/* If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work
 */
enum {
	sync_completion_id = 0,
	bio_completion_id,
};

static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;

	data = nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(nvmeq->q_depth)];
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	return ns->dev->queues[1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
}

/**
 * nvme_submit_cmd: Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	/* XXX: Need to check tail isn't going to overrun head */
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	writel(tail, nvmeq->q_db);
	if (++tail == nvmeq->q_depth)
		tail = 0;
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

struct nvme_req_info {
	struct bio *bio;
	int nents;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_req_info *alloc_info(unsigned nseg, gfp_t gfp)
{
	return kmalloc(sizeof(struct nvme_req_info) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_info(struct nvme_req_info *info)
{
	kfree(info);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_req_info *info = ctx;
	struct bio *bio = info->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, info->sg, info->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_info(info);
	bio_endio(bio, status ? -EIO : 0);
}
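/*
 * Note on completion status handling: bit 0 of the completion entry's
 * status field is the phase tag (used by nvme_process_cq() below to spot
 * new entries), so the handlers shift right by one to recover the status
 * code proper; a non-zero value indicates an error.
 */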
static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec;
	struct scatterlist *sg = info->sg;
	int i, nsegs = 0;

	sg_init_table(sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		/* XXX: handle non-mergeable here */
		nsegs++;
	}
	info->nents = nsegs;

	return dma_map_sg(dev, info->sg, info->nents, dma_dir);
}

static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_rw_command *cmnd;
	struct nvme_req_info *info;
	enum dma_data_direction dma_dir;
	int cmdid;
	u16 control;
	u32 dsmgmt;
	unsigned long flags;
	int psegs = bio_phys_segments(ns->queue, bio);

	info = alloc_info(psegs, GFP_NOIO);
	if (!info)
		goto congestion;
	info->bio = bio;

	cmdid = alloc_cmdid(nvmeq, info, bio_completion_id);
	if (unlikely(cmdid < 0))
		goto free_info;

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	spin_lock_irqsave(&nvmeq->q_lock, flags);
	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail].rw;

	if (bio_data_dir(bio)) {
		cmnd->opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	nvme_map_bio(nvmeq->q_dmadev, info, bio, dma_dir, psegs);

	cmnd->flags = 1;
	cmnd->command_id = cmdid;
	cmnd->nsid = cpu_to_le32(ns->ns_id);
	cmnd->prp1 = cpu_to_le64(sg_phys(info->sg));
	/* XXX: Support more than one PRP */
	cmnd->slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
	cmnd->control = cpu_to_le16(control);
	cmnd->dsmgmt = cpu_to_le32(dsmgmt);

	writel(nvmeq->sq_tail, nvmeq->q_db);
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;

	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;

 free_info:
	free_info(info);
 congestion:
	return -EBUSY;
}
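/*
 * Unit conventions used when building the read/write command above:
 * bio->bi_sector counts 512-byte sectors, so the starting LBA is shifted
 * right by (lba_shift - 9) to convert to the namespace's block size, and
 * the length field is a zero-based count of logical blocks.
 */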
/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);

	if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
		blk_set_queue_congested(q, rw_is_sync(bio->bi_rw));
		bio_list_add(&nvmeq->sq_cong, bio);
	}
	put_nvmeq(nvmeq);

	return 0;
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, cycle;

	static const completion_fn completions[4] = {
		[sync_completion_id] = sync_completion,
		[bio_completion_id] = bio_completion,
	};

	head = nvmeq->cq_head;
	cycle = nvmeq->cq_cycle;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != cycle)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			cycle = !cycle;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && cycle == nvmeq->cq_cycle)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_cycle = cycle;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	return nvme_process_cq(data);
}
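/*
 * How nvme_process_cq() detects new entries: the controller toggles the
 * phase tag (bit 0 of the status field) each time it wraps around the
 * completion queue, and cq_cycle tracks the value expected for unread
 * entries, flipping whenever the head index wraps.  The head doorbell for
 * a queue is the register after its tail doorbell, hence q_db + 1.
 */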
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *q, struct nvme_command *cmd,
							u32 *result)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(q, &cmdinfo, sync_completion_id);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

	set_current_state(TASK_UNINTERRUPTIBLE);
	nvme_submit_cmd(q, cmd);
	schedule();

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
							u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}
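/*
 * The qsize written in the create commands above is zero-based (hence
 * q_depth - 1), and each I/O submission queue is created with cqid equal
 * to its own qid, pairing every SQ one-to-one with a CQ.
 */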
static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];

	free_irq(dev->entry[nvmeq->cq_vector].vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
						int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth + BITS_TO_LONGS(depth)) * sizeof(long);
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_cycle = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	bio_list_init(&nvmeq->sq_cong);
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return NULL;
}

static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
	}

	result = queue_request_irq(dev, nvmeq, "nvme admin");
	dev->queues[0] = nvmeq;
	return result;
}
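/*
 * Admin queue bring-up above: AQA takes the zero-based admin queue sizes,
 * with the submission queue size in the low half and the completion queue
 * size in the high half (both 63 here), then ASQ/ACQ are pointed at the
 * DMA buffers and CC is written before polling CSTS for the ready bit.
 */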
static int nvme_identify(struct nvme_ns *ns, void __user *addr, int cns)
{
	struct nvme_dev *dev = ns->dev;
	int status;
	struct nvme_command c;
	void *page;
	dma_addr_t dma_addr;

	page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
								GFP_KERNEL);

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
	c.identify.prp1 = cpu_to_le64(dma_addr);
	c.identify.cns = cpu_to_le32(cns);

	status = nvme_submit_admin_cmd(dev, &c, NULL);

	if (status)
		status = -EIO;
	else if (copy_to_user(addr, page, 4096))
		status = -EFAULT;

	dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);

	return status;
}

static int nvme_get_range_type(struct nvme_ns *ns, void __user *addr)
{
	struct nvme_dev *dev = ns->dev;
	int status;
	struct nvme_command c;
	void *page;
	dma_addr_t dma_addr;

	page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
								GFP_KERNEL);

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(ns->ns_id);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	status = nvme_submit_admin_cmd(dev, &c, NULL);

	/* XXX: Assuming first range for now */
	if (status)
		status = -EIO;
	else if (copy_to_user(addr, page, 64))
		status = -EFAULT;

	dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_IDENTIFY_NS:
		return nvme_identify(ns, (void __user *)arg, 0);
	case NVME_IOCTL_IDENTIFY_CTRL:
		return nvme_identify(ns, (void __user *)arg, 1);
	case NVME_IOCTL_GET_RANGE_TYPE:
		return nvme_get_range_type(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct block_device_operations nvme_fops = {
	.owner = THIS_MODULE,
	.ioctl = nvme_ioctl,
};

static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int index,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT | QUEUE_FLAG_NOMERGES |
				QUEUE_FLAG_NONROT | QUEUE_FLAG_DISCARD;
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = index;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * index;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	put_disk(ns->disk);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}
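/*
 * Namespace sizing in nvme_alloc_ns() above: the low nibble of flbas
 * selects which entry of the Identify Namespace lbaf[] array is in use,
 * and its ds field is log2 of the LBA data size, so nsze (a count of
 * logical blocks) is shifted by (lba_shift - 9) to give the 512-byte
 * sector capacity that set_capacity() expects.
 */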
static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	struct nvme_command c;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
	c.features.dword11 = cpu_to_le32(q_count);

	status = nvme_submit_admin_cmd(dev, &c, &result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

/* XXX: Create per-CPU queues */
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int this_cpu;

	set_queue_count(dev, 1);

	this_cpu = get_cpu();
	dev->queues[1] = nvme_create_queue(dev, 1, NVME_Q_DEPTH, this_cpu);
	put_cpu();
	if (!dev->queues[1])
		return -ENOMEM;
	dev->queue_count++;

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	void *id;
	dma_addr_t dma_addr;
	struct nvme_command cid, crt;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/* XXX: Switch to a SG list once prp2 works */
	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);

	memset(&cid, 0, sizeof(cid));
	cid.identify.opcode = nvme_admin_identify;
	cid.identify.nsid = 0;
	cid.identify.prp1 = cpu_to_le64(dma_addr);
	cid.identify.cns = cpu_to_le32(1);

	res = nvme_submit_admin_cmd(dev, &cid, NULL);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	nn = le32_to_cpup(&((struct nvme_id_ctrl *)id)->nn);

	cid.identify.cns = 0;
	memset(&crt, 0, sizeof(crt));
	crt.features.opcode = nvme_admin_get_features;
	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	for (i = 0; i < nn; i++) {
		cid.identify.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &cid, NULL);
		if (res)
			continue;

		if (((struct nvme_id_ns *)id)->ncap == 0)
			continue;

		crt.features.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &crt, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, id, id + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
	return 0;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

	dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
	return res;
}

static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	/* TODO: wait all I/O finished or cancel them */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}
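/*
 * The probe routine below maps 8192 bytes of BAR 0: the first 4096 bytes
 * cover the controller register set and the next 4096 bytes hold the
 * doorbell registers that nvme_configure_admin_queue() points dev->dbs at.
 */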
static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(2, sizeof(void *), GFP_KERNEL);
	if (!dev->queues)
		goto free;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&dev->pci_dev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	result = nvme_dev_add(dev);
	if (result)
		goto delete;
	return 0;

 delete:
	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected = nvme_error_detected,
	.mmio_enabled = nvme_dump_registers,
	.link_reset = nvme_link_reset,
	.slot_reset = nvme_slot_reset,
	.resume = nvme_error_resume,
};

/* Move to pci_ids.h later */
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name = "nvme",
	.id_table = nvme_id_table,
	.probe = nvme_probe,
	.remove = __devexit_p(nvme_remove),
	.suspend = nvme_suspend,
	.resume = nvme_resume,
	.err_handler = &nvme_err_handler,
};

static int __init nvme_init(void)
{
	int result;

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		return -EBUSY;

	result = pci_register_driver(&nvme_driver);
	if (!result)
		return 0;

	unregister_blkdev(nvme_major, "nvme");
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
module_init(nvme_init);
module_exit(nvme_exit);
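/*
 * Illustrative user-space usage of the ioctls above (a sketch, not part of
 * the driver): assuming <linux/nvme.h> exports the NVME_IOCTL_* numbers and
 * that the first namespace of the first controller appears as /dev/nvme0n1,
 * the Identify Controller page can be fetched like this:
 *
 *	unsigned char id[4096];
 *	int fd = open("/dev/nvme0n1", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, NVME_IOCTL_IDENTIFY_CTRL, id) == 0)
 *		write(STDOUT_FILENO, id, sizeof(id));
 *
 * The 4096-byte buffer matches the copy_to_user() size in nvme_identify().
 */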