/* nvme-core.c, revision b3b06812e199f248561ce7824a4a8a9cd573c05a */
/*
 * NVM Express device driver
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>

#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth)		((depth) * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		((depth) * sizeof(struct nvme_completion))
#define NVME_MINORS 64

static int nvme_major;
module_param(nvme_major, int, 0);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	int instance;
	int queue_count;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
};

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	volatile struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	wait_queue_head_t sq_full;
	struct bio_list sq_cong;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_cycle;
	unsigned long cmdid_data[];
};
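/*
 * Note on cmdid_data (inferred from alloc_cmdid()/free_cmdid() and the
 * "extra" allocation in nvme_alloc_queue() below): the first
 * BITS_TO_LONGS(q_depth) longs form a bitmap of command IDs in use, and
 * the following q_depth longs hold, for each command ID, the context
 * pointer ORed with its handler index.
 */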

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
}

/**
 * alloc_cmdid - Allocate a Command ID
 * @param nvmeq The queue that will be used for this command
 * @param ctx A pointer that will be passed to the handler
 * @param handler The ID of the handler to call
 *
 * Allocate a Command ID for a queue.  The data passed in will
 * be passed to the completion handler.  This is implemented by using
 * the bottom two bits of the ctx pointer to store the handler ID.
 * Passing in a pointer that's not 4-byte aligned will cause a BUG.
 * We can change this if it becomes a problem.
 */
static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, int handler)
{
	int depth = nvmeq->q_depth;
	unsigned long data = (unsigned long)ctx | handler;
	int cmdid;

	BUG_ON((unsigned long)ctx & 3);

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(depth)] = data;
	return cmdid;
}

static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
								int handler)
{
	int cmdid;
	wait_event_killable(nvmeq->sq_full,
			(cmdid = alloc_cmdid(nvmeq, ctx, handler)) >= 0);
	return (cmdid < 0) ? -EINTR : cmdid;
}

/* If you need more than four handlers, you'll need to change how
 * alloc_cmdid and nvme_process_cq work
 */
enum {
	sync_completion_id = 0,
	bio_completion_id,
};

static unsigned long free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	unsigned long data;

	data = nvmeq->cmdid_data[cmdid + BITS_TO_LONGS(nvmeq->q_depth)];
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);
	return data;
}

static struct nvme_queue *get_nvmeq(struct nvme_ns *ns)
{
	return ns->dev->queues[1];
}

static void put_nvmeq(struct nvme_queue *nvmeq)
{
}
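
/*
 * get_nvmeq() currently always hands back the single I/O queue
 * (queues[1]) and put_nvmeq() is an empty stub; presumably these are
 * placeholders for a scheme that picks a queue per CPU later on.
 */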

/**
 * nvme_submit_cmd: Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	unsigned long flags;
	u16 tail;
	/* XXX: Need to check tail isn't going to overrun head */
	spin_lock_irqsave(&nvmeq->q_lock, flags);
	tail = nvmeq->sq_tail;
	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	if (++tail == nvmeq->q_depth)
		tail = 0;
	/* The SQ tail doorbell must be written with the new tail value */
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;
}

struct nvme_req_info {
	struct bio *bio;
	int nents;
	struct scatterlist sg[0];
};

/* XXX: use a mempool */
static struct nvme_req_info *alloc_info(unsigned nseg, gfp_t gfp)
{
	return kmalloc(sizeof(struct nvme_req_info) +
			sizeof(struct scatterlist) * nseg, gfp);
}

static void free_info(struct nvme_req_info *info)
{
	kfree(info);
}

static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct nvme_req_info *info = ctx;
	struct bio *bio = info->bio;
	u16 status = le16_to_cpup(&cqe->status) >> 1;

	dma_unmap_sg(nvmeq->q_dmadev, info->sg, info->nents,
			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	free_info(info);
	bio_endio(bio, status ? -EIO : 0);
}

static int nvme_map_bio(struct device *dev, struct nvme_req_info *info,
		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{
	struct bio_vec *bvec;
	struct scatterlist *sg = info->sg;
	int i, nsegs = 0;

	sg_init_table(sg, psegs);
	bio_for_each_segment(bvec, bio, i) {
		sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		sg++;
		/* XXX: handle non-mergable here */
		nsegs++;
	}
	info->nents = nsegs;

	return dma_map_sg(dev, info->sg, info->nents, dma_dir);
}

static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
								struct bio *bio)
{
	struct nvme_rw_command *cmnd;
	struct nvme_req_info *info;
	enum dma_data_direction dma_dir;
	int cmdid;
	u16 control;
	u32 dsmgmt;
	unsigned long flags;
	int psegs = bio_phys_segments(ns->queue, bio);

	info = alloc_info(psegs, GFP_NOIO);
	if (!info)
		goto congestion;
	info->bio = bio;

	cmdid = alloc_cmdid(nvmeq, info, bio_completion_id);
	if (unlikely(cmdid < 0))
		goto free_info;

	control = 0;
	if (bio->bi_rw & REQ_FUA)
		control |= NVME_RW_FUA;
	if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	dsmgmt = 0;
	if (bio->bi_rw & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	spin_lock_irqsave(&nvmeq->q_lock, flags);
	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail].rw;

	if (bio_data_dir(bio)) {
		cmnd->opcode = nvme_cmd_write;
		dma_dir = DMA_TO_DEVICE;
	} else {
		cmnd->opcode = nvme_cmd_read;
		dma_dir = DMA_FROM_DEVICE;
	}

	nvme_map_bio(nvmeq->q_dmadev, info, bio, dma_dir, psegs);

	cmnd->flags = 1;
	cmnd->command_id = cmdid;
	cmnd->nsid = cpu_to_le32(ns->ns_id);
	cmnd->prp1 = cpu_to_le64(sg_phys(info->sg));
	/* XXX: Support more than one PRP */
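	/*
	 * slba is in units of the namespace's LBA size: bi_sector counts
	 * 512-byte sectors, hence the shift by (lba_shift - 9).  length is
	 * the zero's-based number of LBAs, hence the "- 1" below.
	 */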
	cmnd->slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
	cmnd->length = cpu_to_le16((bio->bi_size >> ns->lba_shift) - 1);
	cmnd->control = cpu_to_le16(control);
	cmnd->dsmgmt = cpu_to_le32(dsmgmt);

	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	writel(nvmeq->sq_tail, nvmeq->q_db);

	spin_unlock_irqrestore(&nvmeq->q_lock, flags);

	return 0;

 free_info:
	free_info(info);
 congestion:
	return -EBUSY;
}

/*
 * NB: return value of non-zero would mean that we were a stacking driver.
 * make_request must always succeed.
 */
static int nvme_make_request(struct request_queue *q, struct bio *bio)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_queue *nvmeq = get_nvmeq(ns);

	if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
		blk_set_queue_congested(q, rw_is_sync(bio->bi_rw));
		bio_list_add(&nvmeq->sq_cong, bio);
	}
	put_nvmeq(nvmeq);

	return 0;
}

struct sync_cmd_info {
	struct task_struct *task;
	u32 result;
	int status;
};

static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
						struct nvme_completion *cqe)
{
	struct sync_cmd_info *cmdinfo = ctx;
	cmdinfo->result = le32_to_cpup(&cqe->result);
	cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
	wake_up_process(cmdinfo->task);
}

typedef void (*completion_fn)(struct nvme_queue *, void *,
						struct nvme_completion *);

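/*
 * A completion entry is new when its phase bit (bit 0 of the status
 * field) matches cq_cycle; cq_cycle is flipped each time the head wraps
 * around the queue.  Once a batch has been handled, the new head is
 * written to the CQ head doorbell (q_db + 1).
 */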
static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
{
	u16 head, cycle;

	static const completion_fn completions[4] = {
		[sync_completion_id] = sync_completion,
		[bio_completion_id]  = bio_completion,
	};

	head = nvmeq->cq_head;
	cycle = nvmeq->cq_cycle;

	for (;;) {
		unsigned long data;
		void *ptr;
		unsigned char handler;
		struct nvme_completion cqe = nvmeq->cqes[head];
		if ((le16_to_cpu(cqe.status) & 1) != cycle)
			break;
		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			cycle = !cycle;
		}

		data = free_cmdid(nvmeq, cqe.command_id);
		handler = data & 3;
		ptr = (void *)(data & ~3UL);
		completions[handler](nvmeq, ptr, &cqe);
	}

	/* If the controller ignores the cq head doorbell and continuously
	 * writes to the queue, it is theoretically possible to wrap around
	 * the queue twice and mistakenly return IRQ_NONE.  Linux only
	 * requires that 0.1% of your interrupts are handled, so this isn't
	 * a big problem.
	 */
	if (head == nvmeq->cq_head && cycle == nvmeq->cq_cycle)
		return IRQ_NONE;

	writel(head, nvmeq->q_db + 1);
	nvmeq->cq_head = head;
	nvmeq->cq_cycle = cycle;

	return IRQ_HANDLED;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	return nvme_process_cq(data);
}

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
static int nvme_submit_sync_cmd(struct nvme_queue *q, struct nvme_command *cmd,
								u32 *result)
{
	int cmdid;
	struct sync_cmd_info cmdinfo;

	cmdinfo.task = current;
	cmdinfo.status = -EINTR;

	cmdid = alloc_cmdid_killable(q, &cmdinfo, sync_completion_id);
	if (cmdid < 0)
		return cmdid;
	cmd->common.command_id = cmdid;

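	/*
	 * The task state is set before the command is submitted so that a
	 * completion arriving before schedule() still leaves the task
	 * runnable: sync_completion()'s wake_up_process() simply makes the
	 * schedule() below return promptly instead of sleeping forever.
	 */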
	set_current_state(TASK_UNINTERRUPTIBLE);
	nvme_submit_cmd(q, cmd);
	schedule();

	if (result)
		*result = cmdinfo.result;

	return cmdinfo.status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
								u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	int status;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	int status;
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
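	/* Queues are created in matched pairs here (see nvme_create_queue),
	 * so the SQ's completion queue ID is the same as its own queue ID.
	 */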
	c.create_sq.cqid = cpu_to_le16(qid);

	status = nvme_submit_admin_cmd(dev, &c, NULL);
	if (status)
		return -EIO;
	return 0;
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
	struct nvme_queue *nvmeq = dev->queues[qid];

	free_irq(dev->entry[nvmeq->cq_vector].vector, nvmeq);

	/* Don't tell the adapter to delete the admin queue */
	if (qid) {
		adapter_delete_sq(dev, qid);
		adapter_delete_cq(dev, qid);
	}

	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int vector)
{
	struct device *dmadev = &dev->pci_dev->dev;
	unsigned extra = (depth + BITS_TO_LONGS(depth)) * sizeof(long);
	struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
					&nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));

	nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		goto free_cqdma;

	nvmeq->q_dmadev = dmadev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_cycle = 1;
	init_waitqueue_head(&nvmeq->sq_full);
	bio_list_init(&nvmeq->sq_cong);
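	/* Two doorbells per queue pair: dbs[2 * qid] is the SQ tail doorbell
	 * and dbs[2 * qid + 1] the CQ head doorbell (this indexing assumes
	 * the controller's minimum doorbell stride).
	 */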
	nvmeq->q_db = &dev->dbs[qid * 2];
	nvmeq->q_depth = depth;
	nvmeq->cq_vector = vector;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
							const char *name)
{
	return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
				IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
}

static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
					int qid, int cq_size, int vector)
{
	int result;
	struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
	if (!nvmeq)
		return NULL;

	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto free_nvmeq;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = queue_request_irq(dev, nvmeq, "nvme");
	if (result < 0)
		goto release_sq;

	return nvmeq;

 release_sq:
	adapter_delete_sq(dev, qid);
 release_cq:
	adapter_delete_cq(dev, qid);
 free_nvmeq:
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
	return NULL;
}

static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	dev->dbs = ((void __iomem *)dev->bar) + 4096;

	nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
	if (!nvmeq)
		return -ENOMEM;

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
	dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;

	writel(aqa, &dev->bar->aqa);
	writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
	writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
	writel(dev->ctrl_config, &dev->bar->cc);

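	/* Wait for the controller to report ready (CSTS.RDY) now that CC
	 * has been written with the enable bit set.
	 */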
633b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
634b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox		msleep(100);
635b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox		if (fatal_signal_pending(current))
636b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox			return -EINTR;
637b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	}
638b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox
6393001082cac4bf6ffd09f72b39e6292ad6394ef17Matthew Wilcox	result = queue_request_irq(dev, nvmeq, "nvme admin");
640b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	dev->queues[0] = nvmeq;
641b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	return result;
642b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox}
643b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox
644b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcoxstatic int nvme_identify(struct nvme_ns *ns, void __user *addr, int cns)
645b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox{
646b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	struct nvme_dev *dev = ns->dev;
647b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	int status;
648b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	struct nvme_command c;
649b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	void *page;
650b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	dma_addr_t dma_addr;
651b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox
652b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
653b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox								GFP_KERNEL);
654b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox
655b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	memset(&c, 0, sizeof(c));
656b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	c.identify.opcode = nvme_admin_identify;
657b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	c.identify.nsid = cns ? 0 : cpu_to_le32(ns->ns_id);
658b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	c.identify.prp1 = cpu_to_le64(dma_addr);
659b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	c.identify.cns = cpu_to_le32(cns);
660b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox
661b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	status = nvme_submit_admin_cmd(dev, &c, NULL);
662b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox
663b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	if (status)
664b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox		status = -EIO;
665b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	else if (copy_to_user(addr, page, 4096))
666b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox		status = -EFAULT;
667b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox
668b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);
669b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox
670b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox	return status;
671b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox}
672b60503ba432b16fc84442a84e29a7aad2c0c363dMatthew Wilcox
static int nvme_get_range_type(struct nvme_ns *ns, void __user *addr)
{
	struct nvme_dev *dev = ns->dev;
	int status;
	struct nvme_command c;
	void *page;
	dma_addr_t dma_addr;

	page = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
								GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(ns->ns_id);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	status = nvme_submit_admin_cmd(dev, &c, NULL);

	/* XXX: Assuming first range for now */
	if (status)
		status = -EIO;
	else if (copy_to_user(addr, page, 64))
		status = -EFAULT;

	dma_free_coherent(&dev->pci_dev->dev, 4096, page, dma_addr);

	return status;
}

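/*
 * Block device ioctl handler: only the driver-private identify and LBA
 * range type queries are supported; everything else gets -ENOTTY.
 */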
static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
							unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_IDENTIFY_NS:
		return nvme_identify(ns, (void __user *)arg, 0);
	case NVME_IOCTL_IDENTIFY_CTRL:
		return nvme_identify(ns, (void __user *)arg, 1);
	case NVME_IOCTL_GET_RANGE_TYPE:
		return nvme_get_range_type(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
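
/*
 * Illustrative userspace sketch (not part of the driver): the identify
 * ioctls above expect a pointer to a 4096-byte buffer.  The device path
 * is an assumption based on the "nvme%dn%d" disk name; NVME_IOCTL_* and
 * struct nvme_id_ctrl come from the nvme header, le32toh() and the rest
 * from the usual userspace headers.
 *
 *	char buf[4096];
 *	struct nvme_id_ctrl *ctrl = (struct nvme_id_ctrl *)buf;
 *	int fd = open("/dev/nvme0n0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, NVME_IOCTL_IDENTIFY_CTRL, buf) == 0)
 *		printf("%u namespaces\n", le32toh(ctrl->nn));
 *	if (fd >= 0)
 *		close(fd);
 */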

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
};

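/*
 * Allocate one namespace: create its request queue, derive the block size
 * from the currently formatted LBA size in the identify data, and register
 * a gendisk for it.  Namespaces whose LBA range is marked hidden are not
 * exposed.
 */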
static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int index,
			struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int lbaf;

	if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
		return NULL;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;
	ns->queue = blk_alloc_queue(GFP_KERNEL);
	if (!ns->queue)
		goto out_free_ns;
	/* QUEUE_FLAG_DEFAULT is already a mask; the other flags are bit numbers */
	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT |
				(1 << QUEUE_FLAG_NOMERGES) |
				(1 << QUEUE_FLAG_NONROT) |
				(1 << QUEUE_FLAG_DISCARD);
	blk_queue_make_request(ns->queue, nvme_make_request);
	ns->dev = dev;
	ns->queue->queuedata = ns;

	disk = alloc_disk(NVME_MINORS);
	if (!disk)
		goto out_free_queue;
	ns->ns_id = index;
	ns->disk = disk;
	lbaf = id->flbas & 0xf;
	ns->lba_shift = id->lbaf[lbaf].ds;

	disk->major = nvme_major;
	disk->minors = NVME_MINORS;
	disk->first_minor = NVME_MINORS * index;
	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	sprintf(disk->disk_name, "nvme%dn%d", dev->instance, index);
	set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	return ns;

 out_free_queue:
	blk_cleanup_queue(ns->queue);
 out_free_ns:
	kfree(ns);
	return NULL;
}

static void nvme_ns_free(struct nvme_ns *ns)
{
	put_disk(ns->disk);
	blk_cleanup_queue(ns->queue);
	kfree(ns);
}

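/*
 * The Number of Queues feature encodes the (zero-based) submission queue
 * count in the low 16 bits of dword11 and the completion queue count in
 * the high 16 bits; the controller reports what it actually allocated in
 * the same format.  For count = 4 we send 0x00030003, and a result of
 * 0x00070007 would mean min(7, 7) + 1 = 8 usable queues of each type.
 * Note this is issued as a Get Features command; if the controller
 * follows the final spec, the requested count in dword11 is ignored and
 * only the currently allocated queue count is read back.
 */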
static int set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	struct nvme_command c;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.fid = cpu_to_le32(NVME_FEAT_NUM_QUEUES);
	c.features.dword11 = cpu_to_le32(q_count);

	status = nvme_submit_admin_cmd(dev, &c, &result);
	if (status)
		return -EIO;
	return min(result & 0xffff, result >> 16) + 1;
}

/* XXX: Create per-CPU queues */
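/*
 * Only a single I/O queue (queue 1) is created for now; the id of the CPU
 * running the probe is passed through to nvme_create_queue().
 */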
static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
{
	int this_cpu;

	set_queue_count(dev, 1);

	this_cpu = get_cpu();
	dev->queues[1] = nvme_create_queue(dev, 1, NVME_Q_DEPTH, this_cpu);
	put_cpu();
	if (!dev->queues[1])
		return -ENOMEM;
	dev->queue_count++;

	return 0;
}

static void nvme_free_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_free_queue(dev, i);
}

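/*
 * Top-level device scan: set up the I/O queue, identify the controller to
 * learn the namespace count, then identify each namespace in turn.  A
 * single 8K coherent buffer is reused throughout: the first page holds
 * the identify data, the second the LBA range types.  Namespaces that
 * fail to identify or report zero capacity are skipped.
 */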
static int __devinit nvme_dev_add(struct nvme_dev *dev)
{
	int res, nn, i;
	struct nvme_ns *ns, *next;
	void *id;
	dma_addr_t dma_addr;
	struct nvme_command cid, crt;

	res = nvme_setup_io_queues(dev);
	if (res)
		return res;

	/* XXX: Switch to a SG list once prp2 works */
	id = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
								GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	memset(&cid, 0, sizeof(cid));
	cid.identify.opcode = nvme_admin_identify;
	cid.identify.nsid = 0;
	cid.identify.prp1 = cpu_to_le64(dma_addr);
	cid.identify.cns = cpu_to_le32(1);

	res = nvme_submit_admin_cmd(dev, &cid, NULL);
	if (res) {
		res = -EIO;
		goto out_free;
	}

	nn = le32_to_cpup(&((struct nvme_id_ctrl *)id)->nn);

	cid.identify.cns = 0;
	memset(&crt, 0, sizeof(crt));
	crt.features.opcode = nvme_admin_get_features;
	crt.features.prp1 = cpu_to_le64(dma_addr + 4096);
	crt.features.fid = cpu_to_le32(NVME_FEAT_LBA_RANGE);

	for (i = 0; i < nn; i++) {
		cid.identify.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &cid, NULL);
		if (res)
			continue;

		if (((struct nvme_id_ns *)id)->ncap == 0)
			continue;

		crt.features.nsid = cpu_to_le32(i);
		res = nvme_submit_admin_cmd(dev, &crt, NULL);
		if (res)
			continue;

		ns = nvme_alloc_ns(dev, i, id, id + 4096);
		if (ns)
			list_add_tail(&ns->list, &dev->namespaces);
	}
	list_for_each_entry(ns, &dev->namespaces, list)
		add_disk(ns->disk);

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return 0;

 out_free:
	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		nvme_ns_free(ns);
	}

	dma_free_coherent(&dev->pci_dev->dev, 8192, id, dma_addr);
	return res;
}

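/*
 * Undo nvme_dev_add(): unlink and free every namespace (removing its
 * gendisk), then free the queues.
 */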
static int nvme_dev_remove(struct nvme_dev *dev)
{
	struct nvme_ns *ns, *next;

	/* TODO: wait for all outstanding I/O to finish, or cancel it */

	list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
		list_del(&ns->list);
		del_gendisk(ns->disk);
		nvme_ns_free(ns);
	}

	nvme_free_queues(dev);

	return 0;
}

/* XXX: Use an ida or something to let remove / add work correctly */
static void nvme_set_instance(struct nvme_dev *dev)
{
	static int instance;
	dev->instance = instance++;
}

static void nvme_release_instance(struct nvme_dev *dev)
{
}

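/*
 * PCI probe: allocate per-device state (an interrupt entry per possible
 * CPU and two queue slots for now), map the first 8K of BAR 0 for the
 * controller registers and doorbells, bring up the admin queue and then
 * scan for namespaces.
 */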
static int __devinit nvme_probe(struct pci_dev *pdev,
						const struct pci_device_id *id)
{
	int result = -ENOMEM;
	struct nvme_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
								GFP_KERNEL);
	if (!dev->entry)
		goto free;
	dev->queues = kcalloc(2, sizeof(void *), GFP_KERNEL);
	if (!dev->queues)
		goto free;

	INIT_LIST_HEAD(&dev->namespaces);
	dev->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	dma_set_mask(&dev->pci_dev->dev, DMA_BIT_MASK(64));
	nvme_set_instance(dev);

	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
	if (!dev->bar) {
		result = -ENOMEM;
		goto disable;
	}

	result = nvme_configure_admin_queue(dev);
	if (result)
		goto unmap;
	dev->queue_count++;

	result = nvme_dev_add(dev);
	if (result)
		goto delete;
	return 0;

 delete:
	nvme_free_queues(dev);
 unmap:
	iounmap(dev->bar);
 disable:
	pci_disable_msix(pdev);
	nvme_release_instance(dev);
 free:
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
	return result;
}

static void __devexit nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);
	nvme_dev_remove(dev);
	pci_disable_msix(pdev);
	iounmap(dev->bar);
	nvme_release_instance(dev);
	kfree(dev->queues);
	kfree(dev->entry);
	kfree(dev);
}

/* These functions are yet to be implemented */
#define nvme_error_detected NULL
#define nvme_dump_registers NULL
#define nvme_link_reset NULL
#define nvme_slot_reset NULL
#define nvme_error_resume NULL
#define nvme_suspend NULL
#define nvme_resume NULL

static struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.mmio_enabled	= nvme_dump_registers,
	.link_reset	= nvme_link_reset,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
};

/* Move to pci_ids.h later */
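/*
 * Class 0x01 (mass storage), subclass 0x08 (non-volatile memory),
 * programming interface 0x02 (NVM Express).
 */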
#define PCI_CLASS_STORAGE_EXPRESS	0x010802

static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= __devexit_p(nvme_remove),
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.err_handler	= &nvme_err_handler,
};

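/*
 * register_blkdev() returns a dynamically allocated major when passed 0
 * and 0 on success when a specific major is requested, so the check below
 * assumes nvme_major starts out as 0.
 */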
static int __init nvme_init(void)
{
	int result;

	nvme_major = register_blkdev(nvme_major, "nvme");
	if (nvme_major <= 0)
		return -EBUSY;

	result = pci_register_driver(&nvme_driver);
	if (!result)
		return 0;

	unregister_blkdev(nvme_major, "nvme");
	return result;
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	unregister_blkdev(nvme_major, "nvme");
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
module_init(nvme_init);
module_exit(nvme_exit);