aoeblk.c revision 83d5cde47dedf01b6a4a4331882cbc0a7eea3c2e
/* Copyright (c) 2007 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include "aoe.h"

static struct kmem_cache *buf_pool_cache;

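/*
 * sysfs show methods: expose the per-device state, the first target's MAC
 * address, the network interfaces in use, and the firmware version under
 * the disk's sysfs directory (see aoedisk_add_sysfs below).
 */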
static ssize_t aoedisk_show_state(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE,
			"%s%s\n",
			(d->flags & DEVFL_UP) ? "up" : "down",
			(d->flags & DEVFL_KICKME) ? ",kickme" :
			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
	/* I'd rather see nopen exported so we can ditch closewait */
}
static ssize_t aoedisk_show_mac(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct aoetgt *t = d->targets[0];

	if (t == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
}
static ssize_t aoedisk_show_netif(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct net_device *nds[8], **nd, **nnd, **ne;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *e;
	char *p;

	memset(nds, 0, sizeof nds);
	nd = nds;
	ne = nd + ARRAY_SIZE(nds);
	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++) {
		ifp = (*t)->ifs;
		e = ifp + NAOEIFS;
		for (; ifp < e && ifp->nd; ifp++) {
			for (nnd = nds; nnd < nd; nnd++)
				if (*nnd == ifp->nd)
					break;
			if (nnd == nd && nd != ne)
				*nd++ = ifp->nd;
		}
	}

	ne = nd;
	nd = nds;
	if (*nd == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	for (p = page; nd < ne; nd++)
		p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
			p == page ? "" : ",", (*nd)->name);
	p += snprintf(p, PAGE_SIZE - (p-page), "\n");
	return p-page;
}
/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}

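/*
 * "firmware-version" contains a hyphen, so DEVICE_ATTR() cannot be used
 * for it (the macro would paste the name into an invalid C identifier);
 * that attribute is spelled out by hand below.
 */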
static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
static struct device_attribute dev_attr_firmware_version = {
	.attr = { .name = "firmware-version", .mode = S_IRUGO },
	.show = aoedisk_show_fwver,
};

static struct attribute *aoe_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_mac.attr,
	&dev_attr_netif.attr,
	&dev_attr_firmware_version.attr,
	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = aoe_attrs,
};

static int
aoedisk_add_sysfs(struct aoedev *d)
{
	return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}
void
aoedisk_rm_sysfs(struct aoedev *d)
{
	sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}

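/* open succeeds only while the device is marked up; openers are counted under d->lock */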
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
	struct aoedev *d = bdev->bd_disk->private_data;
	ulong flags;

	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_UP) {
		d->nopen++;
		spin_unlock_irqrestore(&d->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	return -ENODEV;
}

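/* on last close, send an AoE config query to refresh the device's status */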
static int
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
	struct aoedev *d = disk->private_data;
	ulong flags;

	spin_lock_irqsave(&d->lock, flags);

	if (--d->nopen == 0) {
		spin_unlock_irqrestore(&d->lock, flags);
		aoecmd_cfg(d->aoemajor, d->aoeminor);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}

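/*
 * aoe is a bio-based driver: each incoming bio is wrapped in a struct buf
 * and appended to d->bufq; aoecmd_work turns queued bufs into AoE frames
 * on d->sendq, which are transmitted via aoenet_xmit after d->lock is
 * dropped.
 */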
static int
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct buf *buf;
	ulong flags;

	blk_queue_bounce(q, &bio);

	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		BUG();
		return 0;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	} else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	}
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->stime = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
	buf->bv_off = buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_INFO "aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);

	aoecmd_work(d);
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);

	return 0;
}

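/* report the geometry advertised by the target (backs the HDIO_GETGEO ioctl) */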
static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct aoedev *d = bdev->bd_disk->private_data;

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_ERR "aoe: disk not up\n");
		return -ENODEV;
	}

	geo->cylinders = d->geo.cylinders;
	geo->heads = d->geo.heads;
	geo->sectors = d->geo.sectors;
	return 0;
}

static const struct block_device_operations aoe_bdops = {
	.open = aoeblk_open,
	.release = aoeblk_release,
	.getgeo = aoeblk_getgeo,
	.owner = THIS_MODULE,
};

/* alloc_disk and add_disk can sleep */
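/* so aoeblk_gdalloc runs in process context, scheduled from a work item rather than called directly */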
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	ulong flags;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		printk(KERN_ERR
			"aoe: cannot allocate disk structure for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
	if (d->bufpool == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_disk;
	}

	d->blkq = blk_alloc_queue(GFP_KERNEL);
	if (!d->blkq)
		goto err_mempool;
	blk_queue_make_request(d->blkq, aoeblk_make_request);
	d->blkq->backing_dev_info.name = "aoe";
	if (bdi_init(&d->blkq->backing_dev_info))
		goto err_blkq;
	spin_lock_irqsave(&d->lock, flags);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor * AOE_PARTITIONS;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	gd->queue = d->blkq;
	d->gd = gd;
	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);
	return;

err_blkq:
	blk_cleanup_queue(d->blkq);
	d->blkq = NULL;
err_mempool:
	mempool_destroy(d->bufpool);
err_disk:
	put_disk(gd);
err:
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GDALLOC;
	spin_unlock_irqrestore(&d->lock, flags);
}

void
aoeblk_exit(void)
{
	kmem_cache_destroy(buf_pool_cache);
}

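/* module init: create the slab cache backing each device's buf mempool */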
int __init
aoeblk_init(void)
{
	buf_pool_cache = kmem_cache_create("aoe_bufs",
					   sizeof(struct buf),
					   0, 0, NULL);
	if (buf_pool_cache == NULL)
		return -ENOMEM;

	return 0;
}