/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

*/

#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
			    unsigned int);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

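/*
 * Build the name for the per-device xenblkd kernel thread:
 * "blkback.<domid>.<devname>", where <devname> comes from the "dev"
 * node written by the toolstack (with any leading "/dev/" stripped).
 */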
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname  = devpath;

	snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}

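/*
 * Try to bring the backend into the Connected state.  This is a no-op
 * until both the ring is mapped (blkif->irq) and the backing device is
 * open (blkif->vbd.bdev); once connected, flush the backing device and
 * start the xenblkd I/O thread.
 */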
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];

	/* Not ready to connect? */
	if (!blkif->irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name);
	if (IS_ERR(blkif->xenblkd)) {
		err = PTR_ERR(blkif->xenblkd);
		blkif->xenblkd = NULL;
		xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
	}
}

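/* Allocate and initialise a zeroed xen_blkif for the given frontend domain. */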
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	memset(blkif, 0, sizeof(*blkif));
	blkif->domid = domid;
	spin_lock_init(&blkif->blk_ring_lock);
	atomic_set(&blkif->refcnt, 1);
	init_waitqueue_head(&blkif->wq);
	init_completion(&blkif->drain_complete);
	atomic_set(&blkif->drain, 0);
	blkif->st_print = jiffies;
	init_waitqueue_head(&blkif->waiting_to_free);

	return blkif;
}

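/*
 * Map the frontend's shared ring page, initialise the back ring for the
 * negotiated protocol and bind the interdomain event channel to the
 * backend interrupt handler.
 */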
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring);
	if (err < 0)
		return err;

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;

	return 0;
}

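/*
 * Tear down the connection to the frontend: stop the xenblkd thread,
 * wait for outstanding references (in-flight requests) to drain, then
 * unbind the event channel and unmap the shared ring.
 */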
static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
	if (blkif->xenblkd) {
		kthread_stop(blkif->xenblkd);
		blkif->xenblkd = NULL;
	}

	atomic_dec(&blkif->refcnt);
	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);

	if (blkif->irq) {
		unbind_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}

	if (blkif->blk_rings.common.sring) {
		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
		blkif->blk_rings.common.sring = NULL;
	}
}

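/* Drop the last reference and return the interface to the slab cache. */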
void xen_blkif_free(struct xen_blkif *blkif)
{
	if (!atomic_dec_and_test(&blkif->refcnt))
		BUG();
	kmem_cache_free(xen_blkif_cachep, blkif);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

/*
 *  sysfs interface for VBD I/O requests
 */

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
VBD_SHOW(f_req,  "%d\n", be->blkif->st_f_req);
VBD_SHOW(ds_req,  "%d\n", be->blkif->st_ds_req);
VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

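/*
 * Create the "physical_device" and "mode" attributes and the
 * "statistics" group under the backend device's sysfs directory.
 */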
int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

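/* Release our reference on the backing block device, if we hold one. */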
static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}

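/*
 * Open the physical device identified by major:minor and fill in the vbd,
 * noting CD-ROM/removable status and whether the underlying queue
 * supports flush and secure discard.
 */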
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && q->flush_flags)
		vbd->flush_support = true;

	if (q && blk_queue_secdiscard(q))
		vbd->discard_secure = true;

	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
		handle, blkif->domid);
	return 0;
}
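
/*
 * Tear-down path when the backend device is removed: delete the sysfs
 * nodes, unregister the xenstore watch and free the block interface.
 */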
static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	DPRINTK("");

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);
		xen_vbd_free(&be->blkif->vbd);
		xen_blkif_free(be->blkif);
		be->blkif = NULL;
	}

	kfree(be);
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}

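/* Advertise (or retract) "feature-flush-cache" in the backend's xenstore dir. */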
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}

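/*
 * Advertise discard support.  If the underlying queue supports discard,
 * publish its granularity and alignment plus the optional "discard-secure"
 * key before writing "feature-discard".
 */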
static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = be->blkif->vbd.bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (blk_queue_discard(q)) {
		err = xenbus_printf(xbt, dev->nodename,
			"discard-granularity", "%u",
			q->limits.discard_granularity);
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
			"discard-alignment", "%u",
			q->limits.discard_alignment);
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}
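
/* Advertise (or retract) "feature-barrier" in the backend's xenstore dir. */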
int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);
	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	DPRINTK("failed");
	xen_blkbk_remove(dev);
	return err;
}


/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char **vec, unsigned int len)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	char *device_type;

	DPRINTK("");

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if ((be->major || be->minor) &&
	    ((be->major != major) || (be->minor != minor))) {
		pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
			be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	if (be->major == 0 && be->minor == 0) {
		/* Front end dir is a number, which is used as the handle. */

		char *p = strrchr(dev->otherend, '/') + 1;
		long handle;
		err = strict_strtoul(p, 0, &handle);
		if (err)
			return;

		be->major = major;
		be->minor = minor;

		err = xen_vbd_create(be->blkif, handle, major, minor,
				 (NULL == strchr(be->mode, 'w')), cdrom);
		if (err) {
			be->major = 0;
			be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating vbd structure");
			return;
		}

		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			be->major = 0;
			be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
			return;
		}

		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}


/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	DPRINTK("%s", xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info(DRV_PFX "%s: prepare for reconnect\n",
				dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		xen_blkif_disconnect(be->blkif);

		err = connect_ring(be);
		if (err)
			break;
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}


/* ** Connection ** */


/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	DPRINTK("%s", dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it is OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
 abort:
	xenbus_transaction_end(xbt, 1);
}

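/*
 * Read the ring reference, event channel and ring protocol from the
 * frontend's xenstore directory and map the shared ring.
 */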
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned long ring_ref;
	unsigned int evtchn;
	char protocol[64] = "";
	int err;

	DPRINTK("%s", dev->otherend);

	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
			    &ring_ref, "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return err;
	}

	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
			    "%63s", protocol, NULL);
	if (err)
		strcpy(protocol, "unspecified, assuming native");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -1;
	}
	pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s)\n",
		ring_ref, evtchn, be->blkif->blk_protocol, protocol);

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(be->blkif, ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
				 ring_ref, evtchn);
		return err;
	}

	return 0;
}


/* ** Driver Registration ** */


static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};


static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed
);


int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}