raw.c revision 47aa5793f78c274d51711f6a621fa6b02d4e6402
/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * device to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
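/*
 * For reference, a binding is normally set up from userspace through the
 * control node.  The sketch below is illustrative only: it assumes the
 * conventional /dev/rawctl and /dev/raw/rawN device nodes and the
 * struct raw_config_request layout exported via <linux/raw.h>.
 *
 *	struct raw_config_request rq = {
 *		.raw_minor   = 1,
 *		.block_major = 8,	// e.g. 8:1 == sda1
 *		.block_minor = 1,
 *	};
 *	int fd = open("/dev/rawctl", O_RDWR);
 *	ioctl(fd, RAW_SETBIND, &rq);	// bind /dev/raw/raw1 to 8:1
 *
 * Passing block_major == block_minor == 0 unbinds the minor again.  The
 * raw(8) utility is a thin wrapper around these ioctls.
 */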

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>

struct raw_device_data {
	struct block_device *binding;
	int inuse;
};

static struct class *raw_class;
static struct raw_device_data raw_devices[MAX_RAW_MINORS];
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
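/*
 * Note for users of the bound node: because raw_open() forces O_DIRECT,
 * buffer addresses, file offsets and transfer lengths must be aligned to
 * the device's sector size.  A hedged userspace sketch, assuming a
 * 512-byte-sector device already bound to /dev/raw/raw1:
 *
 *	void *buf;
 *	posix_memalign(&buf, 512, 4096);
 *	int fd = open("/dev/raw/raw1", O_RDONLY);
 *	pread(fd, buf, 4096, 0);	// aligned length and offset
 */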
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	lock_kernel();
	mutex_lock(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;
	igrab(bdev->bd_inode);
	err = blkdev_get(bdev, filp->f_mode, 0);
	if (err)
		goto out;
	err = bd_claim(bdev, raw_open);
	if (err)
		goto out1;
	err = set_blocksize(bdev, bdev_hardsect_size(bdev));
	if (err)
		goto out2;
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	if (++raw_devices[minor].inuse == 1)
		filp->f_path.dentry->d_inode->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	mutex_unlock(&raw_mutex);
	unlock_kernel();
	return 0;

out2:
	bd_release(bdev);
out1:
	blkdev_put(bdev);
out:
	mutex_unlock(&raw_mutex);
	unlock_kernel();
	return err;
}

/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;

	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0) {
		/* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
		inode->i_mapping = &inode->i_data;
		inode->i_mapping->backing_dev_info = &default_backing_dev_info;
	}
	mutex_unlock(&raw_mutex);

	bd_release(bdev);
	blkdev_put(bdev);
	return 0;
}

/*
 * Forward ioctls to the underlying block device.
 */
static int
raw_ioctl(struct inode *inode, struct file *filp,
		  unsigned int command, unsigned long arg)
{
	struct block_device *bdev = filp->private_data;

	return blkdev_ioctl(bdev->bd_inode, NULL, command, arg);
}
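/*
 * Because ioctls are forwarded verbatim, the usual block-device ioctls
 * work on a bound raw node.  An illustrative (not exhaustive) userspace
 * sketch, assuming /dev/raw/raw1 is bound:
 *
 *	unsigned long long size;
 *	int fd = open("/dev/raw/raw1", O_RDONLY);
 *	ioctl(fd, BLKGETSIZE64, &size);	// size in bytes of the blockdev
 */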

static void bind_device(struct raw_config_request *rq)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor));
	device_create_drvdata(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor),
			      NULL, "raw%d", rq->raw_minor);
}

/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
			unsigned int command, unsigned long arg)
{
	struct raw_config_request rq;
	struct raw_device_data *rawdev;
	int err = 0;

	switch (command) {
	case RAW_SETBIND:
	case RAW_GETBIND:

		/* First, find out which raw minor we want */

		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) {
			err = -EFAULT;
			goto out;
		}

		if (rq.raw_minor <= 0 || rq.raw_minor >= MAX_RAW_MINORS) {
			err = -EINVAL;
			goto out;
		}
		rawdev = &raw_devices[rq.raw_minor];

		if (command == RAW_SETBIND) {
			dev_t dev;

			/*
			 * This is like making block devices, so demand the
			 * same capability
			 */
			if (!capable(CAP_SYS_ADMIN)) {
				err = -EPERM;
				goto out;
			}

			/*
			 * For now, we don't need to check that the underlying
			 * block device is present or not: we can do that when
			 * the raw device is opened.  Just check that the
			 * major/minor numbers make sense.
			 */

			dev = MKDEV(rq.block_major, rq.block_minor);
			if ((rq.block_major == 0 && rq.block_minor != 0) ||
					MAJOR(dev) != rq.block_major ||
					MINOR(dev) != rq.block_minor) {
				err = -EINVAL;
				goto out;
			}

			mutex_lock(&raw_mutex);
			if (rawdev->inuse) {
				mutex_unlock(&raw_mutex);
				err = -EBUSY;
				goto out;
			}
			if (rawdev->binding) {
				bdput(rawdev->binding);
				module_put(THIS_MODULE);
			}
			if (rq.block_major == 0 && rq.block_minor == 0) {
				/* unbind */
				rawdev->binding = NULL;
				device_destroy(raw_class,
						MKDEV(RAW_MAJOR, rq.raw_minor));
			} else {
				rawdev->binding = bdget(dev);
				if (rawdev->binding == NULL)
					err = -ENOMEM;
				else {
					__module_get(THIS_MODULE);
					bind_device(&rq);
				}
			}
			mutex_unlock(&raw_mutex);
		} else {
			struct block_device *bdev;

			mutex_lock(&raw_mutex);
			bdev = rawdev->binding;
			if (bdev) {
				rq.block_major = MAJOR(bdev->bd_dev);
				rq.block_minor = MINOR(bdev->bd_dev);
			} else {
				rq.block_major = rq.block_minor = 0;
			}
			mutex_unlock(&raw_mutex);
			if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) {
				err = -EFAULT;
				goto out;
			}
		}
		break;
	default:
		err = -EINVAL;
		break;
	}
out:
	return err;
}
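/*
 * Querying an existing binding from userspace is symmetric.  A hedged
 * sketch, with the same assumptions as the RAW_SETBIND example above:
 *
 *	struct raw_config_request rq = { .raw_minor = 1 };
 *	int fd = open("/dev/rawctl", O_RDWR);
 *	ioctl(fd, RAW_GETBIND, &rq);
 *	// rq.block_major/rq.block_minor now hold the bound device numbers,
 *	// or 0/0 if raw1 is currently unbound.
 */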

static const struct file_operations raw_fops = {
	.read	=	do_sync_read,
	.aio_read =	generic_file_aio_read,
	.write	=	do_sync_write,
	.aio_write =	generic_file_aio_write_nolock,
	.open	=	raw_open,
	.release =	raw_release,
	.ioctl	=	raw_ioctl,
	.owner	=	THIS_MODULE,
};

static const struct file_operations raw_ctl_fops = {
	.ioctl	=	raw_ctl_ioctl,
	.open	=	raw_open,
	.owner	=	THIS_MODULE,
};

static struct cdev raw_cdev;

static int __init raw_init(void)
{
	dev_t dev = MKDEV(RAW_MAJOR, 0);
	int ret;

	ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw");
	if (ret)
		goto error;

	cdev_init(&raw_cdev, &raw_fops);
	ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS);
	if (ret) {
		kobject_put(&raw_cdev.kobj);
		goto error_region;
	}

	raw_class = class_create(THIS_MODULE, "raw");
	if (IS_ERR(raw_class)) {
		printk(KERN_ERR "Error creating raw class.\n");
		cdev_del(&raw_cdev);
		ret = PTR_ERR(raw_class);
		goto error_region;
	}
	device_create_drvdata(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL,
			      "rawctl");

	return 0;

error_region:
	unregister_chrdev_region(dev, MAX_RAW_MINORS);
error:
	return ret;
}

static void __exit raw_exit(void)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
	class_destroy(raw_class);
	cdev_del(&raw_cdev);
	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS);
}

module_init(raw_init);
module_exit(raw_exit);
MODULE_LICENSE("GPL");