mem.c revision 7fabaddd09ab32a7c0c08da80315758a2245189d
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

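/*
 * Return the number of bytes from @start to the end of its page, capped
 * at @size, i.e. the largest chunk that can be handled without crossing
 * a page boundary.  For example, with 4 KiB pages, start = 0x1ffc and
 * size = 64 yields 4.
 */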
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

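/*
 * Generic sanity checks, used when the architecture does not define
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE and supply its own versions: reads and
 * writes must stay below high_memory, while mmap of any physical range
 * is permitted.
 */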
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

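/*
 * With CONFIG_STRICT_DEVMEM, every page touched through /dev/mem must be
 * cleared by the architecture's devmem_is_allowed(); otherwise all of
 * physical memory is fair game.
 */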
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
				"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

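/*
 * Default no-op; architectures whose xlate_dev_mem_ptr() sets up a
 * temporary mapping override this weak symbol to tear that mapping down.
 */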
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
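
/*
 * Example (userspace sketch, not part of this driver): a sufficiently
 * privileged process reads physical memory by seeking to the address
 * and reading; memory_lseek() below sets f_pos and read_mem() does the
 * copy.  The address here is purely illustrative.
 *
 *	int fd = open("/dev/mem", O_RDONLY);	// needs <fcntl.h>, <unistd.h>
 *	unsigned char buf[16];
 *	lseek(fd, 0xA0000, SEEK_SET);		// legacy VGA window, say
 *	read(fd, buf, sizeof(buf));
 */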

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

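/*
 * Generic fallback, used when the architecture does not define
 * __HAVE_PHYS_MEM_ACCESS_PROT: make the mapping uncached whenever
 * uncached_access() above says the range should not be cached.
 */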
#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
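
/*
 * Example (userspace sketch, not part of this driver): mapping one page
 * of physical memory through mmap_mem(); the offset passed to mmap() is
 * the physical address, and that address is purely illustrative.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);	// O_SYNC implies O_DSYNC,
 *							// which uncached_access() checks
 *	void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0xFEE00000);
 */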

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
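/*
 * Addresses below high_memory are copied straight out of the direct
 * mapping; anything above that is treated as vmalloc space and fetched
 * page-by-page through vread(), bounced via a temporary kernel page.
 */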
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = size_inside_page(p, count);

			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		sz = size_inside_page(realp, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(realp, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void *)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = size_inside_page(p, count);

			written = copy_from_user(kbuf, buf, len);
			if (written) {
				if (wrote + virtr)
					break;
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
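/*
 * /dev/port gives byte-wide access to the x86-style I/O port space:
 * f_pos is the port number, so ports 0..65535 are reachable via
 * lseek() followed by read()/write().
 */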
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
#endif

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

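/*
 * splice() support for /dev/null: report each pipe buffer as fully
 * consumed without copying anything anywhere.
 */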
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

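/*
 * Writes to /dev/kmsg are injected into the kernel log via printk();
 * printk() may prepend a log-level prefix, which is why the return value
 * is clamped to the caller's count.
 */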
static ssize_t kmsg_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write =	kmsg_write,
};

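/*
 * The index into devlist[] is the device's minor number under MEM_MAJOR;
 * minors without an entry (or compiled out) are rejected by memory_open()
 * with -ENXIO.
 */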
static const struct memdev {
	const char *name;
	mode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
	[11] = { "kmsg", 0, &kmsg_fops, NULL },
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open		= memory_open,
};

static char *mem_devnode(struct device *dev, mode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;
		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);