#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
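
/*
 * Illustrative sketch (not part of this header): the usual
 * save/override/restore pattern for code that must pass kernel
 * pointers through the user-access routines.  The do_kernel_read(),
 * kbuf and len names below are hypothetical placeholders:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_kernel_read(file, (char __user *)kbuf, len);
 *	set_fs(old_fs);
 */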

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * The range is invalid exactly when:
 * (u33)addr + (u33)size > (u33)current->addr_limit.seg
 *
 * This needs 33-bit arithmetic.  We have a carry...
 */
#define __range_ok(addr,size) ({ \
	unsigned long flag,roksum; \
	__chk_user_ptr(addr); \
	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
		:"=&r" (flag), "=r" (roksum) \
		:"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
	flag; })
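
/*
 * For reference, a plain-C sketch of the same test without the
 * 33-bit carry trick (illustrative only; range_ok_slow is a
 * hypothetical name, not part of this header):
 *
 *	static inline unsigned long range_ok_slow(unsigned long addr,
 *						  unsigned long size)
 *	{
 *		unsigned long limit = current_thread_info()->addr_limit.seg;
 *
 *		return (size <= limit && addr <= limit - size) ? 0 : 1;
 *	}
 */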

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
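
/*
 * Typical use, before a batch of unchecked accesses or a raw copy
 * (ubuf and count are placeholder names):
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, count))
 *		return -EFAULT;
 */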

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
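
/*
 * Conceptually, the trap handler consults the table roughly like this
 * (simplified sketch; the real lookup lives with fixup_exception() in
 * the mm/extable code):
 *
 *	if (fixup_exception(regs))
 *		return;		EIP now points at the fixup code
 *	... otherwise this is a genuine kernel fault ...
 */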

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);

#define __get_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __get_user_" #size \
		:"=a" (ret),"=d" (x) \
		:"0" (ptr))

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr)							\
({	int __ret_gu;							\
	unsigned long __val_gu;						\
	__chk_user_ptr(ptr);						\
	switch(sizeof (*(ptr))) {					\
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
	default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;		\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
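
/*
 * Typical use (val and uptr are placeholder names):
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)uptr))
 *		return -EFAULT;
 */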

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax, no clobbers.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

#define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
#define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
#define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr))
#define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr))
#define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr))

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#ifdef CONFIG_X86_WP_WORKS_OK

#define put_user(x,ptr)						\
({	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	__pu_val = x;						\
	switch(sizeof(*(ptr))) {				\
	case 1: __put_user_1(__pu_val, ptr); break;		\
	case 2: __put_user_2(__pu_val, ptr); break;		\
	case 4: __put_user_4(__pu_val, ptr); break;		\
	case 8: __put_user_8(__pu_val, ptr); break;		\
	default:__put_user_X(__pu_val, ptr); break;		\
	}							\
	__ret_pu;						\
})

#else

#define put_user(x,ptr)						\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pus_tmp = x;			\
	__ret_pu = 0;						\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
				sizeof(*(ptr))) != 0))		\
		__ret_pu = -EFAULT;				\
	__ret_pu;						\
})

#endif
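
/*
 * Typical use (status and uptr are placeholder names):
 *
 *	if (put_user(status, (int __user *)uptr))
 *		return -EFAULT;
 */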

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
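
/*
 * Illustrative sketch of the intended pattern - one access_ok() check
 * followed by several unchecked accesses (uarg is a hypothetical
 * pointer to a user struct with int members a and b):
 *
 *	if (!access_ok(VERIFY_READ, uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(a, &uarg->a) || __get_user(b, &uarg->b))
 *		return -EFAULT;
 */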

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __put_user_nocheck(x,ptr,size)				\
({								\
	long __pu_err;						\
	__put_user_size((x),(ptr),(size),__pu_err,-EFAULT);	\
	__pu_err;						\
})

#define __put_user_u64(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	movl %%eax,0(%2)\n"			\
		"2:	movl %%edx,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	movl %3,%0\n"				\
		"	jmp 3b\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 4\n"				\
		"	.long 1b,4b\n"				\
		"	.long 2b,4b\n"				\
		".previous"					\
		: "=r"(err)					\
		: "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))

#ifdef CONFIG_X86_WP_WORKS_OK

#define __put_user_size(x,ptr,size,retval,errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret); break;\
	case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret); break;\
	case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break;	\
	case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
	default: __put_user_bad();					\
	}								\
} while (0)

#else

#define __put_user_size(x,ptr,size,retval,errret)			\
do {									\
	__typeof__(*(ptr)) __pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#endif

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	__asm__ __volatile__(						\
		"1:	mov"itype" %"rtype"1,%2\n"			\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	movl %3,%0\n"					\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 4\n"					\
		"	.long 1b,3b\n"					\
		".previous"						\
		: "=r"(err)						\
		: ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval,errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret); break;\
	case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret); break;\
	case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret); break;	\
	default: (x) = __get_user_bad();				\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	__asm__ __volatile__(						\
		"1:	mov"itype" %2,%"rtype"1\n"			\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	movl %3,%0\n"					\
		"	xor"itype" %"rtype"1,%"rtype"1\n"		\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 4\n"					\
		"	.long 1b,3b\n"					\
		".previous"						\
		: "=r"(err), ltype (x)					\
		: "m"(__m(addr)), "i"(errret), "0"(err))

unsigned long __must_check __copy_to_user_ll(void __user *to,
				const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll(void *to,
				const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero(void *to,
				const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache(void *to,
				const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
				const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user pages are pinned so that
 * we do not take a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and faults, the x86 will not write
 * anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1);
			return ret;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2);
			return ret;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4);
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, n);
}
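
/*
 * Typical use, once the caller has verified the destination range
 * (kbuf, ubuf and len are placeholder names):
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */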

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/*
	 * Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_sleep();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
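
/*
 * Example of the zero-padding contract (kbuf, ubuf and len are
 * placeholder names): on a partial fault the return value is the
 * number of uncopied bytes, and the tail of the kernel buffer has
 * already been cleared:
 *
 *	left = __copy_from_user(kbuf, ubuf, len);
 *	if (left) {
 *		... the final 'left' bytes of kbuf are already zeroed ...
 *		return -EFAULT;
 *	}
 */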

#define ARCH_HAS_NOCACHE_UACCESS

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_sleep();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

unsigned long __must_check copy_to_user(void __user *to,
				const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
				const void __user *from, unsigned long n);
long __must_check strncpy_from_user(char *dst, const char __user *src,
				long count);
long __must_check __strncpy_from_user(char *dst,
				const char __user *src, long count);

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, LONG_MAX)
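
/*
 * Typical bounded use (ustr is a placeholder user pointer and
 * PATH_MAX the chosen bound; strnlen_user() returns a value larger
 * than the bound when the string does not terminate within it):
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (!len)
 *		return -EFAULT;
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;
 */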

long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#endif /* __i386_UACCESS_H */