thread_info.h revision caf4b323b02a16c92fba449952ac6515ddc76d7a
/* thread_info.h: low-level thread information
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifndef _ASM_X86_THREAD_INFO_H
#define _ASM_X86_THREAD_INFO_H

#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/types.h>

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 */
#ifndef __ASSEMBLY__
struct task_struct;
struct exec_domain;
#include <asm/processor.h>
#include <asm/ftrace.h>

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	unsigned long		flags;		/* low level flags */
	__u32			status;		/* thread synchronous flags */
	__u32			cpu;		/* current CPU */
	int			preempt_count;	/* 0 => preemptable,
						   <0 => BUG */
	mm_segment_t		addr_limit;
	struct restart_block	restart_block;
	void __user		*sysenter_return;
#ifdef CONFIG_X86_32
	unsigned long		previous_esp;	/* ESP of the previous stack in
						   case of nested (IRQ) stacks
						*/
	__u8			supervisor_stack[0];
#endif

#ifdef CONFIG_FUNCTION_RET_TRACER
	/* Index of current stored address in ret_stack */
	int		curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	ret_stack[FTRACE_RET_STACK_SIZE];
#endif
};
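/*
 * Illustrative sketch (not part of this header): thread_info sits at the
 * bottom of the task's kernel stack, and C code usually reaches it through
 * the generic helpers from <linux/sched.h> and <linux/thread_info.h>
 * rather than by hand, e.g.:
 *
 *	struct thread_info *ti = task_thread_info(tsk);
 *	if (test_ti_thread_flag(ti, TIF_NEED_RESCHED))
 *		resched_needed = 1;
 */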

#ifdef CONFIG_FUNCTION_RET_TRACER
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= 1,			\
	.addr_limit	= KERNEL_DS,		\
	.restart_block = {			\
		.fn = do_no_restart_syscall,	\
	},					\
	.curr_ret_stack = -1,			\
}
#else
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= 1,			\
	.addr_limit	= KERNEL_DS,		\
	.restart_block = {			\
		.fn = do_no_restart_syscall,	\
	},					\
}
#endif

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)
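/*
 * Usage sketch (an assumption based on the generic init_task setup, not
 * something defined in this header): INIT_THREAD_INFO() statically fills
 * the boot thread's thread_info inside init_thread_union, roughly:
 *
 *	union thread_union init_thread_union
 *		__attribute__((__section__(".data.init_task"))) =
 *			{ INIT_THREAD_INFO(init_task) };
 */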

#else /* !__ASSEMBLY__ */

#include <asm/asm-offsets.h>

#endif

/*
 * thread information flags
 * - these are process state flags that various assembly files
 *   may need to access
 * - pending work-to-be-done flags are in LSW
 * - other flags in MSW
 * Warning: layout of LSW is hardcoded in entry.S
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return */
#define TIF_IRET		5	/* force IRET */
#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_SECCOMP		8	/* secure computing */
#define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
#define TIF_NOTSC		16	/* TSC is not accessible in userland */
#define TIF_IA32		17	/* 32bit process */
#define TIF_FORK		18	/* ret_from_fork */
#define TIF_ABI_PENDING		19
#define TIF_MEMDIE		20
#define TIF_DEBUG		21	/* uses debug registers */
#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
#define TIF_FREEZE		23	/* is freezing for suspend */
#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR		26	/* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS	27	/* record scheduling event timestamps */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_IRET		(1 << TIF_IRET)
#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_IA32		(1 << TIF_IA32)
#define _TIF_FORK		(1 << TIF_FORK)
#define _TIF_ABI_PENDING	(1 << TIF_ABI_PENDING)
#define _TIF_DEBUG		(1 << TIF_DEBUG)
#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
#define _TIF_FREEZE		(1 << TIF_FREEZE)
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
#define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)
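/*
 * Illustrative sketch (not part of this header): these bits are normally
 * tested and set through the generic thread-flag helpers rather than by
 * touching ->flags directly, e.g.:
 *
 *	set_tsk_thread_flag(tsk, TIF_SIGPENDING);
 *	if (test_thread_flag(TIF_SYSCALL_TRACE))
 *		trace_this_syscall = 1;
 */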

/* work to do in syscall_trace_enter() */
#define _TIF_WORK_SYSCALL_ENTRY	\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \
	 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP)

/* work to do in syscall_trace_leave() */
#define _TIF_WORK_SYSCALL_EXIT	\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)
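/*
 * Sketch of the intended use (an assumption; the real checks live in the
 * syscall entry paths, not here): entry code compares ->flags against
 * these masks to decide whether the slow tracing path must run, roughly:
 *
 *	if (unlikely(current_thread_info()->flags & _TIF_WORK_SYSCALL_ENTRY))
 *		syscall_trace_enter(regs);
 */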

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK							\
	(0x0000FFFF &							\
	 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|			\
	   _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))

/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)

/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK						\
	(_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW							\
	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS| \
	 _TIF_NOTSC)

#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
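/*
 * Context-switch sketch (an assumption modelled on __switch_to(); the
 * __switch_to_xtra() helper is not declared in this header):
 *
 *	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
 *		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 *		__switch_to_xtra(prev_p, next_p, tss);
 */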

#define PREEMPT_ACTIVE		0x10000000

/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
#else
#define THREAD_FLAGS GFP_KERNEL
#endif

#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR

#define alloc_thread_info(tsk)						\
	((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
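/*
 * Illustrative sketch (not part of this header): with
 * __HAVE_ARCH_THREAD_INFO_ALLOCATOR defined, the generic fork path is
 * expected to pair this allocator with the free_thread_info() declared at
 * the end of this file, roughly:
 *
 *	ti = alloc_thread_info(tsk);
 *	if (!ti)
 *		return NULL;
 *	...
 *	free_thread_info(ti);
 */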

#ifdef CONFIG_X86_32

#define STACK_WARN	(THREAD_SIZE/8)
/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__

/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("esp") __used;

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	return (struct thread_info *)
		(current_stack_pointer & ~(THREAD_SIZE - 1));
}
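/*
 * Worked example (assuming the default 8KB THREAD_SIZE): with %esp at
 * 0xc1234abc, masking with ~(THREAD_SIZE - 1) = ~0x1fff gives 0xc1234000,
 * the base of the current kernel stack, which is where thread_info lives.
 */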

#else /* !__ASSEMBLY__ */

/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg)	\
	movl $-THREAD_SIZE, reg; \
	andl %esp, reg

/* use this one if reg already contains %esp */
#define GET_THREAD_INFO_WITH_ESP(reg) \
	andl $-THREAD_SIZE, reg
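/*
 * Assembly usage sketch (an assumption, in the style of entry_32.S; the
 * TI_flags offset comes from asm-offsets.h, not from this header):
 *
 *	GET_THREAD_INFO(%ebp)
 *	testl $_TIF_WORK_MASK, TI_flags(%ebp)
 *	jnz work_pending
 */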

#endif

#else /* X86_32 */

#include <asm/pda.h>

/*
 * macros/functions for gaining access to the thread information structure
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
	ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
	return ti;
}

/* do not use in interrupt context */
static inline struct thread_info *stack_thread_info(void)
{
	struct thread_info *ti;
	asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
	return ti;
}
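/*
 * Explanatory note (an assumption based on the 64-bit stack layout of this
 * era, not stated in this header): interrupts run on separate per-CPU
 * stacks, so masking %rsp only works while on the task stack; that is why
 * current_thread_info() goes through the PDA instead.  pda->kernelstack
 * holds the stack top minus PDA_STACKOFFSET, so adding PDA_STACKOFFSET
 * back and subtracting THREAD_SIZE lands on the thread_info at the base.
 */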

#else /* !__ASSEMBLY__ */

/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg) \
	movq %gs:pda_kernelstack,reg ; \
	subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
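/*
 * Assembly usage sketch (an assumption, in the style of entry_64.S; the
 * TI_flags offset comes from asm-offsets.h):
 *
 *	GET_THREAD_INFO(%rcx)
 *	movl TI_flags(%rcx), %edx
 *	andl $_TIF_WORK_MASK, %edx
 *	jnz retint_careful
 */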

#endif

#endif /* !X86_32 */

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_USEDFPU		0x0001	/* FPU was used by this task
					   this quantum (SMP) */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT) */
#define TS_POLLING		0x0004	/* true if in idle loop
					   and not sleeping */
#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
#define TS_XSAVE		0x0010	/* Use xsave/xrstor */

#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
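/*
 * Illustrative sketch (not part of this header): because only the task
 * itself touches ->status, plain non-atomic reads are enough, e.g. to see
 * whether a 64-bit task is currently inside a 32-bit compat syscall:
 *
 *	int in_compat = task_thread_info(current)->status & TS_COMPAT;
 */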

#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
}
#endif	/* !__ASSEMBLY__ */
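/*
 * Usage sketch (an assumption modelled on a sigsuspend()-style path, not
 * code from this header): the caller saves the old mask, installs the
 * temporary one, and asks for the saved mask to be restored on the way
 * back to user space:
 *
 *	current->saved_sigmask = current->blocked;
 *	... install the temporary mask and sleep ...
 *	set_restore_sigmask();
 *	return -ERESTARTNOHAND;
 */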

#ifndef __ASSEMBLY__
extern void arch_task_cache_init(void);
extern void free_thread_info(struct thread_info *ti);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define arch_task_cache_init arch_task_cache_init
#endif
#endif /* _ASM_X86_THREAD_INFO_H */