1#ifndef _I386_TLBFLUSH_H
2#define _I386_TLBFLUSH_H
3
4#include <linux/mm.h>
5#include <asm/processor.h>
6
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
/* Non-paravirt kernels dispatch straight to the native implementations. */
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
14
/*
 * Flush all non-global TLB entries by writing CR3 back to itself.
 * A CR3 load invalidates every TLB entry not marked global, so pages
 * mapped with PGE survive.  The "memory" clobber stops the compiler
 * from moving memory accesses across the flush.
 */
#define __native_flush_tlb()						\
	do {								\
		unsigned int tmpreg;					\
									\
		__asm__ __volatile__(					\
			"movl %%cr3, %0;              \n"		\
			"movl %0, %%cr3;  # flush TLB \n"		\
			: "=r" (tmpreg)					\
			:: "memory");					\
	} while (0)
25
26/*
27 * Global pages have to be flushed a bit differently. Not a real
28 * performance problem because this does not happen often.
29 */
/*
 * Flush everything, including global entries: clearing CR4.PGE
 * invalidates global TLB entries, the CR3 reload flushes the rest,
 * and the final CR4 write restores the original PGE setting.
 * All three scratch operands are earlyclobber ("=&r") because they
 * are written before the "i" input has been consumed.
 */
#define __native_flush_tlb_global()					\
	do {								\
		unsigned int tmpreg, cr4, cr4_orig;			\
									\
		__asm__ __volatile__(					\
			"movl %%cr4, %2;  # turn off PGE     \n"	\
			"movl %2, %1;                        \n"	\
			"andl %3, %1;                        \n"	\
			"movl %1, %%cr4;                     \n"	\
			"movl %%cr3, %0;                     \n"	\
			"movl %0, %%cr3;  # flush TLB        \n"	\
			"movl %2, %%cr4;  # turn PGE back on \n"	\
			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
			: "i" (~X86_CR4_PGE)				\
			: "memory");					\
	} while (0)
46
/* Invalidate the single TLB entry mapping the given linear address. */
#define __native_flush_tlb_single(addr) 				\
	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
49
/*
 * Flush the entire TLB.  When the CPU supports global pages (PGE),
 * a plain CR3 reload is not enough, so use the CR4.PGE-toggling
 * variant; otherwise the ordinary flush already removes everything.
 */
#define __flush_tlb_all()						\
	do {								\
		if (cpu_has_pge)					\
			__flush_tlb_global();				\
		else							\
			__flush_tlb();					\
	} while (0)
57
/* The invlpg instruction is available on i486 (family 4) and later. */
#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
59
#ifdef CONFIG_X86_INVLPG
/* invlpg is guaranteed present for this config; flush the page directly. */
# define __flush_tlb_one(addr) __flush_tlb_single(addr)
#else
/*
 * Flush one page: use invlpg when the CPU has it, otherwise fall
 * back to a full non-global TLB flush (pre-i486 CPUs).
 */
# define __flush_tlb_one(addr)						\
	do {								\
		if (cpu_has_invlpg)					\
			__flush_tlb_single(addr);			\
		else							\
			__flush_tlb();					\
	} while (0)
#endif
71
72/*
73 * TLB flushing:
74 *
75 *  - flush_tlb() flushes the current mm struct TLBs
76 *  - flush_tlb_all() flushes all processes TLBs
77 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
78 *  - flush_tlb_page(vma, vmaddr) flushes one page
79 *  - flush_tlb_range(vma, start, end) flushes a range of pages
80 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes the TLBs on other cpus
82 *
83 * ..but the i386 has somewhat limited tlb flushing capabilities,
84 * and page-granular flushes are available only on i486 and up.
85 */
86
/*
 * All-ones sentinel address — presumably passed as "va" to mean
 * "flush the whole address space" rather than one page; confirm
 * against the flush_tlb_others() implementation.
 */
#define TLB_FLUSH_ALL	0xffffffff
88
89
90#ifndef CONFIG_SMP
91
92#include <linux/sched.h>
93
/* On UP every flush is local, so these map straight to the low-level ops. */
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
97
98static inline void flush_tlb_mm(struct mm_struct *mm)
99{
100	if (mm == current->active_mm)
101		__flush_tlb();
102}
103
104static inline void flush_tlb_page(struct vm_area_struct *vma,
105	unsigned long addr)
106{
107	if (vma->vm_mm == current->active_mm)
108		__flush_tlb_one(addr);
109}
110
111static inline void flush_tlb_range(struct vm_area_struct *vma,
112	unsigned long start, unsigned long end)
113{
114	if (vma->vm_mm == current->active_mm)
115		__flush_tlb();
116}
117
/* No-op on UP: there are no other CPUs whose TLBs could need flushing. */
static inline void native_flush_tlb_others(const cpumask_t *cpumask,
					   struct mm_struct *mm, unsigned long va)
{
}
122
123#else  /* SMP */
124
125#include <asm/smp.h>
126
/* Flush only this CPU's TLB — no cross-CPU shootdown IPIs. */
#define local_flush_tlb() \
	__flush_tlb()
129
130extern void flush_tlb_all(void);
131extern void flush_tlb_current_task(void);
132extern void flush_tlb_mm(struct mm_struct *);
133extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
134
135#define flush_tlb()	flush_tlb_current_task()
136
137static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
138{
139	flush_tlb_mm(vma->vm_mm);
140}
141
142void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
143			     unsigned long va);
144
/*
 * Per-CPU TLB state consumed by the SMP flush IPI code.
 * NOTE(review): TLBSTATE_LAZY presumably marks a CPU in lazy-TLB
 * mode — confirm against the SMP flush implementation.
 */
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

/*
 * Padded out to a full cache line so that adjacent CPUs' entries do
 * not false-share (8 = sizeof(active_mm) + sizeof(state) on i386).
 */
struct tlb_state
{
	struct mm_struct *active_mm;	/* mm currently loaded on this CPU */
	int state;			/* TLBSTATE_OK or TLBSTATE_LAZY */
	char __cacheline_padding[L1_CACHE_BYTES-8];
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
155#endif	/* SMP */
156
#ifndef CONFIG_PARAVIRT
/* Without paravirt, cross-CPU flushes go straight to the native code. */
#define flush_tlb_others(mask, mm, va)		\
	native_flush_tlb_others(&mask, mm, va)
#endif
161
/*
 * Flush kernel mappings in [start, end).  i386 has no ranged flush,
 * so everything is flushed; @start and @end are unused.
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					unsigned long end)
{
	flush_tlb_all();
}
167
168#endif /* _I386_TLBFLUSH_H */
169