#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e)							\
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n",			\
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016Lx)\n",				\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016Lx)\n",				\
	       __FILE__, __LINE__, &(e), pgd_val(e))

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
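
/*
 * Example (illustrative sketch only; the helper name is hypothetical):
 * the high-half-first order above pairs with a low-half-first reader.
 * If such a reader finds the low half set, the smp_wmb()/smp_rmb()
 * pair guarantees the high half it then reads belongs to the same
 * entry, so it observes either "not present" or the complete new pte:
 *
 *	static inline pte_t example_read_pte_lockless(pte_t *ptep)
 *	{
 *		pte_t pte;
 *
 *		pte.pte_low = ptep->pte_low;
 *		smp_rmb();	// pairs with smp_wmb() in native_set_pte()
 *		pte.pte_high = ptep->pte_high;
 *		return pte;
 *	}
 */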

#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
 * a plain "*pmdp" dereference generated by gcc. The problem is that in
 * certain places where pte_offset_map_lock is called, concurrent page
 * faults are allowed if the mmap_sem is held for reading. An example is
 * mincore vs page faults vs MADV_DONTNEED. On the page fault side
 * pmd_populate rightfully does a set_64bit, but if we read the pmd_t
 * with a plain "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64 bits of the pmd atomically. To fix
 * this, all places that run pte_offset_map_lock() while holding the
 * mmap_sem in read mode shall read the pmdp pointer with this
 * function, to learn whether the pmd is null or not, and in turn
 * whether they can run pte_offset_map_lock or pmd_trans_huge or other
 * pmd operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic runs, so
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic. We could read it really
 * atomically here with an atomic64_read for the THP-enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found stable (i.e. pointing to a pte), and we return a none pmdval
 * if the low part of the pmd is none. In some cases the high and low
 * parts of the returned pmdval may be inconsistent if THP is enabled
 * (the low part may point to a previously mapped hugepage, while the
 * high part may point to a more recently mapped hugepage), but
 * pmd_none_or_trans_huge_or_clear_bad() only needs the low part of
 * the pmd to be read atomically to decide whether the pmd is
 * unstable, with the only exception of when the low part of the pmd
 * is zero, in which case we return a none pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * If the low part is null, we must not read the high part
		 * or we can end up with a partial pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}
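
/*
 * Example (illustrative sketch only; example_walk is hypothetical):
 * a lockless walker holding the mmap_sem for reading should sample
 * the pmd through pmd_read_atomic() rather than dereferencing pmdp
 * directly, and only trust a none or pte-pointing ("stable") result:
 *
 *	static int example_walk(pmd_t *pmdp)
 *	{
 *		pmd_t pmd = pmd_read_atomic(pmdp);
 *
 *		barrier();	// don't let gcc re-read *pmdp
 *		if (pmd_none(pmd))
 *			return 0;	// nothing mapped here
 *		if (pmd_trans_huge(pmd))
 *			return 1;	// huge or unstable: no pte walk
 *		return 2;	// low half points to a pte page, so
 *				// pte_offset_map_lock() is safe now
 *	}
 */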

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
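
/*
 * Example (illustrative sketch only; the helper name is hypothetical):
 * set_64bit() stores the whole 64-bit entry with a single atomic
 * operation (cmpxchg8b on 32-bit x86), so these _atomic setters are
 * the ones to use when the old entry may still be live and the MMU
 * could walk it concurrently:
 *
 *	static void example_replace_pte(pte_t *ptep, pte_t new)
 *	{
 *		// old pte may be present: a split high/low write could
 *		// let the hardware observe a half-updated entry
 *		native_set_pte_atomic(ptep, new);
 *	}
 */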

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}
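
/*
 * Example (illustrative interleaving): the low-half-first clear above
 * is the mirror image of the high-half-first write in native_set_pte().
 * A reader that loads the low half first (as pmd_read_atomic() does)
 * can therefore never combine a cleared low half with a stale high
 * half into a "present" entry:
 *
 *	writer (clear)			reader
 *	--------------			------
 *	pte_low = 0
 *	smp_wmb()			low = ptep->pte_low;	// 0 => none
 *	pte_high = 0			// high half never read
 */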

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called are either
	 * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
	 * code or pud_clear_bad()), so no TLB flush is needed here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
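
/*
 * Example (illustrative sketch only): the xchg() atomically clears the
 * low half, and with it the P-bit, so the hardware can no longer set
 * the Accessed/Dirty bits in this entry; the high half can then be
 * read and cleared with plain stores. A caller might use the returned
 * pte to preserve dirty state:
 *
 *	pte_t old = native_ptep_get_and_clear(ptep);
 *
 *	if (pte_dirty(old))
 *		; // e.g. propagate the dirty bit to the struct page
 */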

#ifdef CONFIG_SMP
union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};

static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
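
/*
 * Example (illustrative sketch only): the split_pmd union gives a
 * two-u32 view of the same 64-bit pmd, so either half can be addressed
 * on its own without shifting and masking, e.g. testing just the low
 * half (where the P-bit lives):
 *
 *	union split_pmd *s = (union split_pmd *)pmdp;
 *
 *	if (!s->pmd_low)
 *		; // pmd is none, the high half carries no mapping
 */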

/*
 * Bits 0, 6 and 7 are taken up in the low part of the pte,
 * so the 32 bits of file offset go into the high part.
 *
 * For soft-dirty tracking, bit 11 of the low part of the pte
 * is taken as well.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off)						\
	((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS       32
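
/*
 * Example (worked encoding, values are illustrative): a nonlinear file
 * pte for page offset 0x12345 round-trips as
 *
 *	pte_t pte = pgoff_to_pte(0x12345);
 *	// pte.pte_low  == _PAGE_FILE (P-bit clear, entry not present)
 *	// pte.pte_high == 0x12345
 *	unsigned long off = pte_to_pgoff(pte);	// 0x12345 again
 *
 * so the full 32-bit offset lives in pte_high, clear of the protection
 * bits kept in pte_low.
 */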

/* Encode and decode a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
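
/*
 * Example (worked encoding, values are illustrative): swap type 2,
 * offset 0x100 packs into the high word of the pte as
 *
 *	swp_entry_t e = __swp_entry(2, 0x100);
 *	// e.val == 2 | (0x100 << 5) == 0x2002
 *	// __swp_type(e)   == (0x2002 & 0x1f) == 2
 *	// __swp_offset(e) == (0x2002 >> 5)   == 0x100
 *	pte_t pte = __swp_entry_to_pte(e);	// pte.pte_high == 0x2002
 *
 * With 5 type bits, 27 bits of swap offset fit in pte_high.
 */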

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */