#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

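/*
 * Initial kernel page tables, set up in arch/x86/kernel/head_64.S: the
 * early identity ("ident") mapping, the kernel text/data mapping and the
 * fixmap.  init_level4_pgt is the top-level table they hang off.
 */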
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	printk("%s:%d: bad pte %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	printk("%s:%d: bad pmd %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	printk("%s:%d: bad pud %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e)					\
	printk("%s:%d: bad pgd %p(%016lx).\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);


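/*
 * The native_*() accessors below are the raw hardware implementations of
 * the page table operations.  With CONFIG_PARAVIRT they are reached via
 * the pv_mmu_ops indirection; otherwise set_pte(), pte_clear() etc. in
 * <asm/pgtable.h> map straight onto them.
 */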
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

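/* On 64-bit, a pte is a single word, so a plain store is already atomic. */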
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

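/*
 * On SMP the clear must be an atomic xchg: another CPU may be setting the
 * Accessed/Dirty bits in the same pte through the hardware page walker,
 * and a plain read-then-write could lose those updates.
 */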
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * native_local_ptep_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * native_local_pmdp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

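/*
 * Keep the kernel half of every pgd in sync: propagate kernel-space pgd
 * entries set in init_mm over the given virtual address range into all
 * pgds on the pgd list.
 */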
extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
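/* There are no huge pages at the PGD level, so pgd_large() is always false. */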
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */
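/*
 * Non-present "file" ptes, used by nonlinear mappings (remap_file_pages()):
 * the file page offset is stored in the pte itself, with _PAGE_FILE set to
 * distinguish it from a swap entry.
 */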
#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
					    _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))	/* NOP */

#define update_mmu_cache(vma, address, ptep) do { } while (0)

/* Encode and de-code a swap entry */
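/*
 * A swap pte keeps _PAGE_PRESENT clear so it is never seen as a valid
 * mapping.  The swap type sits in the bits just above _PAGE_BIT_PRESENT,
 * below whichever of _PAGE_BIT_FILE/_PAGE_BIT_PROTNONE is lower, and the
 * swap offset occupies the bits above the higher of the two, so swap
 * entries never alias file or PROT_NONE ptes either.
 */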
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#else
#define SWP_TYPE_BITS (_PAGE_BIT_PROTNONE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_FILE + 1)
#endif

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
					 | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
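/*
 * /proc/kcore offsets: mask off the sign-extension bits of a kernel
 * virtual address to get a file offset, and OR them back in to recover
 * the address.
 */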
#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define	kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */