/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2008
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

#define hugetlb_prefault_arch_hook(mm)		do { } while (0)

int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline int huge_pte_none(pte_t pte)
{
	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
		!(pte_val(pte) & _SEGMENT_ENTRY_RO);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/*
		 * Without hardware large-page support the "huge" pte is
		 * really a segment entry pointing to a software pte table;
		 * fetch that pte and re-apply the software invalid and
		 * read-only bits kept in the segment entry.
		 */
		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
		if (ptep) {
			mask = pte_val(pte) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
			pte = pte_mkhuge(*ptep);
			pte_val(pte) |= mask;
		}
	}
	return pte;
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);

	mm->context.flush_mm = 1;	/* defer the TLB flush */
	pmd_clear((pmd_t *) ptep);
	return pte;
}

static inline void __pmd_csp(pmd_t *pmdp)
{
	/* csp requires an even/odd register pair: old value in 2, new in 3 */
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INV;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	/* Compare and swap and purge: swap in the invalid entry
	 * and purge the TLB. */
	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		/* idte (opcode 0xb98e): invalidate the DAT table entry
		 * and flush the matching TLB entries. */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
		);
	}
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

static inline void huge_ptep_invalidate(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;

	if (MACHINE_HAS_IDTE)
		__pmd_idte(address, pmdp);
	else
		__pmd_csp(pmdp);
}

#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({									    \
	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	    \
	if (__changed) {						    \
		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	    \
		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
	}								    \
	__changed;							    \
})

#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = huge_ptep_get(__ptep);				\
	if (pte_write(__pte)) {						\
		(__mm)->context.flush_mm = 1;				\
		/* flush now only if other CPUs may see the mm */	\
		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
		    (__mm) != current->active_mm)			\
			huge_ptep_invalidate(__mm, __addr, __ptep);	\
		set_huge_pte_at(__mm, __addr, __ptep,			\
				huge_pte_wrprotect(__pte));		\
	}								\
})

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */