/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
 *
 * TLB management for the MIPS R8000 ("TFP") CPU.  The TFP TLB is
 * set-associative: an entry is addressed by writing the set number to
 * the CP0 TLBSet register and the virtual address to the CP0 VAddr
 * register, then issuing a tlb_write/tlb_probe.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

extern void build_tlb_refill_handler(void);

/* Total number of TLB entries and log2 of the entries-per-set stride. */
#define TFP_TLB_SIZE		384
#define TFP_TLB_SET_SHIFT	7

/*
 * CP0 hazard avoidance.
 * NOTE(review): BARRIER is defined but not referenced anywhere in this
 * file (the code uses mtc0_tlbw_hazard()/tlbw_use_hazard() instead) —
 * candidate for removal; confirm no other includer depends on it.
 */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
	"nop; nop; nop; nop; nop; nop;\n\t" \
	".set reorder\n\t")

/*
 * Flush the entire TLB: overwrite every one of the TFP_TLB_SIZE entries
 * with a zero (invalid) EntryLo and a unique CKSEG0-based EntryHi.
 * CKSEG0 is an unmapped address region, so the dummy entries can never
 * match a translation; the (PAGE_SHIFT + 1) spacing presumably keeps the
 * dummy VPNs distinct across entries — TODO confirm against TFP manual.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo(0);

	for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
		/* Select the set via TLBSet, the slot within it via VAddr. */
		write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
		write_c0_vaddr(entry << PAGE_SHIFT);
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write();
	}
	tlbw_use_hazard();
	/* Restore the previously active ASID/context. */
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

/*
 * Flush all translations belonging to @mm on this CPU by dropping its
 * MMU context (forcing a new ASID allocation); cheap compared to a
 * per-entry flush.  No-op if @mm never ran on this CPU (context == 0).
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);
}

/*
 * Flush the user-address range [start, end) of @vma's mm.  If the range
 * covers more than half the TLB, dropping the whole context is cheaper
 * than probing entry by entry.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid, size;

	if (!cpu_context(cpu, mm))
		return;

	/*
	 * Pages in the range, then halved; presumably mirrors the paired
	 * even/odd-page accounting of other MIPS TLB flavors — TODO
	 * confirm this is meaningful on TFP.
	 */
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	local_irq_save(flags);

	if (size > TFP_TLB_SIZE / 2) {
		drop_mmu_context(mm, cpu);
		goto out_restore;
	}

	oldpid = read_c0_entryhi();
	newpid = cpu_asid(cpu, mm);
	/*
	 * NOTE(review): newpid is computed but never written into EntryHi;
	 * the probe loop below uses the bare page address as EntryHi, so
	 * the lookup appears to ignore the ASID.  Verify this is intended
	 * for TFP probing semantics.
	 */

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		/* Negative TLBSet after a probe means "no matching entry". */
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		/* Hit: invalidate by rewriting with an unmapped CKSEG0 VPN. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}
	write_c0_entryhi(oldpid);

out_restore:
	local_irq_restore(flags);
}

/* Usable for KV1 addresses only! */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	/* Same half-the-TLB heuristic as local_flush_tlb_range(). */
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size > TFP_TLB_SIZE / 2) {
		local_flush_tlb_all();
		return;
	}

	local_irq_save(flags);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		/* Probe for the page; kernel addresses carry no ASID. */
		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		/* Invalidate the hit entry with an unmapped dummy VPN. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}

	local_irq_restore(flags);
}

/*
 * Flush the single user page @page of @vma's mm, probing with that
 * mm's ASID.  No-op if the mm has no context on this CPU.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid;
	signed long idx;

	if (!cpu_context(cpu, vma->vm_mm))
		return;

	newpid = cpu_asid(cpu, vma->vm_mm);
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	write_c0_vaddr(page);
	write_c0_entryhi(newpid);
	tlb_probe();
	idx = read_c0_tlbset();
	if (idx < 0)
		goto finish;

	/* Hit: zero EntryLo and retarget EntryHi at unmapped CKSEG0 space. */
	write_c0_entrylo(0);
	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
	tlb_write();

finish:
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what's needed.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	/* NOTE(review): the pte argument is unused; the PTE is re-read
	 * through the page tables below.  Also pte_offset_map() has no
	 * matching pte_unmap() — harmless unless highmem PTEs are in
	 * play; verify for this configuration. */
	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_vaddr(address);
	write_c0_entryhi(pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	pmdp = pmd_offset(pgdp, address);
	ptep = pte_offset_map(pmdp, address);
	tlb_probe();

	/* >> 6 presumably converts pte_val() into TFP EntryLo format —
	 * TODO confirm the field layout against the TFP manual. */
	write_c0_entrylo(pte_val(*ptep++) >> 6);
	tlb_write();

	write_c0_entryhi(pid);
	local_irq_restore(flags);
}

/*
 * Record the TLB geometry in cpuinfo.  NOTE(review): the config
 * argument is accepted but unused; the size is hard-coded for TFP.
 * (The stray "¤" in the collapsed source was mojibake for
 * "&current_cpu_data", restored here.)
 */
static void __cpuinit probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	c->tlbsize = 3 * 128;		/* 3 sets each 128 entries */
}

/*
 * Boot-time (per-CPU) TLB setup: program the user/kernel page-size
 * fields of c0_status (bits 32/36, masked by ST0_UPS/ST0_KPS) to match
 * the kernel's configured PAGE_SIZE, clear wired entries, flush
 * everything and install the TLB refill handler.
 */
void __cpuinit tlb_init(void)
{
	unsigned int config = read_c0_config();
	unsigned long status;

	probe_tlb(config);

	status = read_c0_status();
	status &= ~(ST0_UPS | ST0_KPS);
#ifdef CONFIG_PAGE_SIZE_4KB
	status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
#elif defined(CONFIG_PAGE_SIZE_8KB)
	status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
#elif defined(CONFIG_PAGE_SIZE_16KB)
	status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
#elif defined(CONFIG_PAGE_SIZE_64KB)
	status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
#endif
	write_c0_status(status);

	write_c0_wired(0);

	local_flush_tlb_all();

	build_tlb_refill_handler();
}