cache-inv-by-reg.S revision 9731d23710736b96786d68c2e63148ff3f22e6eb
/* MN10300 CPU cache invalidation routines, using automatic purge registers
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>

/* NOTE(review): this interval macro is defined and range-checked here but not
 * otherwise referenced in this file — presumably consumed by a sibling cache
 * implementation; confirm before removing. */
#define mn10300_local_dcache_inv_range_intr_interval \
	+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)

#if mn10300_local_dcache_inv_range_intr_interval > 0xff
#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
#endif

	.am33_2

#ifndef CONFIG_SMP
	/* On uniprocessor builds the cross-CPU entry points alias directly to
	 * the local (this-CPU-only) implementations defined below. */
	.globl mn10300_icache_inv
	.globl mn10300_icache_inv_page
	.globl mn10300_icache_inv_range
	.globl mn10300_icache_inv_range2
	.globl mn10300_dcache_inv
	.globl mn10300_dcache_inv_page
	.globl mn10300_dcache_inv_range
	.globl mn10300_dcache_inv_range2

mn10300_icache_inv = mn10300_local_icache_inv
mn10300_icache_inv_page = mn10300_local_icache_inv_page
mn10300_icache_inv_range = mn10300_local_icache_inv_range
mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
mn10300_dcache_inv = mn10300_local_dcache_inv
mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2

#endif /* !CONFIG_SMP */

###############################################################################
#
# void mn10300_local_icache_inv(void)
# Invalidate the entire icache
#
# Clobbers d0 and a0 only; no registers are saved, no stack is used.
#
###############################################################################
	ALIGN
	.globl mn10300_local_icache_inv
        .type mn10300_local_icache_inv,@function
mn10300_local_icache_inv:
	mov	CHCTR,a0		# a0 = cache control register

	movhu	(a0),d0
	btst	CHCTR_ICEN,d0		# nothing to do if the icache is
	beq	mn10300_local_icache_inv_end	# not enabled

	# invalidate
	or	CHCTR_ICINV,d0
	movhu	d0,(a0)
	movhu	(a0),d0			# read back — presumably to make sure
					# the invalidation has taken effect
					# before returning; confirm against
					# the AM33 core manual

mn10300_local_icache_inv_end:
	ret	[],0
	.size	mn10300_local_icache_inv,.-mn10300_local_icache_inv

###############################################################################
#
# void mn10300_local_dcache_inv(void)
# Invalidate the entire dcache
#
# Clobbers d0 and a0 only; no registers are saved, no stack is used.
#
###############################################################################
	ALIGN
	.globl mn10300_local_dcache_inv
	.type mn10300_local_dcache_inv,@function
mn10300_local_dcache_inv:
	mov	CHCTR,a0		# a0 = cache control register

	movhu	(a0),d0
	btst	CHCTR_DCEN,d0		# nothing to do if the dcache is
	beq	mn10300_local_dcache_inv_end	# not enabled

	# invalidate
	or	CHCTR_DCINV,d0
	movhu	d0,(a0)
	movhu	(a0),d0			# read back — presumably to make sure
					# the invalidation has taken effect
					# before returning; confirm against
					# the AM33 core manual

mn10300_local_dcache_inv_end:
	ret	[],0
	.size	mn10300_local_dcache_inv,.-mn10300_local_dcache_inv

###############################################################################
#
# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
# void mn10300_local_dcache_inv_page(unsigned long start)
# Invalidate a range of addresses on a page in the dcache
#
# In:	d0 = start address; d1 = end address (range) or size (range2)
# Uses the DCPGCR/DCPGMR automatic purge registers.  d2, d3 and a2 are saved
# by movm on entry and restored by the ret; interrupts are disabled around
# the purge (LOCAL_CLI_SAVE into d3).
#
###############################################################################
	ALIGN
	.globl mn10300_local_dcache_inv_page
	.globl mn10300_local_dcache_inv_range
	.globl mn10300_local_dcache_inv_range2
	.type mn10300_local_dcache_inv_page,@function
	.type mn10300_local_dcache_inv_range,@function
	.type mn10300_local_dcache_inv_range2,@function
mn10300_local_dcache_inv_page:
	and	~(PAGE_SIZE-1),d0	# round start down to page boundary
	mov	PAGE_SIZE,d1
mn10300_local_dcache_inv_range2:
	add	d0,d1			# d1 = end = start + size
mn10300_local_dcache_inv_range:
	# If we are in writeback mode we check the start and end alignments,
	# and if they're not cacheline-aligned, we must flush any bits outside
	# the range that share cachelines with stuff inside the range
#ifdef CONFIG_MN10300_CACHE_WBACK
	btst	~(L1_CACHE_BYTES-1),d0
	bne	1f
	btst	~(L1_CACHE_BYTES-1),d1
	beq	2f
1:
	bra	mn10300_local_dcache_flush_inv_range
2:
#endif /* CONFIG_MN10300_CACHE_WBACK */

	movm	[d2,d3,a2],(sp)		# save clobbered callee regs

	mov	CHCTR,a0
	movhu	(a0),d2
	btst	CHCTR_DCEN,d2		# nothing to do if dcache disabled
	beq	mn10300_local_dcache_inv_range_end

	# round the addresses out to be full cachelines, unless we're in
	# writeback mode, in which case we would be in flush and invalidate by
	# now
#ifndef CONFIG_MN10300_CACHE_WBACK
	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0	# round start
								# addr down

	mov	L1_CACHE_BYTES-1,d2
	add	d2,d1
	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1	# round end addr up
#endif /* !CONFIG_MN10300_CACHE_WBACK */

	sub	d0,d1,d2		# calculate the total size
	mov	d0,a2			# A2 = start address
	mov	d1,a1			# A1 = end address

	LOCAL_CLI_SAVE(d3)		# IRQs off around the purge; old
					# flags kept in d3

	mov	DCPGCR,a0		# make sure the purger isn't busy
	setlb				# (setlb/lne = spin while busy bit set)
	mov	(a0),d0
	btst	DCPGCR_DCPGBSY,d0
	lne

	# skip initial address alignment calculation if address is zero
	mov	d2,d1
	cmp	0,a2
	beq	1f

dcivloop:
	/* calculate alignsize: the largest natural alignment of the current
	 * start address, i.e. the biggest block the purge hardware can be
	 * asked to cover from here
	 *
	 * alignsize = L1_CACHE_BYTES;
	 * while (! start & alignsize) {
	 *     alignsize <<=1;
	 * }
	 * d1 = alignsize;
	 */
	mov	L1_CACHE_BYTES,d1
	lsr	1,d1
	setlb
	add	d1,d1
	mov	d1,d0
	and	a2,d0
	leq

1:
	/* calculate invsize: clamp to the size remaining, then round down to
	 * a power of two (highest set bit)
	 *
	 * if (totalsize > alignsize) {
	 *     invsize = alignsize;
	 * } else {
	 *     invsize = totalsize;
	 *     tmp = 0x80000000;
	 *     while (! invsize & tmp) {
	 *         tmp >>= 1;
	 *     }
	 *     invsize = tmp;
	 * }
	 * d1 = invsize
	 */
	cmp	d2,d1
	bns	2f
	mov	d2,d1

	mov	0x80000000,d0		# start from 31bit=1
	setlb
	lsr	1,d0
	mov	d0,e0
	and	d1,e0
	leq
	mov	d0,d1

2:
	/* set mask
	 *
	 * mask = ~(invsize-1);
	 * DCPGMR = mask;
	 */
	mov	d1,d0
	add	-1,d0
	not	d0
	mov	d0,(DCPGMR)

	# invalidate area
	mov	a2,d0
	or	DCPGCR_DCI,d0
	mov	d0,(a0)			# DCPGCR = (mask & start) | DCPGCR_DCI

	setlb				# wait for the purge to complete
	mov	(a0),d0
	btst	DCPGCR_DCPGBSY,d0
	lne

	sub	d1,d2			# decrease size remaining
	add	d1,a2			# increase next start address

	/* check invalidating of end address
	 *
	 * a2 = a2 + invsize
	 * if (a2 < end) {
	 *     goto dcivloop;
	 * } */
	cmp	a1,a2
	bns	dcivloop

	LOCAL_IRQ_RESTORE(d3)

mn10300_local_dcache_inv_range_end:
	ret	[d2,d3,a2],12		# restore saved regs and return
	.size	mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
	.size	mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
	.size	mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2

###############################################################################
#
# void mn10300_local_icache_inv_page(unsigned long start)
# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
# Invalidate a range of addresses on a page in the icache
#
# In:	d0 = start address; d1 = end address (range) or size (range2)
# Uses the ICIVCR/ICIVMR automatic invalidation registers.  Unlike the dcache
# variant above, a single power-of-two block size covering the whole range is
# computed once up front.  d2, d3 and a2 are saved on entry and restored by
# the ret; interrupts are disabled around the invalidation.
#
###############################################################################
	ALIGN
	.globl mn10300_local_icache_inv_page
	.globl mn10300_local_icache_inv_range
	.globl mn10300_local_icache_inv_range2
	.type mn10300_local_icache_inv_page,@function
	.type mn10300_local_icache_inv_range,@function
	.type mn10300_local_icache_inv_range2,@function
mn10300_local_icache_inv_page:
	and	~(PAGE_SIZE-1),d0	# round start down to page boundary
	mov	PAGE_SIZE,d1
mn10300_local_icache_inv_range2:
	add	d0,d1			# d1 = end = start + size
mn10300_local_icache_inv_range:
	movm	[d2,d3,a2],(sp)		# save clobbered callee regs

	mov	CHCTR,a0
	movhu	(a0),d2
	btst	CHCTR_ICEN,d2		# nothing to do if icache disabled
	beq	mn10300_local_icache_inv_range_reg_end

	/* calculate alignsize: smallest power-of-two block, >= one cacheline,
	 * big enough to step over the whole range
	 *
	 * alignsize = L1_CACHE_BYTES;
	 * for (i = (end - start - 1) / L1_CACHE_BYTES ;  i > 0; i >>= 1) {
	 *     alignsize <<= 1;
	 * }
	 * d2 = alignsize;
	 */
	mov	L1_CACHE_BYTES,d2
	sub	d0,d1,d3
	add	-1,d3
	lsr	L1_CACHE_SHIFT,d3
	beq	2f
1:
	add	d2,d2
	lsr	1,d3
	bne	1b
2:

	/* a1 = end */
	mov	d1,a1

	LOCAL_CLI_SAVE(d3)		# IRQs off; old flags kept in d3

	mov	ICIVCR,a0
	/* wait for busy bit of area invalidation */
	setlb
	mov	(a0),d1
	btst	ICIVCR_ICIVBSY,d1
	lne

	/* set mask
	 *
	 * mask = ~(alignsize-1);
	 * ICIVMR = mask;
	 */
	mov	d2,d1
	add	-1,d1
	not	d1
	mov	d1,(ICIVMR)
	/* a2 = mask & start */
	and	d1,d0,a2

icivloop:
	/* area invalidate
	 *
	 * ICIVCR = (mask & start) | ICIVCR_ICI
	 */
	mov	a2,d0
	or	ICIVCR_ICI,d0
	mov	d0,(a0)

	/* wait for busy bit of area invalidation */
	setlb
	mov	(a0),d1
	btst	ICIVCR_ICIVBSY,d1
	lne

	/* check invalidating of end address
	 *
	 * a2 = a2 + alignsize
	 * if (a2 < end) {
	 *     goto icivloop;
	 * } */
	add	d2,a2
	cmp	a1,a2
	bns	icivloop

	LOCAL_IRQ_RESTORE(d3)

mn10300_local_icache_inv_range_reg_end:
	ret	[d2,d3,a2],12		# restore saved regs and return
	.size	mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
	.size	mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
	.size	mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2