/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}
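/*
 * Local form of the invalidation above: tlbiel only flushes the
 * translation on the executing CPU, so no tlbsync or global
 * serialization against other CPUs is needed. The instruction is
 * emitted as a raw opcode (0x7c000224, with RB at shift 11 and the
 * large-page L field at shift 21), presumably so it assembles even
 * with toolchains that lack this form of tlbiel.
 */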
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va. (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
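/*
 * Per-HPTE locking: a software lock bit (HPTE_LOCK_BIT) in the first
 * doubleword of the entry serves as a spinlock, avoiding any separate
 * lock table. The loop below is a test-and-test-and-set: grab the bit
 * with an atomic acquire, and on contention spin on a plain read so
 * the cacheline can stay shared while we wait.
 */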
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			       unsigned long pa, unsigned long rflags,
			       unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	/* Slot within the group, with bit 3 flagging the secondary hash */
	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
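/*
 * Update the protection bits of an existing HPTE in place. Returns 0
 * if a matching valid entry was found and updated, or -1 if the slot
 * no longer holds the expected translation (the caller must then
 * re-hash the page). The TLB is flushed either way; see the comment
 * in the body below.
 */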
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = be64_to_cpu(hptep->v);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
				       (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. Bolted entries base and
	 * actual page size will be the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}
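/*
 * Invalidate a single HPTE: clear the valid bit under the per-entry
 * lock (clearing the first doubleword also releases the lock) and
 * flush the translation from the TLB. The whole sequence runs with
 * interrupts disabled.
 */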
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need to do a tlb invalidate for each address; the tlbie
		 * instruction compares the entry's VA in the TLB with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, 0);
	}
	local_irq_restore(flags);
}
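/*
 * Given the 8-bit LP field of an HPTE and a candidate base page size,
 * return the actual page size the LP encoding selects, or -1 if no
 * defined encoding matches. See the encoding table in the loop body.
 */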
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

/*
 * Decode an HPTE back into its base page size, actual page size,
 * segment size and virtual page number. Used by native_hpte_clear()
 * at kexec time, when we walk the hash table with no record of what
 * was inserted where.
 */
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize = size;
	*apsize = a_size;
}
/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/*
	 * we take the tlbie lock and hold it. Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate = native_hugepage_invalidate;
}