/* head_fsl_booke.S revision 3c5df5c26ed17828760945d59653a2e22e3fb63f */
/*
 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *	Rewritten for PReP
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 * Copyright 2002-2004 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 * Copyright 2004 Freescale Semiconductor, Inc
 *	PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include "head_booke.h"

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	.section .text.head, "ax"
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
/*
 * Save parameters we are passed (see table above) into high,
 * call-preserved GPRs; they are handed to machine_init() below.
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0			/* CPU number */

/* We try to not make any assumptions about how the boot loader
 * setup or used the TLBs.  We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 16M of kernel memory.  Any boot info passed from the
 * bootloader needs to live in this first 16M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1. If not an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 * Register roles through steps 1-8:
 *  r3 = Index of TLB1 entry we're executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
 * if needed
 */

/* 1. Find the index of the entry we're executing in */
	bl	invstr			/* branch-and-link puts our own address in LR */
invstr:	mflr	r6			/* Make it accessible */
	mfmsr	r7
	rlwinm	r4,r7,27,31,31		/* extract MSR[IS] */
	mfspr	r7, SPRN_PID0
	slwi	r7,r7,16
	or	r7,r7,r4		/* MAS6 = SPID0 | SAS */
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6			/* search MSR[IS], SPID=PID0 */
#ifndef CONFIG_E200
	/* e500 has three PID registers; retry the search under each
	 * until a valid translation for our own address is found. */
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7,SPRN_PID1
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6			/* search MSR[IS], SPID=PID1 */
	mfspr	r7,SPRN_MAS1
	andis.	r7,r7,MAS1_VALID@h
	bne	match_TLB
	mfspr	r7, SPRN_PID2
	slwi	r7,r7,16
	or	r7,r7,r4
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6			/* Fall through, we had to match */
#endif
match_TLB:
	mfspr	r7,SPRN_MAS0
	rlwinm	r3,r7,16,20,31		/* Extract MAS0(Entry) -> r3 */

	mfspr	r7,SPRN_MAS1		/* Ensure IPROT set on our own entry */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in */
	mfspr	r9,SPRN_TLB1CFG
	andi.	r9,r9,0xfff		/* r9 = TLB1CFG[NENTRY] */
	li	r6,0			/* Set Entry counter to 0 */
1:	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r6,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31		/* Clear MAS1 Valid and IPROT */
	cmpw	r3,r6
	beq	skpinv			/* Dont update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1			/* Increment */
	cmpw	r6,r9			/* Are we done? */
	bne	1b			/* If not, repeat */

	/* Invalidate TLB0 */
	li	r6,0x04
	tlbivax	0,r6
#ifdef CONFIG_SMP
	tlbsync
#endif
	/* Invalidate TLB1 */
	li	r6,0x0c
	tlbivax	0,r6
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync

/* 3. Setup a temp mapping and jump to it.
 * The temp entry index is chosen so it is non-zero and differs from
 * the entry we are running in (r3): (r3 & 1) + 1 yields 1 or 2.
 */
	andi.	r5, r3, 0x1		/* Find an entry not used and is non-zero */
	addi	r5, r5, 0x1
	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre				/* read our current entry into MAS regs */

	/* Just modify the entry ID and EPN for the temp mapping */
	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	xori	r6,r4,1			/* Setup TMP mapping in the other Address space */
	slwi	r6,r6,12
	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
	mtspr	SPRN_MAS1,r6
	mfspr	r6,SPRN_MAS2
	li	r7,0			/* temp EPN = 0 */
	rlwimi	r7,r6,0,20,31		/* keep low attribute bits from old MAS2 */
	mtspr	SPRN_MAS2,r7
	tlbwe

	xori	r6,r4,1
	slwi	r6,r6,5			/* setup new context (SRR1) with other address space */
	bl	1f			/* Find our address */
1:	mflr	r9
	rlwimi	r7,r9,0,20,31		/* offset within page of label 1 */
	addi	r7,r7,24		/* 24 bytes = 6 insns past label 1, i.e. past the rfi */
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi				/* continue at step 4 in the temp mapping */

/* 4. Clear out PIDs & Search info */
	li	r6,0
	mtspr	SPRN_PID0,r6
#ifndef CONFIG_E200
	mtspr	SPRN_PID1,r6
	mtspr	SPRN_PID2,r6
#endif
	mtspr	SPRN_MAS6,r6

/* 5. Invalidate mapping we started in */
	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r3) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,0		/* clear IPROT */
	mtspr	SPRN_MAS1,r6
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync

/* 6. Setup KERNELBASE mapping in TLB1[0] */
	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	mtspr	SPRN_MAS0,r6
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
	mtspr	SPRN_MAS1,r6
	li	r7,0
	lis	r6,KERNELBASE@h
	ori	r6,r6,KERNELBASE@l
	rlwimi	r6,r7,0,20,31		/* EPN = KERNELBASE, attributes = 0 */
	mtspr	SPRN_MAS2,r6
	li	r7,(MAS3_SX|MAS3_SW|MAS3_SR)
	mtspr	SPRN_MAS3,r7
	tlbwe

/* 7. Jump to KERNELBASE mapping */
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	bl	1f			/* Find our address */
1:	mflr	r9
	rlwimi	r6,r9,0,20,31		/* r6 still holds KERNELBASE from step 6 */
	addi	r6,r6,24		/* 6 insns past label 1, i.e. past the rfi */
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi				/* start execution out of TLB1[0] entry */

/* 8. Clear out the temp mapping */
	lis	r7,0x1000		/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r8,SPRN_MAS1
	rlwinm	r8,r8,0,2,0		/* clear IPROT */
	mtspr	SPRN_MAS1,r8
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync

	/* Establish the interrupt vector offsets (SET_IVOR from head_booke.h) */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, Debug);
	SET_IVOR(32, SPEUnavailable);
	SET_IVOR(33, SPEFloatingPointData);
	SET_IVOR(34, SPEFloatingPointRound);
#ifndef CONFIG_E200
	SET_IVOR(35, PerformanceMonitor);
#endif

	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
#ifdef CONFIG_E200
	oris	r2,r2,MAS4_TLBSELD(1)@h
#endif
	mtspr	SPRN_MAS4, r2

#if 0
	/* Enable DOZE */
	mfspr	r2,SPRN_HID0
	oris	r2,r2,HID0_DOZE@h
	mtspr	SPRN_HID0, r2
#endif
#ifdef CONFIG_E200
	/* enable dedicated debug exception handling resources (Debug APU) */
	mfspr	r2,SPRN_HID0
	ori	r2,r2,HID0_DAPUEN@l
	mtspr	SPRN_HID0,r2
#endif

#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	lis	r2,DBCR0_IDM@h
	mtspr	SPRN_DBCR0,r2
	isync
	/* clear any residual debug events */
	li	r2,-1
	mtspr	SPRN_DBSR,r2
#endif

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack: top of init_thread_union, zero back-chain word */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

	/* publish TLB1 size for the MM code */
	mfspr	r3,SPRN_TLB1CFG
	andi.	r3,r3,0xfff		/* TLB1CFG[NENTRY] */
	lis	r4,num_tlbcam_entries@ha
	stw	r3,num_tlbcam_entries@l(r4)
/*
 * Decide what sort of machine this is and initialize the MMU.
 * r3-r7 are the boot parameters saved in r31-r27 at entry.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)		/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_FLAGS_OFFSET	4
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define PTE_FLAGS_OFFSET	0
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vectors offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif

	/* Data Storage Interrupt
	 * Fast path for the COW/dirty-bit case: a store to a page whose
	 * PTE says RW+USER but HWWRITE is not yet set.  Everything else
	 * bails to data_access (label 2:).
	 * Working regs are stashed in SPRGs: written via the *W alias,
	 * read back via the matching *R alias below.
	 */
	START_EXCEPTION(DataStorage)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11

	/*
	 * Check if it was a store fault, if not then bail
	 * because a user tried to access a kernel or
	 * read-protected page.  Otherwise, get the
	 * offending address and handle it.
	 */
	mfspr	r10, SPRN_ESR
	andis.	r10, r10, ESR_ST@h
	beq	2f

	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, TASK_SIZE@h
	ori	r11, r11, TASK_SIZE@l
	cmplw	0, r10, r11
	bge	2f			/* kernel address: bail to data_access */

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	FIND_PTE

	/* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
	andi.	r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
	cmpwi	0, r13, _PAGE_RW|_PAGE_USER
	bne	2f			/* Bail if not */

	/* Update 'changed'. */
	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	stw	r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */

	/* MAS2 not updated as the entry does exist in the tlb, this
	   fault taken to detect state transition (eg: COW -> DIRTY)
	 */
	andi.	r11, r11, _PAGE_HWEXEC
	rlwimi	r11, r11, 31, 27, 27	/* SX <- _PAGE_HWEXEC */
	ori	r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */

	/* update search PID in MAS6, AS = 0 */
	mfspr	r12, SPRN_PID0
	slwi	r12, r12, 16
	mtspr	SPRN_MAS6, r12

	/* find the TLB index that caused the fault.  It has to be here. */
	tlbsx	0, r10

	/* only update the perm bits, assume the RPN is fine */
	mfspr	r12, SPRN_MAS3
	rlwimi	r12, r11, 0, 20, 31
	mtspr	SPRN_MAS3,r12
	tlbwe

	/* Done...restore registers and get out of here. */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	rfi			/* Force context change */

2:
	/*
	 * The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	data_access

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
#ifdef CONFIG_E200
	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
	EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
#else
	EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
#endif

	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxillary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Internal Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt
	 * Walks the page tables for the faulting address and branches to
	 * finish_tlb_load to write the TLB entry.  Note the compare uses
	 * CR field 5: finish_tlb_load re-tests CR5 to pick user vs kernel
	 * permissions, so CR5 must stay live until then.
	 */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, TASK_SIZE@h
	ori	r11, r11, TASK_SIZE@l
	cmplw	5, r10, r11		/* result kept in CR5 for finish_tlb_load */
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

4:
	FIND_PTE
	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
	beq	2f			/* Bail if not present */

#ifdef CONFIG_PTE_64BIT
	lwz	r13, 0(r12)		/* high PTE word (physical address bits) */
#endif
	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, PTE_FLAGS_OFFSET(r12)

	/* Jump to common tlb load */
	b	finish_tlb_load
2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	data_access

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, TASK_SIZE@h
	ori	r11, r11, TASK_SIZE@l
	cmplw	5, r10, r11		/* CR5 consumed by finish_tlb_load */
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

4:
	FIND_PTE
	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
	beq	2f			/* Bail if not present */

#ifdef CONFIG_PTE_64BIT
	lwz	r13, 0(r12)		/* high PTE word (physical address bits) */
#endif
	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, PTE_FLAGS_OFFSET(r12)

	/* Jump to common TLB load point */
	b	finish_tlb_load

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	InstructionStorage

#ifdef CONFIG_SPE
	/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG
	bne	load_up_spe		/* from user: lazily load SPE state */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
	EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
#else
	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

	/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)

	/* Performance Monitor */
	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)


	/* Debug Interrupt */
	DEBUG_EXCEPTION

/*
 * Local functions
 */

	/*
	 * Data TLB exceptions will bail out to this point
	 * if they can't resolve the lightweight TLB fault.
	 * Locked-cache-line faults (ESR[ILK]/ESR[DLK]) are routed to
	 * CacheLockingException; everything else to handle_page_fault.
	 */
data_access:
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	bne	1f
	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
1:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)

/*

 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - TLB (info from Linux PTE)
 *	r12, r13 - available to use
 *	CR5  - results of addr < TASK_SIZE
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */

	mfspr	r12, SPRN_MAS2
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 26, 24, 31	/* extract ...WIMGE from pte */
#else
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
#endif
	mtspr	SPRN_MAS2, r12

	bge	5, 1f			/* CR5 was set by the miss handler:
					 * GE means addr >= TASK_SIZE, i.e. kernel */

	/* is user addr */
	andi.	r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
	srwi	r10, r12, 1
	or	r12, r12, r10		/* Copy user perms into supervisor */
	iseleq	r12, 0, r12		/* no _PAGE_USER (CR0.EQ) -> no perms at all */
	b	2f

	/* is kernel addr */
1:	rlwinm	r12, r11, 31, 29, 29	/* Extract _PAGE_HWWRITE into SW */
	ori	r12, r12, (MAS3_SX | MAS3_SR)

#ifdef CONFIG_PTE_64BIT
2:	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] (r13 = high PTE word) */
	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
	mtspr	SPRN_MAS3, r12
BEGIN_FTR_SECTION
	srwi	r10, r13, 8		/* grab RPN[8:31] */
	mtspr	SPRN_MAS7, r10
END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
#else
2:	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
	mtspr	SPRN_MAS3, r11
#endif
#ifdef CONFIG_E200
	/* Round robin TLB1 entries assignment */
	mfspr	r12, SPRN_MAS0

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff
	addi	r13, r13, 1
	cmpw	0, r13, r11
	addi	r12, r12, 1

	/* check if we need to wrap */
	blt	7f

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31
7:
	mtspr	SPRN_MAS0,r12
#endif /* CONFIG_E200 */

	tlbwe

	/* Done...restore registers and get out of here.
	 * (W-alias SPRGs written in the miss prologue, read via R alias.) */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	rfi			/* Force context change */

#ifdef CONFIG_SPE
/* Note that the SPE support is closely modeled after the AltiVec
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_spe:
/*
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
/*
 * For SMP, we don't do lazy SPE switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_spe in switch_to.
 */
#ifndef CONFIG_SMP
	lis	r3,last_task_used_spe@ha
	lwz	r4,last_task_used_spe@l(r3)
	cmpi	0,r4,0
	beq	1f			/* nobody owned SPE: nothing to save */
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_spe */
	SAVE_32EVRS(0,r10,r4)
	evxor	evr10, evr10, evr10	/* clear out evr10 */
	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
	li	r5,THREAD_ACC
	evstddx	evr10, r4, r5		/* save off accumulator */
	lwz	r5,PT_REGS(r4)
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_SPE@h
	andc	r4,r4,r10		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* !CONFIG_SMP */
	/* enable use of SPE after return
	 * (r9 holds the saved SRR1 from the exception prologue —
	 *  NOTE(review): per head_booke.h prologue conventions; confirm) */
	oris	r9,r9,MSR_SPE@h
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_ACC
	stw	r4,THREAD_USED_SPE(r5)
	evlddx	evr4,r10,r5		/* reload accumulator */
	evmra	evr4,evr4
	REST_32EVRS(0,r10,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	stw	r4,last_task_used_spe@l(r3)
#endif /* !CONFIG_SMP */
	/* restore registers and return */
2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	rfi

/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
KernelSPE:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_SPE@h
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"SPE used in kernel (task=%p, pc=%x) \n"
	.align	4,0

#endif /* CONFIG_SPE */

/*
 * Global functions
 */

/*
 * extern void loadcam_entry(unsigned int index)
 *
 * Load TLBCAM[index] entry in to the L2 CAM MMU
 * Each TLBCAM[] record is 20 bytes: MAS0, MAS1, MAS2, MAS3 words
 * are written in order, then committed with tlbwe.
 */
_GLOBAL(loadcam_entry)
	lis	r4,TLBCAM@ha
	addi	r4,r4,TLBCAM@l
	mulli	r5,r3,20		/* byte offset of entry 'index' */
	add	r3,r5,r4
	lwz	r4,0(r3)
	mtspr	SPRN_MAS0,r4
	lwz	r4,4(r3)
	mtspr	SPRN_MAS1,r4
	lwz	r4,8(r3)
	mtspr	SPRN_MAS2,r4
	lwz	r4,12(r3)
	mtspr	SPRN_MAS3,r4
	tlbwe
	isync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The e500 core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr			/* no AltiVec state to give up: no-op */

#ifdef CONFIG_SPE
/*
 * extern void giveup_spe(struct task_struct *prev)
 *
 * Save prev's SPE register state (evr0-31, accumulator, SPEFSCR)
 * into its thread_struct and clear MSR_SPE in its saved regs.
 */
_GLOBAL(giveup_spe)
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0			/* CR0 consumed by the beq below */
	SAVE_32EVRS(0, r4, r3)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	li	r4,THREAD_ACC
	evstddx	evr6, r4, r3		/* save off accumulator */
	mfspr	r6,SPRN_SPEFSCR
	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
	beq	1f			/* no pt_regs: skip MSR update */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_SPE@h
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* !CONFIG_SMP */
	blr
#endif /* CONFIG_SPE */

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * Not all FSL Book-E cores have an FPU
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 * No blr: the final DBCR0 write requests a chip reset
 * (DBCR0_RST_CHIP), so control is not expected to return.
 */
_GLOBAL(abort)
	li	r13,0
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	isync
	mfmsr	r13
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mtmsr	r13
	isync
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
	isync

/*
 * void set_context(unsigned long contextid, pgd_t *pgd)
 * (NOTE(review): arg meaning inferred from usage — r3 is written to
 *  SPRN_PID, r4 stored as the user PTE pointer; confirm against caller)
 */
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/* Reserved 4k for the critical exception stack & 4k for the machine
 * check stack per CPU for kernel mode exceptions */
	.section .bss
	.align	12
exception_stack_bottom:
	.space	BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
	.globl	exception_stack_top
exception_stack_top:

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8