irqflags.h revision df9ee29270c11dba7d0fe0b83ce47a4d8e8d2101
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_IRQFLAGS_H
#define _ASM_TILE_IRQFLAGS_H

#include <arch/interrupts.h>
#include <arch/chip.h>

/*
 * The set of interrupts we want to allow when interrupts are nominally
 * disabled.  The remainder are effectively "NMI" interrupts from
 * the point of view of the generic Linux code.  Note that synchronous
 * interrupts (aka "non-queued") are not blocked by the mask in any case.
 */
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS \
	(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS \
	(~(INT_MASK(INT_PERF_COUNT)))
#endif

#ifndef __ASSEMBLY__

/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
#include <asm/percpu.h>
#include <arch/spr_def.h>

/* Set and clear kernel interrupt masks. */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
#endif
#define interrupt_mask_set(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
	int __n = (n); \
	(((__n < 32) ? \
	  __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
	  __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
	 >> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
#define interrupt_mask_reset(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
#define interrupt_mask_check(n) \
	((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
#define interrupt_mask_reset_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
#endif

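/*
 * Usage sketch (illustrative only; INT_FOO is a hypothetical stand-in
 * for some interrupt number from <arch/interrupts.h>): the primitives
 * above hide whether the chip exposes one 64-bit interrupt mask SPR or
 * a split pair of 32-bit words.
 *
 *	interrupt_mask_set(INT_FOO);		// block INT_FOO
 *	if (interrupt_mask_check(INT_FOO))	// reads back as blocked
 *		interrupt_mask_reset(INT_FOO);	// unblock it again
 *
 * On split-mask chips the interrupt_mask_{set,reset}_mask() forms write
 * both 32-bit halves of the 64-bit mask; otherwise they are a single
 * SPR write.
 */
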
/*
 * The set of interrupts we want active if irqs are enabled.
 * Note that in particular, the tile timer interrupt comes and goes
 * from this set, since we have no other way to turn off the timer.
 * Likewise, INTCTRL_1 is removed and re-added during device
 * interrupts, as is the hardwall UDN_FIREWALL interrupt.
 * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
 * is always claimed as an "active interrupt" so we can query that bit
 * to know our current state.
 */
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)

/* Disable interrupts. */
#define arch_local_irq_disable() \
	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)

/* Disable all interrupts, including NMIs. */
#define arch_local_irq_disable_all() \
	interrupt_mask_set_mask(-1UL)

/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
	interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))

/* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \
	if (disabled) \
		arch_local_irq_disable(); \
	else \
		arch_local_irq_enable(); \
} while (0)

/* Return true if "flags" argument means interrupts are disabled. */
#define arch_irqs_disabled_flags(flags) ((flags) != 0)

/* Return true if interrupts are currently disabled. */
#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)

/* Save whether interrupts are currently disabled. */
#define arch_local_save_flags() arch_irqs_disabled()

/* Save whether interrupts are currently disabled, then disable them. */
#define arch_local_irq_save() ({ \
	unsigned long __flags = arch_local_save_flags(); \
	arch_local_irq_disable(); \
	__flags; })

/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
	(__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))

/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
	arch_local_irq_mask(interrupt); \
	interrupt_mask_set(interrupt); \
} while (0)

/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
	(__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))

/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
	arch_local_irq_unmask(interrupt); \
	if (!irqs_disabled()) \
		interrupt_mask_reset(interrupt); \
} while (0)

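/*
 * Usage sketch (illustrative only): on this architecture the saved
 * "flags" value is just the disabled/enabled boolean returned by
 * arch_local_save_flags(), so nested critical sections compose
 * naturally.  A hypothetical caller, going through the generic
 * local_irq_save()/local_irq_restore() wrappers that the kernel layers
 * over these arch_ macros, might look like:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// disable, remember prior state
 *	...update per-cpu state...
 *	local_irq_restore(flags);	// re-enable only if it was enabled
 */
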
#else /* __ASSEMBLY__ */

/* We provide a somewhat more restricted set for assembly. */

#ifdef __tilegx__

#if INT_MEM_ERROR != 0
# error Fix IRQS_DISABLED() macro
#endif

/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp)					\
	mfspr	tmp, INTERRUPT_MASK_1;				\
	andi	tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
	moveli	reg, hw2_last(interrupts_enabled_mask);		\
	shl16insli reg, reg, hw1(interrupts_enabled_mask);	\
	shl16insli reg, reg, hw0(interrupts_enabled_mask);	\
	add	reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1)					\
	moveli	tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS);	\
	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS);	\
	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS);	\
	mtspr	INTERRUPT_MASK_SET_1, tmp0

/* Disable ALL interrupts, including the "NMI" ones (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp)					\
	movei	tmp, -1;					\
	mtspr	INTERRUPT_MASK_SET_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1)					\
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
	ld	tmp0, tmp0;					\
	mtspr	INTERRUPT_MASK_RESET_1, tmp0

#else /* !__tilegx__ */

/*
 * Return 0 or 1 to indicate whether interrupts are currently disabled.
 * Note that it's important that we use a bit from the "low" mask word,
 * since when we are enabling, that is the word we write first, so if we
 * are interrupted after only writing half of the mask, the interrupt
 * handler will correctly observe that we have interrupts enabled, and
 * will enable interrupts itself on return from the interrupt handler
 * (making the original code's write of the "high" mask word idempotent).
 */
#define IRQS_DISABLED(tmp)					\
	mfspr	tmp, INTERRUPT_MASK_1_0;			\
	shri	tmp, tmp, INT_MEM_ERROR;			\
	andi	tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
	moveli	reg, lo16(interrupts_enabled_mask);		\
	auli	reg, reg, ha16(interrupts_enabled_mask);	\
	add	reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1)					\
	{							\
	 movei	tmp0, -1;					\
	 moveli	tmp1, lo16(LINUX_MASKABLE_INTERRUPTS)		\
	};							\
	{							\
	 mtspr	INTERRUPT_MASK_SET_1_0, tmp0;			\
	 auli	tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS)	\
	};							\
	mtspr	INTERRUPT_MASK_SET_1_1, tmp1

/* Disable ALL interrupts, including the "NMI" ones (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp)					\
	movei	tmp, -1;					\
	mtspr	INTERRUPT_MASK_SET_1_0, tmp;			\
	mtspr	INTERRUPT_MASK_SET_1_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE(tmp0, tmp1)					\
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
	{							\
	 lw	tmp0, tmp0;					\
	 addi	tmp1, tmp0, 4					\
	};							\
	lw	tmp1, tmp1;					\
	mtspr	INTERRUPT_MASK_RESET_1_0, tmp0;			\
	mtspr	INTERRUPT_MASK_RESET_1_1, tmp1
#endif

/*
 * Do the CPU's IRQ-state tracing from assembly code.  We call a
 * C function, but almost everywhere we do, we don't mind clobbering
 * all the caller-saved registers.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON	jal trace_hardirqs_on
# define TRACE_IRQS_OFF	jal trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_IRQFLAGS_H */
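
/*
 * Usage sketch (illustrative only; r20/r21 are hypothetical stand-ins
 * for whatever scratch registers the real entry code has free): an
 * assembly interrupt path might pair the macros above roughly like
 * this, with the TRACE_IRQS_* hooks compiling away unless
 * CONFIG_TRACE_IRQFLAGS is enabled:
 *
 *	IRQ_DISABLE(r20, r21)		// mask the maskable interrupts
 *	TRACE_IRQS_OFF			// report the state change
 *	...service the interrupt...
 *	TRACE_IRQS_ON
 *	IRQ_ENABLE(r20, r21)		// restore interrupts_enabled_mask
 */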