irq-armada-370-xp.c revision 627dfcc249e2eae07982272808ad560592e730e0
/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

#include "irqchip.h"

/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + (irq) * 4)

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)

#define IPI_DOORBELL_START			(0)
#define IPI_DOORBELL_END			(8)
#define IPI_DOORBELL_MASK			0xFF

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

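/*
 * Register windows mapped at probe time: main_int_base covers the
 * global (shared) controller registers, per_cpu_int_base covers the
 * CPU-local bank (mask, acknowledge and doorbell registers).
 */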
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask the global enable bit.
 * For per-CPU interrupts, mask/unmask the calling CPU's mask bit.
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_SET_MASK_OFFS);
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

#ifdef CONFIG_SMP
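/*
 * Route a shared interrupt to a single CPU by programming the
 * interrupt's source control register with the physical CPU mask.
 */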
static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	unsigned long reg;
	unsigned long new_mask = 0;
	unsigned long online_mask = 0;
	unsigned long count = 0;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	int cpu;

	for_each_cpu(cpu, mask_val) {
		new_mask |= 1 << cpu_logical_map(cpu);
		count++;
	}

	/*
	 * Forbid multicore interrupt affinity.
	 * This is required since the MPIC HW doesn't prevent
	 * several CPUs from acknowledging the same interrupt.
	 */
	if (count > 1)
		return -EINVAL;

	for_each_cpu(cpu, cpu_online_mask)
		online_mask |= 1 << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);

	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~online_mask)) | new_mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
	.name		= "armada_370_xp_irq",
	.irq_mask	= armada_370_xp_irq_mask,
	.irq_mask_ack	= armada_370_xp_irq_mask,
	.irq_unmask	= armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
};

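/*
 * Called when a hwirq is mapped into the domain: start with the
 * interrupt masked, enable it at the level not covered by
 * mask/unmask, and select a per-CPU or level handler depending on
 * whether this is the private Timer0 interrupt.
 */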
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hw, per_cpu_int_base +
			ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	else
		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_level_irq);
	}
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

#ifdef CONFIG_SMP
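/*
 * Send an IPI to the CPUs in @mask by writing the physical CPU mask
 * and the doorbell number to the software-triggered interrupt
 * register.
 */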
void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* Trigger the software interrupt on the target CPUs */
	writel((map << 8) | irq, main_int_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

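/*
 * Per-CPU MPIC setup, run on each CPU as it is brought up: clear any
 * stale doorbells, enable the IPI doorbells and unmask the IPI
 * summary interrupt for this CPU.
 */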
void armada_xp_mpic_smp_cpu_init(void)
{
	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
#endif /* CONFIG_SMP */

static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

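/*
 * Top-level IRQ handler: read the CPU interrupt acknowledge register
 * in a loop.  A value of 1023 means nothing is pending; IRQ 0 is the
 * IPI summary and is demultiplexed from the doorbell cause register;
 * everything else is dispatched through the irq domain.
 */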
static asmlinkage void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;

	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;

		if (irqnr > 1022)
			break;

		if (irqnr > 0) {
			irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
						 irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
#ifdef CONFIG_SMP
		/* IPI Handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;

			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& IPI_DOORBELL_MASK;

			writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			/* Handle all pending doorbells */
			for (ipinr = IPI_DOORBELL_START;
			     ipinr < IPI_DOORBELL_END; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif
	} while (1);
}

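/*
 * Probe from the device tree: map the main and per-CPU register
 * banks, size the linear irq domain from the interrupt count field of
 * the control register, and install the top-level IRQ handler.
 */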
static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	struct resource main_int_res, per_cpu_int_res;
	u32 control;

	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));

	BUG_ON(!request_mem_region(main_int_res.start,
				   resource_size(&main_int_res),
				   node->full_name));
	BUG_ON(!request_mem_region(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res),
				   node->full_name));

	main_int_base = ioremap(main_int_res.start,
				resource_size(&main_int_res));
	BUG_ON(!main_int_base);

	per_cpu_int_base = ioremap(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res));
	BUG_ON(!per_cpu_int_base);

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);

	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, (control >> 2) & 0x3ff,
				      &armada_370_xp_mpic_irq_ops, NULL);

	BUG_ON(!armada_370_xp_mpic_domain);

	irq_set_default_host(armada_370_xp_mpic_domain);

#ifdef CONFIG_SMP
	armada_xp_mpic_smp_cpu_init();

	/*
	 * Set the default affinity of all interrupts to the boot CPU.
	 * This is required since the MPIC doesn't prevent several
	 * CPUs from acknowledging the same interrupt.
	 */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	set_handle_irq(armada_370_xp_handle_irq);

	return 0;
}

IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);