irq-armada-370-xp.c revision 298dcb2dd0267d51e4f7c94a628cd0765a50ad75
/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

#include "irqchip.h"

/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
#define ARMADA_370_XP_INT_SOURCE_CPU_MASK	0xF

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
#define ARMADA_375_PPI_CAUSE			(0x10)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS           (0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS          (0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS        (0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)

#define IPI_DOORBELL_START                      (0)
#define IPI_DOORBELL_END                        (8)
#define IPI_DOORBELL_MASK                       0xFF
#define PCI_MSI_DOORBELL_START                  (16)
#define PCI_MSI_DOORBELL_NR                     (16)
#define PCI_MSI_DOORBELL_END                    (32)
#define PCI_MSI_DOORBELL_MASK                   0xFFFF0000

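/*
 * As the defines above show, the per-CPU in-doorbell (IN_DRBEL)
 * cause/mask registers carry two kinds of events: bits 0-7 are used as
 * inter-processor interrupts (IPIs) and bits 16-31 are used as PCI MSI
 * vectors. Bits 8-15 are not used by this driver.
 */
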
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask global enable bit
 * For CPU interrupts, mask/unmask the calling CPU's bit
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_SET_MASK_OFFS);
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

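/*
 * MSI support: PCI MSIs are delivered through the in-doorbell register.
 * Each MSI is assigned one of the 16 doorbells starting at
 * PCI_MSI_DOORBELL_START; the msi_used bitmap below tracks which
 * doorbells are allocated, and endpoints are programmed to write their
 * MSI message to the software-triggered interrupt register
 * (msi_doorbell_addr) to raise the corresponding doorbell.
 */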
#ifdef CONFIG_PCI_MSI

static int armada_370_xp_alloc_msi(void)
{
	int hwirq;

	mutex_lock(&msi_used_lock);
	hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
	if (hwirq >= PCI_MSI_DOORBELL_NR)
		hwirq = -ENOSPC;
	else
		set_bit(hwirq, msi_used);
	mutex_unlock(&msi_used_lock);

	return hwirq;
}

static void armada_370_xp_free_msi(int hwirq)
{
	mutex_lock(&msi_used_lock);
	if (!test_bit(hwirq, msi_used))
		pr_err("trying to free unused MSI#%d\n", hwirq);
	else
		clear_bit(hwirq, msi_used);
	mutex_unlock(&msi_used_lock);
}

static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
				       struct pci_dev *pdev,
				       struct msi_desc *desc)
{
	struct msi_msg msg;
	int virq, hwirq;

	/* We support MSI, but not MSI-X */
	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	hwirq = armada_370_xp_alloc_msi();
	if (hwirq < 0)
		return hwirq;

	virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
	if (!virq) {
		armada_370_xp_free_msi(hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(virq, desc);

	msg.address_lo = msi_doorbell_addr;
	msg.address_hi = 0;
	msg.data = 0xf00 | (hwirq + 16);

	write_msi_msg(virq, &msg);
	return 0;
}

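/*
 * For example, for the first allocated MSI (hwirq 0), the message
 * programmed above is address = msi_doorbell_addr, data = 0xf10: the
 * low bits select doorbell 16 (PCI_MSI_DOORBELL_START + 0) and,
 * mirroring the encoding used by armada_mpic_send_doorbell() below, the
 * 0xf00 part appears to correspond to the CPU-target field of the
 * software-triggered interrupt register.
 */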
static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
					   unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	unsigned long hwirq = d->hwirq;

	irq_dispose_mapping(irq);
	armada_370_xp_free_msi(hwirq);
}

static struct irq_chip armada_370_xp_msi_irq_chip = {
	.name = "armada_370_xp_msi_irq",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};

static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
				 irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
				 handle_simple_irq);
	set_irq_flags(virq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
	.map = armada_370_xp_msi_map,
};

static int armada_370_xp_msi_init(struct device_node *node,
				  phys_addr_t main_int_phys_base)
{
	struct msi_chip *msi_chip;
	u32 reg;
	int ret;

	msi_doorbell_addr = main_int_phys_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS;

	msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
	if (!msi_chip)
		return -ENOMEM;

	msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
	msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
	msi_chip->of_node = node;

	armada_370_xp_msi_domain =
		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
				      &armada_370_xp_msi_irq_ops,
				      NULL);
	if (!armada_370_xp_msi_domain) {
		kfree(msi_chip);
		return -ENOMEM;
	}

	ret = of_pci_msi_chip_add(msi_chip);
	if (ret < 0) {
		irq_domain_remove(armada_370_xp_msi_domain);
		kfree(msi_chip);
		return ret;
	}

	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
		| PCI_MSI_DOORBELL_MASK;

	writel(reg, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask the doorbell interrupt (1) that signals MSIs */
	writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

	return 0;
}
#else
static inline int armada_370_xp_msi_init(struct device_node *node,
					 phys_addr_t main_int_phys_base)
{
	return 0;
}
#endif

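/*
 * On SMP, a shared (global) interrupt can be steered to a single CPU by
 * writing that CPU's physical ID bit into the low nibble of the
 * per-interrupt INT_SOURCE_CTL register, which is what
 * armada_xp_set_affinity() below does.
 */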
#ifdef CONFIG_SMP
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long reg, mask;
	int cpu;

	/* Select a single core from the affinity mask which is online */
	cpu = cpumask_any_and(mask_val, cpu_online_mask);
	mask = 1UL << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);
	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
	.name		= "armada_370_xp_irq",
	.irq_mask       = armada_370_xp_irq_mask,
	.irq_mask_ack   = armada_370_xp_irq_mask,
	.irq_unmask     = armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
};

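/*
 * At mapping time, shared interrupts are unmasked at the per-CPU level
 * (only the global enable is used to mask them afterwards), while the
 * per-CPU timer 0 interrupt is enabled globally here and masked or
 * unmasked per CPU; it is also the only interrupt handled with the
 * percpu_devid flow.
 */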
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hw, per_cpu_int_base +
			ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	else
		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					handle_percpu_devid_irq);

	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					handle_level_irq);
	}
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

#ifdef CONFIG_SMP
static void armada_mpic_send_doorbell(const struct cpumask *mask,
				      unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* Trigger the software interrupt (doorbell) on the target CPUs */
	writel((map << 8) | irq, main_int_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

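/*
 * As an illustration of the encoding above: sending IPI 0 to CPUs with
 * physical IDs 1 and 2 writes (0x6 << 8) | 0 = 0x600 to the
 * software-triggered interrupt register (assuming a 1:1
 * logical-to-physical CPU mapping for those CPUs).
 */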
static void armada_xp_mpic_smp_cpu_init(void)
{
	u32 control;
	int nr_irqs, i;

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
	nr_irqs = (control >> 2) & 0x3ff;

	for (i = 0; i < nr_irqs; i++)
		writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);

	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		armada_xp_mpic_smp_cpu_init();
	return NOTIFY_OK;
}

static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
	.notifier_call = armada_xp_mpic_secondary_init,
	.priority = 100,
};

#endif /* CONFIG_SMP */

static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

#ifdef CONFIG_PCI_MSI
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
{
	u32 msimask, msinr;

	msimask = readl_relaxed(per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
		& PCI_MSI_DOORBELL_MASK;

	writel(~msimask, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	for (msinr = PCI_MSI_DOORBELL_START;
	     msinr < PCI_MSI_DOORBELL_END; msinr++) {
		int irq;

		if (!(msimask & BIT(msinr)))
			continue;

		if (is_chained) {
			irq = irq_find_mapping(armada_370_xp_msi_domain,
					       msinr - 16);
			generic_handle_irq(irq);
		} else {
			irq = msinr - 16;
			handle_domain_irq(armada_370_xp_msi_domain,
					  irq, regs);
		}
	}
}
#else
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif

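/*
 * Two entry paths into the MPIC exist: armada_370_xp_handle_irq() is
 * used when the MPIC is the primary interrupt controller and decodes
 * the CPU_INTACK register (0 is the IPI doorbell, 1 the MSI doorbell
 * summary, values above 1022 are spurious), while
 * armada_370_xp_mpic_handle_cascade_irq() is used when the MPIC is
 * chained behind a parent controller and walks the PPI cause register
 * instead.
 */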
static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
						  struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned long irqmap, irqn;
	unsigned int cascade_irq;

	chained_irq_enter(chip, desc);

	irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);

	if (irqmap & BIT(1)) {
		armada_370_xp_handle_msi_irq(NULL, true);
		irqmap &= ~BIT(1);
	}

	for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
		cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
		generic_handle_irq(cascade_irq);
	}

	chained_irq_exit(chip, desc);
}

static void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;

	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;

		if (irqnr > 1022)
			break;

		if (irqnr > 1) {
			handle_domain_irq(armada_370_xp_mpic_domain,
					  irqnr, regs);
			continue;
		}

		/* MSI handling */
		if (irqnr == 1)
			armada_370_xp_handle_msi_irq(regs, false);

#ifdef CONFIG_SMP
		/* IPI Handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;

			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& IPI_DOORBELL_MASK;

			writel(~ipimask, per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			/* Handle all pending doorbells */
			for (ipinr = IPI_DOORBELL_START;
			     ipinr < IPI_DOORBELL_END; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif

	} while (1);
}

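/*
 * Probe: the "marvell,mpic" node provides two register regions (the
 * global and per-CPU banks). The number of interrupts is read back from
 * the INT_CONTROL register, all of them are disabled, and a linear irq
 * domain is created. If the node has a parent interrupt, the MPIC is
 * set up as a chained (secondary) controller; otherwise it becomes the
 * primary controller and, on SMP, the IPI backend.
 */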
static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	struct resource main_int_res, per_cpu_int_res;
	int parent_irq, nr_irqs, i;
	u32 control;

	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));

	BUG_ON(!request_mem_region(main_int_res.start,
				   resource_size(&main_int_res),
				   node->full_name));
	BUG_ON(!request_mem_region(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res),
				   node->full_name));

	main_int_base = ioremap(main_int_res.start,
				resource_size(&main_int_res));
	BUG_ON(!main_int_base);

	per_cpu_int_base = ioremap(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res));
	BUG_ON(!per_cpu_int_base);

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
	nr_irqs = (control >> 2) & 0x3ff;

	for (i = 0; i < nr_irqs; i++)
		writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);

	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, nr_irqs,
				&armada_370_xp_mpic_irq_ops, NULL);

	BUG_ON(!armada_370_xp_mpic_domain);

#ifdef CONFIG_SMP
	armada_xp_mpic_smp_cpu_init();
#endif

	armada_370_xp_msi_init(node, main_int_res.start);

	parent_irq = irq_of_parse_and_map(node, 0);
	if (parent_irq <= 0) {
		irq_set_default_host(armada_370_xp_mpic_domain);
		set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
		set_smp_cross_call(armada_mpic_send_doorbell);
		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
#endif
	} else {
		irq_set_chained_handler(parent_irq,
					armada_370_xp_mpic_handle_cascade_irq);
	}

	return 0;
}

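/*
 * A device tree node matching this driver is expected to provide the
 * two register regions used above (main and per-CPU banks) and, when
 * the MPIC is not the primary controller, a parent interrupt, along the
 * lines of the following sketch (register values are illustrative
 * only):
 *
 *	mpic: interrupt-controller@20a00 {
 *		compatible = "marvell,mpic";
 *		reg = <0x20a00 0x2d0>, <0x21870 0x58>;
 *		#interrupt-cells = <1>;
 *		interrupt-controller;
 *		msi-controller;
 *	};
 */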
IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);
