1/*
2 *  Support for C64x+ Megamodule Interrupt Controller
3 *
4 *  Copyright (C) 2010, 2011 Texas Instruments Incorporated
5 *  Contributed by: Mark Salter <msalter@redhat.com>
6 *
7 *  This program is free software; you can redistribute it and/or modify
8 *  it under the terms of the GNU General Public License version 2 as
9 *  published by the Free Software Foundation.
10 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/soc.h>
#include <asm/megamod-pic.h>
20
/* Number of 32-event combiner groups in the megamodule PIC */
#define NR_COMBINERS	4
/* Number of mux outputs: core priority interrupts 4-15 */
#define NR_MUX_OUTPUTS  12

/* Sentinel meaning "no megamodule event routed to this mux output" */
#define IRQ_UNMAPPED 0xffff
25
/*
 * Megamodule Interrupt Controller register layout.
 *
 * NOTE(review): field names and offsets should mirror the C64x+ megamodule
 * hardware spec (TI SPRU871) — confirm the reserved gaps against the
 * datasheet before modifying.  Only the fields commented below are used by
 * this driver.
 */
struct megamod_regs {
	u32	evtflag[8];
	u32	evtset[8];
	u32	evtclr[8];	/* write 1 to a bit to clear that pending event */
	u32	reserved0[8];
	u32	evtmask[8];	/* set bit = event masked (see mask_megamod) */
	u32	mevtflag[8];	/* masked (enabled) pending events per combiner */
	u32	expmask[8];
	u32	mexpflag[8];	/* masked pending exception events */
	u32	intmux_unused;
	u32	intmux[7];	/* event-to-core-priority routing, 4 x 8 bits each */
	u32	reserved1[8];
	u32	aegmux[2];
	u32	reserved2[14];
	u32	intxstat;
	u32	intxclr;
	u32	intdmask;
	u32	reserved3[13];
	u32	evtasrt;	/* write event number to assert it in software */
};
49
/* Driver state for one megamodule PIC instance */
struct megamod_pic {
	struct irq_domain *irqhost;		/* linear domain: one hwirq per event */
	struct megamod_regs __iomem *regs;	/* mapped controller registers */
	raw_spinlock_t lock;			/* serializes evtmask read-modify-write */

	/* hw mux mapping: event number routed to each core priority output,
	 * or IRQ_UNMAPPED if nothing is routed there */
	unsigned int output_to_irq[NR_MUX_OUTPUTS];
};
58
/* The single system-wide PIC instance, set up by megamod_pic_init() */
static struct megamod_pic *mm_pic;

/* Handler data for one chained combiner interrupt */
struct megamod_cascade_data {
	struct megamod_pic *pic;	/* owning PIC */
	int index;			/* combiner number, 0..NR_COMBINERS-1 */
};

static struct megamod_cascade_data cascade_data[NR_COMBINERS];
67
68static void mask_megamod(struct irq_data *data)
69{
70	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
71	irq_hw_number_t src = irqd_to_hwirq(data);
72	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
73
74	raw_spin_lock(&pic->lock);
75	soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
76	raw_spin_unlock(&pic->lock);
77}
78
79static void unmask_megamod(struct irq_data *data)
80{
81	struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
82	irq_hw_number_t src = irqd_to_hwirq(data);
83	u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
84
85	raw_spin_lock(&pic->lock);
86	soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
87	raw_spin_unlock(&pic->lock);
88}
89
/*
 * irq_chip for individual megamodule events.  Only mask/unmask are
 * needed; pending events are acked (EVTCLR) in the cascade handler.
 */
static struct irq_chip megamod_chip = {
	.name		= "megamod",
	.irq_mask	= mask_megamod,
	.irq_unmask	= unmask_megamod,
};
95
96static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
97{
98	struct megamod_cascade_data *cascade;
99	struct megamod_pic *pic;
100	u32 events;
101	int n, idx;
102
103	cascade = irq_desc_get_handler_data(desc);
104
105	pic = cascade->pic;
106	idx = cascade->index;
107
108	while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
109		n = __ffs(events);
110
111		irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);
112
113		soc_writel(1 << n, &pic->regs->evtclr[idx]);
114
115		generic_handle_irq(irq);
116	}
117}
118
119static int megamod_map(struct irq_domain *h, unsigned int virq,
120		       irq_hw_number_t hw)
121{
122	struct megamod_pic *pic = h->host_data;
123	int i;
124
125	/* We shouldn't see a hwirq which is muxed to core controller */
126	for (i = 0; i < NR_MUX_OUTPUTS; i++)
127		if (pic->output_to_irq[i] == hw)
128			return -1;
129
130	irq_set_chip_data(virq, pic);
131	irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);
132
133	/* Set default irq type */
134	irq_set_irq_type(virq, IRQ_TYPE_NONE);
135
136	return 0;
137}
138
/* Domain ops: DT cells are plain one-cell event numbers */
static const struct irq_domain_ops megamod_domain_ops = {
	.map	= megamod_map,
	.xlate	= irq_domain_xlate_onecell,
};
143
144static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
145{
146	int index, offset;
147	u32 val;
148
149	if (src < 0 || src >= (NR_COMBINERS * 32)) {
150		pic->output_to_irq[output] = IRQ_UNMAPPED;
151		return;
152	}
153
154	/* four mappings per mux register */
155	index = output / 4;
156	offset = (output & 3) * 8;
157
158	val = soc_readl(&pic->regs->intmux[index]);
159	val &= ~(0xff << offset);
160	val |= src << offset;
161	soc_writel(val, &pic->regs->intmux[index]);
162}
163
/*
 * Parse the MUX mapping, if one exists.
 *
 * The MUX map is an array of up to 12 cells; one for each usable core priority
 * interrupt. The value of a given cell is the megamodule interrupt source
 * which is to be MUXed to the output corresponding to the cell position
 * within the array. The first cell in the array corresponds to priority
 * 4 and the last (12th) cell corresponds to priority 15. The allowed
 * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
 * sources (0 - 3) are not allowed to be mapped through this property. They
 * are handled through the "interrupts" property. This allows us to use a
 * value of zero as a "do not map" placeholder.
 */
177static void __init parse_priority_map(struct megamod_pic *pic,
178				      int *mapping, int size)
179{
180	struct device_node *np = pic->irqhost->of_node;
181	const __be32 *map;
182	int i, maplen;
183	u32 val;
184
185	map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen);
186	if (map) {
187		maplen /= 4;
188		if (maplen > size)
189			maplen = size;
190
191		for (i = 0; i < maplen; i++) {
192			val = be32_to_cpup(map);
193			if (val && val >= 4)
194				mapping[i] = val;
195			++map;
196		}
197	}
198}
199
/*
 * Allocate and initialize the megamodule PIC described by @np: create
 * the linear irq domain, map the registers, parse the optional DT mux
 * map, install chained handlers for the combiner cascades, and finally
 * program the INTMUX routing.  Returns NULL on any failure.
 */
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	/* one hwirq per megamodule event */
	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		/* NOTE(review): the irq domain is not torn down on this
		 * path; harmless at early boot but worth confirming. */
		goto error_free;
	}

	/* Initialize MUX map: start with everything unmapped */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {
		struct irq_data *irq_data;
		irq_hw_number_t hwirq;

		/* cell i of "interrupts" is the parent irq for combiner i */
		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		irq_data = irq_get_irq_data(irq);
		if (!irq_data) {
			pr_err("%s: combiner-%d no irq_data for virq %d!\n",
			       np->full_name, i, irq);
			continue;
		}

		hwirq = irq_data->hwirq;

		/*
		 * Check that device tree provided something in the range
		 * of the core priority interrupts (4 - 15).
		 */
		if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d core irq %ld out of range!\n",
			       np->full_name, i, hwirq);
			continue;
		}

		/* record the mapping: combiner i drives priority hwirq */
		mapping[hwirq - 4] = i;

		pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
			 np->full_name, i, hwirq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_handler_data(irq, &cascade_data[i]);
		irq_set_chained_handler(irq, megamod_irq_cascade);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}
305
306/*
307 * Return next active event after ACK'ing it.
308 * Return -1 if no events active.
309 */
310static int get_exception(void)
311{
312	int i, bit;
313	u32 mask;
314
315	for (i = 0; i < NR_COMBINERS; i++) {
316		mask = soc_readl(&mm_pic->regs->mexpflag[i]);
317		if (mask) {
318			bit = __ffs(mask);
319			soc_writel(1 << bit, &mm_pic->regs->evtclr[i]);
320			return (i * 32) + bit;
321		}
322	}
323	return -1;
324}
325
/* Software-assert megamodule event @val via the EVTASRT register. */
static void assert_event(unsigned int val)
{
	soc_writel(val, &mm_pic->regs->evtasrt);
}
330
331void __init megamod_pic_init(void)
332{
333	struct device_node *np;
334
335	np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
336	if (!np)
337		return;
338
339	mm_pic = init_megamod_pic(np);
340	of_node_put(np);
341
342	soc_ops.get_exception = get_exception;
343	soc_ops.assert_event = assert_event;
344
345	return;
346}
347