/*
 * OMAP2+ common Power & Reset Management (PRM) IP block functions
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * For historical reasons, the API used to configure the PRM
 * interrupt handler refers to it as the "PRCM interrupt."  The
 * underlying registers are located in the PRM on OMAP3/4.
 *
 * XXX This code should eventually be moved to a PRM driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>

#include "soc.h"
#include "prm2xxx_3xxx.h"
#include "prm2xxx.h"
#include "prm3xxx.h"
#include "prm44xx.h"
#include "common.h"
#include "clock.h"

/*
 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
 * XXX this is technically not needed, since
 * omap_prcm_register_chain_handler() could allocate this based on the
 * actual amount of memory needed for the SoC
 */
#define OMAP_PRCM_MAX_NR_PENDING_REG		2

/*
 * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
 * by the PRCM interrupt handler code.  There will be one 'chip' per
 * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair.  (So OMAP3 will have
 * one "chip" and OMAP4 will have two.)
 */
static struct irq_chip_generic **prcm_irq_chips;

/*
 * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
 * is currently running on.  Defined and passed by initialization code
 * that calls omap_prcm_register_chain_handler().
 */
static struct omap_prcm_irq_setup *prcm_irq_setup;

/* prm_base: base virtual address of the PRM IP block */
void __iomem *prm_base;

/* prm_features: bitmask of PRM_HAS_* flags for optional PRM features */
u16 prm_features;

/*
 * prm_ll_data: function pointers to SoC-specific implementations of
 * common PRM functions
 */
static struct prm_ll_data null_prm_ll_data;
static struct prm_ll_data *prm_ll_data = &null_prm_ll_data;

/* Private functions */

/*
 * Move priority events from the events array to the priority_events array
 */
static void omap_prcm_events_filter_priority(unsigned long *events,
	unsigned long *priority_events)
{
	int i;

	for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
		priority_events[i] =
			events[i] & prcm_irq_setup->priority_mask[i];
		events[i] ^= priority_events[i];
	}
}

/*
 * PRCM Interrupt Handler
 *
 * This is a common handler for the OMAP PRCM interrupts.  Pending
 * interrupts are detected by a call to the SoC-specific
 * ->read_pending_irqs() hook and dispatched accordingly.  Clearing of
 * the wakeup events should be done by the SoC-specific individual
 * handlers.
 */
static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virtirq;
	int nr_irq = prcm_irq_setup->nr_regs * 32;

	/*
	 * If we are suspended, mask all interrupts at the PRCM level;
	 * this does not ack them, and they will remain pending until we
	 * re-enable the interrupts, at which point the
	 * omap_prcm_irq_handler will be executed again.  The
	 * _save_and_clear_irqen() function must ensure that the PRM
	 * write to disable all IRQs has reached the PRM before
	 * returning, or spurious PRCM interrupts may occur during
	 * suspend.
	 */
	if (prcm_irq_setup->suspended) {
		prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
		prcm_irq_setup->suspend_save_flag = true;
	}

	/*
	 * Loop until all pending irqs are handled, since
	 * generic_handle_irq() can cause new irqs to arrive
	 */
	while (!prcm_irq_setup->suspended) {
		prcm_irq_setup->read_pending_irqs(pending);

		/* If no bits are set, all pending IRQs have been handled */
		if (find_first_bit(pending, nr_irq) >= nr_irq)
			break;

		omap_prcm_events_filter_priority(pending, priority_pending);

		/*
		 * Loop over all currently pending irqs so that new irqs
		 * cannot starve previously pending irqs
		 */

		/* Serve priority events first */
		for_each_set_bit(virtirq, priority_pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);

		/* Serve normal events next */
		for_each_set_bit(virtirq, pending, nr_irq)
			generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
	}
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
	chip->irq_unmask(&desc->irq_data);

	prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
}

/* Public functions */

/**
 * omap_prcm_event_to_irq - given a PRCM event name, returns the
 * corresponding IRQ on which the handler should be registered
 * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
 *
 * Returns the Linux internal IRQ ID corresponding to @name upon success,
 * or -ENOENT upon failure.
 */
int omap_prcm_event_to_irq(const char *name)
{
	int i;

	if (!prcm_irq_setup || !name)
		return -ENOENT;

	for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
		if (!strcmp(prcm_irq_setup->irqs[i].name, name))
			return prcm_irq_setup->base_irq +
				prcm_irq_setup->irqs[i].offset;

	return -ENOENT;
}
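
/*
 * Usage sketch (illustrative only): code that wants to handle the "io"
 * PRCM event could map the event name to its Linux IRQ and register a
 * handler for it.  The handler and name string below are assumptions
 * for the sake of the example, not definitions from this file:
 *
 *	int irq = omap_prcm_event_to_irq("io");
 *
 *	if (irq >= 0)
 *		ret = request_irq(irq, my_io_event_handler, 0,
 *				  "prcm_io", NULL);
 */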

/**
 * omap_prcm_irq_cleanup - reverse the memory allocations and other steps
 * done by omap_prcm_register_chain_handler()
 *
 * No return value.
 */
void omap_prcm_irq_cleanup(void)
{
	int i;

	if (!prcm_irq_setup) {
		pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
		return;
	}

	if (prcm_irq_chips) {
		for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
			if (prcm_irq_chips[i])
				irq_remove_generic_chip(prcm_irq_chips[i],
					0xffffffff, 0, 0);
			prcm_irq_chips[i] = NULL;
		}
		kfree(prcm_irq_chips);
		prcm_irq_chips = NULL;
	}

	kfree(prcm_irq_setup->saved_mask);
	prcm_irq_setup->saved_mask = NULL;

	kfree(prcm_irq_setup->priority_mask);
	prcm_irq_setup->priority_mask = NULL;

	irq_set_chained_handler(prcm_irq_setup->irq, NULL);

	if (prcm_irq_setup->base_irq > 0)
		irq_free_descs(prcm_irq_setup->base_irq,
			prcm_irq_setup->nr_regs * 32);
	prcm_irq_setup->base_irq = 0;
}

/**
 * omap_prcm_irq_prepare - flag that the system is entering suspend
 *
 * Tells the chained PRCM interrupt handler that a suspend transition
 * is in progress, so that any PRCM interrupt taken from this point on
 * saves and masks the PRM IRQ enable bits instead of dispatching events.
 */
void omap_prcm_irq_prepare(void)
{
	prcm_irq_setup->suspended = true;
}

/**
 * omap_prcm_irq_complete - flag that the system has left suspend
 *
 * Clears the suspend flag and, if the chained handler masked the PRM
 * IRQ enable bits during suspend, restores them so that any events
 * that occurred while suspended are serviced immediately.
 */
void omap_prcm_irq_complete(void)
{
	prcm_irq_setup->suspended = false;

	/* If we have not saved the masks, do not attempt to restore */
	if (!prcm_irq_setup->suspend_save_flag)
		return;

	prcm_irq_setup->suspend_save_flag = false;

	/*
	 * Re-enable all masked PRCM irq sources; this causes the PRCM
	 * interrupt to fire immediately if the events were masked
	 * previously in the chain handler
	 */
	prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
}
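
/*
 * Usage sketch (illustrative, not code from this file): platform
 * suspend code is expected to bracket the suspend transition with
 * these two calls, for example from its platform_suspend_ops
 * callbacks.  The function names below are hypothetical; only the
 * call order is significant:
 *
 *	static int my_pm_prepare(void)
 *	{
 *		omap_prcm_irq_prepare();
 *		return 0;
 *	}
 *
 *	static void my_pm_finish(void)
 *	{
 *		omap_prcm_irq_complete();
 *	}
 */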

/**
 * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
 * handler based on provided parameters
 * @irq_setup: hardware data about the underlying PRM/PRCM
 *
 * Set up the PRCM chained interrupt handler on the PRCM IRQ.  Sets up
 * one generic IRQ chip per PRM interrupt status/enable register pair.
 * Returns 0 upon success, -EINVAL if called twice or if invalid
 * arguments are passed, or -ENOMEM on any other error.
 */
int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
{
	int nr_regs;
	u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
	int offset, i;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	if (!irq_setup)
		return -EINVAL;

	nr_regs = irq_setup->nr_regs;

	if (prcm_irq_setup) {
		pr_err("PRCM: already initialized; won't reinitialize\n");
		return -EINVAL;
	}

	if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
		pr_err("PRCM: nr_regs too large\n");
		return -EINVAL;
	}

	prcm_irq_setup = irq_setup;

	prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
	prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
	prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
		GFP_KERNEL);

	if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
	    !prcm_irq_setup->priority_mask) {
		pr_err("PRCM: kzalloc failed\n");
		goto err;
	}

	memset(mask, 0, sizeof(mask));

	for (i = 0; i < irq_setup->nr_irqs; i++) {
		offset = irq_setup->irqs[i].offset;
		mask[offset >> 5] |= 1 << (offset & 0x1f);
		if (irq_setup->irqs[i].priority)
			irq_setup->priority_mask[offset >> 5] |=
				1 << (offset & 0x1f);
	}

	irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);

	irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
		0);

	if (irq_setup->base_irq < 0) {
		pr_err("PRCM: failed to allocate irq descs: %d\n",
			irq_setup->base_irq);
		goto err;
	}

	for (i = 0; i < irq_setup->nr_regs; i++) {
		gc = irq_alloc_generic_chip("PRCM", 1,
			irq_setup->base_irq + i * 32, prm_base,
			handle_level_irq);

		if (!gc) {
			pr_err("PRCM: failed to allocate generic chip\n");
			goto err;
		}
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;

		ct->regs.ack = irq_setup->ack + i * 4;
		ct->regs.mask = irq_setup->mask + i * 4;

		irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
		prcm_irq_chips[i] = gc;
	}

	if (of_have_populated_dt()) {
		int irq = omap_prcm_event_to_irq("io");

		omap_pcs_legacy_init(irq, irq_setup->reconfigure_io_chain);
	}

	return 0;

err:
	omap_prcm_irq_cleanup();
	return -ENOMEM;
}
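
/*
 * Illustrative sketch of what a caller passes in (register offsets,
 * IRQ number and callback names below are placeholders, not
 * definitions from this file):
 *
 *	static struct omap_prcm_irq my_prcm_irqs[] = {
 *		{ .name = "wkup", .offset = 0, .priority = false },
 *		{ .name = "io",   .offset = 9, .priority = true },
 *	};
 *
 *	static struct omap_prcm_irq_setup my_prcm_irq_setup = {
 *		.ack			= MY_PRM_IRQSTATUS_MPU_OFFSET,
 *		.mask			= MY_PRM_IRQENABLE_MPU_OFFSET,
 *		.nr_regs		= 1,
 *		.irqs			= my_prcm_irqs,
 *		.nr_irqs		= ARRAY_SIZE(my_prcm_irqs),
 *		.irq			= MY_PRCM_MPU_IRQ,
 *		.read_pending_irqs	= &my_prm_read_pending_irqs,
 *		.ocp_barrier		= &my_prm_ocp_barrier,
 *		.save_and_clear_irqen	= &my_prm_save_and_clear_irqen,
 *		.restore_irqen		= &my_prm_restore_irqen,
 *	};
 *
 *	...
 *	omap_prcm_register_chain_handler(&my_prcm_irq_setup);
 */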

/**
 * omap2_set_globals_prm - set the PRM base address (for early use)
 * @prm: PRM base virtual address
 *
 * XXX Will be replaced when the PRM/CM drivers are completed.
 */
void __init omap2_set_globals_prm(void __iomem *prm)
{
	prm_base = prm;
}

/**
 * prm_read_reset_sources - return the sources of the SoC's last reset
 *
 * Return a u32 bitmask representing the reset sources that caused the
 * SoC to reset.  The low-level per-SoC functions called by this
 * function remap the SoC-specific reset source bits into an
 * OMAP-common set of reset source bits, defined in
 * arch/arm/mach-omap2/prm.h.  Returns the standardized reset source
 * u32 bitmask from the hardware upon success, or returns (1 <<
 * OMAP_UNKNOWN_RST_SRC_ID_SHIFT) if no low-level read_reset_sources()
 * function was registered.
 */
u32 prm_read_reset_sources(void)
{
	u32 ret = 1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT;

	if (prm_ll_data->read_reset_sources)
		ret = prm_ll_data->read_reset_sources();
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined for reset sources\n", __func__);

	return ret;
}
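
/*
 * Example (illustrative): code that needs to react to a particular
 * reset cause can test the corresponding standardized bit.  The shift
 * used below is assumed to be one of the common reset source IDs from
 * prm.h; callers should only rely on IDs defined there:
 *
 *	if (prm_read_reset_sources() & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT))
 *		pr_info("last reset was caused by the MPU watchdog\n");
 */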

/**
 * prm_was_any_context_lost_old - was device context lost? (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Return true if any bits were set in the *_CONTEXT_* register
 * identified by (@part, @inst, @idx), which means that some context
 * was lost for that module; otherwise, return false.  XXX Deprecated;
 * callers need to use a less-SoC-dependent way to identify hardware
 * IP blocks.
 */
bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
{
	bool ret = true;

	if (prm_ll_data->was_any_context_lost_old)
		ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);

	return ret;
}
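
/*
 * Illustrative usage of the deprecated context-loss API (the partition,
 * instance and register offsets are placeholders): a caller that
 * restores a module's registers after an off-mode transition might do:
 *
 *	if (prm_was_any_context_lost_old(part, inst, idx)) {
 *		my_module_restore_context();
 *		prm_clear_context_loss_flags_old(part, inst, idx);
 *	}
 *
 * my_module_restore_context() is hypothetical; the point is that the
 * context-loss flags should be cleared once the loss has been handled.
 */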

/**
 * prm_clear_context_loss_flags_old - clear context loss flags (old API)
 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
 * @idx: CONTEXT register offset
 *
 * Clear hardware context loss bits for the module identified by
 * (@part, @inst, @idx).  No return value.  XXX Deprecated; callers
 * need to use a less-SoC-dependent way to identify hardware IP
 * blocks.
 */
void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx)
{
	if (prm_ll_data->clear_context_loss_flags_old)
		prm_ll_data->clear_context_loss_flags_old(part, inst, idx);
	else
		WARN_ONCE(1, "prm: %s: no mapping function defined\n",
			  __func__);
}

/**
 * prm_register - register per-SoC low-level data with the PRM
 * @pld: low-level per-SoC OMAP PRM data & function pointers to register
 *
 * Register per-SoC low-level OMAP PRM data and function pointers with
 * the OMAP PRM common interface.  The caller must keep the data
 * pointed to by @pld valid until it calls prm_unregister() and
 * that call returns successfully.  Returns 0 upon success, -EINVAL
 * if @pld is NULL, or -EEXIST if prm_register() has already been
 * called without an intervening prm_unregister().
 */
int prm_register(struct prm_ll_data *pld)
{
	if (!pld)
		return -EINVAL;

	if (prm_ll_data != &null_prm_ll_data)
		return -EEXIST;

	prm_ll_data = pld;

	return 0;
}
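
/*
 * Registration sketch (names are illustrative): an SoC-specific PRM
 * file is expected to fill in a struct prm_ll_data with its low-level
 * implementations and register it from its early init code:
 *
 *	static struct prm_ll_data my_prm_ll_data = {
 *		.read_reset_sources	= &my_prm_read_reset_sources,
 *		.was_any_context_lost_old = &my_prm_was_any_context_lost_old,
 *		.clear_context_loss_flags_old =
 *			&my_prm_clear_context_loss_flags_old,
 *	};
 *
 *	int __init my_prm_init(void)
 *	{
 *		return prm_register(&my_prm_ll_data);
 *	}
 *
 * Only the hooks actually implemented for the SoC need to be filled
 * in; the rest fall back to the WARN_ONCE() stubs above.
 */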

/**
 * prm_unregister - unregister per-SoC low-level data & function pointers
 * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister
 *
 * Unregister per-SoC low-level OMAP PRM data and function pointers
 * that were previously registered with prm_register().  The
 * caller may not destroy any of the data pointed to by @pld until
 * this function returns successfully.  Returns 0 upon success, or
 * -EINVAL if @pld is NULL or if @pld does not match the struct
 * prm_ll_data * previously registered by prm_register().
 */
int prm_unregister(struct prm_ll_data *pld)
{
	if (!pld || prm_ll_data != pld)
		return -EINVAL;

	prm_ll_data = &null_prm_ll_data;

	return 0;
}

static const struct of_device_id omap_prcm_dt_match_table[] = {
	{ .compatible = "ti,am3-prcm" },
	{ .compatible = "ti,am3-scrm" },
	{ .compatible = "ti,am4-prcm" },
	{ .compatible = "ti,am4-scrm" },
	{ .compatible = "ti,omap2-prcm" },
	{ .compatible = "ti,omap2-scrm" },
	{ .compatible = "ti,omap3-prm" },
	{ .compatible = "ti,omap3-cm" },
	{ .compatible = "ti,omap3-scrm" },
	{ .compatible = "ti,omap4-cm1" },
	{ .compatible = "ti,omap4-prm" },
	{ .compatible = "ti,omap4-cm2" },
	{ .compatible = "ti,omap4-scrm" },
	{ .compatible = "ti,omap5-prm" },
	{ .compatible = "ti,omap5-cm-core-aon" },
	{ .compatible = "ti,omap5-scrm" },
	{ .compatible = "ti,omap5-cm-core" },
	{ .compatible = "ti,dra7-prm" },
	{ .compatible = "ti,dra7-cm-core-aon" },
	{ .compatible = "ti,dra7-cm-core" },
	{ }
};

/*
 * memmap_dummy_ck: dummy clock passed to the omap2_clk_readl/writel()
 * helpers below so that register accesses for the clock providers set
 * up by of_prcm_init() go through memmap-indexed addressing
 */
static struct clk_hw_omap memmap_dummy_ck = {
	.flags = MEMMAP_ADDRESSING,
};

static u32 prm_clk_readl(void __iomem *reg)
{
	return omap2_clk_readl(&memmap_dummy_ck, reg);
}

static void prm_clk_writel(u32 val, void __iomem *reg)
{
	omap2_clk_writel(val, &memmap_dummy_ck, reg);
}

static struct ti_clk_ll_ops omap_clk_ll_ops = {
	.clk_readl = prm_clk_readl,
	.clk_writel = prm_clk_writel,
};

/**
 * of_prcm_init - map DT-provided PRCM instances and register their
 * clock providers
 *
 * Iterates over the device tree nodes matching
 * omap_prcm_dt_match_table, maps each instance's register space and
 * registers it as a TI clock provider.  Always returns 0.
 */
int __init of_prcm_init(void)
{
	struct device_node *np;
	void __iomem *mem;
	int memmap_index = 0;

	ti_clk_ll_ops = &omap_clk_ll_ops;

	for_each_matching_node(np, omap_prcm_dt_match_table) {
		mem = of_iomap(np, 0);
		clk_memmaps[memmap_index] = mem;
		ti_dt_clk_init_provider(np, memmap_index);
		memmap_index++;
	}

	return 0;
}
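
/*
 * Illustrative device tree fragment (the unit address and reg values
 * are examples only; consult the SoC dtsi for real values) showing the
 * kind of node that of_prcm_init() matches and maps:
 *
 *	prm: prm@4a306000 {
 *		compatible = "ti,omap4-prm";
 *		reg = <0x4a306000 0x3000>;
 *	};
 *
 * Clock nodes under such an instance are then initialized through
 * ti_dt_clk_init_provider() using the corresponding memmap index.
 */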

static int __init prm_late_init(void)
{
	if (prm_ll_data->late_init)
		return prm_ll_data->late_init();
	return 0;
}
subsys_initcall(prm_late_init);
