/*
 *  linux/arch/arm/plat-pxa/gpio.c
 *
 *  Generic PXA GPIO handling
 *
 *  Author:	Nicolas Pitre
 *  Created:	Jun 15, 2001
 *  Copyright:	MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>

/*
 * We handle the GPIOs by banks, each of which covers up to 32 GPIOs
 * with one set of registers. The register offsets are organized below:
 *
 *           GPLR    GPDR    GPSR    GPCR    GRER    GFER    GEDR
 * BANK 0 - 0x0000  0x000C  0x0018  0x0024  0x0030  0x003C  0x0048
 * BANK 1 - 0x0004  0x0010  0x001C  0x0028  0x0034  0x0040  0x004C
 * BANK 2 - 0x0008  0x0014  0x0020  0x002C  0x0038  0x0044  0x0050
 *
 * BANK 3 - 0x0100  0x010C  0x0118  0x0124  0x0130  0x013C  0x0148
 * BANK 4 - 0x0104  0x0110  0x011C  0x0128  0x0134  0x0140  0x014C
 * BANK 5 - 0x0108  0x0114  0x0120  0x012C  0x0138  0x0144  0x0150
 *
 * NOTE:
 *   BANK 3 is only available on PXA27x and later processors.
 *   BANKs 4 and 5 are only available on PXA935.
 */

#define GPLR_OFFSET	0x00
#define GPDR_OFFSET	0x0C
#define GPSR_OFFSET	0x18
#define GPCR_OFFSET	0x24
#define GRER_OFFSET	0x30
#define GFER_OFFSET	0x3C
#define GEDR_OFFSET	0x48
#define GAFR_OFFSET	0x54
#define ED_MASK_OFFSET	0x9C	/* GPIO edge detection for AP side */

#define BANK_OFF(n)	(((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
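
/*
 * Worked example (illustrative): for bank 4, BANK_OFF(4) = 0x100 +
 * ((4 - 3) << 2) = 0x104, which matches the table above. Each register
 * is then reached by adding its per-bank offset, e.g. the bank 4 GPDR
 * lives at gpio_reg_base + 0x104 + GPDR_OFFSET = gpio_reg_base + 0x110.
 */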

int pxa_last_gpio;

struct pxa_gpio_chip {
	struct gpio_chip chip;
	void __iomem	*regbase;
	char label[10];

	unsigned long	irq_mask;
	unsigned long	irq_edge_rise;
	unsigned long	irq_edge_fall;

#ifdef CONFIG_PM
	unsigned long	saved_gplr;
	unsigned long	saved_gpdr;
	unsigned long	saved_grer;
	unsigned long	saved_gfer;
#endif
};

enum {
	PXA25X_GPIO = 0,
	PXA26X_GPIO,
	PXA27X_GPIO,
	PXA3XX_GPIO,
	PXA93X_GPIO,
	MMP_GPIO = 0x10,
	MMP2_GPIO,
};

static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
static int gpio_type;
static void __iomem *gpio_reg_base;

#define for_each_gpio_chip(i, c)			\
	for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
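
/*
 * Usage sketch (mirrors the probe path further down in this file):
 *
 *	struct pxa_gpio_chip *c;
 *	int gpio;
 *
 *	for_each_gpio_chip(gpio, c)
 *		writel_relaxed(0, c->regbase + GFER_OFFSET);
 *
 * 'gpio' steps through the first GPIO number of each bank (0, 32, ...)
 * while 'c' walks the per-bank pxa_gpio_chip array.
 */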

static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
{
	return container_of(c, struct pxa_gpio_chip, chip)->regbase;
}

static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
{
	return &pxa_gpio_chips[gpio_to_bank(gpio)];
}

static inline int gpio_is_pxa_type(int type)
{
	return (type & MMP_GPIO) == 0;
}

static inline int gpio_is_mmp_type(int type)
{
	return (type & MMP_GPIO) != 0;
}
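
/*
 * Note: MMP_GPIO is deliberately 0x10 in the enum above, so it doubles as
 * a class flag: the PXA types (0x00..0x04) have that bit clear while
 * MMP_GPIO/MMP2_GPIO (0x10/0x11) have it set, which is all the two
 * helpers above need to test.
 */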

/* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
 * as well as their Alternate Function value being '1' for GPIO in GAFRx.
 */
static inline int __gpio_is_inverted(int gpio)
{
	if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
		return 1;
	return 0;
}

/*
 * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
 * function of a GPIO, and GPDRx cannot be altered once configured. It
 * is attributed as "occupied" here (I know this terminology isn't
 * accurate, you are welcome to propose a better one :-)
 */
static inline int __gpio_is_occupied(unsigned gpio)
{
	struct pxa_gpio_chip *pxachip;
	void __iomem *base;
	unsigned long gafr = 0, gpdr = 0;
	int ret, af = 0, dir = 0;

	pxachip = gpio_to_pxachip(gpio);
	base = gpio_chip_base(&pxachip->chip);
	gpdr = readl_relaxed(base + GPDR_OFFSET);

	switch (gpio_type) {
	case PXA25X_GPIO:
	case PXA26X_GPIO:
	case PXA27X_GPIO:
		gafr = readl_relaxed(base + GAFR_OFFSET);
		af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
		dir = gpdr & GPIO_bit(gpio);

		if (__gpio_is_inverted(gpio))
			ret = (af != 1) || (dir == 0);
		else
			ret = (af != 0) || (dir != 0);
		break;
	default:
		ret = gpdr & GPIO_bit(gpio);
		break;
	}
	return ret;
}

#ifdef CONFIG_ARCH_PXA
static inline int __pxa_gpio_to_irq(int gpio)
{
	if (gpio_is_pxa_type(gpio_type))
		return PXA_GPIO_TO_IRQ(gpio);
	return -1;
}

static inline int __pxa_irq_to_gpio(int irq)
{
	if (gpio_is_pxa_type(gpio_type))
		return irq - PXA_GPIO_TO_IRQ(0);
	return -1;
}
#else
static inline int __pxa_gpio_to_irq(int gpio) { return -1; }
static inline int __pxa_irq_to_gpio(int irq) { return -1; }
#endif

#ifdef CONFIG_ARCH_MMP
static inline int __mmp_gpio_to_irq(int gpio)
{
	if (gpio_is_mmp_type(gpio_type))
		return MMP_GPIO_TO_IRQ(gpio);
	return -1;
}

static inline int __mmp_irq_to_gpio(int irq)
{
	if (gpio_is_mmp_type(gpio_type))
		return irq - MMP_GPIO_TO_IRQ(0);
	return -1;
}
#else
static inline int __mmp_gpio_to_irq(int gpio) { return -1; }
static inline int __mmp_irq_to_gpio(int irq) { return -1; }
#endif

static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	int gpio, ret;

	gpio = chip->base + offset;
	ret = __pxa_gpio_to_irq(gpio);
	if (ret >= 0)
		return ret;
	return __mmp_gpio_to_irq(gpio);
}

int pxa_irq_to_gpio(int irq)
{
	int ret;

	ret = __pxa_irq_to_gpio(irq);
	if (ret >= 0)
		return ret;
	return __mmp_irq_to_gpio(irq);
}

static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	void __iomem *base = gpio_chip_base(chip);
	uint32_t value, mask = 1 << offset;
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);

	value = readl_relaxed(base + GPDR_OFFSET);
	if (__gpio_is_inverted(chip->base + offset))
		value |= mask;
	else
		value &= ~mask;
	writel_relaxed(value, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

static int pxa_gpio_direction_output(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	void __iomem *base = gpio_chip_base(chip);
	uint32_t tmp, mask = 1 << offset;
	unsigned long flags;

	writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));

	spin_lock_irqsave(&gpio_lock, flags);

	tmp = readl_relaxed(base + GPDR_OFFSET);
	if (__gpio_is_inverted(chip->base + offset))
		tmp &= ~mask;
	else
		tmp |= mask;
	writel_relaxed(tmp, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	return readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
}

static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	writel_relaxed(1 << offset, gpio_chip_base(chip) +
				(value ? GPSR_OFFSET : GPCR_OFFSET));
}
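
/*
 * Consumer-side sketch (illustrative, not part of this driver): once the
 * chips are registered, the accessors above are reached through the
 * generic gpiolib calls, which land in pxa_gpio_direction_output() and
 * pxa_gpio_set(). For a hypothetical GPIO 9 driven as an output:
 *
 *	if (gpio_request(9, "my-led") == 0) {
 *		gpio_direction_output(9, 1);
 *		gpio_set_value(9, 0);
 *		gpio_free(9);
 *	}
 */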

static int __devinit pxa_init_gpio_chip(int gpio_end)
{
	int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
	struct pxa_gpio_chip *chips;

	chips = kzalloc(nbanks * sizeof(struct pxa_gpio_chip), GFP_KERNEL);
	if (chips == NULL) {
		pr_err("%s: failed to allocate GPIO chips\n", __func__);
		return -ENOMEM;
	}

	for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
		struct gpio_chip *c = &chips[i].chip;

		sprintf(chips[i].label, "gpio-%d", i);
		chips[i].regbase = gpio_reg_base + BANK_OFF(i);

		c->base  = gpio;
		c->label = chips[i].label;

		c->direction_input  = pxa_gpio_direction_input;
		c->direction_output = pxa_gpio_direction_output;
		c->get = pxa_gpio_get;
		c->set = pxa_gpio_set;
		c->to_irq = pxa_gpio_to_irq;

		/* number of GPIOs on last bank may be less than 32 */
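		/* (e.g. with gpio_end = 120 on PXA27x the last bank starts at
		 * GPIO 96 and exposes 120 - 96 + 1 = 25 lines)
		 */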
		c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
		gpiochip_add(c);
	}
	pxa_gpio_chips = chips;
	return 0;
}

/* Update only those GRERx and GFERx edge detection register bits if those
 * bits are set in c->irq_mask
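 *
 * (Illustrative: with irq_mask = 0x5, only bits 0 and 2 of GRER/GFER are
 * rewritten from irq_edge_rise/irq_edge_fall; all other bits keep the
 * value already latched in the registers.)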
 */
static inline void update_edge_detect(struct pxa_gpio_chip *c)
{
	uint32_t grer, gfer;

	grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
	gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
	grer |= c->irq_edge_rise & c->irq_mask;
	gfer |= c->irq_edge_fall & c->irq_mask;
	writel_relaxed(grer, c->regbase + GRER_OFFSET);
	writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	struct pxa_gpio_chip *c;
	int gpio = pxa_irq_to_gpio(d->irq);
	unsigned long gpdr, mask = GPIO_bit(gpio);

	c = gpio_to_pxachip(gpio);

	if (type == IRQ_TYPE_PROBE) {
		/* Don't mess with enabled GPIOs using preconfigured edges or
		 * GPIOs set to alternate function or to output during probe
		 */
		if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
			return 0;

		if (__gpio_is_occupied(gpio))
			return 0;

		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);

	if (__gpio_is_inverted(gpio))
		writel_relaxed(gpdr | mask,  c->regbase + GPDR_OFFSET);
	else
		writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);

	if (type & IRQ_TYPE_EDGE_RISING)
		c->irq_edge_rise |= mask;
	else
		c->irq_edge_rise &= ~mask;

	if (type & IRQ_TYPE_EDGE_FALLING)
		c->irq_edge_fall |= mask;
	else
		c->irq_edge_fall &= ~mask;

	update_edge_detect(c);

	pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
		((type & IRQ_TYPE_EDGE_RISING)  ? " rising"  : ""),
		((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
	return 0;
}

static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct pxa_gpio_chip *c;
	int loop, gpio, gpio_base, n;
	unsigned long gedr;

	do {
		loop = 0;
		for_each_gpio_chip(gpio, c) {
			gpio_base = c->chip.base;

			gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
			gedr = gedr & c->irq_mask;
			writel_relaxed(gedr, c->regbase + GEDR_OFFSET);

			n = find_first_bit(&gedr, BITS_PER_LONG);
			while (n < BITS_PER_LONG) {
				loop = 1;

				generic_handle_irq(gpio_to_irq(gpio_base + n));
				n = find_next_bit(&gedr, BITS_PER_LONG, n + 1);
			}
		}
	} while (loop);
}

static void pxa_ack_muxed_gpio(struct irq_data *d)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

	writel_relaxed(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}

static void pxa_mask_muxed_gpio(struct irq_data *d)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
	uint32_t grer, gfer;

	c->irq_mask &= ~GPIO_bit(gpio);

	grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
	gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
	writel_relaxed(grer, c->regbase + GRER_OFFSET);
	writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

	c->irq_mask |= GPIO_bit(gpio);
	update_edge_detect(c);
}

static struct irq_chip pxa_muxed_gpio_chip = {
	.name		= "GPIO",
	.irq_ack	= pxa_ack_muxed_gpio,
	.irq_mask	= pxa_mask_muxed_gpio,
	.irq_unmask	= pxa_unmask_muxed_gpio,
	.irq_set_type	= pxa_gpio_irq_type,
};
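
/*
 * Driver-side sketch (illustrative): a peripheral driver would normally
 * hook one of these muxed GPIO interrupts through the generic IRQ API,
 * letting pxa_gpio_irq_type() program the edge registers. For a
 * hypothetical handler my_handler and cookie my_data:
 *
 *	ret = request_irq(gpio_to_irq(gpio), my_handler,
 *			  IRQF_TRIGGER_RISING, "my-device", my_data);
 */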

static int pxa_gpio_nums(void)
{
	int count = 0;

#ifdef CONFIG_ARCH_PXA
	if (cpu_is_pxa25x()) {
#ifdef CONFIG_CPU_PXA26x
		count = 89;
		gpio_type = PXA26X_GPIO;
#elif defined(CONFIG_PXA25x)
		count = 84;
		gpio_type = PXA25X_GPIO;
#endif /* CONFIG_CPU_PXA26x */
	} else if (cpu_is_pxa27x()) {
		count = 120;
		gpio_type = PXA27X_GPIO;
	} else if (cpu_is_pxa93x() || cpu_is_pxa95x()) {
		count = 191;
		gpio_type = PXA93X_GPIO;
	} else if (cpu_is_pxa3xx()) {
		count = 127;
		gpio_type = PXA3XX_GPIO;
	}
#endif /* CONFIG_ARCH_PXA */

#ifdef CONFIG_ARCH_MMP
	if (cpu_is_pxa168() || cpu_is_pxa910()) {
		count = 127;
		gpio_type = MMP_GPIO;
	} else if (cpu_is_mmp2()) {
		count = 191;
		gpio_type = MMP2_GPIO;
	}
#endif /* CONFIG_ARCH_MMP */
	return count;
}

static int __devinit pxa_gpio_probe(struct platform_device *pdev)
{
	struct pxa_gpio_chip *c;
	struct resource *res;
	struct clk *clk;
	int gpio, irq, ret;
	int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;

	pxa_last_gpio = pxa_gpio_nums();
	if (!pxa_last_gpio)
		return -EINVAL;

	irq0 = platform_get_irq_byname(pdev, "gpio0");
	irq1 = platform_get_irq_byname(pdev, "gpio1");
	irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
	if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
		|| (irq_mux <= 0))
		return -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	gpio_reg_base = ioremap(res->start, resource_size(res));
	if (!gpio_reg_base)
		return -EINVAL;

	if (irq0 > 0)
		gpio_offset = 2;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "Error %ld getting gpio clock\n",
			PTR_ERR(clk));
		iounmap(gpio_reg_base);
		return PTR_ERR(clk);
	}
	ret = clk_prepare(clk);
	if (ret) {
		clk_put(clk);
		iounmap(gpio_reg_base);
		return ret;
	}
	ret = clk_enable(clk);
	if (ret) {
		clk_unprepare(clk);
		clk_put(clk);
		iounmap(gpio_reg_base);
		return ret;
	}

	/* Initialize GPIO chips */
	pxa_init_gpio_chip(pxa_last_gpio);

	/* clear all GPIO edge detects */
	for_each_gpio_chip(gpio, c) {
		writel_relaxed(0, c->regbase + GFER_OFFSET);
		writel_relaxed(0, c->regbase + GRER_OFFSET);
		writel_relaxed(~0, c->regbase + GEDR_OFFSET);
		/* unmask GPIO edge detect for AP side */
		if (gpio_is_mmp_type(gpio_type))
			writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
	}

#ifdef CONFIG_ARCH_PXA
	irq = gpio_to_irq(0);
	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
				 handle_edge_irq);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);

	irq = gpio_to_irq(1);
	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
				 handle_edge_irq);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
#endif

	for (irq = gpio_to_irq(gpio_offset);
		irq <= gpio_to_irq(pxa_last_gpio); irq++) {
		irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
					 handle_edge_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}

	irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
	return 0;
}

static struct platform_driver pxa_gpio_driver = {
	.probe		= pxa_gpio_probe,
	.driver		= {
		.name	= "pxa-gpio",
	},
};

static int __init pxa_gpio_init(void)
{
	return platform_driver_register(&pxa_gpio_driver);
}
postcore_initcall(pxa_gpio_init);

#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
{
	struct pxa_gpio_chip *c;
	int gpio;

	for_each_gpio_chip(gpio, c) {
		c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
		c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
		c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
		c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);

		/* Clear GPIO transition detect bits */
		writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
	}
	return 0;
}

static void pxa_gpio_resume(void)
{
	struct pxa_gpio_chip *c;
	int gpio;

	for_each_gpio_chip(gpio, c) {
		/* restore level with set/clear */
		writel_relaxed( c->saved_gplr, c->regbase + GPSR_OFFSET);
		writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);
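		/* (e.g. saved_gplr = 0x5: the GPSR write re-drives lines 0 and 2
		 * high, and writing ~0x5 to GPCR pulls the other outputs low)
		 */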

		writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
		writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
		writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
	}
}
#else
#define pxa_gpio_suspend	NULL
#define pxa_gpio_resume		NULL
#endif

struct syscore_ops pxa_gpio_syscore_ops = {
	.suspend	= pxa_gpio_suspend,
	.resume		= pxa_gpio_resume,
};

static int __init pxa_gpio_sysinit(void)
{
	register_syscore_ops(&pxa_gpio_syscore_ops);
	return 0;
}
postcore_initcall(pxa_gpio_sysinit);