omap2.c revision 9ac4e613a88d7f6a7a9651d863e9c8f63b582718
/*
 *  linux/drivers/mtd/onenand/omap2.c
 *
 *  OneNAND driver for OMAP2 / OMAP3
 *
 *  Copyright © 2005-2006 Nokia Corporation
 *
 *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 *  IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
#include <mach/gpio.h>

#include <plat/dma.h>

#include <plat/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)

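/*
 * Per-device driver state: the GPMC chip select and its physical window,
 * the optional interrupt GPIO and system DMA channel, and the embedded
 * mtd_info/onenand_chip pair that lets container_of() recover this
 * structure from the mtd_info handed in by the OneNAND core.
 */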
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int freq);
	struct regulator *regulator;
};

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL,  };
#endif

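/*
 * Both completion paths are trivial: the DMA callback and the OneNAND
 * INT GPIO interrupt handler only complete the corresponding completion
 * that the transfer and wait routines below block on.
 */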
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

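/*
 * Wait for the current OneNAND operation to finish.  Reset and erase
 * prepare/verify states are polled with short udelay() loops (roughly
 * 20 us, or 100 us when verifying an erase).  Other states sleep on the
 * INT GPIO interrupt in 20 ms windows, allowing up to three windows
 * while the controller still reports the operation as ongoing.  Reads
 * busy-poll the interrupt register with the interface interrupt disabled.
 */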
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) != intr_flags) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

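/*
 * OMAP3 bufferram transfers: use the system DMA when the bufferram
 * offset and the caller's buffer are word aligned and the transfer is
 * at least 384 bytes, falling back to memcpy otherwise (including from
 * interrupt/panic context).  vmalloc'd buffers are translated to their
 * underlying page first, and on the read path any trailing bytes beyond
 * a multiple of four are copied by hand.
 */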
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

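	/*
	 * One frame of count/4 32-bit elements, post-incrementing through
	 * both the bufferram window and the mapped destination buffer.
	 */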
	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

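/*
 * OMAP2 bufferram transfers.  The DMA path below is kept for reference
 * but deliberately disabled by the "if (1 || ...)" checks, so all
 * transfers currently go through memcpy; see the in-line comments about
 * revisiting the PM requirements before re-enabling DMA.
 */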
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

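	/*
	 * Unlike the read path above, the (disabled) write path is set up
	 * as count/2 16-bit elements rather than 32-bit ones.
	 */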
	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

static struct platform_driver omap2_onenand_driver;

static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, c->freq);

	return ret;
}

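/*
 * Re-run the board-supplied timing setup callback on every bound device,
 * using the frequency detected at probe time.  Intended to be called when
 * the interface timings need to be reprogrammed (presumably after an
 * interface clock change; the callers live outside this file).
 */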
int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

static int omap2_onenand_enable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_enable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't enable regulator\n");

	return ret;
}

static int omap2_onenand_disable(struct mtd_info *mtd)
{
	int ret;
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);

	ret = regulator_disable(c->regulator);
	if (ret != 0)
		dev_err(&c->pdev->dev, "can't disable regulator\n");

	return ret;
}

static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

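	/*
	 * Configure the system DMA channel used for bufferram transfers:
	 * non-posted writes, source/destination data packing, and bursts
	 * of eight elements on both ends.
	 */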
	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if (pdata->regulator_can_sleep) {
		c->regulator = regulator_get(&pdev->dev, "vonenand");
		if (IS_ERR(c->regulator)) {
			dev_err(&pdev->dev, "Failed to get regulator\n");
			/* Don't report success after a failed regulator_get */
			r = PTR_ERR(c->regulator);
			goto err_release_dma;
		}
		c->onenand.enable = omap2_onenand_enable;
		c->onenand.disable = omap2_onenand_disable;
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_regulator;

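	/*
	 * Derive the interface clock, in MHz, from bits 7:4 of the OneNAND
	 * version ID; it is passed to the timing setup callback when the
	 * timings are reprogrammed via omap2_onenand_rephase().
	 */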
	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
	if (r > 0)
		r = add_mtd_partitions(&c->mtd, c->parts, r);
	else if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_regulator:
	regulator_put(c->regulator);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c->parts);
	kfree(c);

	return r;
}

static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	onenand_release(&c->mtd);
	regulator_put(c->regulator);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	gpmc_cs_free(c->gpmc_cs);
	kfree(c->parts);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner  = THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");