/* omap2.c — revision 3cae1cc149c40c14424162496eb5a7c8db1cd4fb */
1/*
2 *  linux/drivers/mtd/onenand/omap2.c
3 *
4 *  OneNAND driver for OMAP2 / OMAP3
5 *
6 *  Copyright © 2005-2006 Nokia Corporation
7 *
8 *  Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
9 *  IRQ and DMA support written by Timo Teras
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published by
13 * the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program; see the file COPYING. If not, write to the Free Software
22 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 */
25
26#include <linux/device.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/onenand.h>
31#include <linux/mtd/partitions.h>
32#include <linux/platform_device.h>
33#include <linux/interrupt.h>
34#include <linux/delay.h>
35#include <linux/dma-mapping.h>
36#include <linux/io.h>
37
38#include <asm/mach/flash.h>
39#include <mach/gpmc.h>
40#include <mach/onenand.h>
41#include <mach/gpio.h>
42
43#include <mach/dma.h>
44
45#include <mach/board.h>
46
47#define DRIVER_NAME "omap2-onenand"
48
49#define ONENAND_IO_SIZE		SZ_128K
50#define ONENAND_BUFRAM_SIZE	(1024 * 5)
51
/* Per-device driver state for one OneNAND chip behind a GPMC chip-select. */
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;			/* GPMC chip-select number */
	unsigned long phys_base;	/* physical base of the GPMC I/O window */
	int gpio_irq;			/* GPIO carrying the INT pin; 0 = no IRQ */
	struct mtd_info mtd;
	struct mtd_partition *parts;	/* partition table registered at probe */
	struct onenand_chip onenand;
	struct completion irq_done;	/* signalled by the INT GPIO handler */
	struct completion dma_done;	/* signalled by the DMA callback */
	int dma_channel;		/* logical DMA channel; -1 = PIO only */
	int freq;			/* interface clock (MHz) from version ID */
	int (*setup)(void __iomem *base, int freq);	/* board timing hook */
};
66
67static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
68{
69	struct omap2_onenand *c = data;
70
71	complete(&c->dma_done);
72}
73
74static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
75{
76	struct omap2_onenand *c = dev_id;
77
78	complete(&c->irq_done);
79
80	return IRQ_HANDLED;
81}
82
83static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
84{
85	return readw(c->onenand.base + reg);
86}
87
88static inline void write_reg(struct omap2_onenand *c, unsigned short value,
89			     int reg)
90{
91	writew(value, c->onenand.base + reg);
92}
93
94static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
95{
96	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
97	       msg, state, ctrl, intr);
98}
99
100static void wait_warn(char *msg, int state, unsigned int ctrl,
101		      unsigned int intr)
102{
103	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
104	       "intr 0x%04x\n", msg, state, ctrl, intr);
105}
106
/*
 * Wait for the current OneNAND operation to finish (installed as
 * this->wait for the generic OneNAND layer).
 *
 * @state selects the waiting strategy:
 *  - FL_RESETING: busy-poll the interrupt register briefly (IRQ not
 *    usable yet during reset);
 *  - FL_READING:  poll with the interrupt pin disabled (reads finish
 *    quickly, sleeping is not worth it);
 *  - otherwise:   enable the interrupt pin and sleep on irq_done.
 *
 * Returns 0 on success, -EIO on controller error or timeout, and
 * -EBADMSG on an uncorrectable ECC error.
 */
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING) {
		int i;

		/* Poll up to ~20us for the master interrupt bit. */
		for (i = 0; i < 20; i++) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if (!(intr & ONENAND_INT_RESET)) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			/* NOTE(review): gpio_get_value() is not documented to
			 * return -1; this error branch may be dead — confirm. */
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		/* Busy-poll for completion, retrying the 20ms window up to
		 * three times while the controller still reports ONGO. */
		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			/* Fetch the failing address for the diagnostics. */
			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	/* Warn about any other controller status bits left set. */
	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
262
263static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
264{
265	struct onenand_chip *this = mtd->priv;
266
267	if (ONENAND_CURRENT_BUFFERRAM(this)) {
268		if (area == ONENAND_DATARAM)
269			return this->writesize;
270		if (area == ONENAND_SPARERAM)
271			return mtd->oobsize;
272	}
273
274	return 0;
275}
276
277#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
278
/*
 * Read from OneNAND BufferRAM on OMAP3, using the system DMA engine
 * for large, word-aligned transfers and falling back to a plain
 * memcpy through the memory-mapped window otherwise.
 *
 * Always returns 0 (the memcpy fallback cannot fail).
 */
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA needs 32-bit alignment; short transfers aren't worth it. */
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		/* vmalloc buffer: only handle it if it stays in one page. */
		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	/* Copy the unaligned tail by hand; DMA moves 32-bit words only. */
	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	/* Busy-wait on the completion flag set by the DMA callback. */
	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		/* NOTE(review): the channel is not stopped here, so the
		 * fallback memcpy may race a still-running DMA — confirm. */
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}
356
357static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
358					 const unsigned char *buffer,
359					 int offset, size_t count)
360{
361	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
362	struct onenand_chip *this = mtd->priv;
363	dma_addr_t dma_src, dma_dst;
364	int bram_offset;
365	unsigned long timeout;
366	void *buf = (void *)buffer;
367	volatile unsigned *done;
368
369	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
370	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
371		goto out_copy;
372
373	/* panic_write() may be in an interrupt context */
374	if (in_interrupt())
375		goto out_copy;
376
377	if (buf >= high_memory) {
378		struct page *p1;
379
380		if (((size_t)buf & PAGE_MASK) !=
381		    ((size_t)(buf + count - 1) & PAGE_MASK))
382			goto out_copy;
383		p1 = vmalloc_to_page(buf);
384		if (!p1)
385			goto out_copy;
386		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
387	}
388
389	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
390	dma_dst = c->phys_base + bram_offset;
391	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
392		dev_err(&c->pdev->dev,
393			"Couldn't DMA map a %d byte buffer\n",
394			count);
395		return -1;
396	}
397
398	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
399				     count >> 2, 1, 0, 0, 0);
400	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
401				dma_src, 0, 0);
402	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
403				 dma_dst, 0, 0);
404
405	INIT_COMPLETION(c->dma_done);
406	omap_start_dma(c->dma_channel);
407
408	timeout = jiffies + msecs_to_jiffies(20);
409	done = &c->dma_done.done;
410	while (time_before(jiffies, timeout))
411		if (*done)
412			break;
413
414	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
415
416	if (!*done) {
417		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
418		goto out_copy;
419	}
420
421	return 0;
422
423out_copy:
424	memcpy(this->base + bram_offset, buf, count);
425	return 0;
426}
427
428#else
429
430int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
431				 unsigned char *buffer, int offset,
432				 size_t count);
433
434int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
435				  const unsigned char *buffer,
436				  int offset, size_t count);
437
438#endif
439
440#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
441
/*
 * Read from OneNAND BufferRAM on OMAP2.  The DMA path below is
 * deliberately disabled by the "1 ||" short-circuit (see the comment),
 * so in practice every call takes the memcpy branch.
 *
 * Returns 0 on success; -1 only from the (currently dead) DMA path.
 */
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	/* No timeout: relies on the DMA callback always firing. */
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}
486
487static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
488					 const unsigned char *buffer,
489					 int offset, size_t count)
490{
491	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
492	struct onenand_chip *this = mtd->priv;
493	dma_addr_t dma_src, dma_dst;
494	int bram_offset;
495
496	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
497	/* DMA is not used.  Revisit PM requirements before enabling it. */
498	if (1 || (c->dma_channel < 0) ||
499	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
500	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
501		memcpy((__force void *)(this->base + bram_offset), buffer,
502		       count);
503		return 0;
504	}
505
506	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
507				 DMA_TO_DEVICE);
508	dma_dst = c->phys_base + bram_offset;
509	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
510		dev_err(&c->pdev->dev,
511			"Couldn't DMA map a %d byte buffer\n",
512			count);
513		return -1;
514	}
515
516	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
517				     count / 2, 1, 0, 0, 0);
518	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
519				dma_src, 0, 0);
520	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
521				 dma_dst, 0, 0);
522
523	INIT_COMPLETION(c->dma_done);
524	omap_start_dma(c->dma_channel);
525	wait_for_completion(&c->dma_done);
526
527	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE);
528
529	return 0;
530}
531
532#else
533
534int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
535				 unsigned char *buffer, int offset,
536				 size_t count);
537
538int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
539				  const unsigned char *buffer,
540				  int offset, size_t count);
541
542#endif
543
544static struct platform_driver omap2_onenand_driver;
545
546static int __adjust_timing(struct device *dev, void *data)
547{
548	int ret = 0;
549	struct omap2_onenand *c;
550
551	c = dev_get_drvdata(dev);
552
553	BUG_ON(c->setup == NULL);
554
555	/* DMA is not in use so this is all that is needed */
556	/* Revisit for OMAP3! */
557	ret = c->setup(c->onenand.base, c->freq);
558
559	return ret;
560}
561
562int omap2_onenand_rephase(void)
563{
564	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
565				      NULL, __adjust_timing);
566}
567
568static void omap2_onenand_shutdown(struct platform_device *pdev)
569{
570	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
571
572	/* With certain content in the buffer RAM, the OMAP boot ROM code
573	 * can recognize the flash chip incorrectly. Zero it out before
574	 * soft reset.
575	 */
576	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
577}
578
579static int __devinit omap2_onenand_probe(struct platform_device *pdev)
580{
581	struct omap_onenand_platform_data *pdata;
582	struct omap2_onenand *c;
583	int r;
584
585	pdata = pdev->dev.platform_data;
586	if (pdata == NULL) {
587		dev_err(&pdev->dev, "platform data missing\n");
588		return -ENODEV;
589	}
590
591	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
592	if (!c)
593		return -ENOMEM;
594
595	init_completion(&c->irq_done);
596	init_completion(&c->dma_done);
597	c->gpmc_cs = pdata->cs;
598	c->gpio_irq = pdata->gpio_irq;
599	c->dma_channel = pdata->dma_channel;
600	if (c->dma_channel < 0) {
601		/* if -1, don't use DMA */
602		c->gpio_irq = 0;
603	}
604
605	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
606	if (r < 0) {
607		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
608		goto err_kfree;
609	}
610
611	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
612			       pdev->dev.driver->name) == NULL) {
613		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
614			"size: 0x%x\n",	c->phys_base, ONENAND_IO_SIZE);
615		r = -EBUSY;
616		goto err_free_cs;
617	}
618	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
619	if (c->onenand.base == NULL) {
620		r = -ENOMEM;
621		goto err_release_mem_region;
622	}
623
624	if (pdata->onenand_setup != NULL) {
625		r = pdata->onenand_setup(c->onenand.base, c->freq);
626		if (r < 0) {
627			dev_err(&pdev->dev, "Onenand platform setup failed: "
628				"%d\n", r);
629			goto err_iounmap;
630		}
631		c->setup = pdata->onenand_setup;
632	}
633
634	if (c->gpio_irq) {
635		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
636			dev_err(&pdev->dev,  "Failed to request GPIO%d for "
637				"OneNAND\n", c->gpio_irq);
638			goto err_iounmap;
639	}
640	gpio_direction_input(c->gpio_irq);
641
642	if ((r = request_irq(gpio_to_irq(c->gpio_irq),
643			     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
644			     pdev->dev.driver->name, c)) < 0)
645		goto err_release_gpio;
646	}
647
648	if (c->dma_channel >= 0) {
649		r = omap_request_dma(0, pdev->dev.driver->name,
650				     omap2_onenand_dma_cb, (void *) c,
651				     &c->dma_channel);
652		if (r == 0) {
653			omap_set_dma_write_mode(c->dma_channel,
654						OMAP_DMA_WRITE_NON_POSTED);
655			omap_set_dma_src_data_pack(c->dma_channel, 1);
656			omap_set_dma_src_burst_mode(c->dma_channel,
657						    OMAP_DMA_DATA_BURST_8);
658			omap_set_dma_dest_data_pack(c->dma_channel, 1);
659			omap_set_dma_dest_burst_mode(c->dma_channel,
660						     OMAP_DMA_DATA_BURST_8);
661		} else {
662			dev_info(&pdev->dev,
663				 "failed to allocate DMA for OneNAND, "
664				 "using PIO instead\n");
665			c->dma_channel = -1;
666		}
667	}
668
669	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
670		 "base %p\n", c->gpmc_cs, c->phys_base,
671		 c->onenand.base);
672
673	c->pdev = pdev;
674	c->mtd.name = dev_name(&pdev->dev);
675	c->mtd.priv = &c->onenand;
676	c->mtd.owner = THIS_MODULE;
677
678	c->mtd.dev.parent = &pdev->dev;
679
680	if (c->dma_channel >= 0) {
681		struct onenand_chip *this = &c->onenand;
682
683		this->wait = omap2_onenand_wait;
684		if (cpu_is_omap34xx()) {
685			this->read_bufferram = omap3_onenand_read_bufferram;
686			this->write_bufferram = omap3_onenand_write_bufferram;
687		} else {
688			this->read_bufferram = omap2_onenand_read_bufferram;
689			this->write_bufferram = omap2_onenand_write_bufferram;
690		}
691	}
692
693	if ((r = onenand_scan(&c->mtd, 1)) < 0)
694		goto err_release_dma;
695
696	switch ((c->onenand.version_id >> 4) & 0xf) {
697	case 0:
698		c->freq = 40;
699		break;
700	case 1:
701		c->freq = 54;
702		break;
703	case 2:
704		c->freq = 66;
705		break;
706	case 3:
707		c->freq = 83;
708		break;
709	}
710
711#ifdef CONFIG_MTD_PARTITIONS
712	if (pdata->parts != NULL)
713		r = add_mtd_partitions(&c->mtd, pdata->parts,
714				       pdata->nr_parts);
715	else
716#endif
717		r = add_mtd_device(&c->mtd);
718	if (r < 0)
719		goto err_release_onenand;
720
721	platform_set_drvdata(pdev, c);
722
723	return 0;
724
725err_release_onenand:
726	onenand_release(&c->mtd);
727err_release_dma:
728	if (c->dma_channel != -1)
729		omap_free_dma(c->dma_channel);
730	if (c->gpio_irq)
731		free_irq(gpio_to_irq(c->gpio_irq), c);
732err_release_gpio:
733	if (c->gpio_irq)
734		gpio_free(c->gpio_irq);
735err_iounmap:
736	iounmap(c->onenand.base);
737err_release_mem_region:
738	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
739err_free_cs:
740	gpmc_cs_free(c->gpmc_cs);
741err_kfree:
742	kfree(c);
743
744	return r;
745}
746
/*
 * Undo omap2_onenand_probe(): unregister the MTD, release the DMA
 * channel, IRQ, GPIO, mappings and GPMC chip-select, then free the
 * per-device state.  Always returns 0.
 */
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	/* NOTE(review): this relies on probe having recorded the partition
	 * table in c->parts — verify omap2_onenand_probe() sets it. */
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif

	onenand_release(&c->mtd);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	/* Clear BufferRAM so the boot ROM cannot misdetect the chip. */
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	gpmc_cs_free(c->gpmc_cs);
	kfree(c);

	return 0;
}
778
/* Platform driver glue; matched by name against the board's device. */
static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner  = THIS_MODULE,
	},
};
788
789static int __init omap2_onenand_init(void)
790{
791	printk(KERN_INFO "OneNAND driver initializing\n");
792	return platform_driver_register(&omap2_onenand_driver);
793}
794
795static void __exit omap2_onenand_exit(void)
796{
797	platform_driver_unregister(&omap2_onenand_driver);
798}
799
800module_init(omap2_onenand_init);
801module_exit(omap2_onenand_exit);
802
803MODULE_ALIAS(DRIVER_NAME);
804MODULE_LICENSE("GPL");
805MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
806MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
807