pxa3xx_nand.c revision bff3c10d369440bc87ba612b45ba2777d2bf017f
1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/interrupt.h>
15#include <linux/platform_device.h>
16#include <linux/dma-mapping.h>
17#include <linux/delay.h>
18#include <linux/clk.h>
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/nand.h>
21#include <linux/mtd/partitions.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24
25#include <mach/dma.h>
26#include <plat/pxa3xx_nand.h>
27
28#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)
29
30/* registers and bit definitions */
31#define NDCR		(0x00) /* Control register */
32#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
33#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
34#define NDSR		(0x14) /* Status Register */
35#define NDPCR		(0x18) /* Page Count Register */
36#define NDBDR0		(0x1C) /* Bad Block Register 0 */
37#define NDBDR1		(0x20) /* Bad Block Register 1 */
38#define NDDB		(0x40) /* Data Buffer */
39#define NDCB0		(0x48) /* Command Buffer0 */
40#define NDCB1		(0x4C) /* Command Buffer1 */
41#define NDCB2		(0x50) /* Command Buffer2 */
42
43#define NDCR_SPARE_EN		(0x1 << 31)
44#define NDCR_ECC_EN		(0x1 << 30)
45#define NDCR_DMA_EN		(0x1 << 29)
46#define NDCR_ND_RUN		(0x1 << 28)
47#define NDCR_DWIDTH_C		(0x1 << 27)
48#define NDCR_DWIDTH_M		(0x1 << 26)
49#define NDCR_PAGE_SZ		(0x1 << 24)
50#define NDCR_NCSX		(0x1 << 23)
51#define NDCR_ND_MODE		(0x3 << 21)
52#define NDCR_NAND_MODE   	(0x0)
53#define NDCR_CLR_PG_CNT		(0x1 << 20)
54#define NDCR_CLR_ECC		(0x1 << 19)
55#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
56#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)
57
58#define NDCR_RA_START		(0x1 << 15)
59#define NDCR_PG_PER_BLK		(0x1 << 14)
60#define NDCR_ND_ARB_EN		(0x1 << 12)
61
62#define NDSR_MASK		(0xfff)
63#define NDSR_RDY		(0x1 << 11)
64#define NDSR_CS0_PAGED		(0x1 << 10)
65#define NDSR_CS1_PAGED		(0x1 << 9)
66#define NDSR_CS0_CMDD		(0x1 << 8)
67#define NDSR_CS1_CMDD		(0x1 << 7)
68#define NDSR_CS0_BBD		(0x1 << 6)
69#define NDSR_CS1_BBD		(0x1 << 5)
70#define NDSR_DBERR		(0x1 << 4)
71#define NDSR_SBERR		(0x1 << 3)
72#define NDSR_WRDREQ		(0x1 << 2)
73#define NDSR_RDDREQ		(0x1 << 1)
74#define NDSR_WRCMDREQ		(0x1)
75
76#define NDCB0_AUTO_RS		(0x1 << 25)
77#define NDCB0_CSEL		(0x1 << 24)
78#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
79#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
80#define NDCB0_NC		(0x1 << 20)
81#define NDCB0_DBC		(0x1 << 19)
82#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
83#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
84#define NDCB0_CMD2_MASK		(0xff << 8)
85#define NDCB0_CMD1_MASK		(0xff)
86#define NDCB0_ADDR_CYC_SHIFT	(16)
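/*
 * The three NDCB writes form one command descriptor: NDCB0 carries the
 * first and (optional) second NAND command byte in CMD1/CMD2 together
 * with the control bits above, while NDCB1 and NDCB2 carry up to five
 * address-cycle bytes (NDCB1 holds bytes 1-4, NDCB2 byte 5).  The cmdset
 * values further down pack both command bytes into one 16-bit word, e.g.
 * the large-page read1 value 0x3000 is 0x00 as the first and 0x30 as the
 * second command byte; NDCB0_DBC marks such double-byte commands (see
 * prepare_read_prog_cmd).
 */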
87
88/* macros for registers read/write */
89#define nand_writel(info, off, val)	\
90	__raw_writel((val), (info)->mmio_base + (off))
91
92#define nand_readl(info, off)		\
93	__raw_readl((info)->mmio_base + (off))
94
95/* error code and state */
96enum {
97	ERR_NONE	= 0,
98	ERR_DMABUSERR	= -1,
99	ERR_SENDCMD	= -2,
100	ERR_DBERR	= -3,
101	ERR_BBERR	= -4,
102	ERR_SBERR	= -5,
103};
104
105enum {
106	STATE_READY	= 0,
107	STATE_CMD_HANDLE,
108	STATE_DMA_READING,
109	STATE_DMA_WRITING,
110	STATE_DMA_DONE,
111	STATE_PIO_READING,
112	STATE_PIO_WRITING,
113};
114
115struct pxa3xx_nand_info {
116	struct nand_chip	nand_chip;
117
118	struct platform_device	 *pdev;
119	const struct pxa3xx_nand_flash *flash_info;
120
121	struct clk		*clk;
122	void __iomem		*mmio_base;
123	unsigned long		mmio_phys;
124
125	unsigned int 		buf_start;
126	unsigned int		buf_count;
127
128	/* DMA information */
129	int			drcmr_dat;
130	int			drcmr_cmd;
131
132	unsigned char		*data_buff;
133	dma_addr_t 		data_buff_phys;
134	size_t			data_buff_size;
135	int 			data_dma_ch;
136	struct pxa_dma_desc	*data_desc;
137	dma_addr_t 		data_desc_addr;
138
139	uint32_t		reg_ndcr;
140
141	/* saved column/page_addr during CMD_SEQIN */
142	int			seqin_column;
143	int			seqin_page_addr;
144
145	/* relate to the command */
146	unsigned int		state;
147
148	int			use_ecc;	/* use HW ECC ? */
149	int			use_dma;	/* use DMA ? */
150
151	size_t			data_size;	/* data size in FIFO */
152	int 			retcode;
153	struct completion 	cmd_complete;
154
155	/* generated NDCBx register values */
156	uint32_t		ndcb0;
157	uint32_t		ndcb1;
158	uint32_t		ndcb2;
159
160	/* calculated from pxa3xx_nand_flash data */
161	size_t		oob_size;
162	size_t		read_id_bytes;
163
164	unsigned int	col_addr_cycles;
165	unsigned int	row_addr_cycles;
166};
167
168static bool use_dma = 1;
169module_param(use_dma, bool, 0444);
170MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
171
172/*
173 * Default NAND flash controller configuration setup by the
174 * bootloader. This configuration is used only when pdata->keep_config is set
175 */
176static struct pxa3xx_nand_timing default_timing;
177static struct pxa3xx_nand_flash default_flash;
178
179static struct pxa3xx_nand_cmdset smallpage_cmdset = {
180	.read1		= 0x0000,
181	.read2		= 0x0050,
182	.program	= 0x1080,
183	.read_status	= 0x0070,
184	.read_id	= 0x0090,
185	.erase		= 0xD060,
186	.reset		= 0x00FF,
187	.lock		= 0x002A,
188	.unlock		= 0x2423,
189	.lock_status	= 0x007A,
190};
191
192static struct pxa3xx_nand_cmdset largepage_cmdset = {
193	.read1		= 0x3000,
194	.read2		= 0x0050,
195	.program	= 0x1080,
196	.read_status	= 0x0070,
197	.read_id	= 0x0090,
198	.erase		= 0xD060,
199	.reset		= 0x00FF,
200	.lock		= 0x002A,
201	.unlock		= 0x2423,
202	.lock_status	= 0x007A,
203};
204
205#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
206static struct pxa3xx_nand_timing samsung512MbX16_timing = {
207	.tCH	= 10,
208	.tCS	= 0,
209	.tWH	= 20,
210	.tWP	= 40,
211	.tRH	= 30,
212	.tRP	= 40,
213	.tR	= 11123,
214	.tWHR	= 110,
215	.tAR	= 10,
216};
217
218static struct pxa3xx_nand_flash samsung512MbX16 = {
219	.timing		= &samsung512MbX16_timing,
220	.cmdset		= &smallpage_cmdset,
221	.page_per_block	= 32,
222	.page_size	= 512,
223	.flash_width	= 16,
224	.dfc_width	= 16,
225	.num_blocks	= 4096,
226	.chip_id	= 0x46ec,
227};
228
229static struct pxa3xx_nand_flash samsung2GbX8 = {
230	.timing		= &samsung512MbX16_timing,
231	.cmdset		= &smallpage_cmdset,
232	.page_per_block	= 64,
233	.page_size	= 2048,
234	.flash_width	= 8,
235	.dfc_width	= 8,
236	.num_blocks	= 2048,
237	.chip_id	= 0xdaec,
238};
239
240static struct pxa3xx_nand_flash samsung32GbX8 = {
241	.timing		= &samsung512MbX16_timing,
242	.cmdset		= &smallpage_cmdset,
243	.page_per_block	= 128,
244	.page_size	= 4096,
245	.flash_width	= 8,
246	.dfc_width	= 8,
247	.num_blocks	= 8192,
248	.chip_id	= 0xd7ec,
249};
250
251static struct pxa3xx_nand_timing micron_timing = {
252	.tCH	= 10,
253	.tCS	= 25,
254	.tWH	= 15,
255	.tWP	= 25,
256	.tRH	= 15,
257	.tRP	= 30,
258	.tR	= 25000,
259	.tWHR	= 60,
260	.tAR	= 10,
261};
262
263static struct pxa3xx_nand_flash micron1GbX8 = {
264	.timing		= &micron_timing,
265	.cmdset		= &largepage_cmdset,
266	.page_per_block	= 64,
267	.page_size	= 2048,
268	.flash_width	= 8,
269	.dfc_width	= 8,
270	.num_blocks	= 1024,
271	.chip_id	= 0xa12c,
272};
273
274static struct pxa3xx_nand_flash micron1GbX16 = {
275	.timing		= &micron_timing,
276	.cmdset		= &largepage_cmdset,
277	.page_per_block	= 64,
278	.page_size	= 2048,
279	.flash_width	= 16,
280	.dfc_width	= 16,
281	.num_blocks	= 1024,
282	.chip_id	= 0xb12c,
283};
284
285static struct pxa3xx_nand_flash micron4GbX8 = {
286	.timing		= &micron_timing,
287	.cmdset		= &largepage_cmdset,
288	.page_per_block	= 64,
289	.page_size	= 2048,
290	.flash_width	= 8,
291	.dfc_width	= 8,
292	.num_blocks	= 4096,
293	.chip_id	= 0xdc2c,
294};
295
296static struct pxa3xx_nand_flash micron4GbX16 = {
297	.timing		= &micron_timing,
298	.cmdset		= &largepage_cmdset,
299	.page_per_block	= 64,
300	.page_size	= 2048,
301	.flash_width	= 16,
302	.dfc_width	= 16,
303	.num_blocks	= 4096,
304	.chip_id	= 0xcc2c,
305};
306
307static struct pxa3xx_nand_timing stm2GbX16_timing = {
308	.tCH = 10,
309	.tCS = 35,
310	.tWH = 15,
311	.tWP = 25,
312	.tRH = 15,
313	.tRP = 25,
314	.tR = 25000,
315	.tWHR = 60,
316	.tAR = 10,
317};
318
319static struct pxa3xx_nand_flash stm2GbX16 = {
320	.timing = &stm2GbX16_timing,
321	.cmdset	= &largepage_cmdset,
322	.page_per_block = 64,
323	.page_size = 2048,
324	.flash_width = 16,
325	.dfc_width = 16,
326	.num_blocks = 2048,
327	.chip_id = 0xba20,
328};
329
330static struct pxa3xx_nand_flash *builtin_flash_types[] = {
331	&samsung512MbX16,
332	&samsung2GbX8,
333	&samsung32GbX8,
334	&micron1GbX8,
335	&micron1GbX16,
336	&micron4GbX8,
337	&micron4GbX16,
338	&stm2GbX16,
339};
340#endif /* CONFIG_MTD_NAND_PXA3xx_BUILTIN */
341
342#define NDTR0_tCH(c)	(min((c), 7) << 19)
343#define NDTR0_tCS(c)	(min((c), 7) << 16)
344#define NDTR0_tWH(c)	(min((c), 7) << 11)
345#define NDTR0_tWP(c)	(min((c), 7) << 8)
346#define NDTR0_tRH(c)	(min((c), 7) << 3)
347#define NDTR0_tRP(c)	(min((c), 7) << 0)
348
349#define NDTR1_tR(c)	(min((c), 65535) << 16)
350#define NDTR1_tWHR(c)	(min((c), 15) << 4)
351#define NDTR1_tAR(c)	(min((c), 15) << 0)
352
353#define tCH_NDTR0(r)	(((r) >> 19) & 0x7)
354#define tCS_NDTR0(r)	(((r) >> 16) & 0x7)
355#define tWH_NDTR0(r)	(((r) >> 11) & 0x7)
356#define tWP_NDTR0(r)	(((r) >> 8) & 0x7)
357#define tRH_NDTR0(r)	(((r) >> 3) & 0x7)
358#define tRP_NDTR0(r)	(((r) >> 0) & 0x7)
359
360#define tR_NDTR1(r)	(((r) >> 16) & 0xffff)
361#define tWHR_NDTR1(r)	(((r) >> 4) & 0xf)
362#define tAR_NDTR1(r)	(((r) >> 0) & 0xf)
363
364/* convert nanoseconds to NAND flash controller clock cycles */
365#define ns2cycle(ns, clk)	(int)(((ns) * ((clk) / 1000000) / 1000) - 1)
366
367/* convert NAND flash controller clock cycles to nanoseconds */
368#define cycle2ns(c, clk)	((((c) + 1) * 1000000 + (clk) / 500) / ((clk) / 1000))
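/*
 * Worked example (a 156 MHz controller clock is only an assumed
 * illustration, not a value taken from this driver): clk / 1000000 = 156,
 * so with integer arithmetic ns2cycle(25, clk) = (25 * 156 / 1000) - 1 = 2.
 * cycle2ns() is the rough inverse and is used by
 * pxa3xx_nand_detect_timing() to recover the timings programmed by the
 * bootloader.
 */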
369
370static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
371				   const struct pxa3xx_nand_timing *t)
372{
373	unsigned long nand_clk = clk_get_rate(info->clk);
374	uint32_t ndtr0, ndtr1;
375
376	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
377		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
378		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
379		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
380		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
381		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
382
383	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
384		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
385		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
386
387	nand_writel(info, NDTR0CS0, ndtr0);
388	nand_writel(info, NDTR1CS0, ndtr1);
389}
390
391#define WAIT_EVENT_TIMEOUT	10
392
393static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
394{
395	int timeout = WAIT_EVENT_TIMEOUT;
396	uint32_t ndsr;
397
398	while (timeout--) {
399		ndsr = nand_readl(info, NDSR) & NDSR_MASK;
400		if (ndsr & event) {
401			nand_writel(info, NDSR, ndsr);
402			return 0;
403		}
404		udelay(10);
405	}
406
407	return -ETIMEDOUT;
408}
409
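/*
 * Illustration of the NDCBx values generated below (hypothetical page
 * address, large-page device with 2 column + 3 row address cycles):
 * for page_addr = 0x12345 and cmd = large-page read1 (0x3000),
 *   ndcb0 = 0x3000 | NDCB0_DBC | NDCB0_ADDR_CYC(5)
 *   ndcb1 = 0x12345 << 16 = 0x23450000   (the column bytes stay 0)
 *   ndcb2 = (0x12345 >> 16) & 0xff = 0x01
 */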
410static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
411			uint16_t cmd, int column, int page_addr)
412{
413	const struct pxa3xx_nand_flash *f = info->flash_info;
414	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
415
416	/* calculate data size */
417	switch (f->page_size) {
418	case 2048:
419		info->data_size = (info->use_ecc) ? 2088 : 2112;
420		break;
421	case 512:
422		info->data_size = (info->use_ecc) ? 520 : 528;
423		break;
424	default:
425		return -EINVAL;
426	}
427
428	/* generate values for NDCBx registers */
429	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
430	info->ndcb1 = 0;
431	info->ndcb2 = 0;
432	info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles);
433
434	if (info->col_addr_cycles == 2) {
435		/* large block, 2 cycles for column address
436		 * row address starts from 3rd cycle
437		 */
438		info->ndcb1 |= page_addr << 16;
439		if (info->row_addr_cycles == 3)
440			info->ndcb2 = (page_addr >> 16) & 0xff;
441	} else
442		/* small block, 1 cycle for column address
443		 * row address starts from 2nd cycle
444		 */
445		info->ndcb1 = page_addr << 8;
446
447	if (cmd == cmdset->program)
448		info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
449
450	return 0;
451}
452
453static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
454			uint16_t cmd, int page_addr)
455{
456	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
457	info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
458	info->ndcb1 = page_addr;
459	info->ndcb2 = 0;
460	return 0;
461}
462
463static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
464{
465	const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;
466
467	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
468	info->ndcb1 = 0;
469	info->ndcb2 = 0;
470
471	if (cmd == cmdset->read_id) {
472		info->ndcb0 |= NDCB0_CMD_TYPE(3);
473		info->data_size = 8;
474	} else if (cmd == cmdset->read_status) {
475		info->ndcb0 |= NDCB0_CMD_TYPE(4);
476		info->data_size = 8;
477	} else if (cmd == cmdset->reset || cmd == cmdset->lock ||
478		   cmd == cmdset->unlock) {
479		info->ndcb0 |= NDCB0_CMD_TYPE(5);
480	} else
481		return -EINVAL;
482
483	return 0;
484}
485
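/*
 * The interrupt bits in NDCR are mask bits: a cleared bit enables the
 * corresponding interrupt and a set bit masks it, which is why
 * enable_int() clears and disable_int() sets the requested bits.
 */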
486static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
487{
488	uint32_t ndcr;
489
490	ndcr = nand_readl(info, NDCR);
491	nand_writel(info, NDCR, ndcr & ~int_mask);
492}
493
494static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
495{
496	uint32_t ndcr;
497
498	ndcr = nand_readl(info, NDCR);
499	nand_writel(info, NDCR, ndcr | int_mask);
500}
501
502/* NOTE: ND_RUN must be set first and the command buffer written
503 * afterwards; otherwise it does not work
504 */
505static int write_cmd(struct pxa3xx_nand_info *info)
506{
507	uint32_t ndcr;
508
509	/* clear status bits and run */
510	nand_writel(info, NDSR, NDSR_MASK);
511
512	ndcr = info->reg_ndcr;
513
514	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
515	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
516	ndcr |= NDCR_ND_RUN;
517
518	nand_writel(info, NDCR, ndcr);
519
520	if (wait_for_event(info, NDSR_WRCMDREQ)) {
521		printk(KERN_ERR "timed out writing command\n");
522		return -ETIMEDOUT;
523	}
524
525	nand_writel(info, NDCB0, info->ndcb0);
526	nand_writel(info, NDCB0, info->ndcb1);
527	nand_writel(info, NDCB0, info->ndcb2);
528	return 0;
529}
530
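/*
 * PIO transfers move whole 32-bit words through the NDDB FIFO, hence
 * data_size is rounded up to a longword count with DIV_ROUND_UP below.
 */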
531static int handle_data_pio(struct pxa3xx_nand_info *info)
532{
533	int ret, timeout = CHIP_DELAY_TIMEOUT;
534
535	switch (info->state) {
536	case STATE_PIO_WRITING:
537		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
538				DIV_ROUND_UP(info->data_size, 4));
539
540		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
541
542		ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
543		if (!ret) {
544			printk(KERN_ERR "program command time out\n");
545			return -1;
546		}
547		break;
548	case STATE_PIO_READING:
549		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
550				DIV_ROUND_UP(info->data_size, 4));
551		break;
552	default:
553		printk(KERN_ERR "%s: invalid state %d\n", __func__,
554				info->state);
555		return -EINVAL;
556	}
557
558	info->state = STATE_READY;
559	return 0;
560}
561
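/*
 * A single pre-allocated descriptor (see pxa3xx_nand_init_buff) is reused
 * for every transfer; the length is aligned to the 32-byte DMA burst size
 * and the channel is started immediately.
 */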
562static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
563{
564	struct pxa_dma_desc *desc = info->data_desc;
565	int dma_len = ALIGN(info->data_size, 32);
566
567	desc->ddadr = DDADR_STOP;
568	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
569
570	if (dir_out) {
571		desc->dsadr = info->data_buff_phys;
572		desc->dtadr = info->mmio_phys + NDDB;
573		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
574	} else {
575		desc->dtadr = info->data_buff_phys;
576		desc->dsadr = info->mmio_phys + NDDB;
577		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
578	}
579
580	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
581	DDADR(info->data_dma_ch) = info->data_desc_addr;
582	DCSR(info->data_dma_ch) |= DCSR_RUN;
583}
584
585static void pxa3xx_nand_data_dma_irq(int channel, void *data)
586{
587	struct pxa3xx_nand_info *info = data;
588	uint32_t dcsr;
589
590	dcsr = DCSR(channel);
591	DCSR(channel) = dcsr;
592
593	if (dcsr & DCSR_BUSERR) {
594		info->retcode = ERR_DMABUSERR;
595		complete(&info->cmd_complete);
596	}
597
598	if (info->state == STATE_DMA_WRITING) {
599		info->state = STATE_DMA_DONE;
600		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
601	} else {
602		info->state = STATE_READY;
603		complete(&info->cmd_complete);
604	}
605}
606
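/*
 * Rough interrupt flow: WRCMDREQ is consumed synchronously in write_cmd(),
 * then RDDREQ/WRDREQ either kick off DMA or hand the transfer to
 * handle_data_pio() via cmd_complete, and CS0_CMDD/CS0_BBD finally mark
 * the command (or a failed erase/program) as done.
 */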
607static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
608{
609	struct pxa3xx_nand_info *info = devid;
610	unsigned int status;
611
612	status = nand_readl(info, NDSR);
613
614	if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) {
615		if (status & NDSR_DBERR)
616			info->retcode = ERR_DBERR;
617		else if (status & NDSR_SBERR)
618			info->retcode = ERR_SBERR;
619
620		disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);
621
622		if (info->use_dma) {
623			info->state = STATE_DMA_READING;
624			start_data_dma(info, 0);
625		} else {
626			info->state = STATE_PIO_READING;
627			complete(&info->cmd_complete);
628		}
629	} else if (status & NDSR_WRDREQ) {
630		disable_int(info, NDSR_WRDREQ);
631		if (info->use_dma) {
632			info->state = STATE_DMA_WRITING;
633			start_data_dma(info, 1);
634		} else {
635			info->state = STATE_PIO_WRITING;
636			complete(&info->cmd_complete);
637		}
638	} else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
639		if (status & NDSR_CS0_BBD)
640			info->retcode = ERR_BBERR;
641
642		disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
643		info->state = STATE_READY;
644		complete(&info->cmd_complete);
645	}
646	nand_writel(info, NDSR, status);
647	return IRQ_HANDLED;
648}
649
650static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
651{
652	uint32_t ndcr;
653	int ret, timeout = CHIP_DELAY_TIMEOUT;
654
655	if (write_cmd(info)) {
656		info->retcode = ERR_SENDCMD;
657		goto fail_stop;
658	}
659
660	info->state = STATE_CMD_HANDLE;
661
662	enable_int(info, event);
663
664	ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
665	if (!ret) {
666		printk(KERN_ERR "command execution timed out\n");
667		info->retcode = ERR_SENDCMD;
668		goto fail_stop;
669	}
670
671	if (info->use_dma == 0 && info->data_size > 0)
672		if (handle_data_pio(info))
673			goto fail_stop;
674
675	return 0;
676
677fail_stop:
678	ndcr = nand_readl(info, NDCR);
679	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
680	udelay(10);
681	return -ETIMEDOUT;
682}
683
684static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
685{
686	struct pxa3xx_nand_info *info = mtd->priv;
687	return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
688}
689
690static inline int is_buf_blank(uint8_t *buf, size_t len)
691{
692	for (; len > 0; len--)
693		if (*buf++ != 0xff)
694			return 0;
695	return 1;
696}
697
698static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
699				int column, int page_addr)
700{
701	struct pxa3xx_nand_info *info = mtd->priv;
702	const struct pxa3xx_nand_flash *flash_info = info->flash_info;
703	const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
704	int ret;
705
706	info->use_dma = (use_dma) ? 1 : 0;
707	info->use_ecc = 0;
708	info->data_size = 0;
709	info->state = STATE_READY;
710
711	init_completion(&info->cmd_complete);
712
713	switch (command) {
714	case NAND_CMD_READOOB:
715		/* disable HW ECC to get all the OOB data */
716		info->buf_count = mtd->writesize + mtd->oobsize;
717		info->buf_start = mtd->writesize + column;
718		memset(info->data_buff, 0xFF, info->buf_count);
719
720		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
721			break;
722
723		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);
724
725		/* We only read the OOB, so an error in the data does not matter */
726		if (info->retcode == ERR_DBERR)
727			info->retcode = ERR_NONE;
728		break;
729
730	case NAND_CMD_READ0:
731		info->use_ecc = 1;
732		info->retcode = ERR_NONE;
733		info->buf_start = column;
734		info->buf_count = mtd->writesize + mtd->oobsize;
735		memset(info->data_buff, 0xFF, info->buf_count);
736
737		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
738			break;
739
740		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);
741
742		if (info->retcode == ERR_DBERR) {
743			/* for a blank page (all 0xff), the HW calculates its ECC as
744			 * 0, which differs from the ECC information stored in the
745			 * OOB; ignore such double bit errors
746			 */
747			if (is_buf_blank(info->data_buff, mtd->writesize))
748				info->retcode = ERR_NONE;
749		}
750		break;
751	case NAND_CMD_SEQIN:
752		info->buf_start = column;
753		info->buf_count = mtd->writesize + mtd->oobsize;
754		memset(info->data_buff, 0xff, info->buf_count);
755
756		/* save column/page_addr for next CMD_PAGEPROG */
757		info->seqin_column = column;
758		info->seqin_page_addr = page_addr;
759		break;
760	case NAND_CMD_PAGEPROG:
761		info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;
762
763		if (prepare_read_prog_cmd(info, cmdset->program,
764				info->seqin_column, info->seqin_page_addr))
765			break;
766
767		pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
768		break;
769	case NAND_CMD_ERASE1:
770		if (prepare_erase_cmd(info, cmdset->erase, page_addr))
771			break;
772
773		pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
774		break;
775	case NAND_CMD_ERASE2:
776		break;
777	case NAND_CMD_READID:
778	case NAND_CMD_STATUS:
779		info->use_dma = 0;	/* force PIO read */
780		info->buf_start = 0;
781		info->buf_count = (command == NAND_CMD_READID) ?
782				info->read_id_bytes : 1;
783
784		if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
785				cmdset->read_id : cmdset->read_status))
786			break;
787
788		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
789		break;
790	case NAND_CMD_RESET:
791		if (prepare_other_cmd(info, cmdset->reset))
792			break;
793
794		ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
795		if (ret == 0) {
796			int timeout = 2;
797			uint32_t ndcr;
798
799			while (timeout--) {
800				if (nand_readl(info, NDSR) & NDSR_RDY)
801					break;
802				msleep(10);
803			}
804
805			ndcr = nand_readl(info, NDCR);
806			nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
807		}
808		break;
809	default:
810		printk(KERN_ERR "non-supported command.\n");
811		break;
812	}
813
814	if (info->retcode == ERR_DBERR) {
815		printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
816		info->retcode = ERR_NONE;
817	}
818}
819
820static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
821{
822	struct pxa3xx_nand_info *info = mtd->priv;
823	char retval = 0xFF;
824
825	if (info->buf_start < info->buf_count)
826		/* Has a new command just been sent? */
827		retval = info->data_buff[info->buf_start++];
828
829	return retval;
830}
831
832static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
833{
834	struct pxa3xx_nand_info *info = mtd->priv;
835	u16 retval = 0xFFFF;
836
837	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
838		retval = *((u16 *)(info->data_buff+info->buf_start));
839		info->buf_start += 2;
840	}
841	return retval;
842}
843
844static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
845{
846	struct pxa3xx_nand_info *info = mtd->priv;
847	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
848
849	memcpy(buf, info->data_buff + info->buf_start, real_len);
850	info->buf_start += real_len;
851}
852
853static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
854		const uint8_t *buf, int len)
855{
856	struct pxa3xx_nand_info *info = mtd->priv;
857	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
858
859	memcpy(info->data_buff + info->buf_start, buf, real_len);
860	info->buf_start += real_len;
861}
862
863static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
864		const uint8_t *buf, int len)
865{
866	return 0;
867}
868
869static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
870{
871	return;
872}
873
874static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
875{
876	struct pxa3xx_nand_info *info = mtd->priv;
877
878	/* pxa3xx_nand_do_cmd() has already waited for the command to complete */
879	if (this->state == FL_WRITING || this->state == FL_ERASING) {
880		if (info->retcode == ERR_NONE)
881			return 0;
882		else {
883			/*
884			 * any error makes it return 0x01, which tells
885			 * the caller that the erase or write failed
886			 */
887			return 0x01;
888		}
889	}
890
891	return 0;
892}
893
894static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
895{
896	return;
897}
898
899static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
900		const uint8_t *dat, uint8_t *ecc_code)
901{
902	return 0;
903}
904
905static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
906		uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
907{
908	struct pxa3xx_nand_info *info = mtd->priv;
909	/*
910	 * Any error, including ERR_SENDCMD, ERR_DBERR and ERR_DMABUSERR,
911	 * is treated as an ECC error, which tells the caller that the
912	 * read failed.  We do distinguish all the errors internally, but
913	 * nand_read_ecc() only checks this function's return value.
914	 *
915	 * Corrected (single-bit) errors must also be noted.
916	 */
917	if (info->retcode == ERR_SBERR)
918		return 1;
919	else if (info->retcode != ERR_NONE)
920		return -1;
921
922	return 0;
923}
924
925static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
926{
927	const struct pxa3xx_nand_flash *f = info->flash_info;
928	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
929	uint32_t ndcr;
930	uint8_t  id_buff[8];
931
932	if (prepare_other_cmd(info, cmdset->read_id)) {
933		printk(KERN_ERR "failed to prepare command\n");
934		return -EINVAL;
935	}
936
937	/* Send command */
938	if (write_cmd(info))
939		goto fail_timeout;
940
941	/* Wait for the read data request (the ID bytes are then in the FIFO) */
942	if (wait_for_event(info, NDSR_RDDREQ))
943		goto fail_timeout;
944
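	/* read two 32-bit words (8 bytes) of ID data from the FIFO;
	 * only the first two bytes are used below
	 */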
945	__raw_readsl(info->mmio_base + NDDB, id_buff, 2);
946	*id = id_buff[0] | (id_buff[1] << 8);
947	return 0;
948
949fail_timeout:
950	ndcr = nand_readl(info, NDCR);
951	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
952	udelay(10);
953	return -ETIMEDOUT;
954}
955
956static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
957				    const struct pxa3xx_nand_flash *f)
958{
959	struct platform_device *pdev = info->pdev;
960	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
961	uint32_t ndcr = 0x00000FFF; /* disable all interrupts */
962
963	if (f->page_size != 2048 && f->page_size != 512)
964		return -EINVAL;
965
966	if (f->flash_width != 16 && f->flash_width != 8)
967		return -EINVAL;
968
969	/* calculate flash information */
970	info->oob_size = (f->page_size == 2048) ? 64 : 16;
971	info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
972
973	/* calculate addressing information */
974	info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
975
976	if (f->num_blocks * f->page_per_block > 65536)
977		info->row_addr_cycles = 3;
978	else
979		info->row_addr_cycles = 2;
980
981	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
982	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
983	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
984	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
985	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
986	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
987
988	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
989	ndcr |= NDCR_SPARE_EN; /* enable spare by default */
990
991	info->reg_ndcr = ndcr;
992
993	pxa3xx_nand_set_timing(info, f->timing);
994	info->flash_info = f;
995	return 0;
996}
997
998static void pxa3xx_nand_detect_timing(struct pxa3xx_nand_info *info,
999				      struct pxa3xx_nand_timing *t)
1000{
1001	unsigned long nand_clk = clk_get_rate(info->clk);
1002	uint32_t ndtr0 = nand_readl(info, NDTR0CS0);
1003	uint32_t ndtr1 = nand_readl(info, NDTR1CS0);
1004
1005	t->tCH = cycle2ns(tCH_NDTR0(ndtr0), nand_clk);
1006	t->tCS = cycle2ns(tCS_NDTR0(ndtr0), nand_clk);
1007	t->tWH = cycle2ns(tWH_NDTR0(ndtr0), nand_clk);
1008	t->tWP = cycle2ns(tWP_NDTR0(ndtr0), nand_clk);
1009	t->tRH = cycle2ns(tRH_NDTR0(ndtr0), nand_clk);
1010	t->tRP = cycle2ns(tRP_NDTR0(ndtr0), nand_clk);
1011
1012	t->tR = cycle2ns(tR_NDTR1(ndtr1), nand_clk);
1013	t->tWHR = cycle2ns(tWHR_NDTR1(ndtr1), nand_clk);
1014	t->tAR = cycle2ns(tAR_NDTR1(ndtr1), nand_clk);
1015}
1016
1017static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1018{
1019	uint32_t ndcr = nand_readl(info, NDCR);
1020	struct nand_flash_dev *type = NULL;
1021	uint32_t id = -1;
1022	int i;
1023
1024	default_flash.page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32;
1025	default_flash.page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1026	default_flash.flash_width = ndcr & NDCR_DWIDTH_M ? 16 : 8;
1027	default_flash.dfc_width = ndcr & NDCR_DWIDTH_C ? 16 : 8;
1028
1029	if (default_flash.page_size == 2048)
1030		default_flash.cmdset = &largepage_cmdset;
1031	else
1032		default_flash.cmdset = &smallpage_cmdset;
1033
1034	/* set the info fields needed by __readid */
1035	info->flash_info = &default_flash;
1036	info->read_id_bytes = (default_flash.page_size == 2048) ? 4 : 2;
1037	info->reg_ndcr = ndcr;
1038
1039	if (__readid(info, &id))
1040		return -ENODEV;
1041
1042	/* Lookup the flash id */
1043	id = (id >> 8) & 0xff;		/* device id is byte 2 */
1044	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
1045		if (id == nand_flash_ids[i].id) {
1046			type =  &nand_flash_ids[i];
1047			break;
1048		}
1049	}
1050
1051	if (!type)
1052		return -ENODEV;
1053
1054	/* fill the missing flash information */
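	/* type->chipsize from nand_flash_ids[] is given in megabytes and
	 * __ffs(page_per_block * page_size) is log2 of the block size in
	 * bytes, so the shift below converts MB into a block count; e.g.
	 * (hypothetically) 64 pages of 2048 bytes -> 128KiB blocks ->
	 * i = 17, and a 256MB part gives 256 << 3 = 2048 blocks.
	 */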
1055	i = __ffs(default_flash.page_per_block * default_flash.page_size);
1056	default_flash.num_blocks = type->chipsize << (20 - i);
1057
1058	info->oob_size = (default_flash.page_size == 2048) ? 64 : 16;
1059
1060	/* calculate addressing information */
1061	info->col_addr_cycles = (default_flash.page_size == 2048) ? 2 : 1;
1062
1063	if (default_flash.num_blocks * default_flash.page_per_block > 65536)
1064		info->row_addr_cycles = 3;
1065	else
1066		info->row_addr_cycles = 2;
1067
1068	pxa3xx_nand_detect_timing(info, &default_timing);
1069	default_flash.timing = &default_timing;
1070
1071	return 0;
1072}
1073
1074static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
1075				    const struct pxa3xx_nand_platform_data *pdata)
1076{
1077	const struct pxa3xx_nand_flash *f;
1078	uint32_t id = -1;
1079	int i;
1080
1081	if (pdata->keep_config)
1082		if (pxa3xx_nand_detect_config(info) == 0)
1083			return 0;
1084
1085	for (i = 0; i<pdata->num_flash; ++i) {
1086		f = pdata->flash + i;
1087
1088		if (pxa3xx_nand_config_flash(info, f))
1089			continue;
1090
1091		if (__readid(info, &id))
1092			continue;
1093
1094		if (id == f->chip_id)
1095			return 0;
1096	}
1097
1098#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
1099	for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {
1100
1101		f = builtin_flash_types[i];
1102
1103		if (pxa3xx_nand_config_flash(info, f))
1104			continue;
1105
1106		if (__readid(info, &id))
1107			continue;
1108
1109		if (id == f->chip_id)
1110			return 0;
1111	}
1112#endif
1113
1114	dev_warn(&info->pdev->dev,
1115		 "failed to detect configured nand flash; found %04x instead of\n",
1116		 id);
1117	return -ENODEV;
1118}
1119
1120/* the maximum possible buffer size for large page with OOB data
1121 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
1122 * data buffer and the DMA descriptor
1123 */
1124#define MAX_BUFF_SIZE	PAGE_SIZE
1125
1126static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1127{
1128	struct platform_device *pdev = info->pdev;
1129	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
1130
1131	if (use_dma == 0) {
1132		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
1133		if (info->data_buff == NULL)
1134			return -ENOMEM;
1135		return 0;
1136	}
1137
1138	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
1139				&info->data_buff_phys, GFP_KERNEL);
1140	if (info->data_buff == NULL) {
1141		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
1142		return -ENOMEM;
1143	}
1144
1145	info->data_buff_size = MAX_BUFF_SIZE;
1146	info->data_desc = (void *)info->data_buff + data_desc_offset;
1147	info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1148
1149	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1150				pxa3xx_nand_data_dma_irq, info);
1151	if (info->data_dma_ch < 0) {
1152		dev_err(&pdev->dev, "failed to request data dma\n");
1153		dma_free_coherent(&pdev->dev, info->data_buff_size,
1154				info->data_buff, info->data_buff_phys);
1155		return info->data_dma_ch;
1156	}
1157
1158	return 0;
1159}
1160
1161static struct nand_ecclayout hw_smallpage_ecclayout = {
1162	.eccbytes = 6,
1163	.eccpos = {8, 9, 10, 11, 12, 13 },
1164	.oobfree = { {2, 6} }
1165};
1166
1167static struct nand_ecclayout hw_largepage_ecclayout = {
1168	.eccbytes = 24,
1169	.eccpos = {
1170		40, 41, 42, 43, 44, 45, 46, 47,
1171		48, 49, 50, 51, 52, 53, 54, 55,
1172		56, 57, 58, 59, 60, 61, 62, 63},
1173	.oobfree = { {2, 38} }
1174};
1175
1176static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
1177				 struct pxa3xx_nand_info *info)
1178{
1179	const struct pxa3xx_nand_flash *f = info->flash_info;
1180	struct nand_chip *this = &info->nand_chip;
1181
1182	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0;
1183
1184	this->waitfunc		= pxa3xx_nand_waitfunc;
1185	this->select_chip	= pxa3xx_nand_select_chip;
1186	this->dev_ready		= pxa3xx_nand_dev_ready;
1187	this->cmdfunc		= pxa3xx_nand_cmdfunc;
1188	this->read_word		= pxa3xx_nand_read_word;
1189	this->read_byte		= pxa3xx_nand_read_byte;
1190	this->read_buf		= pxa3xx_nand_read_buf;
1191	this->write_buf		= pxa3xx_nand_write_buf;
1192	this->verify_buf	= pxa3xx_nand_verify_buf;
1193
1194	this->ecc.mode		= NAND_ECC_HW;
1195	this->ecc.hwctl		= pxa3xx_nand_ecc_hwctl;
1196	this->ecc.calculate	= pxa3xx_nand_ecc_calculate;
1197	this->ecc.correct	= pxa3xx_nand_ecc_correct;
1198	this->ecc.size		= f->page_size;
1199
1200	if (f->page_size == 2048)
1201		this->ecc.layout = &hw_largepage_ecclayout;
1202	else
1203		this->ecc.layout = &hw_smallpage_ecclayout;
1204
1205	this->chip_delay = 25;
1206}
1207
1208static int pxa3xx_nand_probe(struct platform_device *pdev)
1209{
1210	struct pxa3xx_nand_platform_data *pdata;
1211	struct pxa3xx_nand_info *info;
1212	struct nand_chip *this;
1213	struct mtd_info *mtd;
1214	struct resource *r;
1215	int ret = 0, irq;
1216
1217	pdata = pdev->dev.platform_data;
1218
1219	if (!pdata) {
1220		dev_err(&pdev->dev, "no platform data defined\n");
1221		return -ENODEV;
1222	}
1223
1224	mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
1225			GFP_KERNEL);
1226	if (!mtd) {
1227		dev_err(&pdev->dev, "failed to allocate memory\n");
1228		return -ENOMEM;
1229	}
1230
1231	info = (struct pxa3xx_nand_info *)(&mtd[1]);
1232	info->pdev = pdev;
1233
1234	this = &info->nand_chip;
1235	mtd->priv = info;
1236	mtd->owner = THIS_MODULE;
1237
1238	info->clk = clk_get(&pdev->dev, NULL);
1239	if (IS_ERR(info->clk)) {
1240		dev_err(&pdev->dev, "failed to get nand clock\n");
1241		ret = PTR_ERR(info->clk);
1242		goto fail_free_mtd;
1243	}
1244	clk_enable(info->clk);
1245
1246	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1247	if (r == NULL) {
1248		dev_err(&pdev->dev, "no resource defined for data DMA\n");
1249		ret = -ENXIO;
1250		goto fail_put_clk;
1251	}
1252	info->drcmr_dat = r->start;
1253
1254	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1255	if (r == NULL) {
1256		dev_err(&pdev->dev, "no resource defined for command DMA\n");
1257		ret = -ENXIO;
1258		goto fail_put_clk;
1259	}
1260	info->drcmr_cmd = r->start;
1261
1262	irq = platform_get_irq(pdev, 0);
1263	if (irq < 0) {
1264		dev_err(&pdev->dev, "no IRQ resource defined\n");
1265		ret = -ENXIO;
1266		goto fail_put_clk;
1267	}
1268
1269	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1270	if (r == NULL) {
1271		dev_err(&pdev->dev, "no IO memory resource defined\n");
1272		ret = -ENODEV;
1273		goto fail_put_clk;
1274	}
1275
1276	r = request_mem_region(r->start, resource_size(r), pdev->name);
1277	if (r == NULL) {
1278		dev_err(&pdev->dev, "failed to request memory resource\n");
1279		ret = -EBUSY;
1280		goto fail_put_clk;
1281	}
1282
1283	info->mmio_base = ioremap(r->start, resource_size(r));
1284	if (info->mmio_base == NULL) {
1285		dev_err(&pdev->dev, "ioremap() failed\n");
1286		ret = -ENODEV;
1287		goto fail_free_res;
1288	}
1289	info->mmio_phys = r->start;
1290
1291	ret = pxa3xx_nand_init_buff(info);
1292	if (ret)
1293		goto fail_free_io;
1294
1295	/* initialize all interrupts to be disabled */
1296	disable_int(info, NDSR_MASK);
1297
1298	ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
1299			  pdev->name, info);
1300	if (ret < 0) {
1301		dev_err(&pdev->dev, "failed to request IRQ\n");
1302		goto fail_free_buf;
1303	}
1304
1305	ret = pxa3xx_nand_detect_flash(info, pdata);
1306	if (ret) {
1307		dev_err(&pdev->dev, "failed to detect flash\n");
1308		ret = -ENODEV;
1309		goto fail_free_irq;
1310	}
1311
1312	pxa3xx_nand_init_mtd(mtd, info);
1313
1314	platform_set_drvdata(pdev, mtd);
1315
1316	if (nand_scan(mtd, 1)) {
1317		dev_err(&pdev->dev, "failed to scan nand\n");
1318		ret = -ENXIO;
1319		goto fail_free_irq;
1320	}
1321
1322	if (mtd_has_cmdlinepart()) {
1323		static const char *probes[] = { "cmdlinepart", NULL };
1324		struct mtd_partition *parts;
1325		int nr_parts;
1326
1327		nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
1328
1329		if (nr_parts)
1330			return add_mtd_partitions(mtd, parts, nr_parts);
1331	}
1332
1333	return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1334
1335fail_free_irq:
1336	free_irq(irq, info);
1337fail_free_buf:
1338	if (use_dma) {
1339		pxa_free_dma(info->data_dma_ch);
1340		dma_free_coherent(&pdev->dev, info->data_buff_size,
1341			info->data_buff, info->data_buff_phys);
1342	} else
1343		kfree(info->data_buff);
1344fail_free_io:
1345	iounmap(info->mmio_base);
1346fail_free_res:
1347	release_mem_region(r->start, resource_size(r));
1348fail_put_clk:
1349	clk_disable(info->clk);
1350	clk_put(info->clk);
1351fail_free_mtd:
1352	kfree(mtd);
1353	return ret;
1354}
1355
1356static int pxa3xx_nand_remove(struct platform_device *pdev)
1357{
1358	struct mtd_info *mtd = platform_get_drvdata(pdev);
1359	struct pxa3xx_nand_info *info = mtd->priv;
1360	struct resource *r;
1361	int irq;
1362
1363	platform_set_drvdata(pdev, NULL);
1364
1365	del_mtd_device(mtd);
1366	del_mtd_partitions(mtd);
1367	irq = platform_get_irq(pdev, 0);
1368	if (irq >= 0)
1369		free_irq(irq, info);
1370	if (use_dma) {
1371		pxa_free_dma(info->data_dma_ch);
1372		dma_free_writecombine(&pdev->dev, info->data_buff_size,
1373				info->data_buff, info->data_buff_phys);
1374	} else
1375		kfree(info->data_buff);
1376
1377	iounmap(info->mmio_base);
1378	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1379	release_mem_region(r->start, resource_size(r));
1380
1381	clk_disable(info->clk);
1382	clk_put(info->clk);
1383
1384	kfree(mtd);
1385	return 0;
1386}
1387
1388#ifdef CONFIG_PM
1389static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1390{
1391	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1392	struct pxa3xx_nand_info *info = mtd->priv;
1393
1394	if (info->state != STATE_READY) {
1395		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1396		return -EAGAIN;
1397	}
1398
1399	return 0;
1400}
1401
1402static int pxa3xx_nand_resume(struct platform_device *pdev)
1403{
1404	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1405	struct pxa3xx_nand_info *info = mtd->priv;
1406
1407	clk_enable(info->clk);
1408
1409	return pxa3xx_nand_config_flash(info, info->flash_info);
1410}
1411#else
1412#define pxa3xx_nand_suspend	NULL
1413#define pxa3xx_nand_resume	NULL
1414#endif
1415
1416static struct platform_driver pxa3xx_nand_driver = {
1417	.driver = {
1418		.name	= "pxa3xx-nand",
1419	},
1420	.probe		= pxa3xx_nand_probe,
1421	.remove		= pxa3xx_nand_remove,
1422	.suspend	= pxa3xx_nand_suspend,
1423	.resume		= pxa3xx_nand_resume,
1424};
1425
1426static int __init pxa3xx_nand_init(void)
1427{
1428	return platform_driver_register(&pxa3xx_nand_driver);
1429}
1430module_init(pxa3xx_nand_init);
1431
1432static void __exit pxa3xx_nand_exit(void)
1433{
1434	platform_driver_unregister(&pxa3xx_nand_driver);
1435}
1436module_exit(pxa3xx_nand_exit);
1437
1438MODULE_LICENSE("GPL");
1439MODULE_DESCRIPTION("PXA3xx NAND controller driver");
1440