pxa3xx_nand.c revision f271049e2010b918f83dc1c7bbd5d75f4710506a
/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/irq.h>

#include <mach/dma.h>
#include <mach/pxa-regs.h>
#include <mach/pxa3xx_nand.h>

#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)
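/* 2 * HZ / 10 jiffies is 200 ms: the budget given to wait_for_completion_timeout() */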

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_CLR_ECC		(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)

#define NDSR_MASK		(0xfff)
#define NDSR_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_DBERR		(0x1 << 4)
#define NDSR_SBERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* dma-able I/O address for the NAND data and commands */
#define NDCB0_DMA_ADDR		(0x43100048)
#define NDDB_DMA_ADDR		(0x43100040)

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	__raw_writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	__raw_readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_DBERR	= -3,
	ERR_BBERR	= -4,
};

enum {
	STATE_READY	= 0,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
};

struct pxa3xx_nand_info {
	struct nand_chip	nand_chip;

	struct platform_device	 *pdev;
	const struct pxa3xx_nand_flash *flash_info;

	struct clk		*clk;
	void __iomem		*mmio_base;

	unsigned int		buf_start;
	unsigned int		buf_count;

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	dma_addr_t		data_buff_phys;
	size_t			data_buff_size;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	uint32_t		reg_ndcr;

	/* saved column/page_addr during CMD_SEQIN */
	int			seqin_column;
	int			seqin_page_addr;

	/* state related to the current command */
	unsigned int		state;

	int			use_ecc;	/* use HW ECC ? */
	int			use_dma;	/* use DMA ? */

	size_t			data_size;	/* data size in FIFO */
	int			retcode;
	struct completion	cmd_complete;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;

	/* calculated from pxa3xx_nand_flash data */
	size_t		oob_size;
	size_t		read_id_bytes;

	unsigned int	col_addr_cycles;
	unsigned int	row_addr_cycles;
};

static int use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");

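/*
 * Illustrative usage: DMA can be disabled at module load time with
 * "modprobe pxa3xx_nand use_dma=0", or with "pxa3xx_nand.use_dma=0" on the
 * kernel command line when the driver is built in; the parameter is
 * read-only at run time (0444).
 */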
/*
 * Default NAND flash controller configuration setup by the
 * bootloader. This configuration is used only when pdata->keep_config is set
 */
static struct pxa3xx_nand_timing default_timing;
static struct pxa3xx_nand_flash default_flash;

static struct pxa3xx_nand_cmdset smallpage_cmdset = {
	.read1		= 0x0000,
	.read2		= 0x0050,
	.program	= 0x1080,
	.read_status	= 0x0070,
	.read_id	= 0x0090,
	.erase		= 0xD060,
	.reset		= 0x00FF,
	.lock		= 0x002A,
	.unlock		= 0x2423,
	.lock_status	= 0x007A,
};

static struct pxa3xx_nand_cmdset largepage_cmdset = {
	.read1		= 0x3000,
	.read2		= 0x0050,
	.program	= 0x1080,
	.read_status	= 0x0070,
	.read_id	= 0x0090,
	.erase		= 0xD060,
	.reset		= 0x00FF,
	.lock		= 0x002A,
	.unlock		= 0x2423,
	.lock_status	= 0x007A,
};

#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
static struct pxa3xx_nand_timing samsung512MbX16_timing = {
	.tCH	= 10,
	.tCS	= 0,
	.tWH	= 20,
	.tWP	= 40,
	.tRH	= 30,
	.tRP	= 40,
	.tR	= 11123,
	.tWHR	= 110,
	.tAR	= 10,
};

static struct pxa3xx_nand_flash samsung512MbX16 = {
	.timing		= &samsung512MbX16_timing,
	.cmdset		= &smallpage_cmdset,
	.page_per_block	= 32,
	.page_size	= 512,
	.flash_width	= 16,
	.dfc_width	= 16,
	.num_blocks	= 4096,
	.chip_id	= 0x46ec,
};

static struct pxa3xx_nand_timing micron_timing = {
	.tCH	= 10,
	.tCS	= 25,
	.tWH	= 15,
	.tWP	= 25,
	.tRH	= 15,
	.tRP	= 25,
	.tR	= 25000,
	.tWHR	= 60,
	.tAR	= 10,
};

static struct pxa3xx_nand_flash micron1GbX8 = {
	.timing		= &micron_timing,
	.cmdset		= &largepage_cmdset,
	.page_per_block	= 64,
	.page_size	= 2048,
	.flash_width	= 8,
	.dfc_width	= 8,
	.num_blocks	= 1024,
	.chip_id	= 0xa12c,
};

static struct pxa3xx_nand_flash micron1GbX16 = {
	.timing		= &micron_timing,
	.cmdset		= &largepage_cmdset,
	.page_per_block	= 64,
	.page_size	= 2048,
	.flash_width	= 16,
	.dfc_width	= 16,
	.num_blocks	= 1024,
	.chip_id	= 0xb12c,
};

static struct pxa3xx_nand_timing stm2GbX16_timing = {
	.tCH = 10,
	.tCS = 35,
	.tWH = 15,
	.tWP = 25,
	.tRH = 15,
	.tRP = 25,
	.tR = 25000,
	.tWHR = 60,
	.tAR = 10,
};

static struct pxa3xx_nand_flash stm2GbX16 = {
	.timing = &stm2GbX16_timing,
	.cmdset	= &largepage_cmdset,
	.page_per_block = 64,
	.page_size = 2048,
	.flash_width = 16,
	.dfc_width = 16,
	.num_blocks = 2048,
	.chip_id = 0xba20,
};

static struct pxa3xx_nand_flash *builtin_flash_types[] = {
	&samsung512MbX16,
	&micron1GbX8,
	&micron1GbX16,
	&stm2GbX16,
};
#endif /* CONFIG_MTD_NAND_PXA3xx_BUILTIN */

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

#define tCH_NDTR0(r)	(((r) >> 19) & 0x7)
#define tCS_NDTR0(r)	(((r) >> 16) & 0x7)
#define tWH_NDTR0(r)	(((r) >> 11) & 0x7)
#define tWP_NDTR0(r)	(((r) >> 8) & 0x7)
#define tRH_NDTR0(r)	(((r) >> 3) & 0x7)
#define tRP_NDTR0(r)	(((r) >> 0) & 0x7)

#define tR_NDTR1(r)	(((r) >> 16) & 0xffff)
#define tWHR_NDTR1(r)	(((r) >> 4) & 0xf)
#define tAR_NDTR1(r)	(((r) >> 0) & 0xf)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) - 1)

/* convert nand flash controller clock cycles to nano-seconds */
#define cycle2ns(c, clk)	((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
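/*
 * Worked example, assuming a 156 MHz NAND controller clock (so that
 * clk / 1000000 == 156):
 *
 *   ns2cycle(40, clk) = 40 * 156 / 1000 - 1 = 6 - 1 = 5
 *
 * i.e. a 40 ns tWP is programmed as 5; the register fields hold
 * (cycles - 1), which is why cycle2ns() adds the 1 back when decoding the
 * values left by the bootloader.
 */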
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
				   const struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

#define WAIT_EVENT_TIMEOUT	10

static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
{
	int timeout = WAIT_EVENT_TIMEOUT;
	uint32_t ndsr;

	while (timeout--) {
		ndsr = nand_readl(info, NDSR) & NDSR_MASK;
		if (ndsr & event) {
			nand_writel(info, NDSR, ndsr);
			return 0;
		}
		udelay(10);
	}

	return -ETIMEDOUT;
}

static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
			uint16_t cmd, int column, int page_addr)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;

	/* calculate data size */
	switch (f->page_size) {
	case 2048:
		info->data_size = (info->use_ecc) ? 2088 : 2112;
		break;
	case 512:
		info->data_size = (info->use_ecc) ? 520 : 528;
		break;
	default:
		return -EINVAL;
	}

	/* generate values for NDCBx registers */
	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb1 = 0;
	info->ndcb2 = 0;
	info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles);

	if (info->col_addr_cycles == 2) {
		/* large block, 2 cycles for column address
		 * row address starts from 3rd cycle
		 */
		info->ndcb1 |= page_addr << 16;
		if (info->row_addr_cycles == 3)
			info->ndcb2 = (page_addr >> 16) & 0xff;
	} else
		/* small block, 1 cycle for column address
		 * row address starts from the 2nd cycle
		 */
		info->ndcb1 = page_addr << 8;

	if (cmd == cmdset->program)
		info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;

	return 0;
}
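/*
 * Worked example (illustrative): a large-page read prepared by the function
 * above with col_addr_cycles == 2, row_addr_cycles == 3, column == 0 and
 * page_addr == 0x010203, using the large-page read1 command (0x3000), yields
 *
 *   ndcb0 = 0x3000 | NDCB0_DBC | NDCB0_ADDR_CYC(5) = 0x000d3000
 *   ndcb1 = page_addr << 16                        = 0x02030000
 *   ndcb2 = (page_addr >> 16) & 0xff               = 0x00000001
 *
 * i.e. address cycles 1-2 carry the (zero) column and cycles 3-5 carry the
 * page address, least-significant byte first.
 */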

static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
			uint16_t cmd, int page_addr)
{
	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
	info->ndcb1 = page_addr;
	info->ndcb2 = 0;
	return 0;
}

static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
{
	const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;

	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb1 = 0;
	info->ndcb2 = 0;

	if (cmd == cmdset->read_id) {
		info->ndcb0 |= NDCB0_CMD_TYPE(3);
		info->data_size = 8;
	} else if (cmd == cmdset->read_status) {
		info->ndcb0 |= NDCB0_CMD_TYPE(4);
		info->data_size = 8;
	} else if (cmd == cmdset->reset || cmd == cmdset->lock ||
		   cmd == cmdset->unlock) {
		info->ndcb0 |= NDCB0_CMD_TYPE(5);
	} else
		return -EINVAL;

	return 0;
}

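/*
 * The low twelve bits of NDCR are interrupt mask bits (one per NDSR event):
 * a set bit masks the event, a cleared bit enables it.  Hence enable_int()
 * clears bits and disable_int() sets them.
 */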
static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

/* NOTE: NDCR_ND_RUN must be set before the command buffer is written;
 * otherwise the controller does not execute the command.
 */
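/*
 * The command buffer is loaded by three back-to-back writes to the NDCB0
 * offset: the controller latches them as NDCB0, NDCB1 and NDCB2 in turn,
 * which is why write_cmd() below writes the same register three times.
 */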
static int write_cmd(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);

	ndcr = info->reg_ndcr;

	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
	ndcr |= NDCR_ND_RUN;

	nand_writel(info, NDCR, ndcr);

	if (wait_for_event(info, NDSR_WRCMDREQ)) {
		printk(KERN_ERR "timed out writing command\n");
		return -ETIMEDOUT;
	}

	nand_writel(info, NDCB0, info->ndcb0);
	nand_writel(info, NDCB0, info->ndcb1);
	nand_writel(info, NDCB0, info->ndcb2);
	return 0;
}

static int handle_data_pio(struct pxa3xx_nand_info *info)
{
	int ret, timeout = CHIP_DELAY_TIMEOUT;

	switch (info->state) {
	case STATE_PIO_WRITING:
		/* __raw_writesl() counts 32-bit words, data_size is in bytes */
		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));

		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);

		ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
		if (!ret) {
			printk(KERN_ERR "program command timed out\n");
			return -1;
		}
		break;
	case STATE_PIO_READING:
		/* __raw_readsl() counts 32-bit words, data_size is in bytes */
		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		break;
	default:
		printk(KERN_ERR "%s: invalid state %d\n", __func__,
				info->state);
		return -EINVAL;
	}

	info->state = STATE_READY;
	return 0;
}

static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len = ALIGN(info->data_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	if (dir_out) {
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = NDDB_DMA_ADDR;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
	} else {
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = NDDB_DMA_ADDR;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}

static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
		complete(&info->cmd_complete);
	}

	if (info->state == STATE_DMA_WRITING) {
		info->state = STATE_DMA_DONE;
		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
	} else {
		info->state = STATE_READY;
		complete(&info->cmd_complete);
	}
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status;

	status = nand_readl(info, NDSR);

	if (status & (NDSR_RDDREQ | NDSR_DBERR)) {
		if (status & NDSR_DBERR)
			info->retcode = ERR_DBERR;

		disable_int(info, NDSR_RDDREQ | NDSR_DBERR);

		if (info->use_dma) {
			info->state = STATE_DMA_READING;
			start_data_dma(info, 0);
		} else {
			info->state = STATE_PIO_READING;
			complete(&info->cmd_complete);
		}
	} else if (status & NDSR_WRDREQ) {
		disable_int(info, NDSR_WRDREQ);
		if (info->use_dma) {
			info->state = STATE_DMA_WRITING;
			start_data_dma(info, 1);
		} else {
			info->state = STATE_PIO_WRITING;
			complete(&info->cmd_complete);
		}
	} else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
		if (status & NDSR_CS0_BBD)
			info->retcode = ERR_BBERR;

		disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
		info->state = STATE_READY;
		complete(&info->cmd_complete);
	}
	nand_writel(info, NDSR, status);
	return IRQ_HANDLED;
}

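/*
 * Overall command flow (a sketch of how the routines below and the two
 * interrupt handlers above cooperate):
 *
 *   pxa3xx_nand_do_cmd()
 *     -> write_cmd(): set ND_RUN, poll for WRCMDREQ, load NDCB0..NDCB2
 *     -> enable the per-command event and sleep on cmd_complete
 *   pxa3xx_nand_irq()
 *     -> RDDREQ/WRDREQ: start DMA via start_data_dma(), or switch to a
 *        PIO state and wake the waiter so handle_data_pio() runs
 *     -> CS0_CMDD/CS0_BBD: command done (or bad block), wake the waiter
 *   pxa3xx_nand_data_dma_irq()
 *     -> read finished: wake the waiter; write finished: re-arm CS0_CMDD
 */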
static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
{
	uint32_t ndcr;
	int ret, timeout = CHIP_DELAY_TIMEOUT;

	if (write_cmd(info)) {
		info->retcode = ERR_SENDCMD;
		goto fail_stop;
	}

	info->state = STATE_CMD_HANDLE;

	enable_int(info, event);

	ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
	if (!ret) {
		printk(KERN_ERR "command execution timed out\n");
		info->retcode = ERR_SENDCMD;
		goto fail_stop;
	}

	if (info->use_dma == 0 && info->data_size > 0)
		if (handle_data_pio(info))
			goto fail_stop;

	return 0;

fail_stop:
	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
	udelay(10);
	return -ETIMEDOUT;
}

static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
				int column, int page_addr)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	const struct pxa3xx_nand_flash *flash_info = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
	int ret;

	info->use_dma = (use_dma) ? 1 : 0;
	info->use_ecc = 0;
	info->data_size = 0;
	info->state = STATE_READY;

	init_completion(&info->cmd_complete);

	switch (command) {
	case NAND_CMD_READOOB:
		/* disable HW ECC to get all the OOB data */
		info->buf_count = mtd->writesize + mtd->oobsize;
		info->buf_start = mtd->writesize + column;

		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);

		/* We only need the OOB here, so data-area ECC errors do not matter */
		if (info->retcode == ERR_DBERR)
			info->retcode = ERR_NONE;
		break;

	case NAND_CMD_READ0:
		info->use_ecc = 1;
		info->retcode = ERR_NONE;
		info->buf_start = column;
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);

		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);

		if (info->retcode == ERR_DBERR) {
			/* for a blank page (all 0xff) the controller computes
			 * an ECC of 0, which differs from the ECC stored in
			 * the OOB; ignore such double-bit errors
			 */
			if (is_buf_blank(info->data_buff, mtd->writesize))
				info->retcode = ERR_NONE;
		}
		break;
	case NAND_CMD_SEQIN:
		info->buf_start = column;
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xff, info->buf_count);

		/* save column/page_addr for next CMD_PAGEPROG */
		info->seqin_column = column;
		info->seqin_page_addr = page_addr;
		break;
	case NAND_CMD_PAGEPROG:
		info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;

		if (prepare_read_prog_cmd(info, cmdset->program,
				info->seqin_column, info->seqin_page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
		break;
	case NAND_CMD_ERASE1:
		if (prepare_erase_cmd(info, cmdset->erase, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
		break;
	case NAND_CMD_ERASE2:
		break;
	case NAND_CMD_READID:
	case NAND_CMD_STATUS:
		info->use_dma = 0;	/* force PIO read */
		info->buf_start = 0;
		info->buf_count = (command == NAND_CMD_READID) ?
				info->read_id_bytes : 1;

		if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
				cmdset->read_id : cmdset->read_status))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
		break;
	case NAND_CMD_RESET:
		if (prepare_other_cmd(info, cmdset->reset))
			break;

		ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
		if (ret == 0) {
			int timeout = 2;
			uint32_t ndcr;

			while (timeout--) {
				if (nand_readl(info, NDSR) & NDSR_RDY)
					break;
				msleep(10);
			}

			ndcr = nand_readl(info, NDCR);
			nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
		}
		break;
	default:
		printk(KERN_ERR "unsupported command\n");
		break;
	}

	if (info->retcode == ERR_DBERR) {
		printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
		info->retcode = ERR_NONE;
	}
}

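/*
 * The MTD/NAND core drives pxa3xx_nand_cmdfunc() above in fixed sequences:
 * a page read is READ0 followed by read_buf()/read_byte() calls, and a page
 * write is SEQIN + write_buf() + PAGEPROG + waitfunc().  That is why SEQIN
 * only latches column/page_addr there and the actual transfer is issued from
 * the PAGEPROG case.
 */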
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has a new command just been issued? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	return 0;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_info *info = mtd->priv;

	/* pxa3xx_nand_do_cmd() has already waited for command completion */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else {
			/*
			 * any error makes it return 0x01, which tells the
			 * caller that the erase or write failed
			 */
			return 0x01;
		}
	}

	return 0;
}

static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	return;
}

static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
		const uint8_t *dat, uint8_t *ecc_code)
{
	return 0;
}

static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
		uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	/*
	 * Any error (ERR_SENDCMD, ERR_DBERR, ERR_DMABUSERR, ...) is reported
	 * back as an ECC error so that the caller knows the read failed; the
	 * driver distinguishes the individual errors, but nand_read_ecc()
	 * only looks at this function's return value.
	 */
	if (info->retcode != ERR_NONE)
		return -1;

	return 0;
}

static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
	uint32_t ndcr;
	uint8_t  id_buff[8];

	if (prepare_other_cmd(info, cmdset->read_id)) {
		printk(KERN_ERR "failed to prepare command\n");
		return -EINVAL;
	}

	/* Send command */
	if (write_cmd(info))
		goto fail_timeout;

	/* Wait for the read data request (RDDREQ) */
	if (wait_for_event(info, NDSR_RDDREQ))
		goto fail_timeout;

	__raw_readsl(info->mmio_base + NDDB, id_buff, 2);
	*id = id_buff[0] | (id_buff[1] << 8);
	return 0;

fail_timeout:
	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
	udelay(10);
	return -ETIMEDOUT;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	uint32_t ndcr = 0x00000FFF; /* disable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512)
		return -EINVAL;

	if (f->flash_width != 16 && f->flash_width != 8)
		return -EINVAL;

	/* calculate flash information */
	info->oob_size = (f->page_size == 2048) ? 64 : 16;
	info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

	/* calculate addressing information */
	info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	if (f->num_blocks * f->page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(info, f->timing);
	info->flash_info = f;
	return 0;
}

static void pxa3xx_nand_detect_timing(struct pxa3xx_nand_info *info,
				      struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0 = nand_readl(info, NDTR0CS0);
	uint32_t ndtr1 = nand_readl(info, NDTR1CS0);

	t->tCH = cycle2ns(tCH_NDTR0(ndtr0), nand_clk);
	t->tCS = cycle2ns(tCS_NDTR0(ndtr0), nand_clk);
	t->tWH = cycle2ns(tWH_NDTR0(ndtr0), nand_clk);
	t->tWP = cycle2ns(tWP_NDTR0(ndtr0), nand_clk);
	t->tRH = cycle2ns(tRH_NDTR0(ndtr0), nand_clk);
	t->tRP = cycle2ns(tRP_NDTR0(ndtr0), nand_clk);

	t->tR = cycle2ns(tR_NDTR1(ndtr1), nand_clk);
	t->tWHR = cycle2ns(tWHR_NDTR1(ndtr1), nand_clk);
	t->tAR = cycle2ns(tAR_NDTR1(ndtr1), nand_clk);
}

static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr = nand_readl(info, NDCR);
	struct nand_flash_dev *type = NULL;
	uint32_t id = -1;
	int i;

	default_flash.page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32;
	default_flash.page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	default_flash.flash_width = ndcr & NDCR_DWIDTH_M ? 16 : 8;
	default_flash.dfc_width = ndcr & NDCR_DWIDTH_C ? 16 : 8;

	if (default_flash.page_size == 2048)
		default_flash.cmdset = &largepage_cmdset;
	else
		default_flash.cmdset = &smallpage_cmdset;

	/* set the info fields needed by __readid() */
	info->flash_info = &default_flash;
	info->read_id_bytes = (default_flash.page_size == 2048) ? 4 : 2;
	info->reg_ndcr = ndcr;

	if (__readid(info, &id))
		return -ENODEV;

	/* Lookup the flash id */
	id = (id >> 8) & 0xff;		/* device id is byte 2 */
	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
		if (id == nand_flash_ids[i].id) {
			type = &nand_flash_ids[i];
			break;
		}
	}

	if (!type)
		return -ENODEV;

	/* fill the missing flash information */
	i = __ffs(default_flash.page_per_block * default_flash.page_size);
	default_flash.num_blocks = type->chipsize << (20 - i);

	info->oob_size = (default_flash.page_size == 2048) ? 64 : 16;

	/* calculate addressing information */
	info->col_addr_cycles = (default_flash.page_size == 2048) ? 2 : 1;

	if (default_flash.num_blocks * default_flash.page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	pxa3xx_nand_detect_timing(info, &default_timing);
	default_flash.timing = &default_timing;

	return 0;
}

static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_platform_data *pdata)
{
	const struct pxa3xx_nand_flash *f;
	uint32_t id = -1;
	int i;

	if (pdata->keep_config)
		if (pxa3xx_nand_detect_config(info) == 0)
			return 0;

	for (i = 0; i < pdata->num_flash; ++i) {
		f = pdata->flash + i;

		if (pxa3xx_nand_config_flash(info, f))
			continue;

		if (__readid(info, &id))
			continue;

		if (id == f->chip_id)
			return 0;
	}

#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
	for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {

		f = builtin_flash_types[i];

		if (pxa3xx_nand_config_flash(info, f))
			continue;

		if (__readid(info, &id))
			continue;

		if (id == f->chip_id)
			return 0;
	}
#endif

	dev_warn(&info->pdev->dev,
		 "failed to detect configured nand flash; found %04x instead\n",
		 id);
	return -ENODEV;
}

/* the maximum possible buffer size for large page with OOB data
 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
 * data buffer and the DMA descriptor
 */
#define MAX_BUFF_SIZE	PAGE_SIZE

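/*
 * Layout of the single page allocated by pxa3xx_nand_init_buff() below when
 * DMA is in use (a sketch):
 *
 *   data_buff                                data_buff + MAX_BUFF_SIZE
 *   |<---- page data + OOB (up to 2112) ---->| ... |<- pxa_dma_desc ->|
 *
 * i.e. the DMA descriptor is carved out of the tail of the same buffer at
 * offset MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc).
 */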
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_buff_size = MAX_BUFF_SIZE;
	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	return 0;
}

static struct nand_ecclayout hw_smallpage_ecclayout = {
	.eccbytes = 6,
	.eccpos = {8, 9, 10, 11, 12, 13 },
	.oobfree = { {2, 6} }
};

static struct nand_ecclayout hw_largepage_ecclayout = {
	.eccbytes = 24,
	.eccpos = {
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 38} }
};

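/*
 * Resulting 64-byte large-page OOB map: bytes 0-1 are left untouched for
 * the factory bad block marker, bytes 2-39 are free for upper layers and
 * bytes 40-63 hold the 24 controller-generated ECC bytes.  The small-page
 * layout follows the same pattern with 6 ECC bytes at offsets 8-13 and
 * bytes 2-7 free.
 */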
static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
				 struct pxa3xx_nand_info *info)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	struct nand_chip *this = &info->nand_chip;

	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;

	this->waitfunc		= pxa3xx_nand_waitfunc;
	this->select_chip	= pxa3xx_nand_select_chip;
	this->dev_ready		= pxa3xx_nand_dev_ready;
	this->cmdfunc		= pxa3xx_nand_cmdfunc;
	this->read_word		= pxa3xx_nand_read_word;
	this->read_byte		= pxa3xx_nand_read_byte;
	this->read_buf		= pxa3xx_nand_read_buf;
	this->write_buf		= pxa3xx_nand_write_buf;
	this->verify_buf	= pxa3xx_nand_verify_buf;

	this->ecc.mode		= NAND_ECC_HW;
	this->ecc.hwctl		= pxa3xx_nand_ecc_hwctl;
	this->ecc.calculate	= pxa3xx_nand_ecc_calculate;
	this->ecc.correct	= pxa3xx_nand_ecc_correct;
	this->ecc.size		= f->page_size;

	if (f->page_size == 2048)
		this->ecc.layout = &hw_largepage_ecclayout;
	else
		this->ecc.layout = &hw_smallpage_ecclayout;

	this->chip_delay = 25;
}

static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct nand_chip *this;
	struct mtd_info *mtd;
	struct resource *r;
	int ret = 0, irq;

	pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
			GFP_KERNEL);
	if (!mtd) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	info = (struct pxa3xx_nand_info *)(&mtd[1]);
	info->pdev = pdev;

	this = &info->nand_chip;
	mtd->priv = info;
	mtd->owner = THIS_MODULE;

	info->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		ret = PTR_ERR(info->clk);
		goto fail_free_mtd;
	}
	clk_enable(info->clk);

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for data DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_dat = r->start;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for command DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_cmd = r->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IO memory resource defined\n");
		ret = -ENODEV;
		goto fail_put_clk;
	}

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (r == NULL) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		ret = -EBUSY;
		goto fail_put_clk;
	}

	info->mmio_base = ioremap(r->start, resource_size(r));
	if (info->mmio_base == NULL) {
		dev_err(&pdev->dev, "ioremap() failed\n");
		ret = -ENODEV;
		goto fail_free_res;
	}

	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		goto fail_free_io;

	ret = request_irq(IRQ_NAND, pxa3xx_nand_irq, IRQF_DISABLED,
				pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	ret = pxa3xx_nand_detect_flash(info, pdata);
	if (ret) {
		dev_err(&pdev->dev, "failed to detect flash\n");
		ret = -ENODEV;
		goto fail_free_irq;
	}

	pxa3xx_nand_init_mtd(mtd, info);

	platform_set_drvdata(pdev, mtd);

	if (nand_scan(mtd, 1)) {
		dev_err(&pdev->dev, "failed to scan nand\n");
		ret = -ENXIO;
		goto fail_free_irq;
	}

	return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);

fail_free_irq:
	free_irq(IRQ_NAND, info);
fail_free_buf:
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->data_buff_size,
			info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);
fail_free_io:
	iounmap(info->mmio_base);
fail_free_res:
	release_mem_region(r->start, resource_size(r));
fail_put_clk:
	clk_disable(info->clk);
	clk_put(info->clk);
fail_free_mtd:
	kfree(mtd);
	return ret;
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;
	struct resource *r;

	platform_set_drvdata(pdev, NULL);

	del_mtd_device(mtd);
	del_mtd_partitions(mtd);
	free_irq(IRQ_NAND, info);
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		/* matches the dma_alloc_coherent() in pxa3xx_nand_init_buff() */
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);

	iounmap(info->mmio_base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	clk_disable(info->clk);
	clk_put(info->clk);

	kfree(mtd);
	return 0;
}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;

	if (info->state != STATE_READY) {
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	return 0;
}

static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;

	clk_enable(info->clk);

	return pxa3xx_nand_config_flash(info, info->flash_info);
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

static int __init pxa3xx_nand_init(void)
{
	return platform_driver_register(&pxa3xx_nand_driver);
}
module_init(pxa3xx_nand_init);

static void __exit pxa3xx_nand_exit(void)
{
	platform_driver_unregister(&pxa3xx_nand_driver);
}
module_exit(pxa3xx_nand_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
