pxa3xx_nand.c revision c1f82478c535f1de9fecf3cafa82014f312d5d4e
1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/interrupt.h>
15#include <linux/platform_device.h>
16#include <linux/dma-mapping.h>
17#include <linux/delay.h>
18#include <linux/clk.h>
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/nand.h>
21#include <linux/mtd/partitions.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/slab.h>
25
26#include <mach/dma.h>
27#include <plat/pxa3xx_nand.h>
28
/* Max time to wait for a command to complete (jiffies) */
#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE   	(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_CLR_ECC		(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)

/* NDSR status bits; the low NDSR_MASK bits of NDCR are the corresponding
 * interrupt *mask* bits (set = masked) — see enable_int()/disable_int(). */
#define NDSR_MASK		(0xfff)
#define NDSR_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_DBERR		(0x1 << 4)
#define NDSR_SBERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

/* NDCB0 command buffer word 0 fields */
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	__raw_writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	__raw_readl((info)->mmio_base + (off))
95
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error reported in DCSR */
	ERR_SENDCMD	= -2,	/* timed out issuing the command */
	ERR_DBERR	= -3,	/* double-bit (uncorrectable) ECC error */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_SBERR	= -5,	/* single-bit (corrected) ECC error */
};

/* driver command/transfer state machine, tracked in info->state */
enum {
	STATE_READY	= 0,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
};
115
/* Per-controller driver state; allocated together with the mtd_info in
 * probe and reachable via mtd->priv. */
struct pxa3xx_nand_info {
	struct nand_chip	nand_chip;

	struct platform_device	 *pdev;
	const struct pxa3xx_nand_flash *flash_info;

	struct clk		*clk;
	void __iomem		*mmio_base;	/* mapped controller registers */
	unsigned long		mmio_phys;	/* physical base, for DMA to NDDB */

	/* cursor/limit into data_buff for the read_byte/read_buf callbacks */
	unsigned int 		buf_start;
	unsigned int		buf_count;

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;	/* staging buffer (page + OOB) */
	dma_addr_t 		data_buff_phys;
	size_t			data_buff_size;
	int 			data_dma_ch;
	struct pxa_dma_desc	*data_desc;	/* single descriptor, lives at end of data_buff */
	dma_addr_t 		data_desc_addr;

	uint32_t		reg_ndcr;	/* baseline NDCR value for write_cmd() */

	/* saved column/page_addr during CMD_SEQIN */
	int			seqin_column;
	int			seqin_page_addr;

	/* relate to the command */
	unsigned int		state;

	int			use_ecc;	/* use HW ECC ? */
	int			use_dma;	/* use DMA ? */

	size_t			data_size;	/* data size in FIFO */
	int 			retcode;	/* ERR_* outcome of last command */
	struct completion 	cmd_complete;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;

	/* calculated from pxa3xx_nand_flash data */
	size_t		oob_size;
	size_t		read_id_bytes;

	unsigned int	col_addr_cycles;
	unsigned int	row_addr_cycles;
};
168
169static int use_dma = 1;
170module_param(use_dma, bool, 0444);
171MODULE_PARM_DESC(use_dma, "enable DMA for data transfering to/from NAND HW");
172
173/*
174 * Default NAND flash controller configuration setup by the
175 * bootloader. This configuration is used only when pdata->keep_config is set
176 */
/* Filled in by pxa3xx_nand_detect_config()/detect_timing() when
 * pdata->keep_config asks us to trust the bootloader's setup. */
static struct pxa3xx_nand_timing default_timing;
static struct pxa3xx_nand_flash default_flash;
/* Packed 16-bit commands: low byte is command 1, high byte command 2
 * (NDCB0_DBC is set when the high byte is non-zero — see the
 * prepare_*_cmd() helpers). */
static struct pxa3xx_nand_cmdset default_cmdset = {
	.read1		= 0x3000,
	.read2		= 0x0050,
	.program	= 0x1080,
	.read_status	= 0x0070,
	.read_id	= 0x0090,
	.erase		= 0xD060,
	.reset		= 0x00FF,
	.lock		= 0x002A,
	.unlock		= 0x2423,
	.lock_status	= 0x007A,
};
191
/* Timing sets in nanoseconds; field order presumably matches
 * pxa3xx_nand_set_timing()'s consumption (tCH, tCS, tWH, tWP, tRH,
 * tRP, tR, tWHR, tAR) — confirm against plat/pxa3xx_nand.h. */
static struct pxa3xx_nand_timing timing[] = {
	{ 10,  0, 20, 40, 30, 40, 11123, 110, 10, },
	{ 10, 25, 15, 25, 15, 30, 25000,  60, 10, },
	{ 10, 35, 15, 25, 15, 25, 25000,  60, 10, },
};

/* Known chips, matched by ID in pxa3xx_nand_detect_flash().  Columns
 * appear to be: chip_id, page_per_block, page_size, flash_width,
 * dfc_width, num_blocks, cmdset, timing — verify against the struct
 * declaration in plat/pxa3xx_nand.h. */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec,  32,  512, 16, 16, 4096, &default_cmdset, &timing[0] },
	{ 0xdaec,  64, 2048,  8,  8, 2048, &default_cmdset, &timing[0] },
	{ 0xd7ec, 128, 4096,  8,  8, 8192, &default_cmdset, &timing[0] },
	{ 0xa12c,  64, 2048,  8,  8, 1024, &default_cmdset, &timing[1] },
	{ 0xb12c,  64, 2048, 16, 16, 1024, &default_cmdset, &timing[1] },
	{ 0xdc2c,  64, 2048,  8,  8, 4096, &default_cmdset, &timing[1] },
	{ 0xcc2c,  64, 2048, 16, 16, 4096, &default_cmdset, &timing[1] },
	{ 0xba20,  64, 2048, 16, 16, 2048, &default_cmdset, &timing[2] },
};
208
/* Pack clamped cycle counts into the NDTR0/NDTR1 timing registers */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* ...and the inverse: extract cycle counts from register values */
#define tCH_NDTR0(r)	(((r) >> 19) & 0x7)
#define tCS_NDTR0(r)	(((r) >> 16) & 0x7)
#define tWH_NDTR0(r)	(((r) >> 11) & 0x7)
#define tWP_NDTR0(r)	(((r) >> 8) & 0x7)
#define tRH_NDTR0(r)	(((r) >> 3) & 0x7)
#define tRP_NDTR0(r)	(((r) >> 0) & 0x7)

#define tR_NDTR1(r)	(((r) >> 16) & 0xffff)
#define tWHR_NDTR1(r)	(((r) >> 4) & 0xf)
#define tAR_NDTR1(r)	(((r) >> 0) & 0xf)

/* convert nano-seconds to nand flash controller clock cycles
 * NOTE(review): integer division truncates toward zero here, which can
 * round a requested timing *down* — confirm this is acceptable for the
 * supported clock rates. */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

/* convert nand flash controller clock cycles to nano-seconds */
#define cycle2ns(c, clk)	((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
236
/* Program the CS0 timing registers (NDTR0CS0/NDTR1CS0) from a timing
 * table expressed in nanoseconds, converting to controller clock
 * cycles at the current clk rate. */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
				   const struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
257
#define WAIT_EVENT_TIMEOUT	10

/* Busy-poll NDSR (up to WAIT_EVENT_TIMEOUT * 10us) for any bit in
 * @event.  On success the observed status bits are written back to
 * NDSR (write-1-to-clear) and 0 is returned; -ETIMEDOUT otherwise. */
static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
{
	int timeout = WAIT_EVENT_TIMEOUT;
	uint32_t ndsr;

	while (timeout--) {
		ndsr = nand_readl(info, NDSR) & NDSR_MASK;
		if (ndsr & event) {
			/* acknowledge/clear the status bits we saw */
			nand_writel(info, NDSR, ndsr);
			return 0;
		}
		udelay(10);
	}

	return -ETIMEDOUT;
}
276
/* Build the NDCB0..2 command words and FIFO data size for a page read
 * or page program.  @cmd is a packed 16-bit command (second command
 * byte in bits 15..8 triggers NDCB0_DBC).  Returns -EINVAL for an
 * unsupported page size. */
static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
			uint16_t cmd, int column, int page_addr)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;

	/* calculate data size: with HW ECC the controller transfers
	 * main data + ECC bytes; without it, main data + full spare */
	switch (f->page_size) {
	case 2048:
		info->data_size = (info->use_ecc) ? 2088 : 2112;
		break;
	case 512:
		info->data_size = (info->use_ecc) ? 520 : 528;
		break;
	default:
		return -EINVAL;
	}

	/* generate values for NDCBx registers */
	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb1 = 0;
	info->ndcb2 = 0;
	info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles);

	if (info->col_addr_cycles == 2) {
		/* large block, 2 cycles for column address
		 * row address starts from 3rd cycle
		 */
		info->ndcb1 |= page_addr << 16;
		if (info->row_addr_cycles == 3)
			info->ndcb2 = (page_addr >> 16) & 0xff;
	} else
		/* small block, 1 cycles for column address
		 * row address starts from 2nd cycle
		 */
		info->ndcb1 = page_addr << 8;

	/* program commands additionally need type 1 + auto read-status */
	if (cmd == cmdset->program)
		info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;

	return 0;
}
319
320static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
321			uint16_t cmd, int page_addr)
322{
323	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
324	info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
325	info->ndcb1 = page_addr;
326	info->ndcb2 = 0;
327	return 0;
328}
329
/* Build NDCB0..2 for the non-addressed commands (READ ID, READ STATUS,
 * RESET, LOCK, UNLOCK), selecting the controller command type and the
 * expected FIFO data size.  Returns -EINVAL for anything else. */
static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
{
	const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;

	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb1 = 0;
	info->ndcb2 = 0;

	if (cmd == cmdset->read_id) {
		info->ndcb0 |= NDCB0_CMD_TYPE(3);
		info->data_size = 8;
	} else if (cmd == cmdset->read_status) {
		info->ndcb0 |= NDCB0_CMD_TYPE(4);
		info->data_size = 8;
	} else if (cmd == cmdset->reset || cmd == cmdset->lock ||
		   cmd == cmdset->unlock) {
		/* type 5: command only, no data phase */
		info->ndcb0 |= NDCB0_CMD_TYPE(5);
	} else
		return -EINVAL;

	return 0;
}
352
/* Enable the interrupts in @int_mask.  Note the inverted polarity:
 * the NDCR interrupt bits are *mask* bits, so enabling means clearing
 * them. */
static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}
360
/* Disable the interrupts in @int_mask by setting the corresponding
 * NDCR mask bits (see enable_int() for the polarity). */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
368
/* NOTE: it is a must to set ND_RUN firstly, then write command buffer
 * otherwise, it does not work
 */
/* Kick off a command: clear stale status, start the state machine
 * (ND_RUN, with ECC/DMA enables folded in from info), wait for the
 * controller to request the command words, then feed NDCB0..2.
 * Returns 0 or -ETIMEDOUT. */
static int write_cmd(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);

	ndcr = info->reg_ndcr;

	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
	ndcr |= NDCR_ND_RUN;

	nand_writel(info, NDCR, ndcr);

	if (wait_for_event(info, NDSR_WRCMDREQ)) {
		printk(KERN_ERR "timed out writing command\n");
		return -ETIMEDOUT;
	}

	/* All three command words go to the NDCB0 address; presumably
	 * the controller latches successive writes into NDCB0/1/2 —
	 * intentional, not a copy/paste bug. */
	nand_writel(info, NDCB0, info->ndcb0);
	nand_writel(info, NDCB0, info->ndcb1);
	nand_writel(info, NDCB0, info->ndcb2);
	return 0;
}
397
398static int handle_data_pio(struct pxa3xx_nand_info *info)
399{
400	int ret, timeout = CHIP_DELAY_TIMEOUT;
401
402	switch (info->state) {
403	case STATE_PIO_WRITING:
404		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
405				DIV_ROUND_UP(info->data_size, 4));
406
407		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
408
409		ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
410		if (!ret) {
411			printk(KERN_ERR "program command time out\n");
412			return -1;
413		}
414		break;
415	case STATE_PIO_READING:
416		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
417				DIV_ROUND_UP(info->data_size, 4));
418		break;
419	default:
420		printk(KERN_ERR "%s: invalid state %d\n", __func__,
421				info->state);
422		return -EINVAL;
423	}
424
425	info->state = STATE_READY;
426	return 0;
427}
428
/* Program and start the single-descriptor PXA DMA transfer between
 * data_buff and the controller data FIFO (NDDB).  @dir_out selects
 * memory->FIFO (write) vs FIFO->memory (read).  The transfer length
 * is rounded up to the 32-byte burst size. */
static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len = ALIGN(info->data_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	if (dir_out) {
		/* memory -> NDDB, target (FIFO) paces the flow */
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
	} else {
		/* NDDB -> memory, source (FIFO) paces the flow */
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
	}

	/* map the request line to our channel and go */
	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}
451
/* DMA channel completion handler.  Acknowledges the channel status;
 * on bus error records ERR_DMABUSERR and completes.  Otherwise a
 * finished write still has to wait for command-done (re-enable those
 * interrupts), while a finished read completes the command. */
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	/* read and write-back clears the channel status bits */
	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
		complete(&info->cmd_complete);
	}

	if (info->state == STATE_DMA_WRITING) {
		info->state = STATE_DMA_DONE;
		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
	} else {
		info->state = STATE_READY;
		complete(&info->cmd_complete);
	}
}
473
/* Controller interrupt handler: advances the state machine on data
 * requests (starting DMA or signalling the PIO path) and on
 * command-done / bad-block events, recording ECC errors in retcode.
 * Status is acknowledged (write-1-to-clear) on the way out. */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status;

	status = nand_readl(info, NDSR);

	if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) {
		/* read-data request; note any ECC error first */
		if (status & NDSR_DBERR)
			info->retcode = ERR_DBERR;
		else if (status & NDSR_SBERR)
			info->retcode = ERR_SBERR;

		disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);

		if (info->use_dma) {
			info->state = STATE_DMA_READING;
			start_data_dma(info, 0);
		} else {
			info->state = STATE_PIO_READING;
			complete(&info->cmd_complete);
		}
	} else if (status & NDSR_WRDREQ) {
		/* write-data request */
		disable_int(info, NDSR_WRDREQ);
		if (info->use_dma) {
			info->state = STATE_DMA_WRITING;
			start_data_dma(info, 1);
		} else {
			info->state = STATE_PIO_WRITING;
			complete(&info->cmd_complete);
		}
	} else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
		/* command finished (or bad block detected) on CS0 */
		if (status & NDSR_CS0_BBD)
			info->retcode = ERR_BBERR;

		disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
		info->state = STATE_READY;
		complete(&info->cmd_complete);
	}
	nand_writel(info, NDSR, status);
	return IRQ_HANDLED;
}
516
/* Issue the prepared NDCBx command and wait for @event to drive the
 * command to completion (via the IRQ handler).  In PIO mode, finish
 * the data phase here.  On any failure the controller is stopped
 * (ND_RUN cleared), retcode is set to ERR_SENDCMD and -ETIMEDOUT is
 * returned. */
static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
{
	uint32_t ndcr;
	int ret, timeout = CHIP_DELAY_TIMEOUT;

	if (write_cmd(info)) {
		info->retcode = ERR_SENDCMD;
		goto fail_stop;
	}

	info->state = STATE_CMD_HANDLE;

	enable_int(info, event);

	ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
	if (!ret) {
		printk(KERN_ERR "command execution timed out\n");
		info->retcode = ERR_SENDCMD;
		goto fail_stop;
	}

	/* PIO path: the IRQ handler only signalled the data request;
	 * do the actual FIFO transfer now */
	if (info->use_dma == 0 && info->data_size > 0)
		if (handle_data_pio(info))
			goto fail_stop;

	return 0;

fail_stop:
	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
	udelay(10);
	return -ETIMEDOUT;
}
550
551static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
552{
553	struct pxa3xx_nand_info *info = mtd->priv;
554	return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
555}
556
/* Return 1 if the first @len bytes of @buf are all 0xff (i.e. the
 * page reads as erased), 0 otherwise.  len == 0 counts as blank. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}
564
/* nand_chip cmdfunc callback: translate the generic NAND command set
 * into controller command sequences.  Results land in data_buff and
 * are consumed later via the read_byte/read_buf callbacks; errors are
 * tracked in info->retcode for waitfunc/ecc_correct. */
static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
				int column, int page_addr)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	const struct pxa3xx_nand_flash *flash_info = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
	int ret;

	/* reset per-command state */
	info->use_dma = (use_dma) ? 1 : 0;
	info->use_ecc = 0;
	info->data_size = 0;
	info->state = STATE_READY;

	init_completion(&info->cmd_complete);

	switch (command) {
	case NAND_CMD_READOOB:
		/* disable HW ECC to get all the OOB data */
		info->buf_count = mtd->writesize + mtd->oobsize;
		info->buf_start = mtd->writesize + column;
		memset(info->data_buff, 0xFF, info->buf_count);

		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);

		/* We read only the OOB, so a data ECC error does not matter */
		if (info->retcode == ERR_DBERR)
			info->retcode = ERR_NONE;
		break;

	case NAND_CMD_READ0:
		info->use_ecc = 1;
		info->retcode = ERR_NONE;
		info->buf_start = column;
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);

		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);

		if (info->retcode == ERR_DBERR) {
			/* for blank page (all 0xff), HW will calculate its ECC as
			 * 0, which is different from the ECC information within
			 * OOB, ignore such double bit errors
			 */
			if (is_buf_blank(info->data_buff, mtd->writesize))
				info->retcode = ERR_NONE;
		}
		break;
	case NAND_CMD_SEQIN:
		/* stage the buffer; the actual write happens at PAGEPROG */
		info->buf_start = column;
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xff, info->buf_count);

		/* save column/page_addr for next CMD_PAGEPROG */
		info->seqin_column = column;
		info->seqin_page_addr = page_addr;
		break;
	case NAND_CMD_PAGEPROG:
		/* OOB-only writes (column in the spare area) skip HW ECC */
		info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;

		if (prepare_read_prog_cmd(info, cmdset->program,
				info->seqin_column, info->seqin_page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
		break;
	case NAND_CMD_ERASE1:
		if (prepare_erase_cmd(info, cmdset->erase, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
		break;
	case NAND_CMD_ERASE2:
		/* handled entirely by ERASE1 above (AUTO_RS) */
		break;
	case NAND_CMD_READID:
	case NAND_CMD_STATUS:
		info->use_dma = 0;	/* force PIO read */
		info->buf_start = 0;
		info->buf_count = (command == NAND_CMD_READID) ?
				info->read_id_bytes : 1;

		if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
				cmdset->read_id : cmdset->read_status))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
		break;
	case NAND_CMD_RESET:
		if (prepare_other_cmd(info, cmdset->reset))
			break;

		ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
		if (ret == 0) {
			int timeout = 2;
			uint32_t ndcr;

			/* give the chip up to ~20ms to come back ready */
			while (timeout--) {
				if (nand_readl(info, NDSR) & NDSR_RDY)
					break;
				msleep(10);
			}

			ndcr = nand_readl(info, NDCR);
			nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
		}
		break;
	default:
		printk(KERN_ERR "non-supported command.\n");
		break;
	}

	/* any surviving double-bit error is reported once, then cleared */
	if (info->retcode == ERR_DBERR) {
		printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
		info->retcode = ERR_NONE;
	}
}
686
687static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
688{
689	struct pxa3xx_nand_info *info = mtd->priv;
690	char retval = 0xFF;
691
692	if (info->buf_start < info->buf_count)
693		/* Has just send a new command? */
694		retval = info->data_buff[info->buf_start++];
695
696	return retval;
697}
698
/* nand_chip read_word callback: return the next aligned 16-bit word
 * from data_buff, or 0xFFFF when unaligned or exhausted.
 * NOTE(review): the bound allows buf_start == buf_count - 1, which
 * would read one byte past buf_count; in practice buf_count is even
 * (page + OOB sizes) and data_buff is a full page, so this looks
 * benign — confirm. */
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}
710
711static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
712{
713	struct pxa3xx_nand_info *info = mtd->priv;
714	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
715
716	memcpy(buf, info->data_buff + info->buf_start, real_len);
717	info->buf_start += real_len;
718}
719
720static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
721		const uint8_t *buf, int len)
722{
723	struct pxa3xx_nand_info *info = mtd->priv;
724	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
725
726	memcpy(info->data_buff + info->buf_start, buf, real_len);
727	info->buf_start += real_len;
728}
729
/* nand_chip verify_buf callback: verification not implemented,
 * always report success. */
static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	return 0;
}
735
/* nand_chip select_chip callback: no-op — this driver only drives
 * chip-select 0 (see the NDSR_CS0_* handling in the IRQ path). */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
740
741static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
742{
743	struct pxa3xx_nand_info *info = mtd->priv;
744
745	/* pxa3xx_nand_send_command has waited for command complete */
746	if (this->state == FL_WRITING || this->state == FL_ERASING) {
747		if (info->retcode == ERR_NONE)
748			return 0;
749		else {
750			/*
751			 * any error make it return 0x01 which will tell
752			 * the caller the erase and write fail
753			 */
754			return 0x01;
755		}
756	}
757
758	return 0;
759}
760
/* nand_chip ecc.hwctl callback: no-op — the controller's HW ECC is
 * switched on/off per command via NDCR_ECC_EN in write_cmd(). */
static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	return;
}
765
/* nand_chip ecc.calculate callback: no-op — the hardware writes the
 * ECC bytes itself; nothing to compute in software. */
static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
		const uint8_t *dat, uint8_t *ecc_code)
{
	return 0;
}
771
772static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
773		uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
774{
775	struct pxa3xx_nand_info *info = mtd->priv;
776	/*
777	 * Any error include ERR_SEND_CMD, ERR_DBERR, ERR_BUSERR, we
778	 * consider it as a ecc error which will tell the caller the
779	 * read fail We have distinguish all the errors, but the
780	 * nand_read_ecc only check this function return value
781	 *
782	 * Corrected (single-bit) errors must also be noted.
783	 */
784	if (info->retcode == ERR_SBERR)
785		return 1;
786	else if (info->retcode != ERR_NONE)
787		return -1;
788
789	return 0;
790}
791
/* Issue READ ID synchronously (no interrupts) and return the first
 * two ID bytes packed into *id.  On timeout, stop the controller
 * (clear ND_RUN) and return -ETIMEDOUT. */
static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
	uint32_t ndcr;
	uint8_t  id_buff[8];

	if (prepare_other_cmd(info, cmdset->read_id)) {
		printk(KERN_ERR "failed to prepare command\n");
		return -EINVAL;
	}

	/* Send command */
	if (write_cmd(info))
		goto fail_timeout;

	/* Wait for the read-data request (NOTE(review): the original
	 * comment said "CMDD" but the code polls RDDREQ — the data-ready
	 * event — which is what we need before draining the FIFO) */
	if (wait_for_event(info, NDSR_RDDREQ))
		goto fail_timeout;

	/* drain 2 words from the FIFO; only the first 2 bytes are used */
	__raw_readsl(info->mmio_base + NDDB, id_buff, 2);
	*id = id_buff[0] | (id_buff[1] << 8);
	return 0;

fail_timeout:
	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
	udelay(10);
	return -ETIMEDOUT;
}
822
/* Derive the controller configuration (NDCR baseline, OOB size, ID
 * length, address cycle counts, timings) from a flash description @f
 * and install it as the active flash_info.  Returns -EINVAL for
 * unsupported page sizes / bus widths. */
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	uint32_t ndcr = 0x00000FFF; /* disable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512)
		return -EINVAL;

	if (f->flash_width != 16 && f->flash_width != 8)
		return -EINVAL;

	/* calculate flash information */
	info->oob_size = (f->page_size == 2048) ? 64 : 16;
	info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

	/* calculate addressing information */
	info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	/* >64K pages needs a third row-address cycle */
	if (f->num_blocks * f->page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(info, f->timing);
	info->flash_info = f;
	return 0;
}
864
/* Reverse-engineer the timing values (in nanoseconds) from the
 * NDTR0/NDTR1 registers as left by the bootloader, for the
 * keep_config path. */
static void pxa3xx_nand_detect_timing(struct pxa3xx_nand_info *info,
				      struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0 = nand_readl(info, NDTR0CS0);
	uint32_t ndtr1 = nand_readl(info, NDTR1CS0);

	t->tCH = cycle2ns(tCH_NDTR0(ndtr0), nand_clk);
	t->tCS = cycle2ns(tCS_NDTR0(ndtr0), nand_clk);
	t->tWH = cycle2ns(tWH_NDTR0(ndtr0), nand_clk);
	t->tWP = cycle2ns(tWP_NDTR0(ndtr0), nand_clk);
	t->tRH = cycle2ns(tRH_NDTR0(ndtr0), nand_clk);
	t->tRP = cycle2ns(tRP_NDTR0(ndtr0), nand_clk);

	t->tR = cycle2ns(tR_NDTR1(ndtr1), nand_clk);
	t->tWHR = cycle2ns(tWHR_NDTR1(ndtr1), nand_clk);
	t->tAR = cycle2ns(tAR_NDTR1(ndtr1), nand_clk);
}
883
/* keep_config path: reconstruct the flash description from the
 * controller state the bootloader left behind (NDCR geometry bits +
 * READ ID + the generic nand_flash_ids table), filling in
 * default_flash/default_timing.  Returns 0 on success, -ENODEV if
 * the ID can't be read or isn't in the table. */
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr = nand_readl(info, NDCR);
	struct nand_flash_dev *type = NULL;
	uint32_t id = -1;
	int i;

	/* geometry as programmed by the bootloader */
	default_flash.page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32;
	default_flash.page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	default_flash.flash_width = ndcr & NDCR_DWIDTH_M ? 16 : 8;
	default_flash.dfc_width = ndcr & NDCR_DWIDTH_C ? 16 : 8;

	/* set info fields needed to __readid */
	info->flash_info = &default_flash;
	info->read_id_bytes = (default_flash.page_size == 2048) ? 4 : 2;
	info->reg_ndcr = ndcr;

	if (__readid(info, &id))
		return -ENODEV;

	/* Lookup the flash id */
	id = (id >> 8) & 0xff;		/* device id is byte 2 */
	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
		if (id == nand_flash_ids[i].id) {
			type =  &nand_flash_ids[i];
			break;
		}
	}

	if (!type)
		return -ENODEV;

	/* fill the missing flash information: derive num_blocks from the
	 * chip size (MiB) and the per-block byte count */
	i = __ffs(default_flash.page_per_block * default_flash.page_size);
	default_flash.num_blocks = type->chipsize << (20 - i);

	info->oob_size = (default_flash.page_size == 2048) ? 64 : 16;

	/* calculate addressing information */
	info->col_addr_cycles = (default_flash.page_size == 2048) ? 2 : 1;

	if (default_flash.num_blocks * default_flash.page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	pxa3xx_nand_detect_timing(info, &default_timing);
	default_flash.timing = &default_timing;
	default_flash.cmdset = &default_cmdset;

	return 0;
}
936
937static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
938				    const struct pxa3xx_nand_platform_data *pdata)
939{
940	const struct pxa3xx_nand_flash *f;
941	uint32_t id = -1;
942	int i;
943
944	if (pdata->keep_config)
945		if (pxa3xx_nand_detect_config(info) == 0)
946			return 0;
947
948	for (i = 0; i<pdata->num_flash; ++i) {
949		f = pdata->flash + i;
950
951		if (pxa3xx_nand_config_flash(info, f))
952			continue;
953
954		if (__readid(info, &id))
955			continue;
956
957		if (id == f->chip_id)
958			return 0;
959	}
960
961	for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {
962
963		f = &builtin_flash_types[i];
964
965		if (pxa3xx_nand_config_flash(info, f))
966			continue;
967
968		if (__readid(info, &id))
969			continue;
970
971		if (id == f->chip_id)
972			return 0;
973	}
974
975	dev_warn(&info->pdev->dev,
976		 "failed to detect configured nand flash; found %04x instead of\n",
977		 id);
978	return -ENODEV;
979}
980
/* the maximum possible buffer size for large page with OOB data
 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
 * data buffer and the DMA descriptor
 */
#define MAX_BUFF_SIZE	PAGE_SIZE

/* Allocate the data staging buffer.  PIO mode: plain kmalloc.  DMA
 * mode: one coherent page shared between the data buffer (front) and
 * the single pxa_dma_desc (tail), plus a DMA channel with
 * pxa3xx_nand_data_dma_irq as its handler.  Returns 0 or a negative
 * errno, cleaning up the coherent buffer on channel-request failure. */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_buff_size = MAX_BUFF_SIZE;
	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	return 0;
}
1021
/* OOB layouts matching where the controller's HW ECC places its
 * bytes: small-page (16-byte OOB) and large-page (64-byte OOB). */
static struct nand_ecclayout hw_smallpage_ecclayout = {
	.eccbytes = 6,
	.eccpos = {8, 9, 10, 11, 12, 13 },
	.oobfree = { {2, 6} }
};

static struct nand_ecclayout hw_largepage_ecclayout = {
	.eccbytes = 24,
	.eccpos = {
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 38} }
};
1036
/* Wire up the nand_chip callbacks and ECC configuration for the
 * detected flash before nand_scan() runs. */
static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
				 struct pxa3xx_nand_info *info)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	struct nand_chip *this = &info->nand_chip;

	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0;

	this->waitfunc		= pxa3xx_nand_waitfunc;
	this->select_chip	= pxa3xx_nand_select_chip;
	this->dev_ready		= pxa3xx_nand_dev_ready;
	this->cmdfunc		= pxa3xx_nand_cmdfunc;
	this->read_word		= pxa3xx_nand_read_word;
	this->read_byte		= pxa3xx_nand_read_byte;
	this->read_buf		= pxa3xx_nand_read_buf;
	this->write_buf		= pxa3xx_nand_write_buf;
	this->verify_buf	= pxa3xx_nand_verify_buf;

	/* HW ECC with whole-page granularity; the hwctl/calculate hooks
	 * are no-ops since the controller does the work */
	this->ecc.mode		= NAND_ECC_HW;
	this->ecc.hwctl		= pxa3xx_nand_ecc_hwctl;
	this->ecc.calculate	= pxa3xx_nand_ecc_calculate;
	this->ecc.correct	= pxa3xx_nand_ecc_correct;
	this->ecc.size		= f->page_size;

	if (f->page_size == 2048)
		this->ecc.layout = &hw_largepage_ecclayout;
	else
		this->ecc.layout = &hw_smallpage_ecclayout;

	this->chip_delay = 25;
}
1068
1069static int pxa3xx_nand_probe(struct platform_device *pdev)
1070{
1071	struct pxa3xx_nand_platform_data *pdata;
1072	struct pxa3xx_nand_info *info;
1073	struct nand_chip *this;
1074	struct mtd_info *mtd;
1075	struct resource *r;
1076	int ret = 0, irq;
1077
1078	pdata = pdev->dev.platform_data;
1079
1080	if (!pdata) {
1081		dev_err(&pdev->dev, "no platform data defined\n");
1082		return -ENODEV;
1083	}
1084
1085	mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
1086			GFP_KERNEL);
1087	if (!mtd) {
1088		dev_err(&pdev->dev, "failed to allocate memory\n");
1089		return -ENOMEM;
1090	}
1091
1092	info = (struct pxa3xx_nand_info *)(&mtd[1]);
1093	info->pdev = pdev;
1094
1095	this = &info->nand_chip;
1096	mtd->priv = info;
1097	mtd->owner = THIS_MODULE;
1098
1099	info->clk = clk_get(&pdev->dev, NULL);
1100	if (IS_ERR(info->clk)) {
1101		dev_err(&pdev->dev, "failed to get nand clock\n");
1102		ret = PTR_ERR(info->clk);
1103		goto fail_free_mtd;
1104	}
1105	clk_enable(info->clk);
1106
1107	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1108	if (r == NULL) {
1109		dev_err(&pdev->dev, "no resource defined for data DMA\n");
1110		ret = -ENXIO;
1111		goto fail_put_clk;
1112	}
1113	info->drcmr_dat = r->start;
1114
1115	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1116	if (r == NULL) {
1117		dev_err(&pdev->dev, "no resource defined for command DMA\n");
1118		ret = -ENXIO;
1119		goto fail_put_clk;
1120	}
1121	info->drcmr_cmd = r->start;
1122
1123	irq = platform_get_irq(pdev, 0);
1124	if (irq < 0) {
1125		dev_err(&pdev->dev, "no IRQ resource defined\n");
1126		ret = -ENXIO;
1127		goto fail_put_clk;
1128	}
1129
1130	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1131	if (r == NULL) {
1132		dev_err(&pdev->dev, "no IO memory resource defined\n");
1133		ret = -ENODEV;
1134		goto fail_put_clk;
1135	}
1136
1137	r = request_mem_region(r->start, resource_size(r), pdev->name);
1138	if (r == NULL) {
1139		dev_err(&pdev->dev, "failed to request memory resource\n");
1140		ret = -EBUSY;
1141		goto fail_put_clk;
1142	}
1143
1144	info->mmio_base = ioremap(r->start, resource_size(r));
1145	if (info->mmio_base == NULL) {
1146		dev_err(&pdev->dev, "ioremap() failed\n");
1147		ret = -ENODEV;
1148		goto fail_free_res;
1149	}
1150	info->mmio_phys = r->start;
1151
1152	ret = pxa3xx_nand_init_buff(info);
1153	if (ret)
1154		goto fail_free_io;
1155
1156	/* initialize all interrupts to be disabled */
1157	disable_int(info, NDSR_MASK);
1158
1159	ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
1160			  pdev->name, info);
1161	if (ret < 0) {
1162		dev_err(&pdev->dev, "failed to request IRQ\n");
1163		goto fail_free_buf;
1164	}
1165
1166	ret = pxa3xx_nand_detect_flash(info, pdata);
1167	if (ret) {
1168		dev_err(&pdev->dev, "failed to detect flash\n");
1169		ret = -ENODEV;
1170		goto fail_free_irq;
1171	}
1172
1173	pxa3xx_nand_init_mtd(mtd, info);
1174
1175	platform_set_drvdata(pdev, mtd);
1176
1177	if (nand_scan(mtd, 1)) {
1178		dev_err(&pdev->dev, "failed to scan nand\n");
1179		ret = -ENXIO;
1180		goto fail_free_irq;
1181	}
1182
1183#ifdef CONFIG_MTD_PARTITIONS
1184	if (mtd_has_cmdlinepart()) {
1185		static const char *probes[] = { "cmdlinepart", NULL };
1186		struct mtd_partition *parts;
1187		int nr_parts;
1188
1189		nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
1190
1191		if (nr_parts)
1192			return add_mtd_partitions(mtd, parts, nr_parts);
1193	}
1194
1195	return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1196#else
1197	return 0;
1198#endif
1199
1200fail_free_irq:
1201	free_irq(irq, info);
1202fail_free_buf:
1203	if (use_dma) {
1204		pxa_free_dma(info->data_dma_ch);
1205		dma_free_coherent(&pdev->dev, info->data_buff_size,
1206			info->data_buff, info->data_buff_phys);
1207	} else
1208		kfree(info->data_buff);
1209fail_free_io:
1210	iounmap(info->mmio_base);
1211fail_free_res:
1212	release_mem_region(r->start, resource_size(r));
1213fail_put_clk:
1214	clk_disable(info->clk);
1215	clk_put(info->clk);
1216fail_free_mtd:
1217	kfree(mtd);
1218	return ret;
1219}
1220
/*
 * Remove: undo everything probe set up, in reverse order —
 * unregister from MTD, release IRQ and DMA resources, unmap and
 * release the MMIO region, drop the clock, and free the combined
 * mtd_info + pxa3xx_nand_info allocation.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;
	struct resource *r;
	int irq;

	platform_set_drvdata(pdev, NULL);

	/* unregister from the MTD core before tearing down hardware */
	del_mtd_device(mtd);
#ifdef CONFIG_MTD_PARTITIONS
	del_mtd_partitions(mtd);
#endif
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	/* NOTE(review): presumably pairs with a dma_alloc_writecombine()
	 * in pxa3xx_nand_init_buff() — verify the allocator matches */
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_writecombine(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);

	iounmap(info->mmio_base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	clk_disable(info->clk);
	clk_put(info->clk);

	/* info was allocated together with mtd; one kfree frees both */
	kfree(mtd);
	return 0;
}
1254
1255#ifdef CONFIG_PM
1256static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1257{
1258	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1259	struct pxa3xx_nand_info *info = mtd->priv;
1260
1261	if (info->state != STATE_READY) {
1262		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1263		return -EAGAIN;
1264	}
1265
1266	return 0;
1267}
1268
1269static int pxa3xx_nand_resume(struct platform_device *pdev)
1270{
1271	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1272	struct pxa3xx_nand_info *info = mtd->priv;
1273
1274	clk_enable(info->clk);
1275
1276	return pxa3xx_nand_config_flash(info, info->flash_info);
1277}
1278#else
1279#define pxa3xx_nand_suspend	NULL
1280#define pxa3xx_nand_resume	NULL
1281#endif
1282
/* Binds to platform devices named "pxa3xx-nand". Uses the legacy
 * platform suspend/resume callbacks, which compile to NULL when
 * CONFIG_PM is disabled. */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};
1292
/* Module entry point: register the platform driver with the core. */
static int __init pxa3xx_nand_init(void)
{
	return platform_driver_register(&pxa3xx_nand_driver);
}
module_init(pxa3xx_nand_init);
1298
/* Module exit point: unregister the driver (triggers remove()). */
static void __exit pxa3xx_nand_exit(void)
{
	platform_driver_unregister(&pxa3xx_nand_driver);
}
module_exit(pxa3xx_nand_exit);
1304
1305MODULE_LICENSE("GPL");
1306MODULE_DESCRIPTION("PXA3xx NAND controller driver");
1307