pxa3xx_nand.c revision 227a886c7ead6420465abfd1242f449895b2c332
/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>

#include <mach/dma.h>
#include <plat/pxa3xx_nand.h>

#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_CLR_ECC		(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)

#define NDSR_MASK		(0xfff)
#define NDSR_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_DBERR		(0x1 << 4)
#define NDSR_SBERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	__raw_writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	__raw_readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_DBERR	= -3,
	ERR_BBERR	= -4,
	ERR_SBERR	= -5,
};

enum {
	STATE_READY	= 0,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
};

struct pxa3xx_nand_info {
	struct nand_chip	nand_chip;

	struct platform_device	 *pdev;
	const struct pxa3xx_nand_flash *flash_info;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;

	unsigned int		buf_start;
	unsigned int		buf_count;

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	dma_addr_t		data_buff_phys;
	size_t			data_buff_size;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	uint32_t		reg_ndcr;

	/* saved column/page_addr during CMD_SEQIN */
	int			seqin_column;
	int			seqin_page_addr;

	/* relate to the command */
	unsigned int		state;

	int			use_ecc;	/* use HW ECC ? */
	int			use_dma;	/* use DMA ? */

	size_t			data_size;	/* data size in FIFO */
	int			retcode;
	struct completion	cmd_complete;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;

	/* calculated from pxa3xx_nand_flash data */
	size_t		oob_size;
	size_t		read_id_bytes;

	unsigned int	col_addr_cycles;
	unsigned int	row_addr_cycles;
};

static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transfer to/from NAND HW");
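
/*
 * Usage note (assuming the driver is built as a module named pxa3xx_nand):
 * DMA can be disabled at load time with "modprobe pxa3xx_nand use_dma=0";
 * the 0444 permission exposes the value read-only via sysfs.
 */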

/*
 * Default NAND flash controller configuration setup by the
 * bootloader. This configuration is used only when pdata->keep_config is set.
 */
static struct pxa3xx_nand_timing default_timing;
static struct pxa3xx_nand_flash default_flash;
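
/*
 * Each cmdset entry packs up to two NAND command bytes into one 16-bit
 * value: the low byte is the first command and the high byte, when
 * non-zero, is the second (confirm) command, for which NDCB0_DBC is set
 * by the prepare_*_cmd() helpers below (e.g. erase = 0xD060 issues 0x60,
 * then 0xD0).
 */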
static struct pxa3xx_nand_cmdset default_cmdset = {
	.read1		= 0x3000,
	.read2		= 0x0050,
	.program	= 0x1080,
	.read_status	= 0x0070,
	.read_id	= 0x0090,
	.erase		= 0xD060,
	.reset		= 0x00FF,
	.lock		= 0x002A,
	.unlock		= 0x2423,
	.lock_status	= 0x007A,
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{      0,   0, 2048,  8,  8,    0, &default_cmdset, &timing[0] },
	{ 0x46ec,  32,  512, 16, 16, 4096, &default_cmdset, &timing[1] },
	{ 0xdaec,  64, 2048,  8,  8, 2048, &default_cmdset, &timing[1] },
	{ 0xd7ec, 128, 4096,  8,  8, 8192, &default_cmdset, &timing[1] },
	{ 0xa12c,  64, 2048,  8,  8, 1024, &default_cmdset, &timing[2] },
	{ 0xb12c,  64, 2048, 16, 16, 1024, &default_cmdset, &timing[2] },
	{ 0xdc2c,  64, 2048,  8,  8, 4096, &default_cmdset, &timing[2] },
	{ 0xcc2c,  64, 2048, 16, 16, 4096, &default_cmdset, &timing[2] },
	{ 0xba20,  64, 2048, 16, 16, 2048, &default_cmdset, &timing[3] },
};

/* Define a default flash type; it is used only for initial flash detection */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])

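/* pack timing values (in controller clock cycles) into the NDTR0CS0/NDTR1CS0
 * fields; each field saturates at the largest value its bit width can hold
 */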
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

#define tCH_NDTR0(r)	(((r) >> 19) & 0x7)
#define tCS_NDTR0(r)	(((r) >> 16) & 0x7)
#define tWH_NDTR0(r)	(((r) >> 11) & 0x7)
#define tWP_NDTR0(r)	(((r) >> 8) & 0x7)
#define tRH_NDTR0(r)	(((r) >> 3) & 0x7)
#define tRP_NDTR0(r)	(((r) >> 0) & 0x7)

#define tR_NDTR1(r)	(((r) >> 16) & 0xffff)
#define tWHR_NDTR1(r)	(((r) >> 4) & 0xf)
#define tAR_NDTR1(r)	(((r) >> 0) & 0xf)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

/* convert nand flash controller clock cycles to nano-seconds */
#define cycle2ns(c, clk)	((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
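/* worked example (clock rate chosen purely for illustration): with a 156MHz
 * controller clock, ns2cycle(25, 156000000) = 25 * 156 / 1000 = 3 cycles,
 * and cycle2ns(3, 156000000) = (4 * 1000000 + 312000) / 156000 = 27ns
 */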

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
				   const struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

#define WAIT_EVENT_TIMEOUT	10

static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
{
	int timeout = WAIT_EVENT_TIMEOUT;
	uint32_t ndsr;

	while (timeout--) {
		ndsr = nand_readl(info, NDSR) & NDSR_MASK;
		if (ndsr & event) {
			nand_writel(info, NDSR, ndsr);
			return 0;
		}
		udelay(10);
	}

	return -ETIMEDOUT;
}

static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
			uint16_t cmd, int column, int page_addr)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;

	/* calculate data size */
	switch (f->page_size) {
	case 2048:
		info->data_size = (info->use_ecc) ? 2088 : 2112;
		break;
	case 512:
		info->data_size = (info->use_ecc) ? 520 : 528;
		break;
	default:
		return -EINVAL;
	}

	/* generate values for NDCBx registers */
	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb1 = 0;
	info->ndcb2 = 0;
	info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles);

	if (info->col_addr_cycles == 2) {
		/* large block, 2 cycles for column address
		 * row address starts from 3rd cycle
		 */
		info->ndcb1 |= page_addr << 16;
		if (info->row_addr_cycles == 3)
			info->ndcb2 = (page_addr >> 16) & 0xff;
	} else
		/* small block, 1 cycle for column address
		 * row address starts from 2nd cycle
		 */
		info->ndcb1 = page_addr << 8;

	if (cmd == cmdset->program)
		info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;

	return 0;
}

static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
			uint16_t cmd, int page_addr)
{
	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
	info->ndcb1 = page_addr;
	info->ndcb2 = 0;
	return 0;
}

static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
{
	const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;

	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb1 = 0;
	info->ndcb2 = 0;

	if (cmd == cmdset->read_id) {
		info->ndcb0 |= NDCB0_CMD_TYPE(3);
		info->data_size = 8;
	} else if (cmd == cmdset->read_status) {
		info->ndcb0 |= NDCB0_CMD_TYPE(4);
		info->data_size = 8;
	} else if (cmd == cmdset->reset || cmd == cmdset->lock ||
		   cmd == cmdset->unlock) {
		info->ndcb0 |= NDCB0_CMD_TYPE(5);
	} else
		return -EINVAL;

	return 0;
}

static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

/* NOTE: ND_RUN must be set first and the command buffer written afterwards;
 * otherwise the controller does not start.
 */
static int write_cmd(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);

	ndcr = info->reg_ndcr;

	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
	ndcr |= NDCR_ND_RUN;

	nand_writel(info, NDCR, ndcr);

	if (wait_for_event(info, NDSR_WRCMDREQ)) {
		printk(KERN_ERR "timed out writing command\n");
		return -ETIMEDOUT;
	}

	nand_writel(info, NDCB0, info->ndcb0);
	nand_writel(info, NDCB0, info->ndcb1);
	nand_writel(info, NDCB0, info->ndcb2);
	return 0;
}

static int handle_data_pio(struct pxa3xx_nand_info *info)
{
	int ret, timeout = CHIP_DELAY_TIMEOUT;

	switch (info->state) {
	case STATE_PIO_WRITING:
		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));

		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);

		ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
		if (!ret) {
			printk(KERN_ERR "program command timed out\n");
			return -1;
		}
		break;
	case STATE_PIO_READING:
		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		break;
	default:
		printk(KERN_ERR "%s: invalid state %d\n", __func__,
				info->state);
		return -EINVAL;
	}

	info->state = STATE_READY;
	return 0;
}

static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len = ALIGN(info->data_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	if (dir_out) {
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
	} else {
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}

static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
		complete(&info->cmd_complete);
	}

	if (info->state == STATE_DMA_WRITING) {
		info->state = STATE_DMA_DONE;
		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
	} else {
		info->state = STATE_READY;
		complete(&info->cmd_complete);
	}
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status;

	status = nand_readl(info, NDSR);

	if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) {
		if (status & NDSR_DBERR)
			info->retcode = ERR_DBERR;
		else if (status & NDSR_SBERR)
			info->retcode = ERR_SBERR;

		disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);

		if (info->use_dma) {
			info->state = STATE_DMA_READING;
			start_data_dma(info, 0);
		} else {
			info->state = STATE_PIO_READING;
			complete(&info->cmd_complete);
		}
	} else if (status & NDSR_WRDREQ) {
		disable_int(info, NDSR_WRDREQ);
		if (info->use_dma) {
			info->state = STATE_DMA_WRITING;
			start_data_dma(info, 1);
		} else {
			info->state = STATE_PIO_WRITING;
			complete(&info->cmd_complete);
		}
	} else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
		if (status & NDSR_CS0_BBD)
			info->retcode = ERR_BBERR;

		disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
		info->state = STATE_READY;
		complete(&info->cmd_complete);
	}
	nand_writel(info, NDSR, status);
	return IRQ_HANDLED;
}

static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
{
	uint32_t ndcr;
	int ret, timeout = CHIP_DELAY_TIMEOUT;

	if (write_cmd(info)) {
		info->retcode = ERR_SENDCMD;
		goto fail_stop;
	}

	info->state = STATE_CMD_HANDLE;

	enable_int(info, event);

	ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
	if (!ret) {
		printk(KERN_ERR "command execution timed out\n");
		info->retcode = ERR_SENDCMD;
		goto fail_stop;
	}

	if (info->use_dma == 0 && info->data_size > 0)
		if (handle_data_pio(info))
			goto fail_stop;

	return 0;

fail_stop:
	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
	udelay(10);
	return -ETIMEDOUT;
}

static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
				int column, int page_addr)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	const struct pxa3xx_nand_flash *flash_info = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
	int ret;

	info->use_dma = (use_dma) ? 1 : 0;
	info->use_ecc = 0;
	info->data_size = 0;
	info->state = STATE_READY;

	init_completion(&info->cmd_complete);

	switch (command) {
	case NAND_CMD_READOOB:
		/* disable HW ECC to get all the OOB data */
		info->buf_count = mtd->writesize + mtd->oobsize;
		info->buf_start = mtd->writesize + column;
		memset(info->data_buff, 0xFF, info->buf_count);

		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);

		/* We only want the OOB, so errors in the data area do not matter */
		if (info->retcode == ERR_DBERR)
			info->retcode = ERR_NONE;
		break;

	case NAND_CMD_READ0:
		info->use_ecc = 1;
		info->retcode = ERR_NONE;
		info->buf_start = column;
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);

		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);

		if (info->retcode == ERR_DBERR) {
			/* For a blank page (all 0xff) the HW calculates its ECC
			 * as 0, which differs from the ECC stored in the OOB
			 * area; ignore such double bit errors
			 */
			if (is_buf_blank(info->data_buff, mtd->writesize))
				info->retcode = ERR_NONE;
		}
		break;
	case NAND_CMD_SEQIN:
		info->buf_start = column;
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xff, info->buf_count);

		/* save column/page_addr for next CMD_PAGEPROG */
		info->seqin_column = column;
		info->seqin_page_addr = page_addr;
		break;
	case NAND_CMD_PAGEPROG:
		info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;

		if (prepare_read_prog_cmd(info, cmdset->program,
				info->seqin_column, info->seqin_page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
		break;
	case NAND_CMD_ERASE1:
		if (prepare_erase_cmd(info, cmdset->erase, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
		break;
	case NAND_CMD_ERASE2:
		break;
	case NAND_CMD_READID:
	case NAND_CMD_STATUS:
		info->use_dma = 0;	/* force PIO read */
		info->buf_start = 0;
		info->buf_count = (command == NAND_CMD_READID) ?
				info->read_id_bytes : 1;

		if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
				cmdset->read_id : cmdset->read_status))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
		break;
	case NAND_CMD_RESET:
		if (prepare_other_cmd(info, cmdset->reset))
			break;

		ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
		if (ret == 0) {
			int timeout = 2;
			uint32_t ndcr;

			while (timeout--) {
				if (nand_readl(info, NDSR) & NDSR_RDY)
					break;
				msleep(10);
			}

			ndcr = nand_readl(info, NDCR);
			nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
		}
		break;
	default:
		printk(KERN_ERR "unsupported command.\n");
		break;
	}

	if (info->retcode == ERR_DBERR) {
		printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
		info->retcode = ERR_NONE;
	}
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has a new command just been sent? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	return 0;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_info *info = mtd->priv;

	/* pxa3xx_nand_do_cmd() has already waited for the command to complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else {
			/*
			 * any error makes this return 0x01, which tells
			 * the caller that the erase or write failed
			 */
			return 0x01;
		}
	}

	return 0;
}

static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	return;
}

static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
		const uint8_t *dat, uint8_t *ecc_code)
{
	return 0;
}

static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
		uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	/*
	 * Any error (ERR_SENDCMD, ERR_DBERR, ERR_DMABUSERR, ...) is
	 * treated as an ECC error here, which tells the caller that the
	 * read failed. We distinguish all the errors internally, but
	 * nand_read_ecc() only checks the return value of this function.
	 *
	 * Corrected (single-bit) errors must also be noted.
	 */
	if (info->retcode == ERR_SBERR)
		return 1;
	else if (info->retcode != ERR_NONE)
		return -1;

	return 0;
}

static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
	uint32_t ndcr;
	uint8_t  id_buff[8];

	if (prepare_other_cmd(info, cmdset->read_id)) {
		printk(KERN_ERR "failed to prepare command\n");
		return -EINVAL;
	}

	/* Send command */
	if (write_cmd(info))
		goto fail_timeout;

	/* Wait for RDDREQ (ID data ready to be read) */
	if (wait_for_event(info, NDSR_RDDREQ))
		goto fail_timeout;

	__raw_readsl(info->mmio_base + NDDB, id_buff, 2);
	*id = id_buff[0] | (id_buff[1] << 8);
	return 0;

fail_timeout:
	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
	udelay(10);
	return -ETIMEDOUT;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	uint32_t ndcr = 0x00000FFF; /* disable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512)
		return -EINVAL;

	if (f->flash_width != 16 && f->flash_width != 8)
		return -EINVAL;

	/* calculate flash information */
	info->oob_size = (f->page_size == 2048) ? 64 : 16;
	info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

	/* calculate addressing information */
	info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	if (f->num_blocks * f->page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(info, f->timing);
	info->flash_info = f;
	return 0;
}

static void pxa3xx_nand_detect_timing(struct pxa3xx_nand_info *info,
				      struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0 = nand_readl(info, NDTR0CS0);
	uint32_t ndtr1 = nand_readl(info, NDTR1CS0);

	t->tCH = cycle2ns(tCH_NDTR0(ndtr0), nand_clk);
	t->tCS = cycle2ns(tCS_NDTR0(ndtr0), nand_clk);
	t->tWH = cycle2ns(tWH_NDTR0(ndtr0), nand_clk);
	t->tWP = cycle2ns(tWP_NDTR0(ndtr0), nand_clk);
	t->tRH = cycle2ns(tRH_NDTR0(ndtr0), nand_clk);
	t->tRP = cycle2ns(tRP_NDTR0(ndtr0), nand_clk);

	t->tR = cycle2ns(tR_NDTR1(ndtr1), nand_clk);
	t->tWHR = cycle2ns(tWHR_NDTR1(ndtr1), nand_clk);
	t->tAR = cycle2ns(tAR_NDTR1(ndtr1), nand_clk);
}

static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr = nand_readl(info, NDCR);
	struct nand_flash_dev *type = NULL;
	uint32_t id = -1;
	int i;

	default_flash.page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32;
	default_flash.page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	default_flash.flash_width = ndcr & NDCR_DWIDTH_M ? 16 : 8;
	default_flash.dfc_width = ndcr & NDCR_DWIDTH_C ? 16 : 8;

	/* set the info fields needed by __readid */
	info->flash_info = &default_flash;
	info->read_id_bytes = (default_flash.page_size == 2048) ? 4 : 2;
	info->reg_ndcr = ndcr;

	if (__readid(info, &id))
		return -ENODEV;

	/* Lookup the flash id */
	id = (id >> 8) & 0xff;		/* device id is byte 2 */
	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
		if (id == nand_flash_ids[i].id) {
			type = &nand_flash_ids[i];
			break;
		}
	}

	if (!type)
		return -ENODEV;

	/* fill the missing flash information */
	i = __ffs(default_flash.page_per_block * default_flash.page_size);
	default_flash.num_blocks = type->chipsize << (20 - i);

	info->oob_size = (default_flash.page_size == 2048) ? 64 : 16;

	/* calculate addressing information */
	info->col_addr_cycles = (default_flash.page_size == 2048) ? 2 : 1;

	if (default_flash.num_blocks * default_flash.page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	pxa3xx_nand_detect_timing(info, &default_timing);
	default_flash.timing = &default_timing;
	default_flash.cmdset = &default_cmdset;

	return 0;
}

static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_platform_data *pdata)
{
	const struct pxa3xx_nand_flash *f;
	uint32_t id = -1;
	int i;

	if (pdata->keep_config)
		if (pxa3xx_nand_detect_config(info) == 0)
			return 0;

	/* we use the default timing to detect the id */
	f = DEFAULT_FLASH_TYPE;
	pxa3xx_nand_config_flash(info, f);
	if (__readid(info, &id))
		goto fail_detect;

	for (i = 0; i < ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; i++) {
		/* we first choose the flash definition from platform data */
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];
		if (f->chip_id == id) {
			dev_info(&info->pdev->dev, "detected chip id: 0x%x\n", id);
			pxa3xx_nand_config_flash(info, f);
			return 0;
		}
	}

	dev_warn(&info->pdev->dev,
		 "failed to detect configured nand flash; found unsupported id %04x\n",
		 id);
fail_detect:
	return -ENODEV;
}

/* the maximum possible buffer size for large page with OOB data
 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
 * data buffer and the DMA descriptor
 */
#define MAX_BUFF_SIZE	PAGE_SIZE
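/* this relies on PAGE_SIZE (4096 on these ARM platforms) being large enough
 * for the 2112 bytes of data plus OOB and the trailing pxa_dma_desc that
 * pxa3xx_nand_init_buff() places at data_desc_offset
 */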

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_buff_size = MAX_BUFF_SIZE;
	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	return 0;
}

static struct nand_ecclayout hw_smallpage_ecclayout = {
	.eccbytes = 6,
	.eccpos = {8, 9, 10, 11, 12, 13 },
	.oobfree = { {2, 6} }
};

static struct nand_ecclayout hw_largepage_ecclayout = {
	.eccbytes = 24,
	.eccpos = {
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 38} }
};

static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
				 struct pxa3xx_nand_info *info)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	struct nand_chip *this = &info->nand_chip;

	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;

	this->waitfunc		= pxa3xx_nand_waitfunc;
	this->select_chip	= pxa3xx_nand_select_chip;
	this->dev_ready		= pxa3xx_nand_dev_ready;
	this->cmdfunc		= pxa3xx_nand_cmdfunc;
	this->read_word		= pxa3xx_nand_read_word;
	this->read_byte		= pxa3xx_nand_read_byte;
	this->read_buf		= pxa3xx_nand_read_buf;
	this->write_buf		= pxa3xx_nand_write_buf;
	this->verify_buf	= pxa3xx_nand_verify_buf;

	this->ecc.mode		= NAND_ECC_HW;
	this->ecc.hwctl		= pxa3xx_nand_ecc_hwctl;
	this->ecc.calculate	= pxa3xx_nand_ecc_calculate;
	this->ecc.correct	= pxa3xx_nand_ecc_correct;
	this->ecc.size		= f->page_size;

	if (f->page_size == 2048)
		this->ecc.layout = &hw_largepage_ecclayout;
	else
		this->ecc.layout = &hw_smallpage_ecclayout;

	this->chip_delay = 25;
}

static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct nand_chip *this;
	struct mtd_info *mtd;
	struct resource *r;
	int ret = 0, irq;

	pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
			GFP_KERNEL);
	if (!mtd) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	info = (struct pxa3xx_nand_info *)(&mtd[1]);
	info->pdev = pdev;

	this = &info->nand_chip;
	mtd->priv = info;
	mtd->owner = THIS_MODULE;

	info->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		ret = PTR_ERR(info->clk);
		goto fail_free_mtd;
	}
	clk_enable(info->clk);

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for data DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_dat = r->start;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for command DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_cmd = r->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IO memory resource defined\n");
		ret = -ENODEV;
		goto fail_put_clk;
	}

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (r == NULL) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		ret = -EBUSY;
		goto fail_put_clk;
	}

	info->mmio_base = ioremap(r->start, resource_size(r));
	if (info->mmio_base == NULL) {
		dev_err(&pdev->dev, "ioremap() failed\n");
		ret = -ENODEV;
		goto fail_free_res;
	}
	info->mmio_phys = r->start;

	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		goto fail_free_io;

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
			  pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	ret = pxa3xx_nand_detect_flash(info, pdata);
	if (ret) {
		dev_err(&pdev->dev, "failed to detect flash\n");
		ret = -ENODEV;
		goto fail_free_irq;
	}

	pxa3xx_nand_init_mtd(mtd, info);

	platform_set_drvdata(pdev, mtd);

	if (nand_scan(mtd, 1)) {
		dev_err(&pdev->dev, "failed to scan nand\n");
		ret = -ENXIO;
		goto fail_free_irq;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (mtd_has_cmdlinepart()) {
		static const char *probes[] = { "cmdlinepart", NULL };
		struct mtd_partition *parts;
		int nr_parts;

		nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);

		if (nr_parts)
			return add_mtd_partitions(mtd, parts, nr_parts);
	}

	return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
#else
	return 0;
#endif

fail_free_irq:
	free_irq(irq, info);
fail_free_buf:
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->data_buff_size,
			info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);
fail_free_io:
	iounmap(info->mmio_base);
fail_free_res:
	release_mem_region(r->start, resource_size(r));
fail_put_clk:
	clk_disable(info->clk);
	clk_put(info->clk);
fail_free_mtd:
	kfree(mtd);
	return ret;
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;
	struct resource *r;
	int irq;

	platform_set_drvdata(pdev, NULL);

	del_mtd_device(mtd);
#ifdef CONFIG_MTD_PARTITIONS
	del_mtd_partitions(mtd);
#endif
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);

	iounmap(info->mmio_base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	clk_disable(info->clk);
	clk_put(info->clk);

	kfree(mtd);
	return 0;
}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;

	if (info->state != STATE_READY) {
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	return 0;
}

static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;

	clk_enable(info->clk);

	return pxa3xx_nand_config_flash(info, info->flash_info);
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

static int __init pxa3xx_nand_init(void)
{
	return platform_driver_register(&pxa3xx_nand_driver);
}
module_init(pxa3xx_nand_init);

static void __exit pxa3xx_nand_exit(void)
{
	platform_driver_unregister(&pxa3xx_nand_driver);
}
module_exit(pxa3xx_nand_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");