pxa3xx_nand.c revision 223cf6c3b517cf6ef040cafe45af89f3b8adba74
/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/irq.h>

#include <mach/dma.h>
#include <mach/pxa3xx_nand.h>

#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_CLR_ECC		(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)

#define NDSR_MASK		(0xfff)
#define NDSR_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_DBERR		(0x1 << 4)
#define NDSR_SBERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)
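
/*
 * A command is described by three 32-bit words: the command bytes,
 * command type and address-cycle count in NDCB0, and the address cycles
 * packed into NDCB1/NDCB2.  All three words are written back-to-back to
 * the NDCB0 address once NDSR_WRCMDREQ is asserted; see write_cmd() below.
 */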

/* dma-able I/O address for the NAND data and commands */
#define NDCB0_DMA_ADDR		(0x43100048)
#define NDDB_DMA_ADDR		(0x43100040)

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	__raw_writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	__raw_readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_DBERR	= -3,
	ERR_BBERR	= -4,
	ERR_SBERR	= -5,
};

enum {
	STATE_READY	= 0,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
};

struct pxa3xx_nand_info {
	struct nand_chip	nand_chip;

	struct platform_device	*pdev;
	const struct pxa3xx_nand_flash *flash_info;

	struct clk		*clk;
	void __iomem		*mmio_base;

	unsigned int		buf_start;
	unsigned int		buf_count;

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	dma_addr_t		data_buff_phys;
	size_t			data_buff_size;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	uint32_t		reg_ndcr;

	/* saved column/page_addr during CMD_SEQIN */
	int			seqin_column;
	int			seqin_page_addr;

	/* state related to the current command */
	unsigned int		state;

	int			use_ecc;	/* use HW ECC ? */
	int			use_dma;	/* use DMA ? */

	size_t			data_size;	/* data size in FIFO */
	int			retcode;
	struct completion	cmd_complete;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;

	/* calculated from pxa3xx_nand_flash data */
	size_t		oob_size;
	size_t		read_id_bytes;

	unsigned int	col_addr_cycles;
	unsigned int	row_addr_cycles;
};

static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");

/*
 * Default NAND flash controller configuration set up by the
 * bootloader. This configuration is used only when pdata->keep_config is set.
 */
static struct pxa3xx_nand_timing default_timing;
static struct pxa3xx_nand_flash default_flash;

static struct pxa3xx_nand_cmdset smallpage_cmdset = {
	.read1		= 0x0000,
	.read2		= 0x0050,
	.program	= 0x1080,
	.read_status	= 0x0070,
	.read_id	= 0x0090,
	.erase		= 0xD060,
	.reset		= 0x00FF,
	.lock		= 0x002A,
	.unlock		= 0x2423,
	.lock_status	= 0x007A,
};

static struct pxa3xx_nand_cmdset largepage_cmdset = {
	.read1		= 0x3000,
	.read2		= 0x0050,
	.program	= 0x1080,
	.read_status	= 0x0070,
	.read_id	= 0x0090,
	.erase		= 0xD060,
	.reset		= 0x00FF,
	.lock		= 0x002A,
	.unlock		= 0x2423,
	.lock_status	= 0x007A,
};

#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
static struct pxa3xx_nand_timing samsung512MbX16_timing = {
	.tCH	= 10,
	.tCS	= 0,
	.tWH	= 20,
	.tWP	= 40,
	.tRH	= 30,
	.tRP	= 40,
	.tR	= 11123,
	.tWHR	= 110,
	.tAR	= 10,
};

static struct pxa3xx_nand_flash samsung512MbX16 = {
	.timing		= &samsung512MbX16_timing,
	.cmdset		= &smallpage_cmdset,
	.page_per_block	= 32,
	.page_size	= 512,
	.flash_width	= 16,
	.dfc_width	= 16,
	.num_blocks	= 4096,
	.chip_id	= 0x46ec,
};

static struct pxa3xx_nand_timing micron_timing = {
	.tCH	= 10,
	.tCS	= 25,
	.tWH	= 15,
	.tWP	= 25,
	.tRH	= 15,
	.tRP	= 25,
	.tR	= 25000,
	.tWHR	= 60,
	.tAR	= 10,
};

static struct pxa3xx_nand_flash micron1GbX8 = {
	.timing		= &micron_timing,
	.cmdset		= &largepage_cmdset,
	.page_per_block	= 64,
	.page_size	= 2048,
	.flash_width	= 8,
	.dfc_width	= 8,
	.num_blocks	= 1024,
	.chip_id	= 0xa12c,
};

static struct pxa3xx_nand_flash micron1GbX16 = {
	.timing		= &micron_timing,
	.cmdset		= &largepage_cmdset,
	.page_per_block	= 64,
	.page_size	= 2048,
	.flash_width	= 16,
	.dfc_width	= 16,
	.num_blocks	= 1024,
	.chip_id	= 0xb12c,
};

static struct pxa3xx_nand_timing stm2GbX16_timing = {
	.tCH	= 10,
	.tCS	= 35,
	.tWH	= 15,
	.tWP	= 25,
	.tRH	= 15,
	.tRP	= 25,
	.tR	= 25000,
	.tWHR	= 60,
	.tAR	= 10,
};

static struct pxa3xx_nand_flash stm2GbX16 = {
	.timing		= &stm2GbX16_timing,
	.cmdset		= &largepage_cmdset,
	.page_per_block	= 64,
	.page_size	= 2048,
	.flash_width	= 16,
	.dfc_width	= 16,
	.num_blocks	= 2048,
	.chip_id	= 0xba20,
};

static struct pxa3xx_nand_flash *builtin_flash_types[] = {
	&samsung512MbX16,
	&micron1GbX8,
	&micron1GbX16,
	&stm2GbX16,
};
#endif /* CONFIG_MTD_NAND_PXA3xx_BUILTIN */

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

#define tCH_NDTR0(r)	(((r) >> 19) & 0x7)
#define tCS_NDTR0(r)	(((r) >> 16) & 0x7)
#define tWH_NDTR0(r)	(((r) >> 11) & 0x7)
#define tWP_NDTR0(r)	(((r) >> 8) & 0x7)
#define tRH_NDTR0(r)	(((r) >> 3) & 0x7)
#define tRP_NDTR0(r)	(((r) >> 0) & 0x7)

#define tR_NDTR1(r)	(((r) >> 16) & 0xffff)
#define tWHR_NDTR1(r)	(((r) >> 4) & 0xf)
#define tAR_NDTR1(r)	(((r) >> 0) & 0xf)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)(((ns) * (clk / 1000000) / 1000) - 1)

/* convert nand flash controller clock cycles to nano-seconds */
#define cycle2ns(c, clk)	((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
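
/*
 * Worked example (illustrative only -- the real rate comes from
 * clk_get_rate()): with a 156 MHz controller clock one cycle is ~6.4 ns,
 * so ns2cycle(25, 156000000) = 25 * 156 / 1000 - 1 = 2.  The timing
 * fields appear to hold (cycles - 1), hence the -1 above and the
 * matching +1 in cycle2ns().
 */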

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
				   const struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

#define WAIT_EVENT_TIMEOUT	10

static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
{
	int timeout = WAIT_EVENT_TIMEOUT;
	uint32_t ndsr;

	while (timeout--) {
		ndsr = nand_readl(info, NDSR) & NDSR_MASK;
		if (ndsr & event) {
			nand_writel(info, NDSR, ndsr);
			return 0;
		}
		udelay(10);
	}

	return -ETIMEDOUT;
}

static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
			uint16_t cmd, int column, int page_addr)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;

	/* calculate data size */
	switch (f->page_size) {
	case 2048:
		info->data_size = (info->use_ecc) ? 2088 : 2112;
		break;
	case 512:
		info->data_size = (info->use_ecc) ? 520 : 528;
		break;
	default:
		return -EINVAL;
	}

	/* generate values for NDCBx registers */
	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb1 = 0;
	info->ndcb2 = 0;
	info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles);

	if (info->col_addr_cycles == 2) {
		/* large block, 2 cycles for column address;
		 * row address starts from the 3rd cycle
		 */
		info->ndcb1 |= page_addr << 16;
		if (info->row_addr_cycles == 3)
			info->ndcb2 = (page_addr >> 16) & 0xff;
	} else
		/* small block, 1 cycle for column address;
		 * row address starts from the 2nd cycle
		 */
		info->ndcb1 = page_addr << 8;

	if (cmd == cmdset->program)
		info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;

	return 0;
}
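
/*
 * For illustration: a large-page read (read1 == 0x3000, i.e. the 00h/30h
 * command pair, hence NDCB0_DBC) of page 0x1234 on a chip that needs
 * 2 column + 3 row address cycles ends up as:
 *   ndcb0 = 0x3000 | NDCB0_DBC | NDCB0_ADDR_CYC(5)
 *   ndcb1 = 0x12340000	(the column bytes stay 0 -- the driver always
 *			 transfers the whole page and slices in software)
 *   ndcb2 = 0x00
 */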

static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
			uint16_t cmd, int page_addr)
{
	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
	info->ndcb1 = page_addr;
	info->ndcb2 = 0;
	return 0;
}

static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
{
	const struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;

	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
	info->ndcb1 = 0;
	info->ndcb2 = 0;

	if (cmd == cmdset->read_id) {
		info->ndcb0 |= NDCB0_CMD_TYPE(3);
		info->data_size = 8;
	} else if (cmd == cmdset->read_status) {
		info->ndcb0 |= NDCB0_CMD_TYPE(4);
		info->data_size = 8;
	} else if (cmd == cmdset->reset || cmd == cmdset->lock ||
		   cmd == cmdset->unlock) {
		info->ndcb0 |= NDCB0_CMD_TYPE(5);
	} else
		return -EINVAL;

	return 0;
}

static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

/* NOTE: NDCR_ND_RUN must be set first and only then the command buffer
 * written, otherwise the controller does not accept the command.
 */
static int write_cmd(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);

	ndcr = info->reg_ndcr;

	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
	ndcr |= NDCR_ND_RUN;

	nand_writel(info, NDCR, ndcr);

	if (wait_for_event(info, NDSR_WRCMDREQ)) {
		printk(KERN_ERR "timed out writing command\n");
		return -ETIMEDOUT;
	}

	nand_writel(info, NDCB0, info->ndcb0);
	nand_writel(info, NDCB0, info->ndcb1);
	nand_writel(info, NDCB0, info->ndcb2);
	return 0;
}

static int handle_data_pio(struct pxa3xx_nand_info *info)
{
	int ret, timeout = CHIP_DELAY_TIMEOUT;

	switch (info->state) {
	case STATE_PIO_WRITING:
		/* the FIFO is accessed in 32-bit words, data_size is in bytes */
		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));

		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);

		ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
		if (!ret) {
			printk(KERN_ERR "program command timed out\n");
			return -1;
		}
		break;
	case STATE_PIO_READING:
		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
				DIV_ROUND_UP(info->data_size, 4));
		break;
	default:
		printk(KERN_ERR "%s: invalid state %d\n", __func__,
				info->state);
		return -EINVAL;
	}

	info->state = STATE_READY;
	return 0;
}

static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len = ALIGN(info->data_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	if (dir_out) {
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = NDDB_DMA_ADDR;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
	} else {
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = NDDB_DMA_ADDR;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}
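
/*
 * The whole transfer above is covered by a single stop-terminated
 * descriptor; DCMD_ENDIRQEN makes the channel raise an interrupt on
 * completion, which is handled by pxa3xx_nand_data_dma_irq() below.
 */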

static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
		complete(&info->cmd_complete);
	}

	if (info->state == STATE_DMA_WRITING) {
		info->state = STATE_DMA_DONE;
		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
	} else {
		info->state = STATE_READY;
		complete(&info->cmd_complete);
	}
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status;

	status = nand_readl(info, NDSR);

	if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) {
		if (status & NDSR_DBERR)
			info->retcode = ERR_DBERR;
		else if (status & NDSR_SBERR)
			info->retcode = ERR_SBERR;

		disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);

		if (info->use_dma) {
			info->state = STATE_DMA_READING;
			start_data_dma(info, 0);
		} else {
			info->state = STATE_PIO_READING;
			complete(&info->cmd_complete);
		}
	} else if (status & NDSR_WRDREQ) {
		disable_int(info, NDSR_WRDREQ);
		if (info->use_dma) {
			info->state = STATE_DMA_WRITING;
			start_data_dma(info, 1);
		} else {
			info->state = STATE_PIO_WRITING;
			complete(&info->cmd_complete);
		}
	} else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
		if (status & NDSR_CS0_BBD)
			info->retcode = ERR_BBERR;

		disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
		info->state = STATE_READY;
		complete(&info->cmd_complete);
	}
	nand_writel(info, NDSR, status);
	return IRQ_HANDLED;
}

static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
{
	uint32_t ndcr;
	int ret, timeout = CHIP_DELAY_TIMEOUT;

	if (write_cmd(info)) {
		info->retcode = ERR_SENDCMD;
		goto fail_stop;
	}

	info->state = STATE_CMD_HANDLE;

	enable_int(info, event);

	ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
	if (!ret) {
		printk(KERN_ERR "command execution timed out\n");
		info->retcode = ERR_SENDCMD;
		goto fail_stop;
	}

	if (info->use_dma == 0 && info->data_size > 0)
		if (handle_data_pio(info))
			goto fail_stop;

	return 0;

fail_stop:
	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
	udelay(10);
	return -ETIMEDOUT;
}

static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
				int column, int page_addr)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	const struct pxa3xx_nand_flash *flash_info = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
	int ret;

	info->use_dma = (use_dma) ? 1 : 0;
	info->use_ecc = 0;
	info->data_size = 0;
	info->state = STATE_READY;

	init_completion(&info->cmd_complete);

	switch (command) {
	case NAND_CMD_READOOB:
		/* disable HW ECC to get all the OOB data */
		info->buf_count = mtd->writesize + mtd->oobsize;
		info->buf_start = mtd->writesize + column;

		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);

		/* we are only reading OOB, so an ECC error in the data
		 * area does not matter
		 */
		if (info->retcode == ERR_DBERR)
			info->retcode = ERR_NONE;
		break;

	case NAND_CMD_READ0:
		info->use_ecc = 1;
		info->retcode = ERR_NONE;
		info->buf_start = column;
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);

		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR);

		if (info->retcode == ERR_DBERR) {
			/* for a blank page (all 0xff), the HW calculates its
			 * ECC as 0, which differs from the ECC stored in the
			 * OOB area; ignore such double-bit errors
			 */
			if (is_buf_blank(info->data_buff, mtd->writesize))
				info->retcode = ERR_NONE;
		}
		break;
	case NAND_CMD_SEQIN:
		info->buf_start = column;
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xff, info->buf_count);

		/* save column/page_addr for next CMD_PAGEPROG */
		info->seqin_column = column;
		info->seqin_page_addr = page_addr;
		break;
	case NAND_CMD_PAGEPROG:
		info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;

		if (prepare_read_prog_cmd(info, cmdset->program,
				info->seqin_column, info->seqin_page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
		break;
	case NAND_CMD_ERASE1:
		if (prepare_erase_cmd(info, cmdset->erase, page_addr))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
		break;
	case NAND_CMD_ERASE2:
		break;
	case NAND_CMD_READID:
	case NAND_CMD_STATUS:
		info->use_dma = 0;	/* force PIO read */
		info->buf_start = 0;
		info->buf_count = (command == NAND_CMD_READID) ?
				info->read_id_bytes : 1;

		if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
				cmdset->read_id : cmdset->read_status))
			break;

		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
		break;
	case NAND_CMD_RESET:
		if (prepare_other_cmd(info, cmdset->reset))
			break;

		ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
		if (ret == 0) {
			int timeout = 2;
			uint32_t ndcr;

			while (timeout--) {
				if (nand_readl(info, NDSR) & NDSR_RDY)
					break;
				msleep(10);
			}

			ndcr = nand_readl(info, NDCR);
			nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
		}
		break;
	default:
		printk(KERN_ERR "unsupported command\n");
		break;
	}

	if (info->retcode == ERR_DBERR) {
		printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
		info->retcode = ERR_NONE;
	}
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* has a new command just been sent? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff + info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	return 0;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_info *info = mtd->priv;

	/* pxa3xx_nand_do_cmd() has already waited for the command to complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else {
			/*
			 * any error makes us return 0x01, which tells the
			 * caller that the erase or write failed
			 */
			return 0x01;
		}
	}

	return 0;
}

static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	return;
}

static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
		const uint8_t *dat, uint8_t *ecc_code)
{
	return 0;
}

static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
		uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	/*
	 * Any error (ERR_SENDCMD, ERR_DBERR, ERR_DMABUSERR, ...) is treated
	 * as an uncorrectable ECC error, which tells the caller that the
	 * read failed.  We do distinguish the individual errors above, but
	 * nand_read_ecc() only checks this function's return value.
	 *
	 * Corrected (single-bit) errors must also be reported.
	 */
	if (info->retcode == ERR_SBERR)
		return 1;
	else if (info->retcode != ERR_NONE)
		return -1;

	return 0;
}

static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	const struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
	uint32_t ndcr;
	uint8_t  id_buff[8];

	if (prepare_other_cmd(info, cmdset->read_id)) {
		printk(KERN_ERR "failed to prepare command\n");
		return -EINVAL;
	}

	/* Send command */
	if (write_cmd(info))
		goto fail_timeout;

	/* wait until the ID data is ready in the FIFO (RDDREQ) */
	if (wait_for_event(info, NDSR_RDDREQ))
		goto fail_timeout;

	__raw_readsl(info->mmio_base + NDDB, id_buff, 2);
	*id = id_buff[0] | (id_buff[1] << 8);
	return 0;

fail_timeout:
	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
	udelay(10);
	return -ETIMEDOUT;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_flash *f)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
	uint32_t ndcr = 0x00000FFF; /* disable all interrupts */

	if (f->page_size != 2048 && f->page_size != 512)
		return -EINVAL;

	if (f->flash_width != 16 && f->flash_width != 8)
		return -EINVAL;

	/* calculate flash information */
	info->oob_size = (f->page_size == 2048) ? 64 : 16;
	info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

	/* calculate addressing information */
	info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

	if (f->num_blocks * f->page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
	ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	info->reg_ndcr = ndcr;

	pxa3xx_nand_set_timing(info, f->timing);
	info->flash_info = f;
	return 0;
}

static void pxa3xx_nand_detect_timing(struct pxa3xx_nand_info *info,
				      struct pxa3xx_nand_timing *t)
{
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0 = nand_readl(info, NDTR0CS0);
	uint32_t ndtr1 = nand_readl(info, NDTR1CS0);

	t->tCH = cycle2ns(tCH_NDTR0(ndtr0), nand_clk);
	t->tCS = cycle2ns(tCS_NDTR0(ndtr0), nand_clk);
	t->tWH = cycle2ns(tWH_NDTR0(ndtr0), nand_clk);
	t->tWP = cycle2ns(tWP_NDTR0(ndtr0), nand_clk);
	t->tRH = cycle2ns(tRH_NDTR0(ndtr0), nand_clk);
	t->tRP = cycle2ns(tRP_NDTR0(ndtr0), nand_clk);

	t->tR = cycle2ns(tR_NDTR1(ndtr1), nand_clk);
	t->tWHR = cycle2ns(tWHR_NDTR1(ndtr1), nand_clk);
	t->tAR = cycle2ns(tAR_NDTR1(ndtr1), nand_clk);
}

static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr = nand_readl(info, NDCR);
	struct nand_flash_dev *type = NULL;
	uint32_t id = -1;
	int i;

	default_flash.page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32;
	default_flash.page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	default_flash.flash_width = ndcr & NDCR_DWIDTH_M ? 16 : 8;
	default_flash.dfc_width = ndcr & NDCR_DWIDTH_C ? 16 : 8;

	if (default_flash.page_size == 2048)
		default_flash.cmdset = &largepage_cmdset;
	else
		default_flash.cmdset = &smallpage_cmdset;

	/* set the info fields needed by __readid() */
	info->flash_info = &default_flash;
	info->read_id_bytes = (default_flash.page_size == 2048) ? 4 : 2;
	info->reg_ndcr = ndcr;

	if (__readid(info, &id))
		return -ENODEV;

	/* Lookup the flash id */
	id = (id >> 8) & 0xff;		/* device id is byte 2 */
	for (i = 0; nand_flash_ids[i].name != NULL; i++) {
		if (id == nand_flash_ids[i].id) {
			type = &nand_flash_ids[i];
			break;
		}
	}

	if (!type)
		return -ENODEV;

	/* fill the missing flash information */
	i = __ffs(default_flash.page_per_block * default_flash.page_size);
	default_flash.num_blocks = type->chipsize << (20 - i);

	info->oob_size = (default_flash.page_size == 2048) ? 64 : 16;

	/* calculate addressing information */
	info->col_addr_cycles = (default_flash.page_size == 2048) ? 2 : 1;

	if (default_flash.num_blocks * default_flash.page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;

	pxa3xx_nand_detect_timing(info, &default_timing);
	default_flash.timing = &default_timing;

	return 0;
}

static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
				    const struct pxa3xx_nand_platform_data *pdata)
{
	const struct pxa3xx_nand_flash *f;
	uint32_t id = -1;
	int i;

	if (pdata->keep_config)
		if (pxa3xx_nand_detect_config(info) == 0)
			return 0;

	for (i = 0; i < pdata->num_flash; i++) {
		f = pdata->flash + i;

		if (pxa3xx_nand_config_flash(info, f))
			continue;

		if (__readid(info, &id))
			continue;

		if (id == f->chip_id)
			return 0;
	}

#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
	for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {

		f = builtin_flash_types[i];

		if (pxa3xx_nand_config_flash(info, f))
			continue;

		if (__readid(info, &id))
			continue;

		if (id == f->chip_id)
			return 0;
	}
#endif

	dev_warn(&info->pdev->dev,
		 "failed to detect configured nand flash; found %04x instead\n",
		 id);
	return -ENODEV;
}

/* the maximum possible buffer size for large page with OOB data
 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
 * data buffer and the DMA descriptor
 */
#define MAX_BUFF_SIZE	PAGE_SIZE

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_buff_size = MAX_BUFF_SIZE;
	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	return 0;
}

static struct nand_ecclayout hw_smallpage_ecclayout = {
	.eccbytes = 6,
	.eccpos = {8, 9, 10, 11, 12, 13 },
	.oobfree = { {2, 6} }
};

static struct nand_ecclayout hw_largepage_ecclayout = {
	.eccbytes = 24,
	.eccpos = {
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 38} }
};
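
/*
 * ECC itself is generated and checked by the controller (the
 * ecc.hwctl/ecc.calculate hooks below are no-ops and ecc.correct only
 * reports the hardware status); these layouts just tell the MTD layer
 * which spare bytes the hardware uses and which remain free.
 */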

static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
				 struct pxa3xx_nand_info *info)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	struct nand_chip *this = &info->nand_chip;

	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;

	this->waitfunc		= pxa3xx_nand_waitfunc;
	this->select_chip	= pxa3xx_nand_select_chip;
	this->dev_ready		= pxa3xx_nand_dev_ready;
	this->cmdfunc		= pxa3xx_nand_cmdfunc;
	this->read_word		= pxa3xx_nand_read_word;
	this->read_byte		= pxa3xx_nand_read_byte;
	this->read_buf		= pxa3xx_nand_read_buf;
	this->write_buf		= pxa3xx_nand_write_buf;
	this->verify_buf	= pxa3xx_nand_verify_buf;

	this->ecc.mode		= NAND_ECC_HW;
	this->ecc.hwctl		= pxa3xx_nand_ecc_hwctl;
	this->ecc.calculate	= pxa3xx_nand_ecc_calculate;
	this->ecc.correct	= pxa3xx_nand_ecc_correct;
	this->ecc.size		= f->page_size;

	if (f->page_size == 2048)
		this->ecc.layout = &hw_largepage_ecclayout;
	else
		this->ecc.layout = &hw_smallpage_ecclayout;

	this->chip_delay = 25;
}

static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct nand_chip *this;
	struct mtd_info *mtd;
	struct resource *r;
	int ret = 0, irq;

	pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
			GFP_KERNEL);
	if (!mtd) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	info = (struct pxa3xx_nand_info *)(&mtd[1]);
	info->pdev = pdev;

	this = &info->nand_chip;
	mtd->priv = info;
	mtd->owner = THIS_MODULE;

	info->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		ret = PTR_ERR(info->clk);
		goto fail_free_mtd;
	}
	clk_enable(info->clk);

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for data DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_dat = r->start;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (r == NULL) {
		dev_err(&pdev->dev, "no resource defined for command DMA\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}
	info->drcmr_cmd = r->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
		goto fail_put_clk;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IO memory resource defined\n");
		ret = -ENODEV;
		goto fail_put_clk;
	}

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (r == NULL) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		ret = -EBUSY;
		goto fail_put_clk;
	}

	info->mmio_base = ioremap(r->start, resource_size(r));
	if (info->mmio_base == NULL) {
		dev_err(&pdev->dev, "ioremap() failed\n");
		ret = -ENODEV;
		goto fail_free_res;
	}

	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		goto fail_free_io;

	ret = request_irq(IRQ_NAND, pxa3xx_nand_irq, IRQF_DISABLED,
				pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	ret = pxa3xx_nand_detect_flash(info, pdata);
	if (ret) {
		dev_err(&pdev->dev, "failed to detect flash\n");
		ret = -ENODEV;
		goto fail_free_irq;
	}

	pxa3xx_nand_init_mtd(mtd, info);

	platform_set_drvdata(pdev, mtd);

	if (nand_scan(mtd, 1)) {
		dev_err(&pdev->dev, "failed to scan nand\n");
		ret = -ENXIO;
		goto fail_free_irq;
	}

	return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);

fail_free_irq:
	free_irq(IRQ_NAND, info);
fail_free_buf:
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->data_buff_size,
			info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);
fail_free_io:
	iounmap(info->mmio_base);
fail_free_res:
	release_mem_region(r->start, resource_size(r));
fail_put_clk:
	clk_disable(info->clk);
	clk_put(info->clk);
fail_free_mtd:
	kfree(mtd);
	return ret;
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;
	struct resource *r;

	platform_set_drvdata(pdev, NULL);

	del_mtd_device(mtd);
	del_mtd_partitions(mtd);
	free_irq(IRQ_NAND, info);
	if (use_dma) {
		pxa_free_dma(info->data_dma_ch);
		/* the buffer was obtained with dma_alloc_coherent() */
		dma_free_coherent(&pdev->dev, info->data_buff_size,
				info->data_buff, info->data_buff_phys);
	} else
		kfree(info->data_buff);

	iounmap(info->mmio_base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	clk_disable(info->clk);
	clk_put(info->clk);

	kfree(mtd);
	return 0;
}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;

	if (info->state != STATE_READY) {
		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	return 0;
}

static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
	struct pxa3xx_nand_info *info = mtd->priv;

	clk_enable(info->clk);

	return pxa3xx_nand_config_flash(info, info->flash_info);
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

static int __init pxa3xx_nand_init(void)
{
	return platform_driver_register(&pxa3xx_nand_driver);
}
module_init(pxa3xx_nand_init);

static void __exit pxa3xx_nand_exit(void)
{
	platform_driver_unregister(&pxa3xx_nand_driver);
}
module_exit(pxa3xx_nand_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");