pxa3xx_nand.c revision 80ebf20f34c30760cfba7b5e0a418241181d2cd9
1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/interrupt.h>
14#include <linux/platform_device.h>
15#include <linux/dma-mapping.h>
16#include <linux/delay.h>
17#include <linux/clk.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/nand.h>
20#include <linux/mtd/partitions.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23#include <asm/dma.h>
24
25#include <mach/pxa-regs.h>
26#include <mach/pxa3xx_nand.h>
27
28#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)
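/* 2 * HZ / 10 jiffies corresponds to a 200 ms command timeout */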
29
30/* registers and bit definitions */
31#define NDCR		(0x00) /* Control register */
32#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
33#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
34#define NDSR		(0x14) /* Status Register */
35#define NDPCR		(0x18) /* Page Count Register */
36#define NDBDR0		(0x1C) /* Bad Block Register 0 */
37#define NDBDR1		(0x20) /* Bad Block Register 1 */
38#define NDDB		(0x40) /* Data Buffer */
39#define NDCB0		(0x48) /* Command Buffer0 */
40#define NDCB1		(0x4C) /* Command Buffer1 */
41#define NDCB2		(0x50) /* Command Buffer2 */
42
43#define NDCR_SPARE_EN		(0x1 << 31)
44#define NDCR_ECC_EN		(0x1 << 30)
45#define NDCR_DMA_EN		(0x1 << 29)
46#define NDCR_ND_RUN		(0x1 << 28)
47#define NDCR_DWIDTH_C		(0x1 << 27)
48#define NDCR_DWIDTH_M		(0x1 << 26)
49#define NDCR_PAGE_SZ		(0x1 << 24)
50#define NDCR_NCSX		(0x1 << 23)
51#define NDCR_ND_MODE		(0x3 << 21)
52#define NDCR_NAND_MODE   	(0x0)
53#define NDCR_CLR_PG_CNT		(0x1 << 20)
54#define NDCR_CLR_ECC		(0x1 << 19)
55#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
56#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)
57
58#define NDCR_RA_START		(0x1 << 15)
59#define NDCR_PG_PER_BLK		(0x1 << 14)
60#define NDCR_ND_ARB_EN		(0x1 << 12)
61
62#define NDSR_MASK		(0xfff)
63#define NDSR_RDY		(0x1 << 11)
64#define NDSR_CS0_PAGED		(0x1 << 10)
65#define NDSR_CS1_PAGED		(0x1 << 9)
66#define NDSR_CS0_CMDD		(0x1 << 8)
67#define NDSR_CS1_CMDD		(0x1 << 7)
68#define NDSR_CS0_BBD		(0x1 << 6)
69#define NDSR_CS1_BBD		(0x1 << 5)
70#define NDSR_DBERR		(0x1 << 4)
71#define NDSR_SBERR		(0x1 << 3)
72#define NDSR_WRDREQ		(0x1 << 2)
73#define NDSR_RDDREQ		(0x1 << 1)
74#define NDSR_WRCMDREQ		(0x1)
75
76#define NDCB0_AUTO_RS		(0x1 << 25)
77#define NDCB0_CSEL		(0x1 << 24)
78#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
79#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
80#define NDCB0_NC		(0x1 << 20)
81#define NDCB0_DBC		(0x1 << 19)
82#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
83#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
84#define NDCB0_CMD2_MASK		(0xff << 8)
85#define NDCB0_CMD1_MASK		(0xff)
86#define NDCB0_ADDR_CYC_SHIFT	(16)
87
88/* dma-able I/O address for the NAND data and commands */
89#define NDCB0_DMA_ADDR		(0x43100048)
90#define NDDB_DMA_ADDR		(0x43100040)
91
92/* macros for registers read/write */
93#define nand_writel(info, off, val)	\
94	__raw_writel((val), (info)->mmio_base + (off))
95
96#define nand_readl(info, off)		\
97	__raw_readl((info)->mmio_base + (off))
98
99/* error code and state */
100enum {
101	ERR_NONE	= 0,
102	ERR_DMABUSERR	= -1,
103	ERR_SENDCMD	= -2,
104	ERR_DBERR	= -3,
105	ERR_BBERR	= -4,
106};
107
108enum {
109	STATE_READY	= 0,
110	STATE_CMD_HANDLE,
111	STATE_DMA_READING,
112	STATE_DMA_WRITING,
113	STATE_DMA_DONE,
114	STATE_PIO_READING,
115	STATE_PIO_WRITING,
116};
117
118struct pxa3xx_nand_info {
119	struct nand_chip	nand_chip;
120
121	struct platform_device	 *pdev;
122	struct pxa3xx_nand_flash *flash_info;
123
124	struct clk		*clk;
125	void __iomem		*mmio_base;
126
127	unsigned int 		buf_start;
128	unsigned int		buf_count;
129
130	/* DMA information */
131	int			drcmr_dat;
132	int			drcmr_cmd;
133
134	unsigned char		*data_buff;
135	dma_addr_t 		data_buff_phys;
136	size_t			data_buff_size;
137	int 			data_dma_ch;
138	struct pxa_dma_desc	*data_desc;
139	dma_addr_t 		data_desc_addr;
140
141	uint32_t		reg_ndcr;
142
143	/* saved column/page_addr during CMD_SEQIN */
144	int			seqin_column;
145	int			seqin_page_addr;
146
147	/* state related to the current command */
148	unsigned int		state;
149
150	int			use_ecc;	/* use HW ECC ? */
151	int			use_dma;	/* use DMA ? */
152
153	size_t			data_size;	/* data size in FIFO */
154	int 			retcode;
155	struct completion 	cmd_complete;
156
157	/* generated NDCBx register values */
158	uint32_t		ndcb0;
159	uint32_t		ndcb1;
160	uint32_t		ndcb2;
161};
162
163static int use_dma = 1;
164module_param(use_dma, bool, 0444);
165MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
166
167#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
168static struct pxa3xx_nand_cmdset smallpage_cmdset = {
169	.read1		= 0x0000,
170	.read2		= 0x0050,
171	.program	= 0x1080,
172	.read_status	= 0x0070,
173	.read_id	= 0x0090,
174	.erase		= 0xD060,
175	.reset		= 0x00FF,
176	.lock		= 0x002A,
177	.unlock		= 0x2423,
178	.lock_status	= 0x007A,
179};
180
181static struct pxa3xx_nand_cmdset largepage_cmdset = {
182	.read1		= 0x3000,
183	.read2		= 0x0050,
184	.program	= 0x1080,
185	.read_status	= 0x0070,
186	.read_id	= 0x0090,
187	.erase		= 0xD060,
188	.reset		= 0x00FF,
189	.lock		= 0x002A,
190	.unlock		= 0x2423,
191	.lock_status	= 0x007A,
192};
193
194static struct pxa3xx_nand_timing samsung512MbX16_timing = {
195	.tCH	= 10,
196	.tCS	= 0,
197	.tWH	= 20,
198	.tWP	= 40,
199	.tRH	= 30,
200	.tRP	= 40,
201	.tR	= 11123,
202	.tWHR	= 110,
203	.tAR	= 10,
204};
205
206static struct pxa3xx_nand_flash samsung512MbX16 = {
207	.timing		= &samsung512MbX16_timing,
208	.cmdset		= &smallpage_cmdset,
209	.page_per_block	= 32,
210	.page_size	= 512,
211	.flash_width	= 16,
212	.dfc_width	= 16,
213	.num_blocks	= 4096,
214	.chip_id	= 0x46ec,
215};
216
217static struct pxa3xx_nand_timing micron_timing = {
218	.tCH	= 10,
219	.tCS	= 25,
220	.tWH	= 15,
221	.tWP	= 25,
222	.tRH	= 15,
223	.tRP	= 25,
224	.tR	= 25000,
225	.tWHR	= 60,
226	.tAR	= 10,
227};
228
229static struct pxa3xx_nand_flash micron1GbX8 = {
230	.timing		= &micron_timing,
231	.cmdset		= &largepage_cmdset,
232	.page_per_block	= 64,
233	.page_size	= 2048,
234	.flash_width	= 8,
235	.dfc_width	= 8,
236	.num_blocks	= 1024,
237	.chip_id	= 0xa12c,
238};
239
240static struct pxa3xx_nand_flash micron1GbX16 = {
241	.timing		= &micron_timing,
242	.cmdset		= &largepage_cmdset,
243	.page_per_block	= 64,
244	.page_size	= 2048,
245	.flash_width	= 16,
246	.dfc_width	= 16,
247	.num_blocks	= 1024,
248	.chip_id	= 0xb12c,
249};
250
251static struct pxa3xx_nand_timing stm2GbX16_timing = {
252	.tCH = 10,
253	.tCS = 35,
254	.tWH = 15,
255	.tWP = 25,
256	.tRH = 15,
257	.tRP = 25,
258	.tR = 25000,
259	.tWHR = 60,
260	.tAR = 10,
261};
262
263static struct pxa3xx_nand_flash stm2GbX16 = {
264	.timing = &stm2GbX16_timing,
265	.page_per_block = 64,
266	.page_size = 2048,
267	.flash_width = 16,
268	.dfc_width = 16,
269	.num_blocks = 2048,
270	.chip_id = 0xba20,
271};
272
273static struct pxa3xx_nand_flash *builtin_flash_types[] = {
274	&samsung512MbX16,
275	&micron1GbX8,
276	&micron1GbX16,
277	&stm2GbX16,
278};
279#endif /* CONFIG_MTD_NAND_PXA3xx_BUILTIN */
280
281#define NDTR0_tCH(c)	(min((c), 7) << 19)
282#define NDTR0_tCS(c)	(min((c), 7) << 16)
283#define NDTR0_tWH(c)	(min((c), 7) << 11)
284#define NDTR0_tWP(c)	(min((c), 7) << 8)
285#define NDTR0_tRH(c)	(min((c), 7) << 3)
286#define NDTR0_tRP(c)	(min((c), 7) << 0)
287
288#define NDTR1_tR(c)	(min((c), 65535) << 16)
289#define NDTR1_tWHR(c)	(min((c), 15) << 4)
290#define NDTR1_tAR(c)	(min((c), 15) << 0)
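/* each timing value is clamped to the width of its register field */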
291
292/* convert nanoseconds to NAND flash controller clock cycles */
293#define ns2cycle(ns, clk)	(int)(((ns) * ((clk) / 1000000) / 1000) + 1)
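/*
 * Worked example, assuming a hypothetical 156 MHz controller clock:
 *   ns2cycle(25, 156000000) = (25 * (156000000 / 1000000) / 1000) + 1
 *                           = (25 * 156 / 1000) + 1 = 3 + 1 = 4 cycles
 */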
294
295static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
296				   struct pxa3xx_nand_timing *t)
297{
298	unsigned long nand_clk = clk_get_rate(info->clk);
299	uint32_t ndtr0, ndtr1;
300
301	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
302		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
303		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
304		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
305		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
306		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
307
308	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
309		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
310		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
311
312	nand_writel(info, NDTR0CS0, ndtr0);
313	nand_writel(info, NDTR1CS0, ndtr1);
314}
315
316#define WAIT_EVENT_TIMEOUT	10
317
318static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
319{
320	int timeout = WAIT_EVENT_TIMEOUT;
321	uint32_t ndsr;
322
323	while (timeout--) {
324		ndsr = nand_readl(info, NDSR) & NDSR_MASK;
325		if (ndsr & event) {
326			nand_writel(info, NDSR, ndsr);
327			return 0;
328		}
329		udelay(10);
330	}
331
332	return -ETIMEDOUT;
333}
334
335static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
336			uint16_t cmd, int column, int page_addr)
337{
338	struct pxa3xx_nand_flash *f = info->flash_info;
339	struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
340
341	/* calculate data size */
342	switch (f->page_size) {
343	case 2048:
344		info->data_size = (info->use_ecc) ? 2088 : 2112;
345		break;
346	case 512:
347		info->data_size = (info->use_ecc) ? 520 : 528;
348		break;
349	default:
350		return -EINVAL;
351	}
352
353	/* generate values for NDCBx registers */
354	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
355	info->ndcb1 = 0;
356	info->ndcb2 = 0;
357	info->ndcb0 |= NDCB0_ADDR_CYC(f->row_addr_cycles + f->col_addr_cycles);
358
359	if (f->col_addr_cycles == 2) {
360		/* large block, 2 cycles for column address
361		 * row address starts from 3rd cycle
362		 */
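		/* hypothetical example: page_addr = 0x12345, column = 0 with
		 * 3 row address cycles gives ndcb1 = 0x23450000, ndcb2 = 0x01
		 */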
363		info->ndcb1 |= (page_addr << 16) | (column & 0xffff);
364		if (f->row_addr_cycles == 3)
365			info->ndcb2 = (page_addr >> 16) & 0xff;
366	} else
367		/* small block, 1 cycle for column address
368		 * row address starts from 2nd cycle
369		 */
370		info->ndcb1 = (page_addr << 8) | (column & 0xff);
371
372	if (cmd == cmdset->program)
373		info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;
374
375	return 0;
376}
377
378static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
379			uint16_t cmd, int page_addr)
380{
381	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
382	info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
383	info->ndcb1 = page_addr;
384	info->ndcb2 = 0;
385	return 0;
386}
387
388static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
389{
390	struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;
391
392	info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
393	info->ndcb1 = 0;
394	info->ndcb2 = 0;
395
396	if (cmd == cmdset->read_id) {
397		info->ndcb0 |= NDCB0_CMD_TYPE(3);
398		info->data_size = 8;
399	} else if (cmd == cmdset->read_status) {
400		info->ndcb0 |= NDCB0_CMD_TYPE(4);
401		info->data_size = 8;
402	} else if (cmd == cmdset->reset || cmd == cmdset->lock ||
403		   cmd == cmdset->unlock) {
404		info->ndcb0 |= NDCB0_CMD_TYPE(5);
405	} else
406		return -EINVAL;
407
408	return 0;
409}
410
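/*
 * The low bits of NDCR are interrupt masks: enable_int() clears the
 * given bits to unmask the events, disable_int() sets them to mask
 * the events off again.
 */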
411static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
412{
413	uint32_t ndcr;
414
415	ndcr = nand_readl(info, NDCR);
416	nand_writel(info, NDCR, ndcr & ~int_mask);
417}
418
419static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
420{
421	uint32_t ndcr;
422
423	ndcr = nand_readl(info, NDCR);
424	nand_writel(info, NDCR, ndcr | int_mask);
425}
426
427/* NOTE: NDCR_ND_RUN must be set first and only then may the command
428 * buffer be written, otherwise the operation does not work
429 */
430static int write_cmd(struct pxa3xx_nand_info *info)
431{
432	uint32_t ndcr;
433
434	/* clear status bits and run */
435	nand_writel(info, NDSR, NDSR_MASK);
436
437	ndcr = info->reg_ndcr;
438
439	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
440	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
441	ndcr |= NDCR_ND_RUN;
442
443	nand_writel(info, NDCR, ndcr);
444
445	if (wait_for_event(info, NDSR_WRCMDREQ)) {
446		printk(KERN_ERR "timed out writing command\n");
447		return -ETIMEDOUT;
448	}
449
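	/* all three command words are loaded by successive writes to NDCB0 */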
450	nand_writel(info, NDCB0, info->ndcb0);
451	nand_writel(info, NDCB0, info->ndcb1);
452	nand_writel(info, NDCB0, info->ndcb2);
453	return 0;
454}
455
456static int handle_data_pio(struct pxa3xx_nand_info *info)
457{
458	int ret, timeout = CHIP_DELAY_TIMEOUT;
459
460	switch (info->state) {
461	case STATE_PIO_WRITING:
462		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
463				DIV_ROUND_UP(info->data_size, 4)); /* count in 32-bit words */
464
465		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
466
467		ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
468		if (!ret) {
469			printk(KERN_ERR "program command time out\n");
470			return -1;
471		}
472		break;
473	case STATE_PIO_READING:
474		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
475				DIV_ROUND_UP(info->data_size, 4));
476		break;
477	default:
478		printk(KERN_ERR "%s: invalid state %d\n", __func__,
479				info->state);
480		return -EINVAL;
481	}
482
483	info->state = STATE_READY;
484	return 0;
485}
486
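/*
 * Build a single pxa_dma descriptor for the transfer: 32-bit accesses
 * in 32-byte bursts, with the NAND data FIFO (NDDB) as the target for
 * writes and the source for reads, then start the DMA channel.
 */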
487static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
488{
489	struct pxa_dma_desc *desc = info->data_desc;
490	int dma_len = ALIGN(info->data_size, 32);
491
492	desc->ddadr = DDADR_STOP;
493	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
494
495	if (dir_out) {
496		desc->dsadr = info->data_buff_phys;
497		desc->dtadr = NDDB_DMA_ADDR;
498		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
499	} else {
500		desc->dtadr = info->data_buff_phys;
501		desc->dsadr = NDDB_DMA_ADDR;
502		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
503	}
504
505	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
506	DDADR(info->data_dma_ch) = info->data_desc_addr;
507	DCSR(info->data_dma_ch) |= DCSR_RUN;
508}
509
510static void pxa3xx_nand_data_dma_irq(int channel, void *data)
511{
512	struct pxa3xx_nand_info *info = data;
513	uint32_t dcsr;
514
515	dcsr = DCSR(channel);
516	DCSR(channel) = dcsr;
517
518	if (dcsr & DCSR_BUSERR) {
519		info->retcode = ERR_DMABUSERR;
520		complete(&info->cmd_complete);
521	}
522
523	if (info->state == STATE_DMA_WRITING) {
524		info->state = STATE_DMA_DONE;
525		enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
526	} else {
527		info->state = STATE_READY;
528		complete(&info->cmd_complete);
529	}
530}
531
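/*
 * Interrupt handler: data requests either start the DMA transfer or
 * hand over to PIO by completing cmd_complete; command-done and
 * bad-block events end the command and wake the waiting thread.
 */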
532static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
533{
534	struct pxa3xx_nand_info *info = devid;
535	unsigned int status;
536
537	status = nand_readl(info, NDSR);
538
539	if (status & (NDSR_RDDREQ | NDSR_DBERR)) {
540		if (status & NDSR_DBERR)
541			info->retcode = ERR_DBERR;
542
543		disable_int(info, NDSR_RDDREQ | NDSR_DBERR);
544
545		if (info->use_dma) {
546			info->state = STATE_DMA_READING;
547			start_data_dma(info, 0);
548		} else {
549			info->state = STATE_PIO_READING;
550			complete(&info->cmd_complete);
551		}
552	} else if (status & NDSR_WRDREQ) {
553		disable_int(info, NDSR_WRDREQ);
554		if (info->use_dma) {
555			info->state = STATE_DMA_WRITING;
556			start_data_dma(info, 1);
557		} else {
558			info->state = STATE_PIO_WRITING;
559			complete(&info->cmd_complete);
560		}
561	} else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
562		if (status & NDSR_CS0_BBD)
563			info->retcode = ERR_BBERR;
564
565		disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
566		info->state = STATE_READY;
567		complete(&info->cmd_complete);
568	}
569	nand_writel(info, NDSR, status);
570	return IRQ_HANDLED;
571}
572
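/*
 * Issue the prepared NDCBx command, unmask the requested NDSR events
 * and wait (with a timeout) for the interrupt/DMA path to signal
 * completion; PIO transfers are then handled by handle_data_pio().
 */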
573static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
574{
575	uint32_t ndcr;
576	int ret, timeout = CHIP_DELAY_TIMEOUT;
577
578	if (write_cmd(info)) {
579		info->retcode = ERR_SENDCMD;
580		goto fail_stop;
581	}
582
583	info->state = STATE_CMD_HANDLE;
584
585	enable_int(info, event);
586
587	ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
588	if (!ret) {
589		printk(KERN_ERR "command execution timed out\n");
590		info->retcode = ERR_SENDCMD;
591		goto fail_stop;
592	}
593
594	if (info->use_dma == 0 && info->data_size > 0)
595		if (handle_data_pio(info))
596			goto fail_stop;
597
598	return 0;
599
600fail_stop:
601	ndcr = nand_readl(info, NDCR);
602	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
603	udelay(10);
604	return -ETIMEDOUT;
605}
606
607static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
608{
609	struct pxa3xx_nand_info *info = mtd->priv;
610	return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
611}
612
613static inline int is_buf_blank(uint8_t *buf, size_t len)
614{
615	for (; len > 0; len--)
616		if (*buf++ != 0xff)
617			return 0;
618	return 1;
619}
620
621static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
622				int column, int page_addr)
623{
624	struct pxa3xx_nand_info *info = mtd->priv;
625	struct pxa3xx_nand_flash *flash_info = info->flash_info;
626	struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
627	int ret;
628
629	info->use_dma = (use_dma) ? 1 : 0;
630	info->use_ecc = 0;
631	info->data_size = 0;
632	info->state = STATE_READY;
633
634	init_completion(&info->cmd_complete);
635
636	switch (command) {
637	case NAND_CMD_READOOB:
638		/* disable HW ECC to get all the OOB data */
639		info->buf_count = mtd->writesize + mtd->oobsize;
640		info->buf_start = mtd->writesize + column;
641
642		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
643			break;
644
645		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
646
647		/* We only want the OOB, so an error in the data area does not matter */
648		if (info->retcode == ERR_DBERR)
649			info->retcode = ERR_NONE;
650		break;
651
652	case NAND_CMD_READ0:
653		info->use_ecc = 1;
654		info->retcode = ERR_NONE;
655		info->buf_start = column;
656		info->buf_count = mtd->writesize + mtd->oobsize;
657		memset(info->data_buff, 0xFF, info->buf_count);
658
659		if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
660			break;
661
662		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);
663
664		if (info->retcode == ERR_DBERR) {
665			/* for blank page (all 0xff), HW will calculate its ECC as
666			 * 0, which is different from the ECC information within
667			 * OOB, ignore such double bit errors
668			 */
669			if (is_buf_blank(info->data_buff, mtd->writesize))
670				info->retcode = ERR_NONE;
671		}
672		break;
673	case NAND_CMD_SEQIN:
674		info->buf_start = column;
675		info->buf_count = mtd->writesize + mtd->oobsize;
676		memset(info->data_buff, 0xff, info->buf_count);
677
678		/* save column/page_addr for next CMD_PAGEPROG */
679		info->seqin_column = column;
680		info->seqin_page_addr = page_addr;
681		break;
682	case NAND_CMD_PAGEPROG:
683		info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;
684
685		if (prepare_read_prog_cmd(info, cmdset->program,
686				info->seqin_column, info->seqin_page_addr))
687			break;
688
689		pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
690		break;
691	case NAND_CMD_ERASE1:
692		if (prepare_erase_cmd(info, cmdset->erase, page_addr))
693			break;
694
695		pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
696		break;
697	case NAND_CMD_ERASE2:
698		break;
699	case NAND_CMD_READID:
700	case NAND_CMD_STATUS:
701		info->use_dma = 0;	/* force PIO read */
702		info->buf_start = 0;
703		info->buf_count = (command == NAND_CMD_READID) ?
704				flash_info->read_id_bytes : 1;
705
706		if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
707				cmdset->read_id : cmdset->read_status))
708			break;
709
710		pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
711		break;
712	case NAND_CMD_RESET:
713		if (prepare_other_cmd(info, cmdset->reset))
714			break;
715
716		ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
717		if (ret == 0) {
718			int timeout = 2;
719			uint32_t ndcr;
720
721			while (timeout--) {
722				if (nand_readl(info, NDSR) & NDSR_RDY)
723					break;
724				msleep(10);
725			}
726
727			ndcr = nand_readl(info, NDCR);
728			nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
729		}
730		break;
731	default:
732		printk(KERN_ERR "unsupported command.\n");
733		break;
734	}
735
736	if (info->retcode == ERR_DBERR) {
737		printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
738		info->retcode = ERR_NONE;
739	}
740}
741
742static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
743{
744	struct pxa3xx_nand_info *info = mtd->priv;
745	uint8_t retval = 0xFF;
746
747	if (info->buf_start < info->buf_count)
748		/* Has a new command just been sent? */
749		retval = info->data_buff[info->buf_start++];
750
751	return retval;
752}
753
754static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
755{
756	struct pxa3xx_nand_info *info = mtd->priv;
757	u16 retval = 0xFFFF;
758
759	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
760		retval = *((u16 *)(info->data_buff+info->buf_start));
761		info->buf_start += 2;
762	}
763	return retval;
764}
765
766static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
767{
768	struct pxa3xx_nand_info *info = mtd->priv;
769	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
770
771	memcpy(buf, info->data_buff + info->buf_start, real_len);
772	info->buf_start += real_len;
773}
774
775static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
776		const uint8_t *buf, int len)
777{
778	struct pxa3xx_nand_info *info = mtd->priv;
779	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
780
781	memcpy(info->data_buff + info->buf_start, buf, real_len);
782	info->buf_start += real_len;
783}
784
785static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
786		const uint8_t *buf, int len)
787{
788	return 0;
789}
790
791static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
792{
793	return;
794}
795
796static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
797{
798	struct pxa3xx_nand_info *info = mtd->priv;
799
800	/* pxa3xx_nand_do_cmd() has already waited for the command to complete */
801	if (this->state == FL_WRITING || this->state == FL_ERASING) {
802		if (info->retcode == ERR_NONE)
803			return 0;
804		else {
805			/*
806			 * any error makes it return 0x01, which tells
807			 * the caller that the erase or write failed
808			 */
809			return 0x01;
810		}
811	}
812
813	return 0;
814}
815
816static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
817{
818	return;
819}
820
821static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
822		const uint8_t *dat, uint8_t *ecc_code)
823{
824	return 0;
825}
826
827static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
828		uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
829{
830	struct pxa3xx_nand_info *info = mtd->priv;
831	/*
832	 * Any error (ERR_SENDCMD, ERR_DBERR, ERR_DMABUSERR, ...) is
833	 * treated as an ECC error so the caller is told that the read
834	 * failed. The errors are already distinguished in info->retcode,
835	 * but nand_read_ecc only checks this function's return value.
836	 */
837	if (info->retcode != ERR_NONE)
838		return -1;
839
840	return 0;
841}
842
843static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
844{
845	struct pxa3xx_nand_flash *f = info->flash_info;
846	struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
847	uint32_t ndcr;
848	uint8_t  id_buff[8];
849
850	if (prepare_other_cmd(info, cmdset->read_id)) {
851		printk(KERN_ERR "failed to prepare command\n");
852		return -EINVAL;
853	}
854
855	/* Send command */
856	if (write_cmd(info))
857		goto fail_timeout;
858
859	/* Wait for NDSR_RDDREQ (the ID bytes are ready in the data FIFO) */
860	if (wait_for_event(info, NDSR_RDDREQ))
861		goto fail_timeout;
862
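	/* read two longwords from the data FIFO; only the first two ID
	 * bytes are used
	 */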
863	__raw_readsl(info->mmio_base + NDDB, id_buff, 2);
864	*id = id_buff[0] | (id_buff[1] << 8);
865	return 0;
866
867fail_timeout:
868	ndcr = nand_readl(info, NDCR);
869	nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
870	udelay(10);
871	return -ETIMEDOUT;
872}
873
874static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
875				    struct pxa3xx_nand_flash *f)
876{
877	struct platform_device *pdev = info->pdev;
878	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
879	uint32_t ndcr = 0x00000FFF; /* disable all interrupts */
880
881	if (f->page_size != 2048 && f->page_size != 512)
882		return -EINVAL;
883
884	if (f->flash_width != 16 && f->flash_width != 8)
885		return -EINVAL;
886
887	/* calculate flash information */
888	f->oob_size = (f->page_size == 2048) ? 64 : 16;
889	f->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
890
891	/* calculate addressing information */
892	f->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
893
894	if (f->num_blocks * f->page_per_block > 65536)
895		f->row_addr_cycles = 3;
896	else
897		f->row_addr_cycles = 2;
898
899	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
900	ndcr |= (f->col_addr_cycles == 2) ? NDCR_RA_START : 0;
901	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
902	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
903	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
904	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
905
906	ndcr |= NDCR_RD_ID_CNT(f->read_id_bytes);
907	ndcr |= NDCR_SPARE_EN; /* enable spare by default */
908
909	info->reg_ndcr = ndcr;
910
911	pxa3xx_nand_set_timing(info, f->timing);
912	info->flash_info = f;
913	return 0;
914}
915
916static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info,
917				    const struct pxa3xx_nand_platform_data *pdata)
918{
919	struct pxa3xx_nand_flash *f;
920	uint32_t id;
921	int i;
922
923	for (i = 0; i < pdata->num_flash; ++i) {
924		f = pdata->flash + i;
925
926		if (pxa3xx_nand_config_flash(info, f))
927			continue;
928
929		if (__readid(info, &id))
930			continue;
931
932		if (id == f->chip_id)
933			return 0;
934	}
935
936#ifdef CONFIG_MTD_NAND_PXA3xx_BUILTIN
937	for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {
938
939		f = builtin_flash_types[i];
940
941		if (pxa3xx_nand_config_flash(info, f))
942			continue;
943
944		if (__readid(info, &id))
945			continue;
946
947		if (id == f->chip_id)
948			return 0;
949	}
950#endif
951
952	return -ENODEV;
953}
954
955/* the maximum possible buffer size for large page with OOB data
956 * is 2048 + 64 = 2112 bytes; allocate a page here for both the
957 * data buffer and the DMA descriptor
958 */
959#define MAX_BUFF_SIZE	PAGE_SIZE
960
961static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
962{
963	struct platform_device *pdev = info->pdev;
964	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
965
966	if (use_dma == 0) {
967		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
968		if (info->data_buff == NULL)
969			return -ENOMEM;
970		return 0;
971	}
972
973	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
974				&info->data_buff_phys, GFP_KERNEL);
975	if (info->data_buff == NULL) {
976		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
977		return -ENOMEM;
978	}
979
980	info->data_buff_size = MAX_BUFF_SIZE;
981	info->data_desc = (void *)info->data_buff + data_desc_offset;
982	info->data_desc_addr = info->data_buff_phys + data_desc_offset;
983
984	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
985				pxa3xx_nand_data_dma_irq, info);
986	if (info->data_dma_ch < 0) {
987		dev_err(&pdev->dev, "failed to request data dma\n");
988		dma_free_coherent(&pdev->dev, info->data_buff_size,
989				info->data_buff, info->data_buff_phys);
990		return info->data_dma_ch;
991	}
992
993	return 0;
994}
995
996static struct nand_ecclayout hw_smallpage_ecclayout = {
997	.eccbytes = 6,
998	.eccpos = {8, 9, 10, 11, 12, 13 },
999	.oobfree = { {2, 6} }
1000};
1001
1002static struct nand_ecclayout hw_largepage_ecclayout = {
1003	.eccbytes = 24,
1004	.eccpos = {
1005		40, 41, 42, 43, 44, 45, 46, 47,
1006		48, 49, 50, 51, 52, 53, 54, 55,
1007		56, 57, 58, 59, 60, 61, 62, 63},
1008	.oobfree = { {2, 38} }
1009};
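/* both layouts correspond to 6 ECC bytes per 512 bytes of page data */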
1010
1011static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
1012				 struct pxa3xx_nand_info *info)
1013{
1014	struct pxa3xx_nand_flash *f = info->flash_info;
1015	struct nand_chip *this = &info->nand_chip;
1016
1017	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
1018
1019	this->waitfunc		= pxa3xx_nand_waitfunc;
1020	this->select_chip	= pxa3xx_nand_select_chip;
1021	this->dev_ready		= pxa3xx_nand_dev_ready;
1022	this->cmdfunc		= pxa3xx_nand_cmdfunc;
1023	this->read_word		= pxa3xx_nand_read_word;
1024	this->read_byte		= pxa3xx_nand_read_byte;
1025	this->read_buf		= pxa3xx_nand_read_buf;
1026	this->write_buf		= pxa3xx_nand_write_buf;
1027	this->verify_buf	= pxa3xx_nand_verify_buf;
1028
1029	this->ecc.mode		= NAND_ECC_HW;
1030	this->ecc.hwctl		= pxa3xx_nand_ecc_hwctl;
1031	this->ecc.calculate	= pxa3xx_nand_ecc_calculate;
1032	this->ecc.correct	= pxa3xx_nand_ecc_correct;
1033	this->ecc.size		= f->page_size;
1034
1035	if (f->page_size == 2048)
1036		this->ecc.layout = &hw_largepage_ecclayout;
1037	else
1038		this->ecc.layout = &hw_smallpage_ecclayout;
1039
1040	this->chip_delay = 25;
1041}
1042
1043static int pxa3xx_nand_probe(struct platform_device *pdev)
1044{
1045	struct pxa3xx_nand_platform_data *pdata;
1046	struct pxa3xx_nand_info *info;
1047	struct nand_chip *this;
1048	struct mtd_info *mtd;
1049	struct resource *r;
1050	int ret = 0, irq;
1051
1052	pdata = pdev->dev.platform_data;
1053
1054	if (!pdata) {
1055		dev_err(&pdev->dev, "no platform data defined\n");
1056		return -ENODEV;
1057	}
1058
1059	mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
1060			GFP_KERNEL);
1061	if (!mtd) {
1062		dev_err(&pdev->dev, "failed to allocate memory\n");
1063		return -ENOMEM;
1064	}
1065
1066	info = (struct pxa3xx_nand_info *)(&mtd[1]);
1067	info->pdev = pdev;
1068
1069	this = &info->nand_chip;
1070	mtd->priv = info;
1071
1072	info->clk = clk_get(&pdev->dev, "NANDCLK");
1073	if (IS_ERR(info->clk)) {
1074		dev_err(&pdev->dev, "failed to get nand clock\n");
1075		ret = PTR_ERR(info->clk);
1076		goto fail_free_mtd;
1077	}
1078	clk_enable(info->clk);
1079
1080	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1081	if (r == NULL) {
1082		dev_err(&pdev->dev, "no resource defined for data DMA\n");
1083		ret = -ENXIO;
1084		goto fail_put_clk;
1085	}
1086	info->drcmr_dat = r->start;
1087
1088	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1089	if (r == NULL) {
1090		dev_err(&pdev->dev, "no resource defined for command DMA\n");
1091		ret = -ENXIO;
1092		goto fail_put_clk;
1093	}
1094	info->drcmr_cmd = r->start;
1095
1096	irq = platform_get_irq(pdev, 0);
1097	if (irq < 0) {
1098		dev_err(&pdev->dev, "no IRQ resource defined\n");
1099		ret = -ENXIO;
1100		goto fail_put_clk;
1101	}
1102
1103	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1104	if (r == NULL) {
1105		dev_err(&pdev->dev, "no IO memory resource defined\n");
1106		ret = -ENODEV;
1107		goto fail_put_clk;
1108	}
1109
1110	r = request_mem_region(r->start, r->end - r->start + 1, pdev->name);
1111	if (r == NULL) {
1112		dev_err(&pdev->dev, "failed to request memory resource\n");
1113		ret = -EBUSY;
1114		goto fail_put_clk;
1115	}
1116
1117	info->mmio_base = ioremap(r->start, r->end - r->start + 1);
1118	if (info->mmio_base == NULL) {
1119		dev_err(&pdev->dev, "ioremap() failed\n");
1120		ret = -ENODEV;
1121		goto fail_free_res;
1122	}
1123
1124	ret = pxa3xx_nand_init_buff(info);
1125	if (ret)
1126		goto fail_free_io;
1127
1128	ret = request_irq(IRQ_NAND, pxa3xx_nand_irq, IRQF_DISABLED,
1129				pdev->name, info);
1130	if (ret < 0) {
1131		dev_err(&pdev->dev, "failed to request IRQ\n");
1132		goto fail_free_buf;
1133	}
1134
1135	ret = pxa3xx_nand_detect_flash(info, pdata);
1136	if (ret) {
1137		dev_err(&pdev->dev, "failed to detect flash\n");
1138		ret = -ENODEV;
1139		goto fail_free_irq;
1140	}
1141
1142	pxa3xx_nand_init_mtd(mtd, info);
1143
1144	platform_set_drvdata(pdev, mtd);
1145
1146	if (nand_scan(mtd, 1)) {
1147		dev_err(&pdev->dev, "failed to scan nand\n");
1148		ret = -ENXIO;
1149		goto fail_free_irq;
1150	}
1151
1152	return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1153
1154fail_free_irq:
1155	free_irq(IRQ_NAND, info);
1156fail_free_buf:
1157	if (use_dma) {
1158		pxa_free_dma(info->data_dma_ch);
1159		dma_free_coherent(&pdev->dev, info->data_buff_size,
1160			info->data_buff, info->data_buff_phys);
1161	} else
1162		kfree(info->data_buff);
1163fail_free_io:
1164	iounmap(info->mmio_base);
1165fail_free_res:
1166	release_mem_region(r->start, r->end - r->start + 1);
1167fail_put_clk:
1168	clk_disable(info->clk);
1169	clk_put(info->clk);
1170fail_free_mtd:
1171	kfree(mtd);
1172	return ret;
1173}
1174
1175static int pxa3xx_nand_remove(struct platform_device *pdev)
1176{
1177	struct mtd_info *mtd = platform_get_drvdata(pdev);
1178	struct pxa3xx_nand_info *info = mtd->priv;
1179
1180	platform_set_drvdata(pdev, NULL);
1181
1182	del_mtd_device(mtd);
1183	del_mtd_partitions(mtd);
1184	free_irq(IRQ_NAND, info);
1185	if (use_dma) {
1186		pxa_free_dma(info->data_dma_ch);
1187		dma_free_coherent(&pdev->dev, info->data_buff_size,
1188				info->data_buff, info->data_buff_phys);
1189	} else
1190		kfree(info->data_buff);
1191	kfree(mtd);
1192	return 0;
1193}
1194
1195#ifdef CONFIG_PM
1196static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1197{
1198	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1199	struct pxa3xx_nand_info *info = mtd->priv;
1200
1201	if (info->state != STATE_READY) {
1202		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1203		return -EAGAIN;
1204	}
1205
1206	return 0;
1207}
1208
1209static int pxa3xx_nand_resume(struct platform_device *pdev)
1210{
1211	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
1212	struct pxa3xx_nand_info *info = mtd->priv;
1213
1214	clk_enable(info->clk);
1215
1216	return pxa3xx_nand_config_flash(info, info->flash_info);
1217}
1218#else
1219#define pxa3xx_nand_suspend	NULL
1220#define pxa3xx_nand_resume	NULL
1221#endif
1222
1223static struct platform_driver pxa3xx_nand_driver = {
1224	.driver = {
1225		.name	= "pxa3xx-nand",
1226	},
1227	.probe		= pxa3xx_nand_probe,
1228	.remove		= pxa3xx_nand_remove,
1229	.suspend	= pxa3xx_nand_suspend,
1230	.resume		= pxa3xx_nand_resume,
1231};
1232
1233static int __init pxa3xx_nand_init(void)
1234{
1235	return platform_driver_register(&pxa3xx_nand_driver);
1236}
1237module_init(pxa3xx_nand_init);
1238
1239static void __exit pxa3xx_nand_exit(void)
1240{
1241	platform_driver_unregister(&pxa3xx_nand_driver);
1242}
1243module_exit(pxa3xx_nand_exit);
1244
1245MODULE_LICENSE("GPL");
1246MODULE_DESCRIPTION("PXA3xx NAND controller driver");
1247