/* pxa3xx_nand.c revision 6033a949b2c466a13e84daebd99fdca5960b4db5 */
1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <linux/delay.h>
20#include <linux/clk.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/nand.h>
23#include <linux/mtd/partitions.h>
24#include <linux/io.h>
25#include <linux/irq.h>
26#include <linux/slab.h>
27#include <linux/of.h>
28#include <linux/of_device.h>
29#include <linux/of_mtd.h>
30
31#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
32#define ARCH_HAS_DMA
33#endif
34
35#ifdef ARCH_HAS_DMA
36#include <mach/dma.h>
37#endif
38
39#include <linux/platform_data/mtd-nand-pxa3xx.h>
40
41#define NAND_DEV_READY_TIMEOUT  50
42#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)
43#define NAND_STOP_DELAY		(2 * HZ/50)
44#define PAGE_CHUNK_SIZE		(2048)
45
46/*
47 * Define a buffer size for the initial command that detects the flash device:
48 * STATUS, READID and PARAM. The largest of these is the PARAM command,
49 * needing 256 bytes.
50 */
51#define INIT_BUFFER_SIZE	256
52
53/* registers and bit definitions */
54#define NDCR		(0x00) /* Control register */
55#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
56#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
57#define NDSR		(0x14) /* Status Register */
58#define NDPCR		(0x18) /* Page Count Register */
59#define NDBDR0		(0x1C) /* Bad Block Register 0 */
60#define NDBDR1		(0x20) /* Bad Block Register 1 */
61#define NDECCCTRL	(0x28) /* ECC control */
62#define NDDB		(0x40) /* Data Buffer */
63#define NDCB0		(0x48) /* Command Buffer0 */
64#define NDCB1		(0x4C) /* Command Buffer1 */
65#define NDCB2		(0x50) /* Command Buffer2 */
66
67#define NDCR_SPARE_EN		(0x1 << 31)
68#define NDCR_ECC_EN		(0x1 << 30)
69#define NDCR_DMA_EN		(0x1 << 29)
70#define NDCR_ND_RUN		(0x1 << 28)
71#define NDCR_DWIDTH_C		(0x1 << 27)
72#define NDCR_DWIDTH_M		(0x1 << 26)
73#define NDCR_PAGE_SZ		(0x1 << 24)
74#define NDCR_NCSX		(0x1 << 23)
75#define NDCR_ND_MODE		(0x3 << 21)
76#define NDCR_NAND_MODE   	(0x0)
77#define NDCR_CLR_PG_CNT		(0x1 << 20)
78#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
79#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
80#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)
81
82#define NDCR_RA_START		(0x1 << 15)
83#define NDCR_PG_PER_BLK		(0x1 << 14)
84#define NDCR_ND_ARB_EN		(0x1 << 12)
85#define NDCR_INT_MASK           (0xFFF)
86
87#define NDSR_MASK		(0xfff)
88#define NDSR_ERR_CNT_OFF	(16)
89#define NDSR_ERR_CNT_MASK       (0x1f)
90#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
91#define NDSR_RDY                (0x1 << 12)
92#define NDSR_FLASH_RDY          (0x1 << 11)
93#define NDSR_CS0_PAGED		(0x1 << 10)
94#define NDSR_CS1_PAGED		(0x1 << 9)
95#define NDSR_CS0_CMDD		(0x1 << 8)
96#define NDSR_CS1_CMDD		(0x1 << 7)
97#define NDSR_CS0_BBD		(0x1 << 6)
98#define NDSR_CS1_BBD		(0x1 << 5)
99#define NDSR_UNCORERR		(0x1 << 4)
100#define NDSR_CORERR		(0x1 << 3)
101#define NDSR_WRDREQ		(0x1 << 2)
102#define NDSR_RDDREQ		(0x1 << 1)
103#define NDSR_WRCMDREQ		(0x1)
104
105#define NDCB0_LEN_OVRD		(0x1 << 28)
106#define NDCB0_ST_ROW_EN         (0x1 << 26)
107#define NDCB0_AUTO_RS		(0x1 << 25)
108#define NDCB0_CSEL		(0x1 << 24)
109#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
110#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
111#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
112#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
113#define NDCB0_NC		(0x1 << 20)
114#define NDCB0_DBC		(0x1 << 19)
115#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
116#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
117#define NDCB0_CMD2_MASK		(0xff << 8)
118#define NDCB0_CMD1_MASK		(0xff)
119#define NDCB0_ADDR_CYC_SHIFT	(16)
120
121#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
122#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
123#define EXT_CMD_TYPE_READ	4 /* Read */
124#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
125#define EXT_CMD_TYPE_FINAL	3 /* Final command */
126#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
127#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
128
129/* macros for registers read/write */
130#define nand_writel(info, off, val)	\
131	__raw_writel((val), (info)->mmio_base + (off))
132
133#define nand_readl(info, off)		\
134	__raw_readl((info)->mmio_base + (off))
135
136/* error code and state */
/* Driver-internal error codes stored in info->retcode by the IRQ path. */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error (DCSR_BUSERR) */
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error (NDSR_UNCORERR) */
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,	/* correctable ECC error (NDSR_CORERR) */
};
145
/* Driver state machine, advanced by cmdfunc and the interrupt handler. */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};
158
/* Controller generations: NFCv1 (PXA SoCs) vs NFCv2 (Armada 370/XP). */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
163
/* Per-chip-select state: one instance per attached NAND chip. */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info         *mtd;
	void			*info_data;	/* back-pointer to struct pxa3xx_nand_info */

	/* page size of attached chip */
	int			use_ecc;
	int			cs;		/* chip select this host is wired to */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
	size_t			read_id_bytes;

};
179
/* Per-controller state shared by all chip selects. */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;	/* physical base, used for DMA to NDDB */
	struct completion	cmd_complete, dev_ready;

	/* position/extent of the current transfer in data_buff/oob_buff */
	unsigned int 		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t 		data_buff_phys;
	int 			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t 		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently-selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;	/* waitfunc must wait for dev_ready */

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;	/* bitflips corrected in last chunk */
	unsigned int		max_bitflips;	/* worst chunk, reported to MTD */
	int 			retcode;	/* ERR_* result of the last command */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
242
/* Module knob: allows disabling DMA globally; read-only after load (0444). */
static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
246
/*
 * Timing presets (ns) referenced by builtin_flash_types[] below.
 * Field order is presumably {tCH, tCS, tWH, tWP, tRH, tRP, tR, tWHR, tAR},
 * matching the accessors in pxa3xx_nand_set_timing() — confirm against
 * struct pxa3xx_nand_timing in mtd-nand-pxa3xx.h.
 */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};
253
/*
 * Built-in flash descriptions used when no platform/DT data identifies
 * the chip. Entry 0 is a probe-only default (see DEFAULT_FLASH_TYPE).
 * Column meaning is defined by struct pxa3xx_nand_flash in
 * mtd-nand-pxa3xx.h — verify field order there before editing rows.
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};
265
/* Marker patterns locating the main ("MVBbt0") and mirror BBT on flash. */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* On-flash bad-block table, kept in the last blocks of each chip. */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror copy of the BBT, same placement policy as the main table. */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
288
/* OOB layout for 4KiB pages with 4-bit BCH (two 2KiB chunks). */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/*
 * NOTE(review): eccbytes is 128 but only 32 ECC positions are listed and
 * oobfree is empty — presumably the remaining ECC bytes lie outside the
 * visible OOB window; confirm against the controller documentation.
 */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};
313
314/* Define a default flash type setting serve as flash detecting only */
315#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
316
317#define NDTR0_tCH(c)	(min((c), 7) << 19)
318#define NDTR0_tCS(c)	(min((c), 7) << 16)
319#define NDTR0_tWH(c)	(min((c), 7) << 11)
320#define NDTR0_tWP(c)	(min((c), 7) << 8)
321#define NDTR0_tRH(c)	(min((c), 7) << 3)
322#define NDTR0_tRP(c)	(min((c), 7) << 0)
323
324#define NDTR1_tR(c)	(min((c), 65535) << 16)
325#define NDTR1_tWHR(c)	(min((c), 15) << 4)
326#define NDTR1_tAR(c)	(min((c), 15) << 0)
327
328/* convert nano-seconds to nand flash controller clock cycles */
329#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
330
331static struct of_device_id pxa3xx_nand_dt_ids[] = {
332	{
333		.compatible = "marvell,pxa3xx-nand",
334		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
335	},
336	{}
337};
338MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
339
340static enum pxa3xx_nand_variant
341pxa3xx_nand_get_variant(struct platform_device *pdev)
342{
343	const struct of_device_id *of_id =
344			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
345	if (!of_id)
346		return PXA3XX_NAND_VARIANT_PXA;
347	return (enum pxa3xx_nand_variant)of_id->data;
348}
349
/*
 * Convert the nanosecond timings in @t into controller clock cycles and
 * program NDTR0/NDTR1 for CS0. The values are also cached in @info so
 * they can be re-applied when the active chip select changes (cmdfunc).
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	/* Cache for later chip-select switches, then program the HW. */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
373
374/*
375 * Set the data and OOB size, depending on the selected
376 * spare and ECC configuration.
377 * Only applicable to READ0, READOOB and PAGEPROG commands.
378 */
379static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
380				struct mtd_info *mtd)
381{
382	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
383
384	info->data_size = mtd->writesize;
385	if (!oob_enable)
386		return;
387
388	info->oob_size = info->spare_size;
389	if (!info->use_ecc)
390		info->oob_size += info->ecc_size;
391}
392
393/**
394 * NOTE: it is a must to set ND_RUN firstly, then write
395 * command buffer, otherwise, it does not work.
396 * We enable all the interrupt at the same time, and
397 * let pxa3xx_nand_irq to handle all logic.
398 */
399static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
400{
401	uint32_t ndcr;
402
403	ndcr = info->reg_ndcr;
404
405	if (info->use_ecc) {
406		ndcr |= NDCR_ECC_EN;
407		if (info->ecc_bch)
408			nand_writel(info, NDECCCTRL, 0x1);
409	} else {
410		ndcr &= ~NDCR_ECC_EN;
411		if (info->ecc_bch)
412			nand_writel(info, NDECCCTRL, 0x0);
413	}
414
415	if (info->use_dma)
416		ndcr |= NDCR_DMA_EN;
417	else
418		ndcr &= ~NDCR_DMA_EN;
419
420	if (info->use_spare)
421		ndcr |= NDCR_SPARE_EN;
422	else
423		ndcr &= ~NDCR_SPARE_EN;
424
425	ndcr |= NDCR_ND_RUN;
426
427	/* clear status bits and run */
428	nand_writel(info, NDCR, 0);
429	nand_writel(info, NDSR, NDSR_MASK);
430	nand_writel(info, NDCR, ndcr);
431}
432
/*
 * Stop the state machine: poll (up to NAND_STOP_DELAY ~1us iterations)
 * for the controller to drop ND_RUN on its own; force the bit clear on
 * timeout. Status bits are cleared in either case.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	/* Poll exhausted: the controller is still running, stop it by hand. */
	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
452
453static void __maybe_unused
454enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
455{
456	uint32_t ndcr;
457
458	ndcr = nand_readl(info, NDCR);
459	nand_writel(info, NDCR, ndcr & ~int_mask);
460}
461
462static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
463{
464	uint32_t ndcr;
465
466	ndcr = nand_readl(info, NDCR);
467	nand_writel(info, NDCR, ndcr | int_mask);
468}
469
/*
 * Move one chunk (at most chunk_size bytes of data, plus the OOB bytes
 * when oob_size > 0) between the bounce buffers and the NDDB FIFO by
 * PIO. Direction is taken from info->state; any other state is a bug.
 * FIFO accesses are 32-bit, hence the DIV_ROUND_UP(..., 4) word counts.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		__raw_writesl(info->mmio_base + NDDB,
			      info->data_buff + info->data_buff_pos,
			      DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			__raw_writesl(info->mmio_base + NDDB,
				      info->oob_buff + info->oob_buff_pos,
				      DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		__raw_readsl(info->mmio_base + NDDB,
			     info->data_buff + info->data_buff_pos,
			     DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			__raw_readsl(info->mmio_base + NDDB,
				     info->oob_buff + info->oob_buff_pos,
				     DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
506
507#ifdef ARCH_HAS_DMA
/*
 * Program and kick a single PXA DMA descriptor moving
 * data_size + oob_size bytes (rounded up to the 32-byte burst) between
 * the bounce buffer and the NDDB FIFO. Direction comes from
 * info->state, which was set by the IRQ handler before calling us.
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Map the request line, load the descriptor, start the channel. */
	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}
537
538static void pxa3xx_nand_data_dma_irq(int channel, void *data)
539{
540	struct pxa3xx_nand_info *info = data;
541	uint32_t dcsr;
542
543	dcsr = DCSR(channel);
544	DCSR(channel) = dcsr;
545
546	if (dcsr & DCSR_BUSERR) {
547		info->retcode = ERR_DMABUSERR;
548	}
549
550	info->state = STATE_DMA_DONE;
551	enable_int(info, NDCR_INT_MASK);
552	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
553}
554#else
/* No DMA support on this platform (!ARCH_HAS_DMA): transfers use PIO. */
static void start_data_dma(struct pxa3xx_nand_info *info)
{}
557#endif
558
/*
 * Controller interrupt handler: drives the whole command sequence.
 * Handles, in order: ECC error reporting, data-request (via DMA or
 * PIO), command-done and device-ready events, and the write-command
 * request, which is served by loading NDCB0-NDCB3 through the NDCB0
 * port. Completions wake the waiters in the cmdfunc implementations.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;

	/* Ready/command-done status bits differ per chip select. */
	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* Only the NFCv2 BCH engine reports a real bitflip count. */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			/* DMA completion re-enables interrupts; don't ack now. */
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			handle_data_pio(info);
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		/* Ack WRCMDREQ first: the controller expects NDCBx next. */
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return IRQ_HANDLED;
}
648
/* Return 1 when every byte of buf[0..len) is 0xff (erased), else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i] != 0xff)
			return 0;
	return 1;
}
656
657static void set_command_address(struct pxa3xx_nand_info *info,
658		unsigned int page_size, uint16_t column, int page_addr)
659{
660	/* small page addr setting */
661	if (page_size < PAGE_CHUNK_SIZE) {
662		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
663				| (column & 0xFF);
664
665		info->ndcb2 = 0;
666	} else {
667		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
668				| (column & 0xFFFF);
669
670		if (page_addr & 0xFF0000)
671			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
672		else
673			info->ndcb2 = 0;
674	}
675}
676
/*
 * Reset the per-command driver state and apply command-specific setup
 * (ECC/spare usage, transfer sizes, buffer pre-fill) before
 * prepare_set_command() builds the NDCBx values.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through: READ0/PAGEPROG also need the data sizes */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
723
/*
 * Build the NDCB0-NDCB3 command-buffer values for @command.
 * @ext_cmd_type selects the NFCv2 extended command (naked/last/dispatch)
 * for chunked large-page operations; @column/@page_addr give the target
 * address. Returns 1 when the controller must actually run a sequence,
 * 0 when nothing is to be sent (SEQIN on small pages, ERASE2, blank
 * page program, unsupported commands).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	/* NDCB0_CSEL selects chip-select 1; clear means chip-select 0. */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only latches the address; data goes out with PAGEPROG. */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB reads are plain reads starting past the data area. */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip the program cycle entirely for an all-0xFF buffer. */
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small page: single SEQIN+PAGEPROG double-byte cmd. */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = 256;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = 256;
		info->data_size = 256;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* ERASE2 is issued together with ERASE1 (DBC), nothing to do */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}
898
/*
 * NFCv1 (PXA) cmdfunc: build a single monolithic command, start the
 * controller and wait for the command-done completion raised by the IRQ
 * handler. On timeout the state machine is forcibly stopped so the next
 * command starts clean.
 */
static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
				int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int ret, exec_cmd;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ret = wait_for_completion_timeout(&info->cmd_complete,
				CHIP_DELAY_TIMEOUT);
		if (!ret) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
946
/*
 * NFCv2 (Armada 370/XP) cmdfunc: large pages are transferred as a
 * sequence of chunked ("naked") commands, so this loops re-issuing the
 * command with an updated extended command type until the whole page —
 * plus, for program operations, the final command dispatch — is done.
 */
static void armada370_nand_cmdfunc(struct mtd_info *mtd,
				   const unsigned command,
				   int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int ret, exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		/* Nothing to send: satisfy waitfunc immediately. */
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		ret = wait_for_completion_timeout(&info->cmd_complete,
				CHIP_DELAY_TIMEOUT);
		if (!ret) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1057
/*
 * Stage data and OOB into the driver's bounce buffer; the actual flash
 * transfer (with HW ECC, see prepare_start_command) happens when the
 * NAND core issues PAGEPROG through cmdfunc.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
1066
/*
 * Copy the page already fetched into the bounce buffer by cmdfunc, then
 * translate the controller's ECC outcome (info->retcode) into MTD ECC
 * statistics. Returns the worst per-chunk bitflip count for this read.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1094
1095static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1096{
1097	struct pxa3xx_nand_host *host = mtd->priv;
1098	struct pxa3xx_nand_info *info = host->info_data;
1099	char retval = 0xFF;
1100
1101	if (info->buf_start < info->buf_count)
1102		/* Has just send a new command? */
1103		retval = info->data_buff[info->buf_start++];
1104
1105	return retval;
1106}
1107
1108static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1109{
1110	struct pxa3xx_nand_host *host = mtd->priv;
1111	struct pxa3xx_nand_info *info = host->info_data;
1112	u16 retval = 0xFFFF;
1113
1114	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1115		retval = *((u16 *)(info->data_buff+info->buf_start));
1116		info->buf_start += 2;
1117	}
1118	return retval;
1119}
1120
1121static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1122{
1123	struct pxa3xx_nand_host *host = mtd->priv;
1124	struct pxa3xx_nand_info *info = host->info_data;
1125	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1126
1127	memcpy(buf, info->data_buff + info->buf_start, real_len);
1128	info->buf_start += real_len;
1129}
1130
1131static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1132		const uint8_t *buf, int len)
1133{
1134	struct pxa3xx_nand_host *host = mtd->priv;
1135	struct pxa3xx_nand_info *info = host->info_data;
1136	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1137
1138	memcpy(info->data_buff + info->buf_start, buf, real_len);
1139	info->buf_start += real_len;
1140}
1141
/* Chip selection is driven through info->cs in cmdfunc; nothing to do. */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1146
1147static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1148{
1149	struct pxa3xx_nand_host *host = mtd->priv;
1150	struct pxa3xx_nand_info *info = host->info_data;
1151	int ret;
1152
1153	if (info->need_wait) {
1154		ret = wait_for_completion_timeout(&info->dev_ready,
1155				CHIP_DELAY_TIMEOUT);
1156		info->need_wait = 0;
1157		if (!ret) {
1158			dev_err(&info->pdev->dev, "Ready time out!!!\n");
1159			return NAND_STATUS_FAIL;
1160		}
1161	}
1162
1163	/* pxa3xx_nand_send_command has waited for command complete */
1164	if (this->state == FL_WRITING || this->state == FL_ERASING) {
1165		if (info->retcode == ERR_NONE)
1166			return 0;
1167		else
1168			return NAND_STATUS_FAIL;
1169	}
1170
1171	return NAND_STATUS_READY;
1172}
1173
1174static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1175				    const struct pxa3xx_nand_flash *f)
1176{
1177	struct platform_device *pdev = info->pdev;
1178	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1179	struct pxa3xx_nand_host *host = info->host[info->cs];
1180	uint32_t ndcr = 0x0; /* enable all interrupts */
1181
1182	if (f->page_size != 2048 && f->page_size != 512) {
1183		dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1184		return -EINVAL;
1185	}
1186
1187	if (f->flash_width != 16 && f->flash_width != 8) {
1188		dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1189		return -EINVAL;
1190	}
1191
1192	/* calculate flash information */
1193	host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
1194
1195	/* calculate addressing information */
1196	host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1197
1198	if (f->num_blocks * f->page_per_block > 65536)
1199		host->row_addr_cycles = 3;
1200	else
1201		host->row_addr_cycles = 2;
1202
1203	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1204	ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1205	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1206	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1207	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1208	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1209
1210	ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1211	ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1212
1213	info->reg_ndcr = ndcr;
1214
1215	pxa3xx_nand_set_timing(host, f->timing);
1216	return 0;
1217}
1218
1219static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1220{
1221	/*
1222	 * We set 0 by hard coding here, for we don't support keep_config
1223	 * when there is more than one chip attached to the controller
1224	 */
1225	struct pxa3xx_nand_host *host = info->host[0];
1226	uint32_t ndcr = nand_readl(info, NDCR);
1227
1228	if (ndcr & NDCR_PAGE_SZ) {
1229		/* Controller's FIFO size */
1230		info->chunk_size = 2048;
1231		host->read_id_bytes = 4;
1232	} else {
1233		info->chunk_size = 512;
1234		host->read_id_bytes = 2;
1235	}
1236
1237	/* Set an initial chunk size */
1238	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1239	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1240	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1241	return 0;
1242}
1243
1244#ifdef ARCH_HAS_DMA
/*
 * Allocate the data buffer (DMA-capable build). With use_dma == 0 a
 * plain kmalloc buffer is used; otherwise a DMA-coherent buffer is
 * allocated, a PXA DMA channel is requested, and info->use_dma is set
 * so I/O paths switch to DMA. Returns 0 or a negative errno.
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	/*
	 * The DMA descriptor is placed at the tail of the same coherent
	 * allocation.
	 * NOTE(review): buf_size is set by the caller to writesize +
	 * oobsize; it is not obvious from here that this reserves room
	 * for the descriptor without overlapping the OOB area — confirm.
	 */
	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	/* CPU-visible and bus addresses of the in-buffer descriptor */
	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->buf_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}
1283
1284static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1285{
1286	struct platform_device *pdev = info->pdev;
1287	if (info->use_dma) {
1288		pxa_free_dma(info->data_dma_ch);
1289		dma_free_coherent(&pdev->dev, info->buf_size,
1290				  info->data_buff, info->data_buff_phys);
1291	} else {
1292		kfree(info->data_buff);
1293	}
1294}
1295#else
1296static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1297{
1298	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1299	if (info->data_buff == NULL)
1300		return -ENOMEM;
1301	return 0;
1302}
1303
1304static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1305{
1306	kfree(info->data_buff);
1307}
1308#endif
1309
1310static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1311{
1312	struct mtd_info *mtd;
1313	struct nand_chip *chip;
1314	int ret;
1315
1316	mtd = info->host[info->cs]->mtd;
1317	chip = mtd->priv;
1318
1319	/* use the common timing to make a try */
1320	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1321	if (ret)
1322		return ret;
1323
1324	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1325	ret = chip->waitfunc(mtd, chip);
1326	if (ret & NAND_STATUS_FAIL)
1327		return -ENODEV;
1328
1329	return 0;
1330}
1331
1332static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1333			struct nand_ecc_ctrl *ecc,
1334			int strength, int page_size)
1335{
1336	/*
1337	 * We don't use strength here as the PXA variant
1338	 * is used with non-ONFI compliant devices.
1339	 */
1340	if (page_size == 2048) {
1341		info->chunk_size = 2048;
1342		info->spare_size = 40;
1343		info->ecc_size = 24;
1344		ecc->mode = NAND_ECC_HW;
1345		ecc->size = 512;
1346		ecc->strength = 1;
1347		return 1;
1348
1349	} else if (page_size == 512) {
1350		info->chunk_size = 512;
1351		info->spare_size = 8;
1352		info->ecc_size = 8;
1353		ecc->mode = NAND_ECC_HW;
1354		ecc->size = 512;
1355		ecc->strength = 1;
1356		return 1;
1357	}
1358	return 0;
1359}
1360
1361static int armada370_ecc_init(struct pxa3xx_nand_info *info,
1362			      struct nand_ecc_ctrl *ecc,
1363			      int strength, int ecc_stepsize, int page_size)
1364{
1365	/*
1366	 * Required ECC: 4-bit correction per 512 bytes
1367	 * Select: 16-bit correction per 2048 bytes
1368	 */
1369	if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1370		info->ecc_bch = 1;
1371		info->chunk_size = 2048;
1372		info->spare_size = 32;
1373		info->ecc_size = 32;
1374		ecc->mode = NAND_ECC_HW;
1375		ecc->size = info->chunk_size;
1376		ecc->layout = &ecc_layout_4KB_bch4bit;
1377		ecc->strength = 16;
1378		return 1;
1379
1380	/*
1381	 * Required ECC: 8-bit correction per 512 bytes
1382	 * Select: 16-bit correction per 1024 bytes
1383	 */
1384	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1385		info->ecc_bch = 1;
1386		info->chunk_size = 1024;
1387		info->spare_size = 0;
1388		info->ecc_size = 32;
1389		ecc->mode = NAND_ECC_HW;
1390		ecc->size = info->chunk_size;
1391		ecc->layout = &ecc_layout_4KB_bch8bit;
1392		ecc->strength = 16;
1393		return 1;
1394	}
1395	return 0;
1396}
1397
/*
 * Identify the flash on the current chip select, program the controller
 * accordingly, pick an ECC scheme, (re)allocate the data buffer and
 * finish MTD setup via nand_scan_tail(). Returns 0 or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;

	/* Reuse the bootloader's controller setup when asked to */
	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Probe with default timings; failure means no chip on this CS */
	ret = pxa3xx_nand_sensing(info);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, potential timing set wrong!!\n");

		return -EINVAL;
	}

	/*
	 * Search platform-supplied flash descriptions first, then the
	 * builtin table (skipping its index-0 default entry).
	 */
	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");

		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
		return ret;
	}

	/* Build a one-entry ID table so nand_scan_ident picks this chip */
	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/* Select an ECC scheme matching the variant and detected geometry */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		ret = armada370_ecc_init(info, &chip->ecc,
				   chip->ecc_strength_ds,
				   chip->ecc_step_ds,
				   mtd->writesize);
	else
		ret = pxa_ecc_init(info, &chip->ecc,
				   chip->ecc_strength_ds,
				   mtd->writesize);
	if (!ret) {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			chip->ecc_strength_ds, mtd->writesize);
		return -ENODEV;
	}

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1527
1528static int alloc_nand_resource(struct platform_device *pdev)
1529{
1530	struct pxa3xx_nand_platform_data *pdata;
1531	struct pxa3xx_nand_info *info;
1532	struct pxa3xx_nand_host *host;
1533	struct nand_chip *chip = NULL;
1534	struct mtd_info *mtd;
1535	struct resource *r;
1536	int ret, irq, cs;
1537
1538	pdata = dev_get_platdata(&pdev->dev);
1539	info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1540			    sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1541	if (!info)
1542		return -ENOMEM;
1543
1544	info->pdev = pdev;
1545	info->variant = pxa3xx_nand_get_variant(pdev);
1546	for (cs = 0; cs < pdata->num_cs; cs++) {
1547		mtd = (struct mtd_info *)((unsigned int)&info[1] +
1548		      (sizeof(*mtd) + sizeof(*host)) * cs);
1549		chip = (struct nand_chip *)(&mtd[1]);
1550		host = (struct pxa3xx_nand_host *)chip;
1551		info->host[cs] = host;
1552		host->mtd = mtd;
1553		host->cs = cs;
1554		host->info_data = info;
1555		mtd->priv = host;
1556		mtd->owner = THIS_MODULE;
1557
1558		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
1559		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
1560		chip->controller        = &info->controller;
1561		chip->waitfunc		= pxa3xx_nand_waitfunc;
1562		chip->select_chip	= pxa3xx_nand_select_chip;
1563		chip->read_word		= pxa3xx_nand_read_word;
1564		chip->read_byte		= pxa3xx_nand_read_byte;
1565		chip->read_buf		= pxa3xx_nand_read_buf;
1566		chip->write_buf		= pxa3xx_nand_write_buf;
1567		chip->options		|= NAND_NO_SUBPAGE_WRITE;
1568
1569		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1570			chip->cmdfunc = armada370_nand_cmdfunc;
1571		else
1572			chip->cmdfunc = pxa3xx_nand_cmdfunc;
1573	}
1574
1575	spin_lock_init(&chip->controller->lock);
1576	init_waitqueue_head(&chip->controller->wq);
1577	info->clk = devm_clk_get(&pdev->dev, NULL);
1578	if (IS_ERR(info->clk)) {
1579		dev_err(&pdev->dev, "failed to get nand clock\n");
1580		return PTR_ERR(info->clk);
1581	}
1582	ret = clk_prepare_enable(info->clk);
1583	if (ret < 0)
1584		return ret;
1585
1586	if (use_dma) {
1587		/*
1588		 * This is a dirty hack to make this driver work from
1589		 * devicetree bindings. It can be removed once we have
1590		 * a prober DMA controller framework for DT.
1591		 */
1592		if (pdev->dev.of_node &&
1593		    of_machine_is_compatible("marvell,pxa3xx")) {
1594			info->drcmr_dat = 97;
1595			info->drcmr_cmd = 99;
1596		} else {
1597			r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1598			if (r == NULL) {
1599				dev_err(&pdev->dev,
1600					"no resource defined for data DMA\n");
1601				ret = -ENXIO;
1602				goto fail_disable_clk;
1603			}
1604			info->drcmr_dat = r->start;
1605
1606			r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1607			if (r == NULL) {
1608				dev_err(&pdev->dev,
1609					"no resource defined for cmd DMA\n");
1610				ret = -ENXIO;
1611				goto fail_disable_clk;
1612			}
1613			info->drcmr_cmd = r->start;
1614		}
1615	}
1616
1617	irq = platform_get_irq(pdev, 0);
1618	if (irq < 0) {
1619		dev_err(&pdev->dev, "no IRQ resource defined\n");
1620		ret = -ENXIO;
1621		goto fail_disable_clk;
1622	}
1623
1624	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1625	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1626	if (IS_ERR(info->mmio_base)) {
1627		ret = PTR_ERR(info->mmio_base);
1628		goto fail_disable_clk;
1629	}
1630	info->mmio_phys = r->start;
1631
1632	/* Allocate a buffer to allow flash detection */
1633	info->buf_size = INIT_BUFFER_SIZE;
1634	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1635	if (info->data_buff == NULL) {
1636		ret = -ENOMEM;
1637		goto fail_disable_clk;
1638	}
1639
1640	/* initialize all interrupts to be disabled */
1641	disable_int(info, NDSR_MASK);
1642
1643	ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
1644	if (ret < 0) {
1645		dev_err(&pdev->dev, "failed to request IRQ\n");
1646		goto fail_free_buf;
1647	}
1648
1649	platform_set_drvdata(pdev, info);
1650
1651	return 0;
1652
1653fail_free_buf:
1654	free_irq(irq, info);
1655	kfree(info->data_buff);
1656fail_disable_clk:
1657	clk_disable_unprepare(info->clk);
1658	return ret;
1659}
1660
/*
 * Tear down the driver: release the IRQ, data buffers and clock, then
 * unregister every per-CS MTD device. Tolerates a NULL drvdata (probe
 * never completed).
 *
 * NOTE(review): buffers and clock are released *before* nand_release()
 * unregisters the MTDs — confirm no MTD access can race this teardown.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	clk_disable_unprepare(info->clk);

	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);
	return 0;
}
1683
1684static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1685{
1686	struct pxa3xx_nand_platform_data *pdata;
1687	struct device_node *np = pdev->dev.of_node;
1688	const struct of_device_id *of_id =
1689			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1690
1691	if (!of_id)
1692		return 0;
1693
1694	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1695	if (!pdata)
1696		return -ENOMEM;
1697
1698	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1699		pdata->enable_arbiter = 1;
1700	if (of_get_property(np, "marvell,nand-keep-config", NULL))
1701		pdata->keep_config = 1;
1702	of_property_read_u32(np, "num-cs", &pdata->num_cs);
1703	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1704
1705	pdev->dev.platform_data = pdata;
1706
1707	return 0;
1708}
1709
1710static int pxa3xx_nand_probe(struct platform_device *pdev)
1711{
1712	struct pxa3xx_nand_platform_data *pdata;
1713	struct mtd_part_parser_data ppdata = {};
1714	struct pxa3xx_nand_info *info;
1715	int ret, cs, probe_success;
1716
1717#ifndef ARCH_HAS_DMA
1718	if (use_dma) {
1719		use_dma = 0;
1720		dev_warn(&pdev->dev,
1721			 "This platform can't do DMA on this device\n");
1722	}
1723#endif
1724	ret = pxa3xx_nand_probe_dt(pdev);
1725	if (ret)
1726		return ret;
1727
1728	pdata = dev_get_platdata(&pdev->dev);
1729	if (!pdata) {
1730		dev_err(&pdev->dev, "no platform data defined\n");
1731		return -ENODEV;
1732	}
1733
1734	ret = alloc_nand_resource(pdev);
1735	if (ret) {
1736		dev_err(&pdev->dev, "alloc nand resource failed\n");
1737		return ret;
1738	}
1739
1740	info = platform_get_drvdata(pdev);
1741	probe_success = 0;
1742	for (cs = 0; cs < pdata->num_cs; cs++) {
1743		struct mtd_info *mtd = info->host[cs]->mtd;
1744
1745		/*
1746		 * The mtd name matches the one used in 'mtdparts' kernel
1747		 * parameter. This name cannot be changed or otherwise
1748		 * user's mtd partitions configuration would get broken.
1749		 */
1750		mtd->name = "pxa3xx_nand-0";
1751		info->cs = cs;
1752		ret = pxa3xx_nand_scan(mtd);
1753		if (ret) {
1754			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1755				cs);
1756			continue;
1757		}
1758
1759		ppdata.of_node = pdev->dev.of_node;
1760		ret = mtd_device_parse_register(mtd, NULL,
1761						&ppdata, pdata->parts[cs],
1762						pdata->nr_parts[cs]);
1763		if (!ret)
1764			probe_success = 1;
1765	}
1766
1767	if (!probe_success) {
1768		pxa3xx_nand_remove(pdev);
1769		return -ENODEV;
1770	}
1771
1772	return 0;
1773}
1774
1775#ifdef CONFIG_PM
1776static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1777{
1778	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1779	struct pxa3xx_nand_platform_data *pdata;
1780	struct mtd_info *mtd;
1781	int cs;
1782
1783	pdata = dev_get_platdata(&pdev->dev);
1784	if (info->state) {
1785		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1786		return -EAGAIN;
1787	}
1788
1789	for (cs = 0; cs < pdata->num_cs; cs++) {
1790		mtd = info->host[cs]->mtd;
1791		mtd_suspend(mtd);
1792	}
1793
1794	return 0;
1795}
1796
1797static int pxa3xx_nand_resume(struct platform_device *pdev)
1798{
1799	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1800	struct pxa3xx_nand_platform_data *pdata;
1801	struct mtd_info *mtd;
1802	int cs;
1803
1804	pdata = dev_get_platdata(&pdev->dev);
1805	/* We don't want to handle interrupt without calling mtd routine */
1806	disable_int(info, NDCR_INT_MASK);
1807
1808	/*
1809	 * Directly set the chip select to a invalid value,
1810	 * then the driver would reset the timing according
1811	 * to current chip select at the beginning of cmdfunc
1812	 */
1813	info->cs = 0xff;
1814
1815	/*
1816	 * As the spec says, the NDSR would be updated to 0x1800 when
1817	 * doing the nand_clk disable/enable.
1818	 * To prevent it damaging state machine of the driver, clear
1819	 * all status before resume
1820	 */
1821	nand_writel(info, NDSR, NDSR_MASK);
1822	for (cs = 0; cs < pdata->num_cs; cs++) {
1823		mtd = info->host[cs]->mtd;
1824		mtd_resume(mtd);
1825	}
1826
1827	return 0;
1828}
1829#else
1830#define pxa3xx_nand_suspend	NULL
1831#define pxa3xx_nand_resume	NULL
1832#endif
1833
/* Platform driver glue: probe/remove plus legacy (non-dev_pm_ops) PM hooks. */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};
1844
1845module_platform_driver(pxa3xx_nand_driver);
1846
1847MODULE_LICENSE("GPL");
1848MODULE_DESCRIPTION("PXA3xx NAND controller driver");
1849