pxa3xx_nand.c revision 5cbbdc6a9f8830737e099aca12794eb907f96882
1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <linux/delay.h>
20#include <linux/clk.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/nand.h>
23#include <linux/mtd/partitions.h>
24#include <linux/io.h>
25#include <linux/irq.h>
26#include <linux/slab.h>
27#include <linux/of.h>
28#include <linux/of_device.h>
29#include <linux/of_mtd.h>
30
31#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
32#define ARCH_HAS_DMA
33#endif
34
35#ifdef ARCH_HAS_DMA
36#include <mach/dma.h>
37#endif
38
39#include <linux/platform_data/mtd-nand-pxa3xx.h>
40
41#define NAND_DEV_READY_TIMEOUT  50
42#define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)
43#define NAND_STOP_DELAY		(2 * HZ/50)
44#define PAGE_CHUNK_SIZE		(2048)
45
46/*
47 * Define a buffer size for the initial command that detects the flash device:
48 * STATUS, READID and PARAM. The largest of these is the PARAM command,
49 * needing 256 bytes.
50 */
51#define INIT_BUFFER_SIZE	256
52
53/* registers and bit definitions */
54#define NDCR		(0x00) /* Control register */
55#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
56#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
57#define NDSR		(0x14) /* Status Register */
58#define NDPCR		(0x18) /* Page Count Register */
59#define NDBDR0		(0x1C) /* Bad Block Register 0 */
60#define NDBDR1		(0x20) /* Bad Block Register 1 */
61#define NDECCCTRL	(0x28) /* ECC control */
62#define NDDB		(0x40) /* Data Buffer */
63#define NDCB0		(0x48) /* Command Buffer0 */
64#define NDCB1		(0x4C) /* Command Buffer1 */
65#define NDCB2		(0x50) /* Command Buffer2 */
66
67#define NDCR_SPARE_EN		(0x1 << 31)
68#define NDCR_ECC_EN		(0x1 << 30)
69#define NDCR_DMA_EN		(0x1 << 29)
70#define NDCR_ND_RUN		(0x1 << 28)
71#define NDCR_DWIDTH_C		(0x1 << 27)
72#define NDCR_DWIDTH_M		(0x1 << 26)
73#define NDCR_PAGE_SZ		(0x1 << 24)
74#define NDCR_NCSX		(0x1 << 23)
75#define NDCR_ND_MODE		(0x3 << 21)
76#define NDCR_NAND_MODE   	(0x0)
77#define NDCR_CLR_PG_CNT		(0x1 << 20)
78#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
79#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
80#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)
81
82#define NDCR_RA_START		(0x1 << 15)
83#define NDCR_PG_PER_BLK		(0x1 << 14)
84#define NDCR_ND_ARB_EN		(0x1 << 12)
85#define NDCR_INT_MASK           (0xFFF)
86
87#define NDSR_MASK		(0xfff)
88#define NDSR_ERR_CNT_OFF	(16)
89#define NDSR_ERR_CNT_MASK       (0x1f)
90#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
91#define NDSR_RDY                (0x1 << 12)
92#define NDSR_FLASH_RDY          (0x1 << 11)
93#define NDSR_CS0_PAGED		(0x1 << 10)
94#define NDSR_CS1_PAGED		(0x1 << 9)
95#define NDSR_CS0_CMDD		(0x1 << 8)
96#define NDSR_CS1_CMDD		(0x1 << 7)
97#define NDSR_CS0_BBD		(0x1 << 6)
98#define NDSR_CS1_BBD		(0x1 << 5)
99#define NDSR_UNCORERR		(0x1 << 4)
100#define NDSR_CORERR		(0x1 << 3)
101#define NDSR_WRDREQ		(0x1 << 2)
102#define NDSR_RDDREQ		(0x1 << 1)
103#define NDSR_WRCMDREQ		(0x1)
104
105#define NDCB0_LEN_OVRD		(0x1 << 28)
106#define NDCB0_ST_ROW_EN         (0x1 << 26)
107#define NDCB0_AUTO_RS		(0x1 << 25)
108#define NDCB0_CSEL		(0x1 << 24)
109#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
110#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
111#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
112#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
113#define NDCB0_NC		(0x1 << 20)
114#define NDCB0_DBC		(0x1 << 19)
115#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
116#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
117#define NDCB0_CMD2_MASK		(0xff << 8)
118#define NDCB0_CMD1_MASK		(0xff)
119#define NDCB0_ADDR_CYC_SHIFT	(16)
120
121#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
122#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
123#define EXT_CMD_TYPE_READ	4 /* Read */
124#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
125#define EXT_CMD_TYPE_FINAL	3 /* Final command */
126#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
127#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
128
129/* macros for registers read/write */
130#define nand_writel(info, off, val)	\
131	__raw_writel((val), (info)->mmio_base + (off))
132
133#define nand_readl(info, off)		\
134	__raw_readl((info)->mmio_base + (off))
135
136/* error code and state */
/* error codes reported back to the MTD layer via info->retcode */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error during a transfer */
	ERR_SENDCMD	= -2,	/* failed to issue a command */
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* correctable ECC error */
};

/* states of the command/data state machine (info->state) */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,		/* command registers prepared, not started */
	STATE_CMD_HANDLE,	/* writing NDCBx command bytes */
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

/* controller flavour: NFCv1 (PXA SoCs) vs. NFCv2 (Armada 370/XP) */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
163
/* per-chip-select state: one instance per attached NAND chip */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info         *mtd;
	void			*info_data;	/* back-pointer to struct pxa3xx_nand_info */

	/* chip configuration */
	int			use_ecc;	/* use HW ECC for this chip? */
	int			cs;		/* chip select this chip sits on */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;	/* column address cycles */
	unsigned int		row_addr_cycles;	/* row address cycles */
	size_t			read_id_bytes;		/* bytes returned by READID */

};
179
/* controller-wide state, shared by all chip selects */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;	/* mapped controller registers */
	unsigned long		mmio_phys;	/* physical base, for DMA to NDDB */
	struct completion	cmd_complete, dev_ready;

	unsigned int 		buf_start;	/* read cursor into data_buff */
	unsigned int		buf_count;	/* valid bytes in data_buff */
	unsigned int		buf_size;	/* allocated size of data_buff */
	unsigned int		data_buff_pos;	/* chunk write/read offset (data) */
	unsigned int		oob_buff_pos;	/* chunk write/read offset (OOB) */

	/* DMA information */
	int			drcmr_dat;	/* DMA request line for data */
	int			drcmr_cmd;	/* DMA request line for commands */

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t 		data_buff_phys;
	int 			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t 		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* current state-machine state */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;	/* waitfunc must wait for dev_ready */

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;	/* bitflips in the last chunk */
	unsigned int		max_bitflips;	/* worst chunk of the last page */
	int 			retcode;	/* ERR_* result of last command */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
242
243static bool use_dma = 1;
244module_param(use_dma, bool, 0444);
245MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
246
/*
 * Positional initializers for struct pxa3xx_nand_timing; the field
 * order is declared in <linux/platform_data/mtd-nand-pxa3xx.h> --
 * NOTE(review): verify the column meanings against that header.
 * Values are consumed as nanoseconds by pxa3xx_nand_set_timing().
 */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};
253
/*
 * Built-in flash description table, matched by chip ID during detection.
 * Positional initializers for struct pxa3xx_nand_flash; field order is
 * declared in <linux/platform_data/mtd-nand-pxa3xx.h> -- NOTE(review):
 * confirm column meanings there.  Entry 0 is the detection default
 * (see DEFAULT_FLASH_TYPE below).
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};
265
/* OOB marker patterns identifying the main and mirror bad-block tables */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* On-flash BBT descriptor: table stored in the last blocks of the chip */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror copy of the BBT, same placement policy as the main table */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
288
/* OOB layout for 4KB pages with 4-bit BCH ECC */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/*
 * OOB layout for 4KB pages with 8-bit BCH ECC.
 * NOTE(review): eccbytes is 128 but only 32 positions are listed and
 * oobfree is empty -- presumably the remaining ECC bytes are not
 * client-accessible in this mode; confirm against the controller spec.
 */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};
313
314/* Define a default flash type setting serve as flash detecting only */
315#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
316
317#define NDTR0_tCH(c)	(min((c), 7) << 19)
318#define NDTR0_tCS(c)	(min((c), 7) << 16)
319#define NDTR0_tWH(c)	(min((c), 7) << 11)
320#define NDTR0_tWP(c)	(min((c), 7) << 8)
321#define NDTR0_tRH(c)	(min((c), 7) << 3)
322#define NDTR0_tRP(c)	(min((c), 7) << 0)
323
324#define NDTR1_tR(c)	(min((c), 65535) << 16)
325#define NDTR1_tWHR(c)	(min((c), 15) << 4)
326#define NDTR1_tAR(c)	(min((c), 15) << 0)
327
328/* convert nano-seconds to nand flash controller clock cycles */
329#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
330
/*
 * Device-tree match table.  Only the PXA (NFCv1) variant has a
 * compatible string here; non-DT probes fall back to the PXA variant
 * in pxa3xx_nand_get_variant().
 */
static struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
339
340static enum pxa3xx_nand_variant
341pxa3xx_nand_get_variant(struct platform_device *pdev)
342{
343	const struct of_device_id *of_id =
344			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
345	if (!of_id)
346		return PXA3XX_NAND_VARIANT_PXA;
347	return (enum pxa3xx_nand_variant)of_id->data;
348}
349
/*
 * Program the CS0 timing registers from the given nanosecond timings,
 * converted to controller clock cycles, and cache the computed values
 * so they can be restored when the chip select changes (see
 * nand_cmdfunc()).
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	/* cache for restore on chip-select switch */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
373
374/*
375 * Set the data and OOB size, depending on the selected
376 * spare and ECC configuration.
377 * Only applicable to READ0, READOOB and PAGEPROG commands.
378 */
379static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
380				struct mtd_info *mtd)
381{
382	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
383
384	info->data_size = mtd->writesize;
385	if (!oob_enable)
386		return;
387
388	info->oob_size = info->spare_size;
389	if (!info->use_ecc)
390		info->oob_size += info->ecc_size;
391}
392
393/**
394 * NOTE: it is a must to set ND_RUN firstly, then write
395 * command buffer, otherwise, it does not work.
396 * We enable all the interrupt at the same time, and
397 * let pxa3xx_nand_irq to handle all logic.
398 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* start from the cached baseline configuration */
	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		/* select BCH engine when the chip needs it */
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}
432
/*
 * Stop the controller: wait up to NAND_STOP_DELAY iterations for the
 * RUN bit to self-clear, force it clear on timeout, then acknowledge
 * all pending status bits.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	/* timed out: force the state machine off */
	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
452
453static void __maybe_unused
454enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
455{
456	uint32_t ndcr;
457
458	ndcr = nand_readl(info, NDCR);
459	nand_writel(info, NDCR, ndcr & ~int_mask);
460}
461
462static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
463{
464	uint32_t ndcr;
465
466	ndcr = nand_readl(info, NDCR);
467	nand_writel(info, NDCR, ndcr | int_mask);
468}
469
/*
 * PIO transfer of one chunk (plus its OOB bytes) between the driver
 * buffers and the controller's NDDB FIFO.  Direction is taken from
 * info->state; any other state is a driver bug.  The FIFO is accessed
 * in 32-bit words, so byte counts are rounded up.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		__raw_writesl(info->mmio_base + NDDB,
			      info->data_buff + info->data_buff_pos,
			      DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			__raw_writesl(info->mmio_base + NDDB,
				      info->oob_buff + info->oob_buff_pos,
				      DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		__raw_readsl(info->mmio_base + NDDB,
			     info->data_buff + info->data_buff_pos,
			     DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			__raw_readsl(info->mmio_base + NDDB,
				     info->oob_buff + info->oob_buff_pos,
				     DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
506
507#ifdef ARCH_HAS_DMA
/*
 * Kick off a PXA DMA transfer between data_buff and the NDDB FIFO.
 * Builds a single stop-terminated descriptor whose direction follows
 * info->state, then maps the request line and starts the channel.
 * Completion is signalled via pxa3xx_nand_data_dma_irq().
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	/* burst-align the length to 32 bytes for DCMD_BURST32 */
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		/* memory -> FIFO */
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		/* FIFO -> memory */
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}
537
/*
 * DMA completion callback: acknowledge the channel status (writing the
 * read value back clears it), record bus errors, re-enable controller
 * interrupts, and acknowledge the data-request bits so the command can
 * complete.
 */
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;	/* write-back clears the status bits */

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
554#else
555static void start_data_dma(struct pxa3xx_nand_info *info)
556{}
557#endif
558
/*
 * Main controller interrupt handler.  Drives the whole command state
 * machine: records ECC results, services data requests (DMA or PIO),
 * loads the NDCBx command bytes when the controller asks for them, and
 * completes the cmd_complete/dev_ready completions that the command
 * functions wait on.  Per-chip-select status bits differ, hence the
 * ready/cmd_done selection up front.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;

	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* only NFCv2 BCH reports a real error count in NDSR */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			/* NDSR ack is deferred to the DMA completion irq */
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			handle_data_pio(info);
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return IRQ_HANDLED;
}
648
649static inline int is_buf_blank(uint8_t *buf, size_t len)
650{
651	for (; len > 0; len--)
652		if (*buf++ != 0xff)
653			return 0;
654	return 1;
655}
656
657static void set_command_address(struct pxa3xx_nand_info *info,
658		unsigned int page_size, uint16_t column, int page_addr)
659{
660	/* small page addr setting */
661	if (page_size < PAGE_CHUNK_SIZE) {
662		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
663				| (column & 0xFF);
664
665		info->ndcb2 = 0;
666	} else {
667		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
668				| (column & 0xFFFF);
669
670		if (page_addr & 0xFF0000)
671			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
672		else
673			info->ndcb2 = 0;
674	}
675}
676
/*
 * Reset the per-command driver state before issuing @command and set
 * up the command-specific options (ECC use, spare use, transfer sizes).
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through: these commands also transfer page data */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		/* PARAM data is raw; no spare area involved */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
724
/*
 * Fill the NDCB0..NDCB3 shadow values for @command and decide whether
 * the controller must actually be started.
 *
 * @info:         controller state; info->cs selects the target chip
 * @command:      NAND_CMD_* opcode from the MTD layer
 * @ext_cmd_type: NDCB0 extended command type; only meaningful for the
 *                chunked (writesize > PAGE_CHUNK_SIZE) sequences
 * @column:       column address (already word-adjusted by the caller)
 * @page_addr:    page (row) address
 *
 * Returns 1 when the prepared command must be executed, 0 when no
 * controller transaction is needed (SEQIN on small pages, blank page
 * program, ERASE2, unsupported opcodes).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	/* chip-select 1 needs the CSEL bit in NDCB0 */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only records the address; PAGEPROG triggers the write */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB read is a full-page read starting past the data */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* NDCB3 carries the overridden transfer length */
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* skip programming a page that is still fully erased */
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* small page: single SEQIN+PAGEPROG double command */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = 256;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = 256;
		info->data_size = 256;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* ERASE1/ERASE2 are issued as one double-byte command */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* already folded into ERASE1's double-byte command */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}
899
/*
 * cmdfunc() hook for devices whose pages fit in one controller chunk:
 * prepare the command, start the controller and wait (with timeout)
 * for command completion.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int ret, exec_cmd;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ret = wait_for_completion_timeout(&info->cmd_complete,
				CHIP_DELAY_TIMEOUT);
		if (!ret) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
947
/*
 * cmdfunc() hook for large-page devices (writesize > chunk size) on
 * NFCv2: the page is transferred as a sequence of chunked commands,
 * stepping the NDCB0 extended command type through the monolithic /
 * naked / last / dispatch stages until the whole page is done.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int ret, exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* nothing to run: release waiters immediately */
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		ret = wait_for_completion_timeout(&info->cmd_complete,
				CHIP_DELAY_TIMEOUT);
		if (!ret) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1058
/*
 * ecc.write_page() hook: the controller computes ECC in hardware, so
 * just stage the page data and OOB into the driver buffer; the actual
 * program is triggered later by the NAND_CMD_PAGEPROG command.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
1067
/*
 * ecc.read_page() hook: copy the page and OOB out of the driver buffer
 * (already filled by the READ0 command), fold the hardware ECC result
 * into the MTD statistics, and return the worst per-chunk bitflip
 * count for the page.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1095
1096static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1097{
1098	struct pxa3xx_nand_host *host = mtd->priv;
1099	struct pxa3xx_nand_info *info = host->info_data;
1100	char retval = 0xFF;
1101
1102	if (info->buf_start < info->buf_count)
1103		/* Has just send a new command? */
1104		retval = info->data_buff[info->buf_start++];
1105
1106	return retval;
1107}
1108
1109static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1110{
1111	struct pxa3xx_nand_host *host = mtd->priv;
1112	struct pxa3xx_nand_info *info = host->info_data;
1113	u16 retval = 0xFFFF;
1114
1115	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1116		retval = *((u16 *)(info->data_buff+info->buf_start));
1117		info->buf_start += 2;
1118	}
1119	return retval;
1120}
1121
1122static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1123{
1124	struct pxa3xx_nand_host *host = mtd->priv;
1125	struct pxa3xx_nand_info *info = host->info_data;
1126	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1127
1128	memcpy(buf, info->data_buff + info->buf_start, real_len);
1129	info->buf_start += real_len;
1130}
1131
1132static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1133		const uint8_t *buf, int len)
1134{
1135	struct pxa3xx_nand_host *host = mtd->priv;
1136	struct pxa3xx_nand_info *info = host->info_data;
1137	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1138
1139	memcpy(info->data_buff + info->buf_start, buf, real_len);
1140	info->buf_start += real_len;
1141}
1142
/*
 * mtd select_chip hook: intentionally a no-op; the driver tracks the
 * active chip select itself through info->cs.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1147
1148static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1149{
1150	struct pxa3xx_nand_host *host = mtd->priv;
1151	struct pxa3xx_nand_info *info = host->info_data;
1152	int ret;
1153
1154	if (info->need_wait) {
1155		ret = wait_for_completion_timeout(&info->dev_ready,
1156				CHIP_DELAY_TIMEOUT);
1157		info->need_wait = 0;
1158		if (!ret) {
1159			dev_err(&info->pdev->dev, "Ready time out!!!\n");
1160			return NAND_STATUS_FAIL;
1161		}
1162	}
1163
1164	/* pxa3xx_nand_send_command has waited for command complete */
1165	if (this->state == FL_WRITING || this->state == FL_ERASING) {
1166		if (info->retcode == ERR_NONE)
1167			return 0;
1168		else
1169			return NAND_STATUS_FAIL;
1170	}
1171
1172	return NAND_STATUS_READY;
1173}
1174
1175static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1176				    const struct pxa3xx_nand_flash *f)
1177{
1178	struct platform_device *pdev = info->pdev;
1179	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1180	struct pxa3xx_nand_host *host = info->host[info->cs];
1181	uint32_t ndcr = 0x0; /* enable all interrupts */
1182
1183	if (f->page_size != 2048 && f->page_size != 512) {
1184		dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1185		return -EINVAL;
1186	}
1187
1188	if (f->flash_width != 16 && f->flash_width != 8) {
1189		dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1190		return -EINVAL;
1191	}
1192
1193	/* calculate flash information */
1194	host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
1195
1196	/* calculate addressing information */
1197	host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1198
1199	if (f->num_blocks * f->page_per_block > 65536)
1200		host->row_addr_cycles = 3;
1201	else
1202		host->row_addr_cycles = 2;
1203
1204	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1205	ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1206	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1207	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1208	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1209	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1210
1211	ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1212	ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1213
1214	info->reg_ndcr = ndcr;
1215
1216	pxa3xx_nand_set_timing(host, f->timing);
1217	return 0;
1218}
1219
1220static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1221{
1222	/*
1223	 * We set 0 by hard coding here, for we don't support keep_config
1224	 * when there is more than one chip attached to the controller
1225	 */
1226	struct pxa3xx_nand_host *host = info->host[0];
1227	uint32_t ndcr = nand_readl(info, NDCR);
1228
1229	if (ndcr & NDCR_PAGE_SZ) {
1230		/* Controller's FIFO size */
1231		info->chunk_size = 2048;
1232		host->read_id_bytes = 4;
1233	} else {
1234		info->chunk_size = 512;
1235		host->read_id_bytes = 2;
1236	}
1237
1238	/* Set an initial chunk size */
1239	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1240	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1241	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1242	return 0;
1243}
1244
1245#ifdef ARCH_HAS_DMA
/*
 * Allocate the controller data buffer.  In PIO mode a plain kmalloc()
 * buffer suffices; in DMA mode a coherent buffer is allocated with a
 * pxa_dma_desc carved out of its tail, and the data DMA channel is
 * requested.  info->use_dma is set only once everything succeeded.
 * Returns 0 or a negative errno.
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	/* The DMA descriptor lives at the very end of the buffer. */
	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	/* CPU and bus addresses of the descriptor embedded in the buffer. */
	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		dma_free_coherent(&pdev->dev, info->buf_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}
1284
/*
 * Release whichever buffer variant pxa3xx_nand_init_buff() set up:
 * DMA channel + coherent buffer when DMA was enabled, else the plain
 * kmalloc() buffer.
 */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	if (info->use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->buf_size,
				  info->data_buff, info->data_buff_phys);
	} else {
		kfree(info->data_buff);
	}
}
1296#else
1297static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1298{
1299	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1300	if (info->data_buff == NULL)
1301		return -ENOMEM;
1302	return 0;
1303}
1304
/* Free the buffer allocated by the PIO-only pxa3xx_nand_init_buff(). */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	kfree(info->data_buff);
}
1309#endif
1310
1311static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1312{
1313	struct mtd_info *mtd;
1314	struct nand_chip *chip;
1315	int ret;
1316
1317	mtd = info->host[info->cs]->mtd;
1318	chip = mtd->priv;
1319
1320	/* use the common timing to make a try */
1321	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1322	if (ret)
1323		return ret;
1324
1325	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1326	ret = chip->waitfunc(mtd, chip);
1327	if (ret & NAND_STATUS_FAIL)
1328		return -ENODEV;
1329
1330	return 0;
1331}
1332
1333static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1334			struct nand_ecc_ctrl *ecc,
1335			int strength, int page_size)
1336{
1337	/*
1338	 * We don't use strength here as the PXA variant
1339	 * is used with non-ONFI compliant devices.
1340	 */
1341	if (page_size == 2048) {
1342		info->chunk_size = 2048;
1343		info->spare_size = 40;
1344		info->ecc_size = 24;
1345		ecc->mode = NAND_ECC_HW;
1346		ecc->size = 512;
1347		ecc->strength = 1;
1348		return 1;
1349
1350	} else if (page_size == 512) {
1351		info->chunk_size = 512;
1352		info->spare_size = 8;
1353		info->ecc_size = 8;
1354		ecc->mode = NAND_ECC_HW;
1355		ecc->size = 512;
1356		ecc->strength = 1;
1357		return 1;
1358	}
1359	return 0;
1360}
1361
1362static int armada370_ecc_init(struct pxa3xx_nand_info *info,
1363			      struct nand_ecc_ctrl *ecc,
1364			      int strength, int ecc_stepsize, int page_size)
1365{
1366	/*
1367	 * Required ECC: 4-bit correction per 512 bytes
1368	 * Select: 16-bit correction per 2048 bytes
1369	 */
1370	if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1371		info->ecc_bch = 1;
1372		info->chunk_size = 2048;
1373		info->spare_size = 32;
1374		info->ecc_size = 32;
1375		ecc->mode = NAND_ECC_HW;
1376		ecc->size = info->chunk_size;
1377		ecc->layout = &ecc_layout_4KB_bch4bit;
1378		ecc->strength = 16;
1379		return 1;
1380
1381	/*
1382	 * Required ECC: 8-bit correction per 512 bytes
1383	 * Select: 16-bit correction per 1024 bytes
1384	 */
1385	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1386		info->ecc_bch = 1;
1387		info->chunk_size = 1024;
1388		info->spare_size = 0;
1389		info->ecc_size = 32;
1390		ecc->mode = NAND_ECC_HW;
1391		ecc->size = info->chunk_size;
1392		ecc->layout = &ecc_layout_4KB_bch8bit;
1393		ecc->strength = 16;
1394		return 1;
1395	}
1396	return 0;
1397}
1398
/*
 * Identify the flash on the current chip select, program the
 * controller for it and complete MTD registration via
 * nand_scan_ident() + nand_scan_tail().
 * Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;

	/* Reuse the bootloader's controller setup when requested. */
	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	ret = pxa3xx_nand_sensing(info);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

	/* Read the 16-bit chip ID left in the data buffer by READID. */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, potential timing set wrong!!\n");

		return -EINVAL;
	}

	/*
	 * Search the platform-provided flash table first, then the
	 * builtin list (skipping its entry 0, which is only used for
	 * sensing with common timings).
	 */
	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");

		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
		return ret;
	}

	/* Build a one-entry ID table so nand_scan_ident() matches it. */
	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Pick the ECC scheme supported by the detected variant. */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		ret = armada370_ecc_init(info, &chip->ecc,
				   chip->ecc_strength_ds,
				   chip->ecc_step_ds,
				   mtd->writesize);
	else
		ret = pxa_ecc_init(info, &chip->ecc,
				   chip->ecc_strength_ds,
				   mtd->writesize);
	if (!ret) {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			chip->ecc_strength_ds, mtd->writesize);
		return -ENODEV;
	}

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1543
1544static int alloc_nand_resource(struct platform_device *pdev)
1545{
1546	struct pxa3xx_nand_platform_data *pdata;
1547	struct pxa3xx_nand_info *info;
1548	struct pxa3xx_nand_host *host;
1549	struct nand_chip *chip = NULL;
1550	struct mtd_info *mtd;
1551	struct resource *r;
1552	int ret, irq, cs;
1553
1554	pdata = dev_get_platdata(&pdev->dev);
1555	info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1556			    sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1557	if (!info)
1558		return -ENOMEM;
1559
1560	info->pdev = pdev;
1561	info->variant = pxa3xx_nand_get_variant(pdev);
1562	for (cs = 0; cs < pdata->num_cs; cs++) {
1563		mtd = (struct mtd_info *)((unsigned int)&info[1] +
1564		      (sizeof(*mtd) + sizeof(*host)) * cs);
1565		chip = (struct nand_chip *)(&mtd[1]);
1566		host = (struct pxa3xx_nand_host *)chip;
1567		info->host[cs] = host;
1568		host->mtd = mtd;
1569		host->cs = cs;
1570		host->info_data = info;
1571		mtd->priv = host;
1572		mtd->owner = THIS_MODULE;
1573
1574		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
1575		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
1576		chip->controller        = &info->controller;
1577		chip->waitfunc		= pxa3xx_nand_waitfunc;
1578		chip->select_chip	= pxa3xx_nand_select_chip;
1579		chip->read_word		= pxa3xx_nand_read_word;
1580		chip->read_byte		= pxa3xx_nand_read_byte;
1581		chip->read_buf		= pxa3xx_nand_read_buf;
1582		chip->write_buf		= pxa3xx_nand_write_buf;
1583		chip->options		|= NAND_NO_SUBPAGE_WRITE;
1584		chip->cmdfunc		= nand_cmdfunc;
1585	}
1586
1587	spin_lock_init(&chip->controller->lock);
1588	init_waitqueue_head(&chip->controller->wq);
1589	info->clk = devm_clk_get(&pdev->dev, NULL);
1590	if (IS_ERR(info->clk)) {
1591		dev_err(&pdev->dev, "failed to get nand clock\n");
1592		return PTR_ERR(info->clk);
1593	}
1594	ret = clk_prepare_enable(info->clk);
1595	if (ret < 0)
1596		return ret;
1597
1598	if (use_dma) {
1599		/*
1600		 * This is a dirty hack to make this driver work from
1601		 * devicetree bindings. It can be removed once we have
1602		 * a prober DMA controller framework for DT.
1603		 */
1604		if (pdev->dev.of_node &&
1605		    of_machine_is_compatible("marvell,pxa3xx")) {
1606			info->drcmr_dat = 97;
1607			info->drcmr_cmd = 99;
1608		} else {
1609			r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1610			if (r == NULL) {
1611				dev_err(&pdev->dev,
1612					"no resource defined for data DMA\n");
1613				ret = -ENXIO;
1614				goto fail_disable_clk;
1615			}
1616			info->drcmr_dat = r->start;
1617
1618			r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1619			if (r == NULL) {
1620				dev_err(&pdev->dev,
1621					"no resource defined for cmd DMA\n");
1622				ret = -ENXIO;
1623				goto fail_disable_clk;
1624			}
1625			info->drcmr_cmd = r->start;
1626		}
1627	}
1628
1629	irq = platform_get_irq(pdev, 0);
1630	if (irq < 0) {
1631		dev_err(&pdev->dev, "no IRQ resource defined\n");
1632		ret = -ENXIO;
1633		goto fail_disable_clk;
1634	}
1635
1636	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1637	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1638	if (IS_ERR(info->mmio_base)) {
1639		ret = PTR_ERR(info->mmio_base);
1640		goto fail_disable_clk;
1641	}
1642	info->mmio_phys = r->start;
1643
1644	/* Allocate a buffer to allow flash detection */
1645	info->buf_size = INIT_BUFFER_SIZE;
1646	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1647	if (info->data_buff == NULL) {
1648		ret = -ENOMEM;
1649		goto fail_disable_clk;
1650	}
1651
1652	/* initialize all interrupts to be disabled */
1653	disable_int(info, NDSR_MASK);
1654
1655	ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
1656	if (ret < 0) {
1657		dev_err(&pdev->dev, "failed to request IRQ\n");
1658		goto fail_free_buf;
1659	}
1660
1661	platform_set_drvdata(pdev, info);
1662
1663	return 0;
1664
1665fail_free_buf:
1666	free_irq(irq, info);
1667	kfree(info->data_buff);
1668fail_disable_clk:
1669	clk_disable_unprepare(info->clk);
1670	return ret;
1671}
1672
/*
 * Unbind the driver: release the IRQ and buffers, stop the clock and
 * unregister the MTD device of every chip select.  Also called from a
 * failed probe, hence the NULL drvdata guard.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	clk_disable_unprepare(info->clk);

	/*
	 * NOTE(review): nand_release() runs after the IRQ, buffers and
	 * clock have already been torn down; confirm it cannot trigger
	 * further controller accesses at this point.
	 */
	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);
	return 0;
}
1695
1696static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1697{
1698	struct pxa3xx_nand_platform_data *pdata;
1699	struct device_node *np = pdev->dev.of_node;
1700	const struct of_device_id *of_id =
1701			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1702
1703	if (!of_id)
1704		return 0;
1705
1706	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1707	if (!pdata)
1708		return -ENOMEM;
1709
1710	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1711		pdata->enable_arbiter = 1;
1712	if (of_get_property(np, "marvell,nand-keep-config", NULL))
1713		pdata->keep_config = 1;
1714	of_property_read_u32(np, "num-cs", &pdata->num_cs);
1715	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1716
1717	pdev->dev.platform_data = pdata;
1718
1719	return 0;
1720}
1721
/*
 * Bind the driver: pull in platform data (optionally built from the
 * devicetree), allocate controller resources, then scan and register
 * every configured chip select.  Probing succeeds as long as at least
 * one chip select produced a registered MTD device.
 */
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_part_parser_data ppdata = {};
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success;

#ifndef ARCH_HAS_DMA
	if (use_dma) {
		/* Fall back to PIO on platforms without DMA support. */
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}
#endif
	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = info->host[cs]->mtd;

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			/* A failed chip select is skipped, not fatal. */
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				cs);
			continue;
		}

		ppdata.of_node = pdev->dev.of_node;
		ret = mtd_device_parse_register(mtd, NULL,
						&ppdata, pdata->parts[cs],
						pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	/* Undo everything when no chip select could be brought up. */
	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}
1786
1787#ifdef CONFIG_PM
1788static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1789{
1790	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1791	struct pxa3xx_nand_platform_data *pdata;
1792	struct mtd_info *mtd;
1793	int cs;
1794
1795	pdata = dev_get_platdata(&pdev->dev);
1796	if (info->state) {
1797		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1798		return -EAGAIN;
1799	}
1800
1801	for (cs = 0; cs < pdata->num_cs; cs++) {
1802		mtd = info->host[cs]->mtd;
1803		mtd_suspend(mtd);
1804	}
1805
1806	return 0;
1807}
1808
/*
 * Legacy platform resume hook: quiesce the controller's interrupt and
 * status state, then resume every chip select's MTD.
 */
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	/* We don't want to handle interrupts without calling mtd routines */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value, so the
	 * driver resets the timing according to the current chip select
	 * at the beginning of cmdfunc.
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it from damaging the driver's state machine, clear
	 * all status bits before resuming.
	 */
	nand_writel(info, NDSR, NDSR_MASK);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_resume(mtd);
	}

	return 0;
}
1841#else
1842#define pxa3xx_nand_suspend	NULL
1843#define pxa3xx_nand_resume	NULL
1844#endif
1845
/*
 * Platform driver glue.  The legacy suspend/resume hooks resolve to
 * NULL when CONFIG_PM is disabled (see the #else branch above).
 */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
1861