/* pxa3xx_nand.c — revision 4e86fd22af2c930e741536e8637bca9355fa8bb5 */
1/* 2 * drivers/mtd/nand/pxa3xx_nand.c 3 * 4 * Copyright © 2005 Intel Corporation 5 * Copyright © 2006 Marvell International Ltd. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 * 11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details. 12 */ 13 14#include <linux/kernel.h> 15#include <linux/module.h> 16#include <linux/interrupt.h> 17#include <linux/platform_device.h> 18#include <linux/dma-mapping.h> 19#include <linux/delay.h> 20#include <linux/clk.h> 21#include <linux/mtd/mtd.h> 22#include <linux/mtd/nand.h> 23#include <linux/mtd/partitions.h> 24#include <linux/io.h> 25#include <linux/irq.h> 26#include <linux/slab.h> 27#include <linux/of.h> 28#include <linux/of_device.h> 29 30#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) 31#define ARCH_HAS_DMA 32#endif 33 34#ifdef ARCH_HAS_DMA 35#include <mach/dma.h> 36#endif 37 38#include <linux/platform_data/mtd-nand-pxa3xx.h> 39 40#define CHIP_DELAY_TIMEOUT (2 * HZ/10) 41#define NAND_STOP_DELAY (2 * HZ/50) 42#define PAGE_CHUNK_SIZE (2048) 43 44/* 45 * Define a buffer size for the initial command that detects the flash device: 46 * STATUS, READID and PARAM. The largest of these is the PARAM command, 47 * needing 256 bytes. 
48 */ 49#define INIT_BUFFER_SIZE 256 50 51/* registers and bit definitions */ 52#define NDCR (0x00) /* Control register */ 53#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */ 54#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */ 55#define NDSR (0x14) /* Status Register */ 56#define NDPCR (0x18) /* Page Count Register */ 57#define NDBDR0 (0x1C) /* Bad Block Register 0 */ 58#define NDBDR1 (0x20) /* Bad Block Register 1 */ 59#define NDDB (0x40) /* Data Buffer */ 60#define NDCB0 (0x48) /* Command Buffer0 */ 61#define NDCB1 (0x4C) /* Command Buffer1 */ 62#define NDCB2 (0x50) /* Command Buffer2 */ 63 64#define NDCR_SPARE_EN (0x1 << 31) 65#define NDCR_ECC_EN (0x1 << 30) 66#define NDCR_DMA_EN (0x1 << 29) 67#define NDCR_ND_RUN (0x1 << 28) 68#define NDCR_DWIDTH_C (0x1 << 27) 69#define NDCR_DWIDTH_M (0x1 << 26) 70#define NDCR_PAGE_SZ (0x1 << 24) 71#define NDCR_NCSX (0x1 << 23) 72#define NDCR_ND_MODE (0x3 << 21) 73#define NDCR_NAND_MODE (0x0) 74#define NDCR_CLR_PG_CNT (0x1 << 20) 75#define NDCR_STOP_ON_UNCOR (0x1 << 19) 76#define NDCR_RD_ID_CNT_MASK (0x7 << 16) 77#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK) 78 79#define NDCR_RA_START (0x1 << 15) 80#define NDCR_PG_PER_BLK (0x1 << 14) 81#define NDCR_ND_ARB_EN (0x1 << 12) 82#define NDCR_INT_MASK (0xFFF) 83 84#define NDSR_MASK (0xfff) 85#define NDSR_RDY (0x1 << 12) 86#define NDSR_FLASH_RDY (0x1 << 11) 87#define NDSR_CS0_PAGED (0x1 << 10) 88#define NDSR_CS1_PAGED (0x1 << 9) 89#define NDSR_CS0_CMDD (0x1 << 8) 90#define NDSR_CS1_CMDD (0x1 << 7) 91#define NDSR_CS0_BBD (0x1 << 6) 92#define NDSR_CS1_BBD (0x1 << 5) 93#define NDSR_DBERR (0x1 << 4) 94#define NDSR_SBERR (0x1 << 3) 95#define NDSR_WRDREQ (0x1 << 2) 96#define NDSR_RDDREQ (0x1 << 1) 97#define NDSR_WRCMDREQ (0x1) 98 99#define NDCB0_LEN_OVRD (0x1 << 28) 100#define NDCB0_ST_ROW_EN (0x1 << 26) 101#define NDCB0_AUTO_RS (0x1 << 25) 102#define NDCB0_CSEL (0x1 << 24) 103#define NDCB0_CMD_TYPE_MASK (0x7 << 21) 104#define NDCB0_CMD_TYPE(x) (((x) << 21) & 
NDCB0_CMD_TYPE_MASK) 105#define NDCB0_NC (0x1 << 20) 106#define NDCB0_DBC (0x1 << 19) 107#define NDCB0_ADDR_CYC_MASK (0x7 << 16) 108#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK) 109#define NDCB0_CMD2_MASK (0xff << 8) 110#define NDCB0_CMD1_MASK (0xff) 111#define NDCB0_ADDR_CYC_SHIFT (16) 112 113/* macros for registers read/write */ 114#define nand_writel(info, off, val) \ 115 __raw_writel((val), (info)->mmio_base + (off)) 116 117#define nand_readl(info, off) \ 118 __raw_readl((info)->mmio_base + (off)) 119 120/* error code and state */ 121enum { 122 ERR_NONE = 0, 123 ERR_DMABUSERR = -1, 124 ERR_SENDCMD = -2, 125 ERR_DBERR = -3, 126 ERR_BBERR = -4, 127 ERR_SBERR = -5, 128}; 129 130enum { 131 STATE_IDLE = 0, 132 STATE_PREPARED, 133 STATE_CMD_HANDLE, 134 STATE_DMA_READING, 135 STATE_DMA_WRITING, 136 STATE_DMA_DONE, 137 STATE_PIO_READING, 138 STATE_PIO_WRITING, 139 STATE_CMD_DONE, 140 STATE_READY, 141}; 142 143enum pxa3xx_nand_variant { 144 PXA3XX_NAND_VARIANT_PXA, 145 PXA3XX_NAND_VARIANT_ARMADA370, 146}; 147 148struct pxa3xx_nand_host { 149 struct nand_chip chip; 150 struct mtd_info *mtd; 151 void *info_data; 152 153 /* page size of attached chip */ 154 unsigned int page_size; 155 int use_ecc; 156 int cs; 157 158 /* calculated from pxa3xx_nand_flash data */ 159 unsigned int col_addr_cycles; 160 unsigned int row_addr_cycles; 161 size_t read_id_bytes; 162 163}; 164 165struct pxa3xx_nand_info { 166 struct nand_hw_control controller; 167 struct platform_device *pdev; 168 169 struct clk *clk; 170 void __iomem *mmio_base; 171 unsigned long mmio_phys; 172 struct completion cmd_complete; 173 174 unsigned int buf_start; 175 unsigned int buf_count; 176 unsigned int buf_size; 177 178 /* DMA information */ 179 int drcmr_dat; 180 int drcmr_cmd; 181 182 unsigned char *data_buff; 183 unsigned char *oob_buff; 184 dma_addr_t data_buff_phys; 185 int data_dma_ch; 186 struct pxa_dma_desc *data_desc; 187 dma_addr_t data_desc_addr; 188 189 struct pxa3xx_nand_host 
*host[NUM_CHIP_SELECT]; 190 unsigned int state; 191 192 /* 193 * This driver supports NFCv1 (as found in PXA SoC) 194 * and NFCv2 (as found in Armada 370/XP SoC). 195 */ 196 enum pxa3xx_nand_variant variant; 197 198 int cs; 199 int use_ecc; /* use HW ECC ? */ 200 int use_dma; /* use DMA ? */ 201 int use_spare; /* use spare ? */ 202 int is_ready; 203 204 unsigned int page_size; /* page size of attached chip */ 205 unsigned int data_size; /* data size in FIFO */ 206 unsigned int oob_size; 207 int retcode; 208 209 /* cached register value */ 210 uint32_t reg_ndcr; 211 uint32_t ndtr0cs0; 212 uint32_t ndtr1cs0; 213 214 /* generated NDCBx register values */ 215 uint32_t ndcb0; 216 uint32_t ndcb1; 217 uint32_t ndcb2; 218 uint32_t ndcb3; 219}; 220 221static bool use_dma = 1; 222module_param(use_dma, bool, 0444); 223MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW"); 224 225static struct pxa3xx_nand_timing timing[] = { 226 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, }, 227 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, }, 228 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, }, 229 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, }, 230}; 231 232static struct pxa3xx_nand_flash builtin_flash_types[] = { 233{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] }, 234{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] }, 235{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] }, 236{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] }, 237{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] }, 238{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] }, 239{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] }, 240{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] }, 241{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] }, 242}; 243 244/* Define a default flash type setting serve as flash detecting only */ 245#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) 246 247#define NDTR0_tCH(c) (min((c), 7) << 19) 
248#define NDTR0_tCS(c) (min((c), 7) << 16) 249#define NDTR0_tWH(c) (min((c), 7) << 11) 250#define NDTR0_tWP(c) (min((c), 7) << 8) 251#define NDTR0_tRH(c) (min((c), 7) << 3) 252#define NDTR0_tRP(c) (min((c), 7) << 0) 253 254#define NDTR1_tR(c) (min((c), 65535) << 16) 255#define NDTR1_tWHR(c) (min((c), 15) << 4) 256#define NDTR1_tAR(c) (min((c), 15) << 0) 257 258/* convert nano-seconds to nand flash controller clock cycles */ 259#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) 260 261static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host, 262 const struct pxa3xx_nand_timing *t) 263{ 264 struct pxa3xx_nand_info *info = host->info_data; 265 unsigned long nand_clk = clk_get_rate(info->clk); 266 uint32_t ndtr0, ndtr1; 267 268 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) | 269 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) | 270 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) | 271 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) | 272 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) | 273 NDTR0_tRP(ns2cycle(t->tRP, nand_clk)); 274 275 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) | 276 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | 277 NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); 278 279 info->ndtr0cs0 = ndtr0; 280 info->ndtr1cs0 = ndtr1; 281 nand_writel(info, NDTR0CS0, ndtr0); 282 nand_writel(info, NDTR1CS0, ndtr1); 283} 284 285static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) 286{ 287 struct pxa3xx_nand_host *host = info->host[info->cs]; 288 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; 289 290 info->data_size = host->page_size; 291 if (!oob_enable) { 292 info->oob_size = 0; 293 return; 294 } 295 296 switch (host->page_size) { 297 case 2048: 298 info->oob_size = (info->use_ecc) ? 40 : 64; 299 break; 300 case 512: 301 info->oob_size = (info->use_ecc) ? 8 : 16; 302 break; 303 } 304} 305 306/** 307 * NOTE: it is a must to set ND_RUN firstly, then write 308 * command buffer, otherwise, it does not work. 
309 * We enable all the interrupt at the same time, and 310 * let pxa3xx_nand_irq to handle all logic. 311 */ 312static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) 313{ 314 uint32_t ndcr; 315 316 ndcr = info->reg_ndcr; 317 318 if (info->use_ecc) 319 ndcr |= NDCR_ECC_EN; 320 else 321 ndcr &= ~NDCR_ECC_EN; 322 323 if (info->use_dma) 324 ndcr |= NDCR_DMA_EN; 325 else 326 ndcr &= ~NDCR_DMA_EN; 327 328 if (info->use_spare) 329 ndcr |= NDCR_SPARE_EN; 330 else 331 ndcr &= ~NDCR_SPARE_EN; 332 333 ndcr |= NDCR_ND_RUN; 334 335 /* clear status bits and run */ 336 nand_writel(info, NDCR, 0); 337 nand_writel(info, NDSR, NDSR_MASK); 338 nand_writel(info, NDCR, ndcr); 339} 340 341static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info) 342{ 343 uint32_t ndcr; 344 int timeout = NAND_STOP_DELAY; 345 346 /* wait RUN bit in NDCR become 0 */ 347 ndcr = nand_readl(info, NDCR); 348 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) { 349 ndcr = nand_readl(info, NDCR); 350 udelay(1); 351 } 352 353 if (timeout <= 0) { 354 ndcr &= ~NDCR_ND_RUN; 355 nand_writel(info, NDCR, ndcr); 356 } 357 /* clear status bits */ 358 nand_writel(info, NDSR, NDSR_MASK); 359} 360 361static void __maybe_unused 362enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) 363{ 364 uint32_t ndcr; 365 366 ndcr = nand_readl(info, NDCR); 367 nand_writel(info, NDCR, ndcr & ~int_mask); 368} 369 370static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) 371{ 372 uint32_t ndcr; 373 374 ndcr = nand_readl(info, NDCR); 375 nand_writel(info, NDCR, ndcr | int_mask); 376} 377 378static void handle_data_pio(struct pxa3xx_nand_info *info) 379{ 380 switch (info->state) { 381 case STATE_PIO_WRITING: 382 __raw_writesl(info->mmio_base + NDDB, info->data_buff, 383 DIV_ROUND_UP(info->data_size, 4)); 384 if (info->oob_size > 0) 385 __raw_writesl(info->mmio_base + NDDB, info->oob_buff, 386 DIV_ROUND_UP(info->oob_size, 4)); 387 break; 388 case STATE_PIO_READING: 389 __raw_readsl(info->mmio_base + NDDB, 
info->data_buff, 390 DIV_ROUND_UP(info->data_size, 4)); 391 if (info->oob_size > 0) 392 __raw_readsl(info->mmio_base + NDDB, info->oob_buff, 393 DIV_ROUND_UP(info->oob_size, 4)); 394 break; 395 default: 396 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 397 info->state); 398 BUG(); 399 } 400} 401 402#ifdef ARCH_HAS_DMA 403static void start_data_dma(struct pxa3xx_nand_info *info) 404{ 405 struct pxa_dma_desc *desc = info->data_desc; 406 int dma_len = ALIGN(info->data_size + info->oob_size, 32); 407 408 desc->ddadr = DDADR_STOP; 409 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len; 410 411 switch (info->state) { 412 case STATE_DMA_WRITING: 413 desc->dsadr = info->data_buff_phys; 414 desc->dtadr = info->mmio_phys + NDDB; 415 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG; 416 break; 417 case STATE_DMA_READING: 418 desc->dtadr = info->data_buff_phys; 419 desc->dsadr = info->mmio_phys + NDDB; 420 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; 421 break; 422 default: 423 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, 424 info->state); 425 BUG(); 426 } 427 428 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch; 429 DDADR(info->data_dma_ch) = info->data_desc_addr; 430 DCSR(info->data_dma_ch) |= DCSR_RUN; 431} 432 433static void pxa3xx_nand_data_dma_irq(int channel, void *data) 434{ 435 struct pxa3xx_nand_info *info = data; 436 uint32_t dcsr; 437 438 dcsr = DCSR(channel); 439 DCSR(channel) = dcsr; 440 441 if (dcsr & DCSR_BUSERR) { 442 info->retcode = ERR_DMABUSERR; 443 } 444 445 info->state = STATE_DMA_DONE; 446 enable_int(info, NDCR_INT_MASK); 447 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ); 448} 449#else 450static void start_data_dma(struct pxa3xx_nand_info *info) 451{} 452#endif 453 454static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) 455{ 456 struct pxa3xx_nand_info *info = devid; 457 unsigned int status, is_completed = 0; 458 unsigned int ready, cmd_done; 459 460 if (info->cs == 0) { 461 ready = 
NDSR_FLASH_RDY; 462 cmd_done = NDSR_CS0_CMDD; 463 } else { 464 ready = NDSR_RDY; 465 cmd_done = NDSR_CS1_CMDD; 466 } 467 468 status = nand_readl(info, NDSR); 469 470 if (status & NDSR_DBERR) 471 info->retcode = ERR_DBERR; 472 if (status & NDSR_SBERR) 473 info->retcode = ERR_SBERR; 474 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) { 475 /* whether use dma to transfer data */ 476 if (info->use_dma) { 477 disable_int(info, NDCR_INT_MASK); 478 info->state = (status & NDSR_RDDREQ) ? 479 STATE_DMA_READING : STATE_DMA_WRITING; 480 start_data_dma(info); 481 goto NORMAL_IRQ_EXIT; 482 } else { 483 info->state = (status & NDSR_RDDREQ) ? 484 STATE_PIO_READING : STATE_PIO_WRITING; 485 handle_data_pio(info); 486 } 487 } 488 if (status & cmd_done) { 489 info->state = STATE_CMD_DONE; 490 is_completed = 1; 491 } 492 if (status & ready) { 493 info->is_ready = 1; 494 info->state = STATE_READY; 495 } 496 497 if (status & NDSR_WRCMDREQ) { 498 nand_writel(info, NDSR, NDSR_WRCMDREQ); 499 status &= ~NDSR_WRCMDREQ; 500 info->state = STATE_CMD_HANDLE; 501 502 /* 503 * Command buffer registers NDCB{0-2} (and optionally NDCB3) 504 * must be loaded by writing directly either 12 or 16 505 * bytes directly to NDCB0, four bytes at a time. 506 * 507 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored 508 * but each NDCBx register can be read. 
509 */ 510 nand_writel(info, NDCB0, info->ndcb0); 511 nand_writel(info, NDCB0, info->ndcb1); 512 nand_writel(info, NDCB0, info->ndcb2); 513 514 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */ 515 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) 516 nand_writel(info, NDCB0, info->ndcb3); 517 } 518 519 /* clear NDSR to let the controller exit the IRQ */ 520 nand_writel(info, NDSR, status); 521 if (is_completed) 522 complete(&info->cmd_complete); 523NORMAL_IRQ_EXIT: 524 return IRQ_HANDLED; 525} 526 527static inline int is_buf_blank(uint8_t *buf, size_t len) 528{ 529 for (; len > 0; len--) 530 if (*buf++ != 0xff) 531 return 0; 532 return 1; 533} 534 535static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, 536 uint16_t column, int page_addr) 537{ 538 int addr_cycle, exec_cmd; 539 struct pxa3xx_nand_host *host; 540 struct mtd_info *mtd; 541 542 host = info->host[info->cs]; 543 mtd = host->mtd; 544 addr_cycle = 0; 545 exec_cmd = 1; 546 547 /* reset data and oob column point to handle data */ 548 info->buf_start = 0; 549 info->buf_count = 0; 550 info->oob_size = 0; 551 info->use_ecc = 0; 552 info->use_spare = 1; 553 info->is_ready = 0; 554 info->retcode = ERR_NONE; 555 if (info->cs != 0) 556 info->ndcb0 = NDCB0_CSEL; 557 else 558 info->ndcb0 = 0; 559 560 switch (command) { 561 case NAND_CMD_READ0: 562 case NAND_CMD_PAGEPROG: 563 info->use_ecc = 1; 564 case NAND_CMD_READOOB: 565 pxa3xx_set_datasize(info); 566 break; 567 case NAND_CMD_PARAM: 568 info->use_spare = 0; 569 break; 570 case NAND_CMD_SEQIN: 571 exec_cmd = 0; 572 break; 573 default: 574 info->ndcb1 = 0; 575 info->ndcb2 = 0; 576 info->ndcb3 = 0; 577 break; 578 } 579 580 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles 581 + host->col_addr_cycles); 582 583 switch (command) { 584 case NAND_CMD_READOOB: 585 case NAND_CMD_READ0: 586 info->buf_start = column; 587 info->ndcb0 |= NDCB0_CMD_TYPE(0) 588 | addr_cycle 589 | NAND_CMD_READ0; 590 591 if (command == NAND_CMD_READOOB) 592 
info->buf_start += mtd->writesize; 593 594 /* Second command setting for large pages */ 595 if (host->page_size >= PAGE_CHUNK_SIZE) 596 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8); 597 598 case NAND_CMD_SEQIN: 599 /* small page addr setting */ 600 if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) { 601 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) 602 | (column & 0xFF); 603 604 info->ndcb2 = 0; 605 } else { 606 info->ndcb1 = ((page_addr & 0xFFFF) << 16) 607 | (column & 0xFFFF); 608 609 if (page_addr & 0xFF0000) 610 info->ndcb2 = (page_addr & 0xFF0000) >> 16; 611 else 612 info->ndcb2 = 0; 613 } 614 615 info->buf_count = mtd->writesize + mtd->oobsize; 616 memset(info->data_buff, 0xFF, info->buf_count); 617 618 break; 619 620 case NAND_CMD_PAGEPROG: 621 if (is_buf_blank(info->data_buff, 622 (mtd->writesize + mtd->oobsize))) { 623 exec_cmd = 0; 624 break; 625 } 626 627 info->ndcb0 |= NDCB0_CMD_TYPE(0x1) 628 | NDCB0_AUTO_RS 629 | NDCB0_ST_ROW_EN 630 | NDCB0_DBC 631 | (NAND_CMD_PAGEPROG << 8) 632 | NAND_CMD_SEQIN 633 | addr_cycle; 634 break; 635 636 case NAND_CMD_PARAM: 637 info->buf_count = 256; 638 info->ndcb0 |= NDCB0_CMD_TYPE(0) 639 | NDCB0_ADDR_CYC(1) 640 | NDCB0_LEN_OVRD 641 | command; 642 info->ndcb1 = (column & 0xFF); 643 info->ndcb3 = 256; 644 info->data_size = 256; 645 break; 646 647 case NAND_CMD_READID: 648 info->buf_count = host->read_id_bytes; 649 info->ndcb0 |= NDCB0_CMD_TYPE(3) 650 | NDCB0_ADDR_CYC(1) 651 | command; 652 info->ndcb1 = (column & 0xFF); 653 654 info->data_size = 8; 655 break; 656 case NAND_CMD_STATUS: 657 info->buf_count = 1; 658 info->ndcb0 |= NDCB0_CMD_TYPE(4) 659 | NDCB0_ADDR_CYC(1) 660 | command; 661 662 info->data_size = 8; 663 break; 664 665 case NAND_CMD_ERASE1: 666 info->ndcb0 |= NDCB0_CMD_TYPE(2) 667 | NDCB0_AUTO_RS 668 | NDCB0_ADDR_CYC(3) 669 | NDCB0_DBC 670 | (NAND_CMD_ERASE2 << 8) 671 | NAND_CMD_ERASE1; 672 info->ndcb1 = page_addr; 673 info->ndcb2 = 0; 674 675 break; 676 case NAND_CMD_RESET: 677 info->ndcb0 |= 
NDCB0_CMD_TYPE(5) 678 | command; 679 680 break; 681 682 case NAND_CMD_ERASE2: 683 exec_cmd = 0; 684 break; 685 686 default: 687 exec_cmd = 0; 688 dev_err(&info->pdev->dev, "non-supported command %x\n", 689 command); 690 break; 691 } 692 693 return exec_cmd; 694} 695 696static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, 697 int column, int page_addr) 698{ 699 struct pxa3xx_nand_host *host = mtd->priv; 700 struct pxa3xx_nand_info *info = host->info_data; 701 int ret, exec_cmd; 702 703 /* 704 * if this is a x16 device ,then convert the input 705 * "byte" address into a "word" address appropriate 706 * for indexing a word-oriented device 707 */ 708 if (info->reg_ndcr & NDCR_DWIDTH_M) 709 column /= 2; 710 711 /* 712 * There may be different NAND chip hooked to 713 * different chip select, so check whether 714 * chip select has been changed, if yes, reset the timing 715 */ 716 if (info->cs != host->cs) { 717 info->cs = host->cs; 718 nand_writel(info, NDTR0CS0, info->ndtr0cs0); 719 nand_writel(info, NDTR1CS0, info->ndtr1cs0); 720 } 721 722 info->state = STATE_PREPARED; 723 exec_cmd = prepare_command_pool(info, command, column, page_addr); 724 if (exec_cmd) { 725 init_completion(&info->cmd_complete); 726 pxa3xx_nand_start(info); 727 728 ret = wait_for_completion_timeout(&info->cmd_complete, 729 CHIP_DELAY_TIMEOUT); 730 if (!ret) { 731 dev_err(&info->pdev->dev, "Wait time out!!!\n"); 732 /* Stop State Machine for next command cycle */ 733 pxa3xx_nand_stop(info); 734 } 735 } 736 info->state = STATE_IDLE; 737} 738 739static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, 740 struct nand_chip *chip, const uint8_t *buf, int oob_required) 741{ 742 chip->write_buf(mtd, buf, mtd->writesize); 743 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 744 745 return 0; 746} 747 748static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, 749 struct nand_chip *chip, uint8_t *buf, int oob_required, 750 int page) 751{ 752 struct pxa3xx_nand_host *host = 
mtd->priv; 753 struct pxa3xx_nand_info *info = host->info_data; 754 int max_bitflips = 0; 755 756 chip->read_buf(mtd, buf, mtd->writesize); 757 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 758 759 if (info->retcode == ERR_SBERR) { 760 switch (info->use_ecc) { 761 case 1: 762 max_bitflips = 1; 763 mtd->ecc_stats.corrected++; 764 break; 765 case 0: 766 default: 767 break; 768 } 769 } else if (info->retcode == ERR_DBERR) { 770 /* 771 * for blank page (all 0xff), HW will calculate its ECC as 772 * 0, which is different from the ECC information within 773 * OOB, ignore such double bit errors 774 */ 775 if (is_buf_blank(buf, mtd->writesize)) 776 info->retcode = ERR_NONE; 777 else 778 mtd->ecc_stats.failed++; 779 } 780 781 return max_bitflips; 782} 783 784static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) 785{ 786 struct pxa3xx_nand_host *host = mtd->priv; 787 struct pxa3xx_nand_info *info = host->info_data; 788 char retval = 0xFF; 789 790 if (info->buf_start < info->buf_count) 791 /* Has just send a new command? 
*/ 792 retval = info->data_buff[info->buf_start++]; 793 794 return retval; 795} 796 797static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) 798{ 799 struct pxa3xx_nand_host *host = mtd->priv; 800 struct pxa3xx_nand_info *info = host->info_data; 801 u16 retval = 0xFFFF; 802 803 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) { 804 retval = *((u16 *)(info->data_buff+info->buf_start)); 805 info->buf_start += 2; 806 } 807 return retval; 808} 809 810static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 811{ 812 struct pxa3xx_nand_host *host = mtd->priv; 813 struct pxa3xx_nand_info *info = host->info_data; 814 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 815 816 memcpy(buf, info->data_buff + info->buf_start, real_len); 817 info->buf_start += real_len; 818} 819 820static void pxa3xx_nand_write_buf(struct mtd_info *mtd, 821 const uint8_t *buf, int len) 822{ 823 struct pxa3xx_nand_host *host = mtd->priv; 824 struct pxa3xx_nand_info *info = host->info_data; 825 int real_len = min_t(size_t, len, info->buf_count - info->buf_start); 826 827 memcpy(info->data_buff + info->buf_start, buf, real_len); 828 info->buf_start += real_len; 829} 830 831static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip) 832{ 833 return; 834} 835 836static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) 837{ 838 struct pxa3xx_nand_host *host = mtd->priv; 839 struct pxa3xx_nand_info *info = host->info_data; 840 841 /* pxa3xx_nand_send_command has waited for command complete */ 842 if (this->state == FL_WRITING || this->state == FL_ERASING) { 843 if (info->retcode == ERR_NONE) 844 return 0; 845 else { 846 /* 847 * any error make it return 0x01 which will tell 848 * the caller the erase and write fail 849 */ 850 return 0x01; 851 } 852 } 853 854 return 0; 855} 856 857static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, 858 const struct pxa3xx_nand_flash *f) 859{ 860 struct platform_device 
*pdev = info->pdev; 861 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); 862 struct pxa3xx_nand_host *host = info->host[info->cs]; 863 uint32_t ndcr = 0x0; /* enable all interrupts */ 864 865 if (f->page_size != 2048 && f->page_size != 512) { 866 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n"); 867 return -EINVAL; 868 } 869 870 if (f->flash_width != 16 && f->flash_width != 8) { 871 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n"); 872 return -EINVAL; 873 } 874 875 /* calculate flash information */ 876 host->page_size = f->page_size; 877 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2; 878 879 /* calculate addressing information */ 880 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; 881 882 if (f->num_blocks * f->page_per_block > 65536) 883 host->row_addr_cycles = 3; 884 else 885 host->row_addr_cycles = 2; 886 887 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; 888 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0; 889 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; 890 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; 891 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; 892 ndcr |= (f->dfc_width == 16) ? 
NDCR_DWIDTH_C : 0; 893 894 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes); 895 ndcr |= NDCR_SPARE_EN; /* enable spare by default */ 896 897 info->reg_ndcr = ndcr; 898 899 pxa3xx_nand_set_timing(host, f->timing); 900 return 0; 901} 902 903static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) 904{ 905 /* 906 * We set 0 by hard coding here, for we don't support keep_config 907 * when there is more than one chip attached to the controller 908 */ 909 struct pxa3xx_nand_host *host = info->host[0]; 910 uint32_t ndcr = nand_readl(info, NDCR); 911 912 if (ndcr & NDCR_PAGE_SZ) { 913 host->page_size = 2048; 914 host->read_id_bytes = 4; 915 } else { 916 host->page_size = 512; 917 host->read_id_bytes = 2; 918 } 919 920 info->reg_ndcr = ndcr & ~NDCR_INT_MASK; 921 info->ndtr0cs0 = nand_readl(info, NDTR0CS0); 922 info->ndtr1cs0 = nand_readl(info, NDTR1CS0); 923 return 0; 924} 925 926#ifdef ARCH_HAS_DMA 927static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) 928{ 929 struct platform_device *pdev = info->pdev; 930 int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc); 931 932 if (use_dma == 0) { 933 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL); 934 if (info->data_buff == NULL) 935 return -ENOMEM; 936 return 0; 937 } 938 939 info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size, 940 &info->data_buff_phys, GFP_KERNEL); 941 if (info->data_buff == NULL) { 942 dev_err(&pdev->dev, "failed to allocate dma buffer\n"); 943 return -ENOMEM; 944 } 945 946 info->data_desc = (void *)info->data_buff + data_desc_offset; 947 info->data_desc_addr = info->data_buff_phys + data_desc_offset; 948 949 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW, 950 pxa3xx_nand_data_dma_irq, info); 951 if (info->data_dma_ch < 0) { 952 dev_err(&pdev->dev, "failed to request data dma\n"); 953 dma_free_coherent(&pdev->dev, info->buf_size, 954 info->data_buff, info->data_buff_phys); 955 return info->data_dma_ch; 956 } 957 958 /* 959 * Now that DMA 
buffers are allocated we turn on 960 * DMA proper for I/O operations. 961 */ 962 info->use_dma = 1; 963 return 0; 964} 965 966static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info) 967{ 968 struct platform_device *pdev = info->pdev; 969 if (info->use_dma) { 970 pxa_free_dma(info->data_dma_ch); 971 dma_free_coherent(&pdev->dev, info->buf_size, 972 info->data_buff, info->data_buff_phys); 973 } else { 974 kfree(info->data_buff); 975 } 976} 977#else 978static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) 979{ 980 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL); 981 if (info->data_buff == NULL) 982 return -ENOMEM; 983 return 0; 984} 985 986static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info) 987{ 988 kfree(info->data_buff); 989} 990#endif 991 992static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) 993{ 994 struct mtd_info *mtd; 995 int ret; 996 mtd = info->host[info->cs]->mtd; 997 /* use the common timing to make a try */ 998 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); 999 if (ret) 1000 return ret; 1001 1002 pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0); 1003 if (info->is_ready) 1004 return 0; 1005 1006 return -ENODEV; 1007} 1008 1009static int pxa3xx_nand_scan(struct mtd_info *mtd) 1010{ 1011 struct pxa3xx_nand_host *host = mtd->priv; 1012 struct pxa3xx_nand_info *info = host->info_data; 1013 struct platform_device *pdev = info->pdev; 1014 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); 1015 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL; 1016 const struct pxa3xx_nand_flash *f = NULL; 1017 struct nand_chip *chip = mtd->priv; 1018 uint32_t id = -1; 1019 uint64_t chipsize; 1020 int i, ret, num; 1021 1022 if (pdata->keep_config && !pxa3xx_nand_detect_config(info)) 1023 goto KEEP_CONFIG; 1024 1025 ret = pxa3xx_nand_sensing(info); 1026 if (ret) { 1027 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n", 1028 info->cs); 1029 1030 return ret; 1031 } 1032 1033 
chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); 1034 id = *((uint16_t *)(info->data_buff)); 1035 if (id != 0) 1036 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id); 1037 else { 1038 dev_warn(&info->pdev->dev, 1039 "Read out ID 0, potential timing set wrong!!\n"); 1040 1041 return -EINVAL; 1042 } 1043 1044 num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; 1045 for (i = 0; i < num; i++) { 1046 if (i < pdata->num_flash) 1047 f = pdata->flash + i; 1048 else 1049 f = &builtin_flash_types[i - pdata->num_flash + 1]; 1050 1051 /* find the chip in default list */ 1052 if (f->chip_id == id) 1053 break; 1054 } 1055 1056 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { 1057 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n"); 1058 1059 return -EINVAL; 1060 } 1061 1062 ret = pxa3xx_nand_config_flash(info, f); 1063 if (ret) { 1064 dev_err(&info->pdev->dev, "ERROR! Configure failed\n"); 1065 return ret; 1066 } 1067 1068 pxa3xx_flash_ids[0].name = f->name; 1069 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff; 1070 pxa3xx_flash_ids[0].pagesize = f->page_size; 1071 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size; 1072 pxa3xx_flash_ids[0].chipsize = chipsize >> 20; 1073 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; 1074 if (f->flash_width == 16) 1075 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; 1076 pxa3xx_flash_ids[1].name = NULL; 1077 def = pxa3xx_flash_ids; 1078KEEP_CONFIG: 1079 chip->ecc.mode = NAND_ECC_HW; 1080 chip->ecc.size = host->page_size; 1081 chip->ecc.strength = 1; 1082 1083 if (info->reg_ndcr & NDCR_DWIDTH_M) 1084 chip->options |= NAND_BUSWIDTH_16; 1085 1086 if (nand_scan_ident(mtd, 1, def)) 1087 return -ENODEV; 1088 /* calculate addressing information */ 1089 if (mtd->writesize >= 2048) 1090 host->col_addr_cycles = 2; 1091 else 1092 host->col_addr_cycles = 1; 1093 1094 /* release the initial buffer */ 1095 kfree(info->data_buff); 1096 1097 /* allocate the real data + oob 
buffer */ 1098 info->buf_size = mtd->writesize + mtd->oobsize; 1099 ret = pxa3xx_nand_init_buff(info); 1100 if (ret) 1101 return ret; 1102 info->oob_buff = info->data_buff + mtd->writesize; 1103 1104 if ((mtd->size >> chip->page_shift) > 65536) 1105 host->row_addr_cycles = 3; 1106 else 1107 host->row_addr_cycles = 2; 1108 return nand_scan_tail(mtd); 1109} 1110 1111static int alloc_nand_resource(struct platform_device *pdev) 1112{ 1113 struct pxa3xx_nand_platform_data *pdata; 1114 struct pxa3xx_nand_info *info; 1115 struct pxa3xx_nand_host *host; 1116 struct nand_chip *chip = NULL; 1117 struct mtd_info *mtd; 1118 struct resource *r; 1119 int ret, irq, cs; 1120 1121 pdata = dev_get_platdata(&pdev->dev); 1122 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + 1123 sizeof(*host)) * pdata->num_cs, GFP_KERNEL); 1124 if (!info) 1125 return -ENOMEM; 1126 1127 info->pdev = pdev; 1128 for (cs = 0; cs < pdata->num_cs; cs++) { 1129 mtd = (struct mtd_info *)((unsigned int)&info[1] + 1130 (sizeof(*mtd) + sizeof(*host)) * cs); 1131 chip = (struct nand_chip *)(&mtd[1]); 1132 host = (struct pxa3xx_nand_host *)chip; 1133 info->host[cs] = host; 1134 host->mtd = mtd; 1135 host->cs = cs; 1136 host->info_data = info; 1137 mtd->priv = host; 1138 mtd->owner = THIS_MODULE; 1139 1140 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; 1141 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; 1142 chip->controller = &info->controller; 1143 chip->waitfunc = pxa3xx_nand_waitfunc; 1144 chip->select_chip = pxa3xx_nand_select_chip; 1145 chip->cmdfunc = pxa3xx_nand_cmdfunc; 1146 chip->read_word = pxa3xx_nand_read_word; 1147 chip->read_byte = pxa3xx_nand_read_byte; 1148 chip->read_buf = pxa3xx_nand_read_buf; 1149 chip->write_buf = pxa3xx_nand_write_buf; 1150 chip->options |= NAND_NO_SUBPAGE_WRITE; 1151 } 1152 1153 spin_lock_init(&chip->controller->lock); 1154 init_waitqueue_head(&chip->controller->wq); 1155 info->clk = devm_clk_get(&pdev->dev, NULL); 1156 if (IS_ERR(info->clk)) { 1157 
dev_err(&pdev->dev, "failed to get nand clock\n"); 1158 return PTR_ERR(info->clk); 1159 } 1160 ret = clk_prepare_enable(info->clk); 1161 if (ret < 0) 1162 return ret; 1163 1164 if (use_dma) { 1165 /* 1166 * This is a dirty hack to make this driver work from 1167 * devicetree bindings. It can be removed once we have 1168 * a prober DMA controller framework for DT. 1169 */ 1170 if (pdev->dev.of_node && 1171 of_machine_is_compatible("marvell,pxa3xx")) { 1172 info->drcmr_dat = 97; 1173 info->drcmr_cmd = 99; 1174 } else { 1175 r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1176 if (r == NULL) { 1177 dev_err(&pdev->dev, 1178 "no resource defined for data DMA\n"); 1179 ret = -ENXIO; 1180 goto fail_disable_clk; 1181 } 1182 info->drcmr_dat = r->start; 1183 1184 r = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1185 if (r == NULL) { 1186 dev_err(&pdev->dev, 1187 "no resource defined for cmd DMA\n"); 1188 ret = -ENXIO; 1189 goto fail_disable_clk; 1190 } 1191 info->drcmr_cmd = r->start; 1192 } 1193 } 1194 1195 irq = platform_get_irq(pdev, 0); 1196 if (irq < 0) { 1197 dev_err(&pdev->dev, "no IRQ resource defined\n"); 1198 ret = -ENXIO; 1199 goto fail_disable_clk; 1200 } 1201 1202 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1203 info->mmio_base = devm_ioremap_resource(&pdev->dev, r); 1204 if (IS_ERR(info->mmio_base)) { 1205 ret = PTR_ERR(info->mmio_base); 1206 goto fail_disable_clk; 1207 } 1208 info->mmio_phys = r->start; 1209 1210 /* Allocate a buffer to allow flash detection */ 1211 info->buf_size = INIT_BUFFER_SIZE; 1212 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL); 1213 if (info->data_buff == NULL) { 1214 ret = -ENOMEM; 1215 goto fail_disable_clk; 1216 } 1217 1218 /* initialize all interrupts to be disabled */ 1219 disable_int(info, NDSR_MASK); 1220 1221 ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info); 1222 if (ret < 0) { 1223 dev_err(&pdev->dev, "failed to request IRQ\n"); 1224 goto fail_free_buf; 1225 } 1226 1227 
platform_set_drvdata(pdev, info); 1228 1229 return 0; 1230 1231fail_free_buf: 1232 free_irq(irq, info); 1233 kfree(info->data_buff); 1234fail_disable_clk: 1235 clk_disable_unprepare(info->clk); 1236 return ret; 1237} 1238 1239static int pxa3xx_nand_remove(struct platform_device *pdev) 1240{ 1241 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1242 struct pxa3xx_nand_platform_data *pdata; 1243 int irq, cs; 1244 1245 if (!info) 1246 return 0; 1247 1248 pdata = dev_get_platdata(&pdev->dev); 1249 1250 irq = platform_get_irq(pdev, 0); 1251 if (irq >= 0) 1252 free_irq(irq, info); 1253 pxa3xx_nand_free_buff(info); 1254 1255 clk_disable_unprepare(info->clk); 1256 1257 for (cs = 0; cs < pdata->num_cs; cs++) 1258 nand_release(info->host[cs]->mtd); 1259 return 0; 1260} 1261 1262static struct of_device_id pxa3xx_nand_dt_ids[] = { 1263 { 1264 .compatible = "marvell,pxa3xx-nand", 1265 .data = (void *)PXA3XX_NAND_VARIANT_PXA, 1266 }, 1267 {} 1268}; 1269MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); 1270 1271static enum pxa3xx_nand_variant 1272pxa3xx_nand_get_variant(struct platform_device *pdev) 1273{ 1274 const struct of_device_id *of_id = 1275 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev); 1276 if (!of_id) 1277 return PXA3XX_NAND_VARIANT_PXA; 1278 return (enum pxa3xx_nand_variant)of_id->data; 1279} 1280 1281static int pxa3xx_nand_probe_dt(struct platform_device *pdev) 1282{ 1283 struct pxa3xx_nand_platform_data *pdata; 1284 struct device_node *np = pdev->dev.of_node; 1285 const struct of_device_id *of_id = 1286 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev); 1287 1288 if (!of_id) 1289 return 0; 1290 1291 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1292 if (!pdata) 1293 return -ENOMEM; 1294 1295 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL)) 1296 pdata->enable_arbiter = 1; 1297 if (of_get_property(np, "marvell,nand-keep-config", NULL)) 1298 pdata->keep_config = 1; 1299 of_property_read_u32(np, "num-cs", &pdata->num_cs); 1300 1301 
pdev->dev.platform_data = pdata; 1302 1303 return 0; 1304} 1305 1306static int pxa3xx_nand_probe(struct platform_device *pdev) 1307{ 1308 struct pxa3xx_nand_platform_data *pdata; 1309 struct mtd_part_parser_data ppdata = {}; 1310 struct pxa3xx_nand_info *info; 1311 int ret, cs, probe_success; 1312 1313#ifndef ARCH_HAS_DMA 1314 if (use_dma) { 1315 use_dma = 0; 1316 dev_warn(&pdev->dev, 1317 "This platform can't do DMA on this device\n"); 1318 } 1319#endif 1320 ret = pxa3xx_nand_probe_dt(pdev); 1321 if (ret) 1322 return ret; 1323 1324 pdata = dev_get_platdata(&pdev->dev); 1325 if (!pdata) { 1326 dev_err(&pdev->dev, "no platform data defined\n"); 1327 return -ENODEV; 1328 } 1329 1330 ret = alloc_nand_resource(pdev); 1331 if (ret) { 1332 dev_err(&pdev->dev, "alloc nand resource failed\n"); 1333 return ret; 1334 } 1335 1336 info = platform_get_drvdata(pdev); 1337 info->variant = pxa3xx_nand_get_variant(pdev); 1338 probe_success = 0; 1339 for (cs = 0; cs < pdata->num_cs; cs++) { 1340 struct mtd_info *mtd = info->host[cs]->mtd; 1341 1342 /* 1343 * The mtd name matches the one used in 'mtdparts' kernel 1344 * parameter. This name cannot be changed or otherwise 1345 * user's mtd partitions configuration would get broken. 
1346 */ 1347 mtd->name = "pxa3xx_nand-0"; 1348 info->cs = cs; 1349 ret = pxa3xx_nand_scan(mtd); 1350 if (ret) { 1351 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n", 1352 cs); 1353 continue; 1354 } 1355 1356 ppdata.of_node = pdev->dev.of_node; 1357 ret = mtd_device_parse_register(mtd, NULL, 1358 &ppdata, pdata->parts[cs], 1359 pdata->nr_parts[cs]); 1360 if (!ret) 1361 probe_success = 1; 1362 } 1363 1364 if (!probe_success) { 1365 pxa3xx_nand_remove(pdev); 1366 return -ENODEV; 1367 } 1368 1369 return 0; 1370} 1371 1372#ifdef CONFIG_PM 1373static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) 1374{ 1375 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1376 struct pxa3xx_nand_platform_data *pdata; 1377 struct mtd_info *mtd; 1378 int cs; 1379 1380 pdata = dev_get_platdata(&pdev->dev); 1381 if (info->state) { 1382 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); 1383 return -EAGAIN; 1384 } 1385 1386 for (cs = 0; cs < pdata->num_cs; cs++) { 1387 mtd = info->host[cs]->mtd; 1388 mtd_suspend(mtd); 1389 } 1390 1391 return 0; 1392} 1393 1394static int pxa3xx_nand_resume(struct platform_device *pdev) 1395{ 1396 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); 1397 struct pxa3xx_nand_platform_data *pdata; 1398 struct mtd_info *mtd; 1399 int cs; 1400 1401 pdata = dev_get_platdata(&pdev->dev); 1402 /* We don't want to handle interrupt without calling mtd routine */ 1403 disable_int(info, NDCR_INT_MASK); 1404 1405 /* 1406 * Directly set the chip select to a invalid value, 1407 * then the driver would reset the timing according 1408 * to current chip select at the beginning of cmdfunc 1409 */ 1410 info->cs = 0xff; 1411 1412 /* 1413 * As the spec says, the NDSR would be updated to 0x1800 when 1414 * doing the nand_clk disable/enable. 
1415 * To prevent it damaging state machine of the driver, clear 1416 * all status before resume 1417 */ 1418 nand_writel(info, NDSR, NDSR_MASK); 1419 for (cs = 0; cs < pdata->num_cs; cs++) { 1420 mtd = info->host[cs]->mtd; 1421 mtd_resume(mtd); 1422 } 1423 1424 return 0; 1425} 1426#else 1427#define pxa3xx_nand_suspend NULL 1428#define pxa3xx_nand_resume NULL 1429#endif 1430 1431static struct platform_driver pxa3xx_nand_driver = { 1432 .driver = { 1433 .name = "pxa3xx-nand", 1434 .of_match_table = pxa3xx_nand_dt_ids, 1435 }, 1436 .probe = pxa3xx_nand_probe, 1437 .remove = pxa3xx_nand_remove, 1438 .suspend = pxa3xx_nand_suspend, 1439 .resume = pxa3xx_nand_resume, 1440}; 1441 1442module_platform_driver(pxa3xx_nand_driver); 1443 1444MODULE_LICENSE("GPL"); 1445MODULE_DESCRIPTION("PXA3xx NAND controller driver"); 1446