/*
 * YAFFS: Yet another FFS. A NAND-flash specific file system.
 *
 * Copyright (C) 2002 Aleph One Ltd.
 *   for Toby Churchill Ltd and Brightstar Engineering
 *
 * Created by Charles Manning <charles@aleph1.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This version is hacked to emulate 2k page NAND for YAFFS2 testing.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
#include <linux/locks.h>
#endif

#include <asm/uaccess.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include "../yaffs_nandemul2k.h"

#define ALLOCATE(x) kmalloc(x,GFP_KERNEL)
#define FREE(x)     kfree(x)

#define NAND_SHIFT      (11)   /* 2k bytes of data per page */
#define PAGE_DATA_SIZE  (1 << NAND_SHIFT)
#define PAGE_SPARE_SIZE (64)
#define BLK_SHIFT	6
#define PAGES_PER_BLOCK (1 << BLK_SHIFT)	/* = 64 */


#define EM_SIZE_IN_MEG 4
#define EM_SIZE_IN_BYTES (EM_SIZE_IN_MEG * (1<<20))

#define PAGE_TOTAL_SIZE (PAGE_DATA_SIZE+PAGE_SPARE_SIZE)

#define BLOCK_TOTAL_SIZE (PAGES_PER_BLOCK * PAGE_TOTAL_SIZE)

#define BLOCKS_PER_MEG ((1<<20)/(PAGES_PER_BLOCK * PAGE_DATA_SIZE))
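
/*
 * Resulting emulated geometry:
 *   page:   2048 data bytes + 64 spare bytes (stored contiguously)
 *   block:  64 pages = 128KiB of data
 *   device: EM_SIZE_IN_MEG megabytes of data = 8 blocks per MiB,
 *           i.e. 32 blocks for the default 4MiB device.
 */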

static struct mtd_info nandemul2k_mtd;

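/*
 * The emulated device lives entirely in kmalloc'd RAM:
 * a nandemul_Device holds an array of pointers to nandemul_Block,
 * each block holds PAGES_PER_BLOCK pointers to nandemul_Page, and
 * each page stores its data and spare area contiguously.
 */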
typedef struct
{
	__u8 data[PAGE_TOTAL_SIZE]; // Data + spare
	int empty;      // is this empty?
} nandemul_Page;


typedef struct
{
	nandemul_Page *page[PAGES_PER_BLOCK];
	int damaged;
} nandemul_Block;


typedef struct
{
	nandemul_Block **block;
	int nBlocks;
} nandemul_Device;

static nandemul_Device ned;

static int sizeInMB = EM_SIZE_IN_MEG;

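/* Give other tasks a chance to run; crudely simulates the time a real erase takes. */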
static void nandemul_yield(int n)
{
#ifdef __KERNEL__
	if(n > 0) schedule_timeout(n);
#endif
}

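/*
 * Copy nBytes out of an emulated page, starting at byte offset 'start'.
 * Data and spare are stored contiguously, so 'start' may index into the
 * spare area (start >= PAGE_DATA_SIZE).
 */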
static void nandemul2k_Read(void *buffer, int page, int start, int nBytes)
{
	int pg = page%PAGES_PER_BLOCK;
	int blk = page/PAGES_PER_BLOCK;

	if(buffer && nBytes > 0)
	{
		memcpy(buffer,&ned.block[blk]->page[pg]->data[start],nBytes);
	}
}

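/*
 * Programming follows real NAND behaviour: bits can only be cleared
 * (the new data is ANDed into the existing data); only an erase sets
 * them back to one.
 */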
static void nandemul2k_Program(const void *buffer, int page, int start, int nBytes)
{
	int pg = page%PAGES_PER_BLOCK;
	int blk = page/PAGES_PER_BLOCK;
	__u8 *p;
	const __u8 *b = (const __u8 *)buffer;

	if(!buffer) return;

	p = &ned.block[blk]->page[pg]->data[start];

	while(nBytes > 0)
	{
		*p = *p & *b;
		p++;
		b++;
		nBytes--;
	}
}

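/* Erase a block: set every byte of every page (data and spare) to 0xFF and flag the pages as empty. */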
static void nandemul2k_DoErase(int blockNumber)
{
	int i;

	nandemul_Block *blk;

	if(blockNumber < 0 || blockNumber >= ned.nBlocks)
	{
		return;
	}

	blk = ned.block[blockNumber];

	for(i = 0; i < PAGES_PER_BLOCK; i++)
	{
		memset(blk->page[i],0xff,sizeof(nandemul_Page));
		blk->page[i]->empty = 1;
	}
	nandemul_yield(2);
}


static int nandemul2k_CalcNBlocks(void)
{
	return EM_SIZE_IN_MEG * BLOCKS_PER_MEG;
}

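/*
 * One-time, lazy construction of the emulated device.
 * Returns 0 if already initialised, 1 after a successful first-time
 * initialisation, or ENOMEM if an allocation fails.
 */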
static int CheckInit(void)
{
	static int initialised = 0;

	int i,j;

	int fail = 0;
	int nBlocks;

	if(initialised)
	{
		return 0;
	}

	ned.nBlocks = nBlocks = nandemul2k_CalcNBlocks();

	ned.block = ALLOCATE(sizeof(nandemul_Block*) * nBlocks);

	if(!ned.block) return ENOMEM;

	for(i=fail=0; i < nBlocks; i++)
	{
		nandemul_Block *blk;
		int pageFail = 0;

		if(!(blk = ned.block[i] = ALLOCATE(sizeof(nandemul_Block))))
		{
			fail = 1;
		}
		else
		{
			for(j = 0; j < PAGES_PER_BLOCK; j++)
			{
				if((blk->page[j] = ALLOCATE(sizeof(nandemul_Page))) == 0)
				{
					pageFail = fail = 1;
				}
			}
			/* Only erase a block whose pages all allocated */
			if(!pageFail)
			{
				nandemul2k_DoErase(i);
			}
			ned.block[i]->damaged = 0;
		}
	}

	if(fail)
	{
		/* Free whatever we did manage to allocate */
		for(i = 0; i < nBlocks; i++)
		{
			if(ned.block[i])
			{
				for(j = 0; j < PAGES_PER_BLOCK; j++)
				{
					FREE(ned.block[i]->page[j]);
				}
				FREE(ned.block[i]);
			}
		}
		FREE(ned.block);
		ned.block = 0;

		return ENOMEM;
	}

	ned.nBlocks = nBlocks;

	initialised = 1;

	return 1;
}

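/* Free every page, then every block, then the block pointer array itself. */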
static void nandemul2k_CleanUp(void)
{
	int i,j;

	for(i = 0; i < ned.nBlocks; i++)
	{
		for(j = 0; j < PAGES_PER_BLOCK; j++)
		{
			FREE(ned.block[i]->page[j]);
		}
		FREE(ned.block[i]);
	}
	FREE(ned.block);
	ned.block = 0;
}

int nandemul2k_GetBytesPerChunk(void)  { return PAGE_DATA_SIZE; }

int nandemul2k_GetChunksPerBlock(void) { return PAGES_PER_BLOCK; }
int nandemul2k_GetNumberOfBlocks(void) { return nandemul2k_CalcNBlocks(); }


static int nandemul2k_ReadId(__u8 *vendorId, __u8 *deviceId)
{
	*vendorId = 'Y';
	*deviceId = '2';

	return 1;
}


static int nandemul2k_ReadStatus(__u8 *status)
{
	*status = 0;
	return 1;
}


#ifdef CONFIG_MTD_NAND_ECC
#include <linux/mtd/nand_ecc.h>
#endif

/*
 * NAND low-level MTD interface functions
 */
static int nand_read (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf);
static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf, u_char *oob_buf, struct nand_oobinfo *dummy);
static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf);
static int nand_write (struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf);
static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf,
			u_char *oob_buf, struct nand_oobinfo *dummy);
static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,7))
static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
#else
static int nand_writev (struct mtd_info *mtd, const struct iovec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
#endif
static int nand_erase (struct mtd_info *mtd, struct erase_info *instr);
static void nand_sync (struct mtd_info *mtd);

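/*
 * These implement the older MTD interface (the one with read_ecc/write_ecc
 * and oobblock); nandemul2k_scan() wires them into the mtd_info below.
 */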

/*
 * NAND read
 */
static int nand_read (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	return nand_read_ecc (mtd, from, len, retlen, buf, NULL, NULL);
}


/*
 * NAND read with ECC
 */
static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf, u_char *oob_buf, struct nand_oobinfo *oobsel)
{
	int start, page;
	int n = len;
	int nToCopy;

	/* Do not allow reads past end of device */
	if ((from + len) > mtd->size) {
		*retlen = 0;
		return -EINVAL;
	}

	/* Initialize return value */
	*retlen = 0;

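	/*
	 * Copy one chunk per iteration, never crossing a page boundary;
	 * when oob_buf is supplied the whole spare area of each page is
	 * returned alongside the data.
	 */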
	while(n > 0)
	{
		/* First we calculate the starting page */
		page = from >> NAND_SHIFT;

		/* Get raw starting column */
		start = from & (mtd->oobblock-1);

		/* Check for the case where the start and end are in the same page */
		if((start + n) < mtd->oobblock)
		{
			nToCopy = n;
		}
		else
		{
			nToCopy = mtd->oobblock - start;
		}

		nandemul2k_Read(buf, page, start, nToCopy);
		nandemul2k_Read(oob_buf, page, PAGE_DATA_SIZE, PAGE_SPARE_SIZE);

		n -= nToCopy;
		from += nToCopy;
		buf += nToCopy;
		if(oob_buf) oob_buf += PAGE_SPARE_SIZE;
		*retlen += nToCopy;
	}

	return 0;
}

/*
 * NAND read out-of-band
 */
static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	int col, page;

	T(0,("nand_read_oob: from = 0x%08x, buf = 0x%08x, len = %i\n",
		(unsigned int) from, (unsigned int) buf, (int) len));

	/* Shift to get page */
	page = ((int) from) >> NAND_SHIFT;

	/* Mask to get column */
	col = from & 0x0f;

	/* Initialize return length value */
	*retlen = 0;

	/* Do not allow reads past end of device */
	if ((from + len) > mtd->size) {
		T(0,("nand_read_oob: Attempt read beyond end of device\n"));
		*retlen = 0;
		return -EINVAL;
	}

	nandemul2k_Read(buf, page, PAGE_DATA_SIZE + col, len);

	/* Return happy */
	*retlen = len;
	return 0;
}

/*
 * NAND write
 */
static int nand_write (struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf)
{
	return nand_write_ecc (mtd, to, len, retlen, buf, NULL, NULL);
}

/*
 * NAND write with ECC
 */
static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf,
			u_char *oob_buf, struct nand_oobinfo *dummy)
{
	int start, page;
	int n = len;
	int nToCopy;

	/* Do not allow writes past end of device */
	if ((to + len) > mtd->size) {
		*retlen = 0;
		return -EINVAL;
	}

	/* Initialize return value */
	*retlen = 0;

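	/*
	 * Program one chunk per iteration, never crossing a page boundary;
	 * when oob_buf is supplied the spare area of each page is programmed
	 * as well.
	 */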
	while(n > 0)
	{
		/* First we calculate the starting page */
		page = to >> NAND_SHIFT;

		/* Get raw starting column */
		start = to & (mtd->oobblock - 1);

		/* Check for the case where the start and end are in the same page */
		if((start + n) < mtd->oobblock)
		{
			nToCopy = n;
		}
		else
		{
			nToCopy = mtd->oobblock - start;
		}

		nandemul2k_Program(buf, page, start, nToCopy);
		nandemul2k_Program(oob_buf, page, PAGE_DATA_SIZE, PAGE_SPARE_SIZE);

		n -= nToCopy;
		to += nToCopy;
		buf += nToCopy;
		if(oob_buf) oob_buf += PAGE_SPARE_SIZE;
		*retlen += nToCopy;
	}

	return 0;
}

/*
 * NAND write out-of-band
 */
static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf)
{
	int col, page;

	T(0,("nand_write_oob: to = 0x%08x, len = %i\n",
		(unsigned int) to, (int) len));

	/* Shift to get page */
	page = ((int) to) >> NAND_SHIFT;

	/* Mask to get column */
	col = to & 0x0f;

	/* Initialize return length value */
	*retlen = 0;

	/* Do not allow writes past end of device */
	if ((to + len) > mtd->size) {
		T(0,("nand_write_oob: Attempt write beyond end of device\n"));
		*retlen = 0;
		return -EINVAL;
	}

	nandemul2k_Program(buf, page, PAGE_DATA_SIZE + col, len);

	/* Return happy */
	*retlen = len;
	return 0;
}

/*
 * NAND write with iovec
 */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,7))
static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen)
#else
static int nand_writev (struct mtd_info *mtd, const struct iovec *vecs,
			unsigned long count, loff_t to, size_t *retlen)
#endif
{
	return -EINVAL;
}

/*
 * NAND erase a block
 */
static int nand_erase (struct mtd_info *mtd, struct erase_info *instr)
{
	int i, nBlocks, block;

	T(0,("nand_erase: start = 0x%08x, len = %i\n",
		(unsigned int) instr->addr, (unsigned int) instr->len));

	/* Start address must align on block boundary */
	if (instr->addr & (mtd->erasesize - 1)) {
		T(0,("nand_erase: Unaligned address\n"));
		return -EINVAL;
	}

	/* Length must align on block boundary */
	if (instr->len & (mtd->erasesize - 1)) {
		T(0,("nand_erase: Length not block aligned\n"));
		return -EINVAL;
	}

	/* Do not allow erase past end of device */
	if ((instr->len + instr->addr) > mtd->size) {
		T(0,("nand_erase: Erase past end of device\n"));
		return -EINVAL;
	}

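	/*
	 * Addresses and lengths are in bytes; a block holds
	 * 1 << (NAND_SHIFT + BLK_SHIFT) = 128KiB of data, so shifting by
	 * both converts to block numbers and block counts.
	 */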
	nBlocks = instr->len >> (NAND_SHIFT + BLK_SHIFT);
	block = instr->addr >> (NAND_SHIFT + BLK_SHIFT);

	for(i = 0; i < nBlocks; i++)
	{
		nandemul2k_DoErase(block);
		block++;
	}

	return 0;
}

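/* The emulator never develops or records bad blocks; these are just stubs to satisfy MTD users that probe for them. */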
static int nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}


/*
 * NAND sync
 */
static void nand_sync (struct mtd_info *mtd)
{
	T(0,("nand_sync: called\n"));
}

/*
 * Scan for the NAND device
 */
static int nandemul2k_scan (struct mtd_info *mtd, int nchips)
{
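	/*
	 * Describe the emulated geometry to MTD: 2048-byte pages ('oobblock'),
	 * 64-byte spare areas, 128KiB erase blocks and sizeInMB megabytes of
	 * data in total.
	 */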
	mtd->oobblock = PAGE_DATA_SIZE;
	mtd->oobsize  = PAGE_SPARE_SIZE;
	mtd->erasesize = PAGE_DATA_SIZE * PAGES_PER_BLOCK;
	mtd->size = sizeInMB * 1024 * 1024;

	/* Fill in remaining MTD driver data */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->owner = THIS_MODULE;
	mtd->ecctype = MTD_ECC_NONE;
	mtd->erase = nand_erase;
	mtd->point = NULL;
	mtd->unpoint = NULL;
	mtd->read = nand_read;
	mtd->write = nand_write;
	mtd->read_ecc = nand_read_ecc;
	mtd->write_ecc = nand_write_ecc;
	mtd->read_oob = nand_read_oob;
	mtd->write_oob = nand_write_oob;
	mtd->block_isbad = nand_block_isbad;
	mtd->block_markbad = nand_block_markbad;
	mtd->readv = NULL;
	mtd->writev = nand_writev;
	mtd->sync = nand_sync;
	mtd->lock = NULL;
	mtd->unlock = NULL;
	mtd->suspend = NULL;
	mtd->resume = NULL;

	mtd->name = "NANDemul2k";

	/* Return happy */
	return 0;
}

#if 0
#ifdef MODULE
MODULE_PARM(sizeInMB, "i");

__setup("sizeInMB=",sizeInMB);
#endif
#endif

/*
 * Define partitions for flash devices
 */

static struct mtd_partition nandemul2k_partition[] =
{
	{ .name		= "NANDemul partition 1",
	  .offset	= 0,
	  .size		= 0 },
};

static int nPartitions = sizeof(nandemul2k_partition)/sizeof(nandemul2k_partition[0]);

/*
 * Main initialization routine
 */
int __init nandemul2k_init (void)
{
	/* Allocate the emulated NAND in RAM */
	if(CheckInit() == ENOMEM)
	{
		return -ENOMEM;
	}

	nandemul2k_scan(&nandemul2k_mtd, 1);

	/* Build the partition table: one partition covering the whole device */
	nandemul2k_partition[0].size = sizeInMB * 1024 * 1024;

	/* Register the partitions */
	add_mtd_partitions(&nandemul2k_mtd, nandemul2k_partition, nPartitions);

	return 0;
}

module_init(nandemul2k_init);

/*
 * Clean up routine
 */
#ifdef MODULE
static void __exit nandemul2k_cleanup (void)
{
	/* Unregister partitions */
	del_mtd_partitions(&nandemul2k_mtd);

	/* Unregister the device */
	del_mtd_device(&nandemul2k_mtd);

	/* Free the emulated NAND memory */
	nandemul2k_CleanUp();
}
module_exit(nandemul2k_cleanup);
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Charles Manning <manningc@aleph1.co.uk>");
MODULE_DESCRIPTION("2k Page/128k Block NAND emulated in RAM");