mmc_test.c revision 5c25aee5364550d7fa6314886370e76cda18d7e2
1/*
2 *  linux/drivers/mmc/card/mmc_test.c
3 *
4 *  Copyright 2007-2008 Pierre Ossman
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12#include <linux/mmc/core.h>
13#include <linux/mmc/card.h>
14#include <linux/mmc/host.h>
15#include <linux/mmc/mmc.h>
16#include <linux/slab.h>
17
18#include <linux/scatterlist.h>
19#include <linux/swap.h>		/* For nr_free_buffer_pages() */
20
21#define RESULT_OK		0
22#define RESULT_FAIL		1
23#define RESULT_UNSUP_HOST	2
24#define RESULT_UNSUP_CARD	3
25
26#define BUFFER_ORDER		2
27#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
28
29/*
30 * Limit the test area size to the maximum MMC HC erase group size.  Note that
31 * the maximum SD allocation unit size is just 4MiB.
32 */
33#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
34
35/**
36 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
37 * @page: first page in the allocation
38 * @order: order of the number of pages allocated
39 */
struct mmc_test_pages {
	struct page *page;	/* first page of a physically contiguous run */
	unsigned int order;	/* run holds (1 << order) pages */
};
44
45/**
46 * struct mmc_test_mem - allocated memory.
47 * @arr: array of allocations
48 * @cnt: number of allocations
49 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;	/* array of page-run allocations */
	unsigned int cnt;		/* number of valid entries in @arr */
};
54
55/**
56 * struct mmc_test_area - information for performance tests.
57 * @max_sz: test area size (in bytes)
58 * @dev_addr: address on card at which to do performance tests
59 * @max_tfr: maximum transfer size allowed by driver (in bytes)
60 * @max_segs: maximum segments allowed by driver in scatterlist @sg
61 * @max_seg_sz: maximum segment size allowed by driver
62 * @blocks: number of (512 byte) blocks currently mapped by @sg
63 * @sg_len: length of currently mapped scatterlist @sg
64 * @mem: allocated memory
65 * @sg: scatterlist
66 */
struct mmc_test_area {
	unsigned long max_sz;		/* test area size (bytes) */
	unsigned int dev_addr;		/* card address at which tests run */
	unsigned int max_tfr;		/* max transfer size (bytes) */
	unsigned int max_segs;		/* max scatterlist segments */
	unsigned int max_seg_sz;	/* max scatterlist segment size */
	unsigned int blocks;		/* 512-byte blocks currently mapped */
	unsigned int sg_len;		/* length of mapped scatterlist @sg */
	struct mmc_test_mem *mem;	/* backing memory for @sg */
	struct scatterlist *sg;		/* scatterlist used for transfers */
};
78
79/**
80 * struct mmc_test_card - test information.
81 * @card: card under test
82 * @scratch: transfer buffer
83 * @buffer: transfer buffer
84 * @highmem: buffer for highmem tests
85 * @area: information for performance tests
86 */
struct mmc_test_card {
	struct mmc_card	*card;		/* card under test */

	u8		scratch[BUFFER_SIZE];	/* scratch transfer buffer */
	u8		*buffer;		/* main transfer buffer */
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;	/* page backing highmem tests */
#endif
	struct mmc_test_area area;	/* state for performance tests */
};
97
98/*******************************************************************/
99/*  General helper functions                                       */
100/*******************************************************************/
101
102/*
103 * Configure correct block size in card
104 */
105static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
106{
107	struct mmc_command cmd;
108	int ret;
109
110	cmd.opcode = MMC_SET_BLOCKLEN;
111	cmd.arg = size;
112	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
113	ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
114	if (ret)
115		return ret;
116
117	return 0;
118}
119
120/*
121 * Fill in the mmc_request structure given a set of transfer parameters.
122 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	/* Multi-block transfers use the MULTIPLE_BLOCK command variants */
	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	/*
	 * Block-addressed (high capacity) cards take a sector number;
	 * byte-addressed cards take a byte offset, hence the << 9.
	 */
	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	/* A single-block transfer needs no STOP_TRANSMISSION command */
	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	/* Derive an appropriate data timeout from the card's parameters */
	mmc_set_data_timeout(mrq->data, test->card);
}
159
160static int mmc_test_busy(struct mmc_command *cmd)
161{
162	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
163		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
164}
165
166/*
167 * Wait for the card to finish the busy state
168 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd;

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		/* CMD13: poll the card's status register */
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		/*
		 * Warn (once per call) if the host driver returned before
		 * the card left the busy state - this loop papers over it.
		 */
		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			printk(KERN_INFO "%s: Warning: Host did not "
				"wait for busy state to end.\n",
				mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}
196
197/*
198 * Transfer a single sector of kernel addressable data
199 */
200static int mmc_test_buffer_transfer(struct mmc_test_card *test,
201	u8 *buffer, unsigned addr, unsigned blksz, int write)
202{
203	int ret;
204
205	struct mmc_request mrq;
206	struct mmc_command cmd;
207	struct mmc_command stop;
208	struct mmc_data data;
209
210	struct scatterlist sg;
211
212	memset(&mrq, 0, sizeof(struct mmc_request));
213	memset(&cmd, 0, sizeof(struct mmc_command));
214	memset(&data, 0, sizeof(struct mmc_data));
215	memset(&stop, 0, sizeof(struct mmc_command));
216
217	mrq.cmd = &cmd;
218	mrq.data = &data;
219	mrq.stop = &stop;
220
221	sg_init_one(&sg, buffer, blksz);
222
223	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
224
225	mmc_wait_for_req(test->card->host, &mrq);
226
227	if (cmd.error)
228		return cmd.error;
229	if (data.error)
230		return data.error;
231
232	ret = mmc_test_wait_busy(test);
233	if (ret)
234		return ret;
235
236	return 0;
237}
238
239static void mmc_test_free_mem(struct mmc_test_mem *mem)
240{
241	if (!mem)
242		return;
243	while (mem->cnt--)
244		__free_pages(mem->arr[mem->cnt].page,
245			     mem->arr[mem->cnt].order);
246	kfree(mem->arr);
247	kfree(mem);
248}
249
250/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
252 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
253 * not exceed a maximum number of segments and try not to make segments much
254 * bigger than maximum segment size.
255 */
256static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
257					       unsigned long max_sz,
258					       unsigned int max_segs,
259					       unsigned int max_seg_sz)
260{
261	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
262	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
263	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
264	unsigned long page_cnt = 0;
265	unsigned long limit = nr_free_buffer_pages() >> 4;
266	struct mmc_test_mem *mem;
267
268	if (max_page_cnt > limit)
269		max_page_cnt = limit;
270	if (max_page_cnt < min_page_cnt)
271		max_page_cnt = min_page_cnt;
272
273	if (max_seg_page_cnt > max_page_cnt)
274		max_seg_page_cnt = max_page_cnt;
275
276	if (max_segs > max_page_cnt)
277		max_segs = max_page_cnt;
278
279	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
280	if (!mem)
281		return NULL;
282
283	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
284			   GFP_KERNEL);
285	if (!mem->arr)
286		goto out_free;
287
288	while (max_page_cnt) {
289		struct page *page;
290		unsigned int order;
291		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
292				__GFP_NORETRY;
293
294		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
295		while (1) {
296			page = alloc_pages(flags, order);
297			if (page || !order)
298				break;
299			order -= 1;
300		}
301		if (!page) {
302			if (page_cnt < min_page_cnt)
303				goto out_free;
304			break;
305		}
306		mem->arr[mem->cnt].page = page;
307		mem->arr[mem->cnt].order = order;
308		mem->cnt += 1;
309		if (max_page_cnt <= (1UL << order))
310			break;
311		if (mem->cnt >= max_segs) {
312			if (page_cnt < min_page_cnt)
313				goto out_free;
314			break;
315		}
316		max_page_cnt -= 1UL << order;
317		page_cnt += 1UL << order;
318	}
319
320	return mem;
321
322out_free:
323	mmc_test_free_mem(mem);
324	return NULL;
325}
326
327/*
328 * Map memory into a scatterlist.  Optionally allow the same memory to be
329 * mapped more than once.
330 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		/* One scatterlist segment per allocated page run */
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			/* Clamp to remaining size and the segment limit */
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			/* Ran out of scatterlist entries */
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
		/* With @repeat, wrap around and map the same memory again */
	} while (sz && repeat);

	/* Could not cover sz with the available memory/segments */
	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
372
373/*
374 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
375 * same memory to be mapped more than once.
376 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	/*
	 * Walk the allocations backwards, and the pages inside each run
	 * backwards, so consecutive segments are never physically adjacent.
	 */
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			/* Skip a page contiguous with the previous mapping */
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			/* Each segment is at most one page */
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			/* Ran out of scatterlist entries */
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		/* Wrap around: the same memory may be mapped repeatedly */
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
424
425/*
426 * Calculate transfer rate in bytes per second.
427 */
428static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
429{
430	uint64_t ns;
431
432	ns = ts->tv_sec;
433	ns *= 1000000000;
434	ns += ts->tv_nsec;
435
436	bytes *= 1000000000;
437
438	while (ns > UINT_MAX) {
439		bytes >>= 1;
440		ns >>= 1;
441	}
442
443	if (!ns)
444		return 0;
445
446	do_div(bytes, (uint32_t)ns);
447
448	return bytes;
449}
450
451/*
452 * Print the transfer rate.
453 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	struct timespec ts;

	/* Elapsed wall-clock time of the transfer */
	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);

	/* Report both decimal (kB/s) and binary (KiB/s) rates */
	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors == 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
}
470
471/*
472 * Print the average transfer rate.
473 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	/* Total bytes moved across all @count transfers */
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);

	/* Report both decimal (kB/s) and binary (KiB/s) rates */
	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors == 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024);
}
493
494/*
495 * Return the card size in sectors.
496 */
497static unsigned int mmc_test_capacity(struct mmc_card *card)
498{
499	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
500		return card->ext_csd.sectors;
501	else
502		return card->csd.capacity << (card->csd.read_blkbits - 9);
503}
504
505/*******************************************************************/
506/*  Test preparation and cleanup                                   */
507/*******************************************************************/
508
509/*
510 * Fill the first couple of sectors of the card with known data
511 * so that bad reads/writes can be detected
512 */
513static int __mmc_test_prepare(struct mmc_test_card *test, int write)
514{
515	int ret, i;
516
517	ret = mmc_test_set_blksize(test, 512);
518	if (ret)
519		return ret;
520
521	if (write)
522		memset(test->buffer, 0xDF, 512);
523	else {
524		for (i = 0;i < 512;i++)
525			test->buffer[i] = i;
526	}
527
528	for (i = 0;i < BUFFER_SIZE / 512;i++) {
529		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
530		if (ret)
531			return ret;
532	}
533
534	return 0;
535}
536
/* Prepare the card with the 0xDF pattern ahead of verified write tests */
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}
541
/* Prepare the card with a counting pattern ahead of verified read tests */
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
546
547static int mmc_test_cleanup(struct mmc_test_card *test)
548{
549	int ret, i;
550
551	ret = mmc_test_set_blksize(test, 512);
552	if (ret)
553		return ret;
554
555	memset(test->buffer, 0, 512);
556
557	for (i = 0;i < BUFFER_SIZE / 512;i++) {
558		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
559		if (ret)
560			return ret;
561	}
562
563	return 0;
564}
565
566/*******************************************************************/
567/*  Test execution helpers                                         */
568/*******************************************************************/
569
570/*
571 * Modifies the mmc_request to perform the "short transfer" tests
572 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		/*
		 * Multi-block data with a single-block command: the host
		 * should transfer only the first block.
		 */
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		/*
		 * Data attached to a command (CMD13) that moves no data:
		 * the transfer should time out.
		 */
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
587
588/*
589 * Checks that a normal transfer didn't have any errors
590 */
591static int mmc_test_check_result(struct mmc_test_card *test,
592	struct mmc_request *mrq)
593{
594	int ret;
595
596	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
597
598	ret = 0;
599
600	if (!ret && mrq->cmd->error)
601		ret = mrq->cmd->error;
602	if (!ret && mrq->data->error)
603		ret = mrq->data->error;
604	if (!ret && mrq->stop && mrq->stop->error)
605		ret = mrq->stop->error;
606	if (!ret && mrq->data->bytes_xfered !=
607		mrq->data->blocks * mrq->data->blksz)
608		ret = RESULT_FAIL;
609
610	if (ret == -EINVAL)
611		ret = RESULT_UNSUP_HOST;
612
613	return ret;
614}
615
616/*
617 * Checks that a "short transfer" behaved as expected
618 */
619static int mmc_test_check_broken_result(struct mmc_test_card *test,
620	struct mmc_request *mrq)
621{
622	int ret;
623
624	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
625
626	ret = 0;
627
628	if (!ret && mrq->cmd->error)
629		ret = mrq->cmd->error;
630	if (!ret && mrq->data->error == 0)
631		ret = RESULT_FAIL;
632	if (!ret && mrq->data->error != -ETIMEDOUT)
633		ret = mrq->data->error;
634	if (!ret && mrq->stop && mrq->stop->error)
635		ret = mrq->stop->error;
636	if (mrq->data->blocks > 1) {
637		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
638			ret = RESULT_FAIL;
639	} else {
640		if (!ret && mrq->data->bytes_xfered > 0)
641			ret = RESULT_FAIL;
642	}
643
644	if (ret == -EINVAL)
645		ret = RESULT_UNSUP_HOST;
646
647	return ret;
648}
649
650/*
651 * Tests a basic transfer with certain parameters
652 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	/*
	 * NOTE(review): the return value of mmc_test_wait_busy() is ignored
	 * here - presumably errors surface through the request itself;
	 * confirm before relying on it.
	 */
	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}
680
681/*
682 * Tests a transfer where the card will fail completely or partly
683 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	/* Build a normal request, then sabotage its command/data pairing */
	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}
714
715/*
716 * Does a complete transfer test where data is also validated
717 *
718 * Note: mmc_test_prepare() must have been done before this call
719 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		/* Counting pattern to be written and verified below */
		for (i = 0;i < blocks * blksz;i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	/* IRQs off while touching the sg pages via kmap_atomic */
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		/* Read the data back in 512-byte sectors to verify it */
		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		/*
		 * Read one extra sector so the 0xDF fill beyond the
		 * written data can be checked too.
		 */
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0;i < sectors;i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		/* Written area must hold the counting pattern... */
		for (i = 0;i < blocks * blksz;i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		/* ...and the rest must still hold the 0xDF prepare fill */
		for (;i < sectors * 512;i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		/* Read path: the card holds the counting pattern already */
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0;i < blocks * blksz;i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
791
792/*******************************************************************/
793/*  Tests                                                          */
794/*******************************************************************/
795
struct mmc_test_case {
	const char *name;	/* human-readable test name */

	int (*prepare)(struct mmc_test_card *);	/* optional setup hook */
	int (*run)(struct mmc_test_card *);	/* the test itself */
	int (*cleanup)(struct mmc_test_card *);	/* optional teardown hook */
};
803
804static int mmc_test_basic_write(struct mmc_test_card *test)
805{
806	int ret;
807	struct scatterlist sg;
808
809	ret = mmc_test_set_blksize(test, 512);
810	if (ret)
811		return ret;
812
813	sg_init_one(&sg, test->buffer, 512);
814
815	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
816	if (ret)
817		return ret;
818
819	return 0;
820}
821
822static int mmc_test_basic_read(struct mmc_test_card *test)
823{
824	int ret;
825	struct scatterlist sg;
826
827	ret = mmc_test_set_blksize(test, 512);
828	if (ret)
829		return ret;
830
831	sg_init_one(&sg, test->buffer, 512);
832
833	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
834	if (ret)
835		return ret;
836
837	return 0;
838}
839
840static int mmc_test_verify_write(struct mmc_test_card *test)
841{
842	int ret;
843	struct scatterlist sg;
844
845	sg_init_one(&sg, test->buffer, 512);
846
847	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
848	if (ret)
849		return ret;
850
851	return 0;
852}
853
854static int mmc_test_verify_read(struct mmc_test_card *test)
855{
856	int ret;
857	struct scatterlist sg;
858
859	sg_init_one(&sg, test->buffer, 512);
860
861	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
862	if (ret)
863		return ret;
864
865	return 0;
866}
867
868static int mmc_test_multi_write(struct mmc_test_card *test)
869{
870	int ret;
871	unsigned int size;
872	struct scatterlist sg;
873
874	if (test->card->host->max_blk_count == 1)
875		return RESULT_UNSUP_HOST;
876
877	size = PAGE_SIZE * 2;
878	size = min(size, test->card->host->max_req_size);
879	size = min(size, test->card->host->max_seg_size);
880	size = min(size, test->card->host->max_blk_count * 512);
881
882	if (size < 1024)
883		return RESULT_UNSUP_HOST;
884
885	sg_init_one(&sg, test->buffer, size);
886
887	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
888	if (ret)
889		return ret;
890
891	return 0;
892}
893
894static int mmc_test_multi_read(struct mmc_test_card *test)
895{
896	int ret;
897	unsigned int size;
898	struct scatterlist sg;
899
900	if (test->card->host->max_blk_count == 1)
901		return RESULT_UNSUP_HOST;
902
903	size = PAGE_SIZE * 2;
904	size = min(size, test->card->host->max_req_size);
905	size = min(size, test->card->host->max_seg_size);
906	size = min(size, test->card->host->max_blk_count * 512);
907
908	if (size < 1024)
909		return RESULT_UNSUP_HOST;
910
911	sg_init_one(&sg, test->buffer, size);
912
913	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
914	if (ret)
915		return ret;
916
917	return 0;
918}
919
920static int mmc_test_pow2_write(struct mmc_test_card *test)
921{
922	int ret, i;
923	struct scatterlist sg;
924
925	if (!test->card->csd.write_partial)
926		return RESULT_UNSUP_CARD;
927
928	for (i = 1; i < 512;i <<= 1) {
929		sg_init_one(&sg, test->buffer, i);
930		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
931		if (ret)
932			return ret;
933	}
934
935	return 0;
936}
937
938static int mmc_test_pow2_read(struct mmc_test_card *test)
939{
940	int ret, i;
941	struct scatterlist sg;
942
943	if (!test->card->csd.read_partial)
944		return RESULT_UNSUP_CARD;
945
946	for (i = 1; i < 512;i <<= 1) {
947		sg_init_one(&sg, test->buffer, i);
948		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
949		if (ret)
950			return ret;
951	}
952
953	return 0;
954}
955
956static int mmc_test_weird_write(struct mmc_test_card *test)
957{
958	int ret, i;
959	struct scatterlist sg;
960
961	if (!test->card->csd.write_partial)
962		return RESULT_UNSUP_CARD;
963
964	for (i = 3; i < 512;i += 7) {
965		sg_init_one(&sg, test->buffer, i);
966		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
967		if (ret)
968			return ret;
969	}
970
971	return 0;
972}
973
974static int mmc_test_weird_read(struct mmc_test_card *test)
975{
976	int ret, i;
977	struct scatterlist sg;
978
979	if (!test->card->csd.read_partial)
980		return RESULT_UNSUP_CARD;
981
982	for (i = 3; i < 512;i += 7) {
983		sg_init_one(&sg, test->buffer, i);
984		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
985		if (ret)
986			return ret;
987	}
988
989	return 0;
990}
991
992static int mmc_test_align_write(struct mmc_test_card *test)
993{
994	int ret, i;
995	struct scatterlist sg;
996
997	for (i = 1;i < 4;i++) {
998		sg_init_one(&sg, test->buffer + i, 512);
999		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1000		if (ret)
1001			return ret;
1002	}
1003
1004	return 0;
1005}
1006
1007static int mmc_test_align_read(struct mmc_test_card *test)
1008{
1009	int ret, i;
1010	struct scatterlist sg;
1011
1012	for (i = 1;i < 4;i++) {
1013		sg_init_one(&sg, test->buffer + i, 512);
1014		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1015		if (ret)
1016			return ret;
1017	}
1018
1019	return 0;
1020}
1021
1022static int mmc_test_align_multi_write(struct mmc_test_card *test)
1023{
1024	int ret, i;
1025	unsigned int size;
1026	struct scatterlist sg;
1027
1028	if (test->card->host->max_blk_count == 1)
1029		return RESULT_UNSUP_HOST;
1030
1031	size = PAGE_SIZE * 2;
1032	size = min(size, test->card->host->max_req_size);
1033	size = min(size, test->card->host->max_seg_size);
1034	size = min(size, test->card->host->max_blk_count * 512);
1035
1036	if (size < 1024)
1037		return RESULT_UNSUP_HOST;
1038
1039	for (i = 1;i < 4;i++) {
1040		sg_init_one(&sg, test->buffer + i, size);
1041		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1042		if (ret)
1043			return ret;
1044	}
1045
1046	return 0;
1047}
1048
1049static int mmc_test_align_multi_read(struct mmc_test_card *test)
1050{
1051	int ret, i;
1052	unsigned int size;
1053	struct scatterlist sg;
1054
1055	if (test->card->host->max_blk_count == 1)
1056		return RESULT_UNSUP_HOST;
1057
1058	size = PAGE_SIZE * 2;
1059	size = min(size, test->card->host->max_req_size);
1060	size = min(size, test->card->host->max_seg_size);
1061	size = min(size, test->card->host->max_blk_count * 512);
1062
1063	if (size < 1024)
1064		return RESULT_UNSUP_HOST;
1065
1066	for (i = 1;i < 4;i++) {
1067		sg_init_one(&sg, test->buffer + i, size);
1068		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1069		if (ret)
1070			return ret;
1071	}
1072
1073	return 0;
1074}
1075
/* Deliberately broken single-block write: must fail in a controlled way */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}
1090
/* Deliberately broken single-block read: must fail in a controlled way */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}
1105
1106static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1107{
1108	int ret;
1109
1110	if (test->card->host->max_blk_count == 1)
1111		return RESULT_UNSUP_HOST;
1112
1113	ret = mmc_test_set_blksize(test, 512);
1114	if (ret)
1115		return ret;
1116
1117	ret = mmc_test_broken_transfer(test, 2, 512, 1);
1118	if (ret)
1119		return ret;
1120
1121	return 0;
1122}
1123
1124static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1125{
1126	int ret;
1127
1128	if (test->card->host->max_blk_count == 1)
1129		return RESULT_UNSUP_HOST;
1130
1131	ret = mmc_test_set_blksize(test, 512);
1132	if (ret)
1133		return ret;
1134
1135	ret = mmc_test_broken_transfer(test, 2, 512, 0);
1136	if (ret)
1137		return ret;
1138
1139	return 0;
1140}
1141
1142#ifdef CONFIG_HIGHMEM
1143
1144static int mmc_test_write_high(struct mmc_test_card *test)
1145{
1146	int ret;
1147	struct scatterlist sg;
1148
1149	sg_init_table(&sg, 1);
1150	sg_set_page(&sg, test->highmem, 512, 0);
1151
1152	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1153	if (ret)
1154		return ret;
1155
1156	return 0;
1157}
1158
1159static int mmc_test_read_high(struct mmc_test_card *test)
1160{
1161	int ret;
1162	struct scatterlist sg;
1163
1164	sg_init_table(&sg, 1);
1165	sg_set_page(&sg, test->highmem, 512, 0);
1166
1167	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1168	if (ret)
1169		return ret;
1170
1171	return 0;
1172}
1173
1174static int mmc_test_multi_write_high(struct mmc_test_card *test)
1175{
1176	int ret;
1177	unsigned int size;
1178	struct scatterlist sg;
1179
1180	if (test->card->host->max_blk_count == 1)
1181		return RESULT_UNSUP_HOST;
1182
1183	size = PAGE_SIZE * 2;
1184	size = min(size, test->card->host->max_req_size);
1185	size = min(size, test->card->host->max_seg_size);
1186	size = min(size, test->card->host->max_blk_count * 512);
1187
1188	if (size < 1024)
1189		return RESULT_UNSUP_HOST;
1190
1191	sg_init_table(&sg, 1);
1192	sg_set_page(&sg, test->highmem, size, 0);
1193
1194	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1195	if (ret)
1196		return ret;
1197
1198	return 0;
1199}
1200
1201static int mmc_test_multi_read_high(struct mmc_test_card *test)
1202{
1203	int ret;
1204	unsigned int size;
1205	struct scatterlist sg;
1206
1207	if (test->card->host->max_blk_count == 1)
1208		return RESULT_UNSUP_HOST;
1209
1210	size = PAGE_SIZE * 2;
1211	size = min(size, test->card->host->max_req_size);
1212	size = min(size, test->card->host->max_seg_size);
1213	size = min(size, test->card->host->max_blk_count * 512);
1214
1215	if (size < 1024)
1216		return RESULT_UNSUP_HOST;
1217
1218	sg_init_table(&sg, 1);
1219	sg_set_page(&sg, test->highmem, size, 0);
1220
1221	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1222	if (ret)
1223		return ret;
1224
1225	return 0;
1226}
1227
1228#else
1229
/* Stub used when CONFIG_HIGHMEM is off: report a skip and succeed */
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}
1236
1237#endif /* CONFIG_HIGHMEM */
1238
1239/*
1240 * Map sz bytes so that it can be transferred.
1241 */
1242static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1243			     int max_scatter)
1244{
1245	struct mmc_test_area *t = &test->area;
1246	int err;
1247
1248	t->blocks = sz >> 9;
1249
1250	if (max_scatter) {
1251		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1252						  t->max_segs, t->max_seg_sz,
1253				       &t->sg_len);
1254	} else {
1255		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1256				      t->max_seg_sz, &t->sg_len);
1257	}
1258	if (err)
1259		printk(KERN_INFO "%s: Failed to map sg list\n",
1260		       mmc_hostname(test->card->host));
1261	return err;
1262}
1263
1264/*
1265 * Transfer bytes mapped by mmc_test_area_map().
1266 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	/* Transfer whatever mmc_test_area_map() last mapped into t->sg */
	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}
1275
1276/*
1277 * Map and transfer bytes.
1278 */
1279static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1280			    unsigned int dev_addr, int write, int max_scatter,
1281			    int timed)
1282{
1283	struct timespec ts1, ts2;
1284	int ret;
1285
1286	/*
1287	 * In the case of a maximally scattered transfer, the maximum transfer
1288	 * size is further limited by using PAGE_SIZE segments.
1289	 */
1290	if (max_scatter) {
1291		struct mmc_test_area *t = &test->area;
1292		unsigned long max_tfr;
1293
1294		if (t->max_seg_sz >= PAGE_SIZE)
1295			max_tfr = t->max_segs * PAGE_SIZE;
1296		else
1297			max_tfr = t->max_segs * t->max_seg_sz;
1298		if (sz > max_tfr)
1299			sz = max_tfr;
1300	}
1301
1302	ret = mmc_test_area_map(test, sz, max_scatter);
1303	if (ret)
1304		return ret;
1305
1306	if (timed)
1307		getnstimeofday(&ts1);
1308
1309	ret = mmc_test_area_transfer(test, dev_addr, write);
1310	if (ret)
1311		return ret;
1312
1313	if (timed)
1314		getnstimeofday(&ts2);
1315
1316	if (timed)
1317		mmc_test_print_rate(test, sz, &ts1, &ts2);
1318
1319	return 0;
1320}
1321
1322/*
1323 * Write the test area entirely.
1324 */
1325static int mmc_test_area_fill(struct mmc_test_card *test)
1326{
1327	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1328				1, 0, 0);
1329}
1330
1331/*
1332 * Erase the test area entirely.
1333 */
1334static int mmc_test_area_erase(struct mmc_test_card *test)
1335{
1336	struct mmc_test_area *t = &test->area;
1337
1338	if (!mmc_can_erase(test->card))
1339		return 0;
1340
1341	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
1342			 MMC_ERASE_ARG);
1343}
1344
1345/*
1346 * Cleanup struct mmc_test_area.
1347 */
1348static int mmc_test_area_cleanup(struct mmc_test_card *test)
1349{
1350	struct mmc_test_area *t = &test->area;
1351
1352	kfree(t->sg);
1353	mmc_test_free_mem(t->mem);
1354
1355	return 0;
1356}
1357
1358/*
1359 * Initialize an area for testing large transfers.  The size of the area is the
1360 * preferred erase size which is a good size for optimal transfer speed.  Note
1361 * that is typically 4MiB for modern cards.  The test area is set to the middle
1362 * of the card because cards may have different charateristics at the front
1363 * (for FAT file system optimization).  Optionally, the area is erased (if the
1364 * card supports it) which may improve write performance.  Optionally, the area
1365 * is filled with data for subsequent read tests.
1366 */
1367static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1368{
1369	struct mmc_test_area *t = &test->area;
1370	unsigned long min_sz = 64 * 1024;
1371	int ret;
1372
1373	ret = mmc_test_set_blksize(test, 512);
1374	if (ret)
1375		return ret;
1376
1377	if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
1378		t->max_sz = TEST_AREA_MAX_SIZE;
1379	else
1380		t->max_sz = (unsigned long)test->card->pref_erase << 9;
1381
1382	t->max_segs = test->card->host->max_segs;
1383	t->max_seg_sz = test->card->host->max_seg_size;
1384
1385	t->max_tfr = t->max_sz;
1386	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1387		t->max_tfr = test->card->host->max_blk_count << 9;
1388	if (t->max_tfr > test->card->host->max_req_size)
1389		t->max_tfr = test->card->host->max_req_size;
1390	if (t->max_tfr / t->max_seg_sz > t->max_segs)
1391		t->max_tfr = t->max_segs * t->max_seg_sz;
1392
1393	/*
1394	 * Try to allocate enough memory for the whole area.  Less is OK
1395	 * because the same memory can be mapped into the scatterlist more than
1396	 * once.  Also, take into account the limits imposed on scatterlist
1397	 * segments by the host driver.
1398	 */
1399	t->mem = mmc_test_alloc_mem(min_sz, t->max_sz, t->max_segs,
1400				    t->max_seg_sz);
1401	if (!t->mem)
1402		return -ENOMEM;
1403
1404	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1405	if (!t->sg) {
1406		ret = -ENOMEM;
1407		goto out_free;
1408	}
1409
1410	t->dev_addr = mmc_test_capacity(test->card) / 2;
1411	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1412
1413	if (erase) {
1414		ret = mmc_test_area_erase(test);
1415		if (ret)
1416			goto out_free;
1417	}
1418
1419	if (fill) {
1420		ret = mmc_test_area_fill(test);
1421		if (ret)
1422			goto out_free;
1423	}
1424
1425	return 0;
1426
1427out_free:
1428	mmc_test_area_cleanup(test);
1429	return ret;
1430}
1431
1432/*
1433 * Prepare for large transfers.  Do not erase the test area.
1434 */
1435static int mmc_test_area_prepare(struct mmc_test_card *test)
1436{
1437	return mmc_test_area_init(test, 0, 0);
1438}
1439
1440/*
1441 * Prepare for large transfers.  Do erase the test area.
1442 */
1443static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1444{
1445	return mmc_test_area_init(test, 1, 0);
1446}
1447
1448/*
1449 * Prepare for large transfers.  Erase and fill the test area.
1450 */
1451static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1452{
1453	return mmc_test_area_init(test, 1, 1);
1454}
1455
1456/*
1457 * Test best-case performance.  Best-case performance is expected from
1458 * a single large transfer.
1459 *
1460 * An additional option (max_scatter) allows the measurement of the same
1461 * transfer but with no contiguous pages in the scatter list.  This tests
1462 * the efficiency of DMA to handle scattered pages.
1463 */
1464static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1465				     int max_scatter)
1466{
1467	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1468				write, max_scatter, 1);
1469}
1470
1471/*
1472 * Best-case read performance.
1473 */
1474static int mmc_test_best_read_performance(struct mmc_test_card *test)
1475{
1476	return mmc_test_best_performance(test, 0, 0);
1477}
1478
1479/*
1480 * Best-case write performance.
1481 */
1482static int mmc_test_best_write_performance(struct mmc_test_card *test)
1483{
1484	return mmc_test_best_performance(test, 1, 0);
1485}
1486
1487/*
1488 * Best-case read performance into scattered pages.
1489 */
1490static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1491{
1492	return mmc_test_best_performance(test, 0, 1);
1493}
1494
1495/*
1496 * Best-case write performance from scattered pages.
1497 */
1498static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1499{
1500	return mmc_test_best_performance(test, 1, 1);
1501}
1502
1503/*
1504 * Single read performance by transfer size.
1505 */
1506static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1507{
1508	unsigned long sz;
1509	unsigned int dev_addr;
1510	int ret;
1511
1512	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1513		dev_addr = test->area.dev_addr + (sz >> 9);
1514		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1515		if (ret)
1516			return ret;
1517	}
1518	sz = test->area.max_tfr;
1519	dev_addr = test->area.dev_addr;
1520	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1521}
1522
1523/*
1524 * Single write performance by transfer size.
1525 */
1526static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1527{
1528	unsigned long sz;
1529	unsigned int dev_addr;
1530	int ret;
1531
1532	ret = mmc_test_area_erase(test);
1533	if (ret)
1534		return ret;
1535	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1536		dev_addr = test->area.dev_addr + (sz >> 9);
1537		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1538		if (ret)
1539			return ret;
1540	}
1541	ret = mmc_test_area_erase(test);
1542	if (ret)
1543		return ret;
1544	sz = test->area.max_tfr;
1545	dev_addr = test->area.dev_addr;
1546	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1547}
1548
1549/*
1550 * Single trim performance by transfer size.
1551 */
1552static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1553{
1554	unsigned long sz;
1555	unsigned int dev_addr;
1556	struct timespec ts1, ts2;
1557	int ret;
1558
1559	if (!mmc_can_trim(test->card))
1560		return RESULT_UNSUP_CARD;
1561
1562	if (!mmc_can_erase(test->card))
1563		return RESULT_UNSUP_HOST;
1564
1565	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
1566		dev_addr = test->area.dev_addr + (sz >> 9);
1567		getnstimeofday(&ts1);
1568		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1569		if (ret)
1570			return ret;
1571		getnstimeofday(&ts2);
1572		mmc_test_print_rate(test, sz, &ts1, &ts2);
1573	}
1574	dev_addr = test->area.dev_addr;
1575	getnstimeofday(&ts1);
1576	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1577	if (ret)
1578		return ret;
1579	getnstimeofday(&ts2);
1580	mmc_test_print_rate(test, sz, &ts1, &ts2);
1581	return 0;
1582}
1583
1584static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1585{
1586	unsigned int dev_addr, i, cnt;
1587	struct timespec ts1, ts2;
1588	int ret;
1589
1590	cnt = test->area.max_sz / sz;
1591	dev_addr = test->area.dev_addr;
1592	getnstimeofday(&ts1);
1593	for (i = 0; i < cnt; i++) {
1594		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1595		if (ret)
1596			return ret;
1597		dev_addr += (sz >> 9);
1598	}
1599	getnstimeofday(&ts2);
1600	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1601	return 0;
1602}
1603
1604/*
1605 * Consecutive read performance by transfer size.
1606 */
1607static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1608{
1609	unsigned long sz;
1610	int ret;
1611
1612	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1613		ret = mmc_test_seq_read_perf(test, sz);
1614		if (ret)
1615			return ret;
1616	}
1617	sz = test->area.max_tfr;
1618	return mmc_test_seq_read_perf(test, sz);
1619}
1620
1621static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1622{
1623	unsigned int dev_addr, i, cnt;
1624	struct timespec ts1, ts2;
1625	int ret;
1626
1627	ret = mmc_test_area_erase(test);
1628	if (ret)
1629		return ret;
1630	cnt = test->area.max_sz / sz;
1631	dev_addr = test->area.dev_addr;
1632	getnstimeofday(&ts1);
1633	for (i = 0; i < cnt; i++) {
1634		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1635		if (ret)
1636			return ret;
1637		dev_addr += (sz >> 9);
1638	}
1639	getnstimeofday(&ts2);
1640	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1641	return 0;
1642}
1643
1644/*
1645 * Consecutive write performance by transfer size.
1646 */
1647static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1648{
1649	unsigned long sz;
1650	int ret;
1651
1652	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1653		ret = mmc_test_seq_write_perf(test, sz);
1654		if (ret)
1655			return ret;
1656	}
1657	sz = test->area.max_tfr;
1658	return mmc_test_seq_write_perf(test, sz);
1659}
1660
1661/*
1662 * Consecutive trim performance by transfer size.
1663 */
1664static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1665{
1666	unsigned long sz;
1667	unsigned int dev_addr, i, cnt;
1668	struct timespec ts1, ts2;
1669	int ret;
1670
1671	if (!mmc_can_trim(test->card))
1672		return RESULT_UNSUP_CARD;
1673
1674	if (!mmc_can_erase(test->card))
1675		return RESULT_UNSUP_HOST;
1676
1677	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
1678		ret = mmc_test_area_erase(test);
1679		if (ret)
1680			return ret;
1681		ret = mmc_test_area_fill(test);
1682		if (ret)
1683			return ret;
1684		cnt = test->area.max_sz / sz;
1685		dev_addr = test->area.dev_addr;
1686		getnstimeofday(&ts1);
1687		for (i = 0; i < cnt; i++) {
1688			ret = mmc_erase(test->card, dev_addr, sz >> 9,
1689					MMC_TRIM_ARG);
1690			if (ret)
1691				return ret;
1692			dev_addr += (sz >> 9);
1693		}
1694		getnstimeofday(&ts2);
1695		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1696	}
1697	return 0;
1698}
1699
/*
 * Table of test cases.  A test is selected by writing its 1-based index in
 * this array to the "test" sysfs attribute (see mmc_test_run()), so the
 * order of entries is user-visible - append new tests at the end.
 */
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

	/*
	 * The highmem tests keep their slots (and therefore their test
	 * numbers) even without CONFIG_HIGHMEM, via the no-op stub.
	 */
#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	/* Performance tests below use the test area (mmc_test_area_init()) */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};
1940
/* Serializes test execution; taken by the sysfs show/store handlers below. */
static DEFINE_MUTEX(mmc_test_lock);
1942
/*
 * Run one test case (1-based index into mmc_test_cases[]) or, when testcase
 * is 0, all of them.  The host is claimed for the duration and results are
 * reported only via printk.  Caller (mmc_test_store()) holds mmc_test_lock.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		/* A failed prepare skips the test but not later cases */
		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			/* Any other value is an errno-style error code */
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Cleanup failure is only a warning; the run result stands */
		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
2012
/*
 * Reading the sysfs "test" attribute produces no output; taking and
 * releasing the mutex simply makes the read block until any currently
 * running test completes.
 */
static ssize_t mmc_test_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	mutex_lock(&mmc_test_lock);
	mutex_unlock(&mmc_test_lock);

	return 0;
}
2021
2022static ssize_t mmc_test_store(struct device *dev,
2023	struct device_attribute *attr, const char *buf, size_t count)
2024{
2025	struct mmc_card *card = mmc_dev_to_card(dev);
2026	struct mmc_test_card *test;
2027	long testcase;
2028
2029	if (strict_strtol(buf, 10, &testcase))
2030		return -EINVAL;
2031
2032	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2033	if (!test)
2034		return -ENOMEM;
2035
2036	test->card = card;
2037
2038	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2039#ifdef CONFIG_HIGHMEM
2040	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2041#endif
2042
2043#ifdef CONFIG_HIGHMEM
2044	if (test->buffer && test->highmem) {
2045#else
2046	if (test->buffer) {
2047#endif
2048		mutex_lock(&mmc_test_lock);
2049		mmc_test_run(test, testcase);
2050		mutex_unlock(&mmc_test_lock);
2051	}
2052
2053#ifdef CONFIG_HIGHMEM
2054	__free_pages(test->highmem, BUFFER_ORDER);
2055#endif
2056	kfree(test->buffer);
2057	kfree(test);
2058
2059	return count;
2060}
2061
/* sysfs "test" attribute: write a test number to run it, read to wait */
static DEVICE_ATTR(test, S_IWUSR | S_IRUGO, mmc_test_show, mmc_test_store);
2063
2064static int mmc_test_probe(struct mmc_card *card)
2065{
2066	int ret;
2067
2068	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2069		return -ENODEV;
2070
2071	ret = device_create_file(&card->dev, &dev_attr_test);
2072	if (ret)
2073		return ret;
2074
2075	dev_info(&card->dev, "Card claimed for testing.\n");
2076
2077	return 0;
2078}
2079
/* Remove the sysfs "test" attribute when the card goes away. */
static void mmc_test_remove(struct mmc_card *card)
{
	device_remove_file(&card->dev, &dev_attr_test);
}
2084
/* MMC bus driver; binds to any card accepted by mmc_test_probe(). */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};
2092
/* Module init: register the test driver on the MMC bus. */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
2097
/* Module exit: unregister the test driver. */
static void __exit mmc_test_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
}
2102
2103module_init(mmc_test_init);
2104module_exit(mmc_test_exit);
2105
2106MODULE_LICENSE("GPL");
2107MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
2108MODULE_AUTHOR("Pierre Ossman");
2109