/*
 *  linux/drivers/mmc/card/mmc_test.c
 *  (revision 0532ff6358ae00615cfba7212f5075356b437c66)
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
11
12#include <linux/mmc/core.h>
13#include <linux/mmc/card.h>
14#include <linux/mmc/host.h>
15#include <linux/mmc/mmc.h>
16#include <linux/slab.h>
17
18#include <linux/scatterlist.h>
19#include <linux/swap.h>		/* For nr_free_buffer_pages() */
20#include <linux/list.h>
21
22#include <linux/debugfs.h>
23#include <linux/uaccess.h>
24#include <linux/seq_file.h>
25
/* Testcase result codes reported back to the user. */
#define RESULT_OK		0	/* test passed */
#define RESULT_FAIL		1	/* test ran but produced wrong data/status */
#define RESULT_UNSUP_HOST	2	/* host controller cannot run this test */
#define RESULT_UNSUP_CARD	3	/* card cannot run this test */

/* Scratch/verify buffers are BUFFER_SIZE bytes (4 pages). */
#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
39
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated (2^order pages)
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};
49
/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations, one entry per alloc_pages() call
 * @cnt: number of valid entries in @arr
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
59
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory backing @sg
 * @sg: scatterlist reused across performance-test transfers
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
83
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list (chained on mmc_test_general_result.tr_lst)
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate (bytes per second)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
};
99
/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run (RESULT_* or negative errno)
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};
115
/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};
127
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer (on-stack-of-struct, BUFFER_SIZE bytes)
 * @buffer: transfer buffer (heap-allocated, used for verification reads)
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase, or NULL if results are
 *      not being collected
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};
148
149/*******************************************************************/
150/*  General helper functions                                       */
151/*******************************************************************/
152
/*
 * Configure correct block size in card
 *
 * Thin wrapper around the core's mmc_set_blocklen(); returns 0 on
 * success or a negative error code.
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}
160
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 *
 * Picks the single/multi block read or write opcode from @blocks and
 * @write, converts @dev_addr to a byte address for byte-addressed
 * cards, drops the stop command for single-block transfers, and sets
 * the data timeout from the card's characteristics.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	/* Byte-addressed cards take a byte offset, not a sector number. */
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
200
201static int mmc_test_busy(struct mmc_command *cmd)
202{
203	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
204		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
205}
206
/*
 * Wait for the card to finish the busy state
 *
 * Polls SEND_STATUS (CMD13) until mmc_test_busy() reports the card
 * ready.  If the host advertises MMC_CAP_WAIT_WHILE_BUSY but the card
 * was still busy on the first poll, a warning is printed once.
 * Returns 0, or the error from mmc_wait_for_cmd().
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd;

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		/* Warn only once, on the first busy poll. */
		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				printk(KERN_INFO "%s: Warning: Host did not "
					"wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}
238
239/*
240 * Transfer a single sector of kernel addressable data
241 */
242static int mmc_test_buffer_transfer(struct mmc_test_card *test,
243	u8 *buffer, unsigned addr, unsigned blksz, int write)
244{
245	int ret;
246
247	struct mmc_request mrq;
248	struct mmc_command cmd;
249	struct mmc_command stop;
250	struct mmc_data data;
251
252	struct scatterlist sg;
253
254	memset(&mrq, 0, sizeof(struct mmc_request));
255	memset(&cmd, 0, sizeof(struct mmc_command));
256	memset(&data, 0, sizeof(struct mmc_data));
257	memset(&stop, 0, sizeof(struct mmc_command));
258
259	mrq.cmd = &cmd;
260	mrq.data = &data;
261	mrq.stop = &stop;
262
263	sg_init_one(&sg, buffer, blksz);
264
265	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
266
267	mmc_wait_for_req(test->card->host, &mrq);
268
269	if (cmd.error)
270		return cmd.error;
271	if (data.error)
272		return data.error;
273
274	ret = mmc_test_wait_busy(test);
275	if (ret)
276		return ret;
277
278	return 0;
279}
280
281static void mmc_test_free_mem(struct mmc_test_mem *mem)
282{
283	if (!mem)
284		return;
285	while (mem->cnt--)
286		__free_pages(mem->arr[mem->cnt].page,
287			     mem->arr[mem->cnt].order);
288	kfree(mem->arr);
289	kfree(mem);
290}
291
/*
 * Allocate a lot of memory, preferrably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 *
 * Returns a new mmc_test_mem (caller frees with mmc_test_free_mem()),
 * or NULL if even min_sz could not be satisfied.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	/* Cap at 1/16th of free lowmem so tests don't exhaust the system. */
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		/* Try the largest useful order first, halving on failure. */
		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			/* Out of memory: acceptable only past min_sz. */
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		/* Segment budget exhausted: same min_sz rule applies. */
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
368
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 *
 * Fills @sglist with up to @max_segs entries covering @sz bytes taken
 * from @mem, each entry capped at @max_seg_sz.  With @repeat, @mem is
 * cycled as many times as needed.  *@sg_len receives the number of
 * entries used.  Returns 0, or -EINVAL if @sz could not be covered.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			/* sg_next() returns NULL once max_segs is used up. */
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
414
/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 *
 * Walks the allocations and their pages backwards, skipping any page
 * physically adjacent to the previously mapped one, so consecutive sg
 * entries are never contiguous (worst case for the host's DMA setup).
 * NOTE(review): unlike mmc_test_map_sg() this does not fail when
 * max_segs is exceeded beyond sg_next() returning NULL — callers are
 * presumed to size @sz accordingly.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			/* Skip pages contiguous with the last one mapped. */
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		/* Wrap around and reuse the same memory if sz remains. */
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
466
467/*
468 * Calculate transfer rate in bytes per second.
469 */
470static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
471{
472	uint64_t ns;
473
474	ns = ts->tv_sec;
475	ns *= 1000000000;
476	ns += ts->tv_nsec;
477
478	bytes *= 1000000000;
479
480	while (ns > UINT_MAX) {
481		bytes >>= 1;
482		ns >>= 1;
483	}
484
485	if (!ns)
486		return 0;
487
488	do_div(bytes, (uint32_t)ns);
489
490	return bytes;
491}
492
493/*
494 * Save transfer results for future usage
495 */
496static void mmc_test_save_transfer_result(struct mmc_test_card *test,
497	unsigned int count, unsigned int sectors, struct timespec ts,
498	unsigned int rate)
499{
500	struct mmc_test_transfer_result *tr;
501
502	if (!test->gr)
503		return;
504
505	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
506	if (!tr)
507		return;
508
509	tr->count = count;
510	tr->sectors = sectors;
511	tr->ts = ts;
512	tr->rate = rate;
513
514	list_add_tail(&tr->link, &test->gr->tr_lst);
515}
516
/*
 * Print the transfer rate.
 *
 * Also records the measurement via mmc_test_save_transfer_result().
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);

	/* sectors >> 1 is whole KiB; an odd sector adds the ".5" suffix. */
	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
}
538
/*
 * Print the average transfer rate.
 *
 * Like mmc_test_print_rate() but for @count repeated transfers of
 * @bytes each; the rate is computed over the total amount moved.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);

	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate);
}
563
564/*
565 * Return the card size in sectors.
566 */
567static unsigned int mmc_test_capacity(struct mmc_card *card)
568{
569	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
570		return card->ext_csd.sectors;
571	else
572		return card->csd.capacity << (card->csd.read_blkbits - 9);
573}
574
575/*******************************************************************/
576/*  Test preparation and cleanup                                   */
577/*******************************************************************/
578
579/*
580 * Fill the first couple of sectors of the card with known data
581 * so that bad reads/writes can be detected
582 */
583static int __mmc_test_prepare(struct mmc_test_card *test, int write)
584{
585	int ret, i;
586
587	ret = mmc_test_set_blksize(test, 512);
588	if (ret)
589		return ret;
590
591	if (write)
592		memset(test->buffer, 0xDF, 512);
593	else {
594		for (i = 0;i < 512;i++)
595			test->buffer[i] = i;
596	}
597
598	for (i = 0;i < BUFFER_SIZE / 512;i++) {
599		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
600		if (ret)
601			return ret;
602	}
603
604	return 0;
605}
606
/* Prepare for a write test: fill the test sectors with 0xDF. */
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}
611
/* Prepare for a read test: write a 0..255 byte ramp to the test sectors. */
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
616
617static int mmc_test_cleanup(struct mmc_test_card *test)
618{
619	int ret, i;
620
621	ret = mmc_test_set_blksize(test, 512);
622	if (ret)
623		return ret;
624
625	memset(test->buffer, 0, 512);
626
627	for (i = 0;i < BUFFER_SIZE / 512;i++) {
628		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
629		if (ret)
630			return ret;
631	}
632
633	return 0;
634}
635
636/*******************************************************************/
637/*  Test execution helpers                                         */
638/*******************************************************************/
639
/*
 * Modifies the mmc_request to perform the "short transfer" tests
 *
 * For multi-block data, the command is downgraded to a single-block
 * opcode (and the stop command dropped) so the card stops early.  For
 * single-block data, the command becomes SEND_STATUS, which carries no
 * data at all.
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
657
658/*
659 * Checks that a normal transfer didn't have any errors
660 */
661static int mmc_test_check_result(struct mmc_test_card *test,
662	struct mmc_request *mrq)
663{
664	int ret;
665
666	BUG_ON(!mrq || !mrq->cmd || !mrq->data);
667
668	ret = 0;
669
670	if (!ret && mrq->cmd->error)
671		ret = mrq->cmd->error;
672	if (!ret && mrq->data->error)
673		ret = mrq->data->error;
674	if (!ret && mrq->stop && mrq->stop->error)
675		ret = mrq->stop->error;
676	if (!ret && mrq->data->bytes_xfered !=
677		mrq->data->blocks * mrq->data->blksz)
678		ret = RESULT_FAIL;
679
680	if (ret == -EINVAL)
681		ret = RESULT_UNSUP_HOST;
682
683	return ret;
684}
685
/*
 * Checks that a "short transfer" behaved as expected
 *
 * The deliberately broken request must fail with a data timeout
 * (-ETIMEDOUT) and transfer at most one block in the multi-block case,
 * or nothing at all in the single-block case.  Anything else is a
 * failure; -EINVAL again maps to "host unsupported".
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	/* No data error at all means the "broken" transfer succeeded. */
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	/* A data error other than a timeout is a genuine error. */
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
719
720/*
721 * Tests a basic transfer with certain parameters
722 */
723static int mmc_test_simple_transfer(struct mmc_test_card *test,
724	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
725	unsigned blocks, unsigned blksz, int write)
726{
727	struct mmc_request mrq;
728	struct mmc_command cmd;
729	struct mmc_command stop;
730	struct mmc_data data;
731
732	memset(&mrq, 0, sizeof(struct mmc_request));
733	memset(&cmd, 0, sizeof(struct mmc_command));
734	memset(&data, 0, sizeof(struct mmc_data));
735	memset(&stop, 0, sizeof(struct mmc_command));
736
737	mrq.cmd = &cmd;
738	mrq.data = &data;
739	mrq.stop = &stop;
740
741	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
742		blocks, blksz, write);
743
744	mmc_wait_for_req(test->card->host, &mrq);
745
746	mmc_test_wait_busy(test);
747
748	return mmc_test_check_result(test, &mrq);
749}
750
751/*
752 * Tests a transfer where the card will fail completely or partly
753 */
754static int mmc_test_broken_transfer(struct mmc_test_card *test,
755	unsigned blocks, unsigned blksz, int write)
756{
757	struct mmc_request mrq;
758	struct mmc_command cmd;
759	struct mmc_command stop;
760	struct mmc_data data;
761
762	struct scatterlist sg;
763
764	memset(&mrq, 0, sizeof(struct mmc_request));
765	memset(&cmd, 0, sizeof(struct mmc_command));
766	memset(&data, 0, sizeof(struct mmc_data));
767	memset(&stop, 0, sizeof(struct mmc_command));
768
769	mrq.cmd = &cmd;
770	mrq.data = &data;
771	mrq.stop = &stop;
772
773	sg_init_one(&sg, test->buffer, blocks * blksz);
774
775	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
776	mmc_test_prepare_broken_mrq(test, &mrq, write);
777
778	mmc_wait_for_req(test->card->host, &mrq);
779
780	mmc_test_wait_busy(test);
781
782	return mmc_test_check_broken_result(test, &mrq);
783}
784
/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 *
 * For writes, the scratch pattern is written, then read back through
 * test->buffer one sector at a time and compared; bytes beyond the
 * written range must still hold the 0xDF fill from the prepare step.
 * For reads, the data lands in the sg list and is compared against the
 * byte-ramp pattern that the prepare step wrote to the card.
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0;i < blocks * blksz;i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	/* sg_copy_* use kmap_atomic internally; IRQs must be off. */
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		/* On an exact sector boundary, read one extra sector so
		 * the 0xDF guard area beyond the write gets checked too. */
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0;i < sectors;i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		/* Written range must match the ramp pattern... */
		for (i = 0;i < blocks * blksz;i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		/* ...and everything beyond it must still be 0xDF. */
		for (;i < sectors * 512;i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0;i < blocks * blksz;i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
861
862/*******************************************************************/
863/*  Tests                                                          */
864/*******************************************************************/
865
/**
 * struct mmc_test_case - one entry in the testcase table.
 * @name: human-readable test name
 * @prepare: setup hook run before @run (NOTE(review): presumably may be
 *           NULL — confirm against the test runner, which is outside
 *           this chunk)
 * @run: the test itself; returns RESULT_* or a negative errno
 * @cleanup: teardown hook run after @run
 */
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
873
874static int mmc_test_basic_write(struct mmc_test_card *test)
875{
876	int ret;
877	struct scatterlist sg;
878
879	ret = mmc_test_set_blksize(test, 512);
880	if (ret)
881		return ret;
882
883	sg_init_one(&sg, test->buffer, 512);
884
885	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
886	if (ret)
887		return ret;
888
889	return 0;
890}
891
892static int mmc_test_basic_read(struct mmc_test_card *test)
893{
894	int ret;
895	struct scatterlist sg;
896
897	ret = mmc_test_set_blksize(test, 512);
898	if (ret)
899		return ret;
900
901	sg_init_one(&sg, test->buffer, 512);
902
903	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
904	if (ret)
905		return ret;
906
907	return 0;
908}
909
910static int mmc_test_verify_write(struct mmc_test_card *test)
911{
912	int ret;
913	struct scatterlist sg;
914
915	sg_init_one(&sg, test->buffer, 512);
916
917	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
918	if (ret)
919		return ret;
920
921	return 0;
922}
923
924static int mmc_test_verify_read(struct mmc_test_card *test)
925{
926	int ret;
927	struct scatterlist sg;
928
929	sg_init_one(&sg, test->buffer, 512);
930
931	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
932	if (ret)
933		return ret;
934
935	return 0;
936}
937
938static int mmc_test_multi_write(struct mmc_test_card *test)
939{
940	int ret;
941	unsigned int size;
942	struct scatterlist sg;
943
944	if (test->card->host->max_blk_count == 1)
945		return RESULT_UNSUP_HOST;
946
947	size = PAGE_SIZE * 2;
948	size = min(size, test->card->host->max_req_size);
949	size = min(size, test->card->host->max_seg_size);
950	size = min(size, test->card->host->max_blk_count * 512);
951
952	if (size < 1024)
953		return RESULT_UNSUP_HOST;
954
955	sg_init_one(&sg, test->buffer, size);
956
957	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
958	if (ret)
959		return ret;
960
961	return 0;
962}
963
964static int mmc_test_multi_read(struct mmc_test_card *test)
965{
966	int ret;
967	unsigned int size;
968	struct scatterlist sg;
969
970	if (test->card->host->max_blk_count == 1)
971		return RESULT_UNSUP_HOST;
972
973	size = PAGE_SIZE * 2;
974	size = min(size, test->card->host->max_req_size);
975	size = min(size, test->card->host->max_seg_size);
976	size = min(size, test->card->host->max_blk_count * 512);
977
978	if (size < 1024)
979		return RESULT_UNSUP_HOST;
980
981	sg_init_one(&sg, test->buffer, size);
982
983	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
984	if (ret)
985		return ret;
986
987	return 0;
988}
989
990static int mmc_test_pow2_write(struct mmc_test_card *test)
991{
992	int ret, i;
993	struct scatterlist sg;
994
995	if (!test->card->csd.write_partial)
996		return RESULT_UNSUP_CARD;
997
998	for (i = 1; i < 512;i <<= 1) {
999		sg_init_one(&sg, test->buffer, i);
1000		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1001		if (ret)
1002			return ret;
1003	}
1004
1005	return 0;
1006}
1007
1008static int mmc_test_pow2_read(struct mmc_test_card *test)
1009{
1010	int ret, i;
1011	struct scatterlist sg;
1012
1013	if (!test->card->csd.read_partial)
1014		return RESULT_UNSUP_CARD;
1015
1016	for (i = 1; i < 512;i <<= 1) {
1017		sg_init_one(&sg, test->buffer, i);
1018		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1019		if (ret)
1020			return ret;
1021	}
1022
1023	return 0;
1024}
1025
1026static int mmc_test_weird_write(struct mmc_test_card *test)
1027{
1028	int ret, i;
1029	struct scatterlist sg;
1030
1031	if (!test->card->csd.write_partial)
1032		return RESULT_UNSUP_CARD;
1033
1034	for (i = 3; i < 512;i += 7) {
1035		sg_init_one(&sg, test->buffer, i);
1036		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1037		if (ret)
1038			return ret;
1039	}
1040
1041	return 0;
1042}
1043
1044static int mmc_test_weird_read(struct mmc_test_card *test)
1045{
1046	int ret, i;
1047	struct scatterlist sg;
1048
1049	if (!test->card->csd.read_partial)
1050		return RESULT_UNSUP_CARD;
1051
1052	for (i = 3; i < 512;i += 7) {
1053		sg_init_one(&sg, test->buffer, i);
1054		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1055		if (ret)
1056			return ret;
1057	}
1058
1059	return 0;
1060}
1061
1062static int mmc_test_align_write(struct mmc_test_card *test)
1063{
1064	int ret, i;
1065	struct scatterlist sg;
1066
1067	for (i = 1;i < 4;i++) {
1068		sg_init_one(&sg, test->buffer + i, 512);
1069		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1070		if (ret)
1071			return ret;
1072	}
1073
1074	return 0;
1075}
1076
1077static int mmc_test_align_read(struct mmc_test_card *test)
1078{
1079	int ret, i;
1080	struct scatterlist sg;
1081
1082	for (i = 1;i < 4;i++) {
1083		sg_init_one(&sg, test->buffer + i, 512);
1084		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1085		if (ret)
1086			return ret;
1087	}
1088
1089	return 0;
1090}
1091
1092static int mmc_test_align_multi_write(struct mmc_test_card *test)
1093{
1094	int ret, i;
1095	unsigned int size;
1096	struct scatterlist sg;
1097
1098	if (test->card->host->max_blk_count == 1)
1099		return RESULT_UNSUP_HOST;
1100
1101	size = PAGE_SIZE * 2;
1102	size = min(size, test->card->host->max_req_size);
1103	size = min(size, test->card->host->max_seg_size);
1104	size = min(size, test->card->host->max_blk_count * 512);
1105
1106	if (size < 1024)
1107		return RESULT_UNSUP_HOST;
1108
1109	for (i = 1;i < 4;i++) {
1110		sg_init_one(&sg, test->buffer + i, size);
1111		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1112		if (ret)
1113			return ret;
1114	}
1115
1116	return 0;
1117}
1118
1119static int mmc_test_align_multi_read(struct mmc_test_card *test)
1120{
1121	int ret, i;
1122	unsigned int size;
1123	struct scatterlist sg;
1124
1125	if (test->card->host->max_blk_count == 1)
1126		return RESULT_UNSUP_HOST;
1127
1128	size = PAGE_SIZE * 2;
1129	size = min(size, test->card->host->max_req_size);
1130	size = min(size, test->card->host->max_seg_size);
1131	size = min(size, test->card->host->max_blk_count * 512);
1132
1133	if (size < 1024)
1134		return RESULT_UNSUP_HOST;
1135
1136	for (i = 1;i < 4;i++) {
1137		sg_init_one(&sg, test->buffer + i, size);
1138		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1139		if (ret)
1140			return ret;
1141	}
1142
1143	return 0;
1144}
1145
/*
 * Deliberately broken single-block write; expects a data timeout.
 */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret = mmc_test_set_blksize(test, 512);

	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}
1160
/*
 * Deliberately broken single-block read; expects a data timeout.
 */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret = mmc_test_set_blksize(test, 512);

	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}
1175
1176static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1177{
1178	int ret;
1179
1180	if (test->card->host->max_blk_count == 1)
1181		return RESULT_UNSUP_HOST;
1182
1183	ret = mmc_test_set_blksize(test, 512);
1184	if (ret)
1185		return ret;
1186
1187	ret = mmc_test_broken_transfer(test, 2, 512, 1);
1188	if (ret)
1189		return ret;
1190
1191	return 0;
1192}
1193
1194static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1195{
1196	int ret;
1197
1198	if (test->card->host->max_blk_count == 1)
1199		return RESULT_UNSUP_HOST;
1200
1201	ret = mmc_test_set_blksize(test, 512);
1202	if (ret)
1203		return ret;
1204
1205	ret = mmc_test_broken_transfer(test, 2, 512, 0);
1206	if (ret)
1207		return ret;
1208
1209	return 0;
1210}
1211
1212#ifdef CONFIG_HIGHMEM
1213
1214static int mmc_test_write_high(struct mmc_test_card *test)
1215{
1216	int ret;
1217	struct scatterlist sg;
1218
1219	sg_init_table(&sg, 1);
1220	sg_set_page(&sg, test->highmem, 512, 0);
1221
1222	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1223	if (ret)
1224		return ret;
1225
1226	return 0;
1227}
1228
1229static int mmc_test_read_high(struct mmc_test_card *test)
1230{
1231	int ret;
1232	struct scatterlist sg;
1233
1234	sg_init_table(&sg, 1);
1235	sg_set_page(&sg, test->highmem, 512, 0);
1236
1237	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1238	if (ret)
1239		return ret;
1240
1241	return 0;
1242}
1243
1244static int mmc_test_multi_write_high(struct mmc_test_card *test)
1245{
1246	int ret;
1247	unsigned int size;
1248	struct scatterlist sg;
1249
1250	if (test->card->host->max_blk_count == 1)
1251		return RESULT_UNSUP_HOST;
1252
1253	size = PAGE_SIZE * 2;
1254	size = min(size, test->card->host->max_req_size);
1255	size = min(size, test->card->host->max_seg_size);
1256	size = min(size, test->card->host->max_blk_count * 512);
1257
1258	if (size < 1024)
1259		return RESULT_UNSUP_HOST;
1260
1261	sg_init_table(&sg, 1);
1262	sg_set_page(&sg, test->highmem, size, 0);
1263
1264	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1265	if (ret)
1266		return ret;
1267
1268	return 0;
1269}
1270
1271static int mmc_test_multi_read_high(struct mmc_test_card *test)
1272{
1273	int ret;
1274	unsigned int size;
1275	struct scatterlist sg;
1276
1277	if (test->card->host->max_blk_count == 1)
1278		return RESULT_UNSUP_HOST;
1279
1280	size = PAGE_SIZE * 2;
1281	size = min(size, test->card->host->max_req_size);
1282	size = min(size, test->card->host->max_seg_size);
1283	size = min(size, test->card->host->max_blk_count * 512);
1284
1285	if (size < 1024)
1286		return RESULT_UNSUP_HOST;
1287
1288	sg_init_table(&sg, 1);
1289	sg_set_page(&sg, test->highmem, size, 0);
1290
1291	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1292	if (ret)
1293		return ret;
1294
1295	return 0;
1296}
1297
1298#else
1299
/* Stand-in for the highmem tests when CONFIG_HIGHMEM is not set. */
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}
1306
1307#endif /* CONFIG_HIGHMEM */
1308
1309/*
1310 * Map sz bytes so that it can be transferred.
1311 */
1312static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1313			     int max_scatter)
1314{
1315	struct mmc_test_area *t = &test->area;
1316	int err;
1317
1318	t->blocks = sz >> 9;
1319
1320	if (max_scatter) {
1321		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1322						  t->max_segs, t->max_seg_sz,
1323				       &t->sg_len);
1324	} else {
1325		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1326				      t->max_seg_sz, &t->sg_len);
1327	}
1328	if (err)
1329		printk(KERN_INFO "%s: Failed to map sg list\n",
1330		       mmc_hostname(test->card->host));
1331	return err;
1332}
1333
1334/*
1335 * Transfer bytes mapped by mmc_test_area_map().
1336 */
1337static int mmc_test_area_transfer(struct mmc_test_card *test,
1338				  unsigned int dev_addr, int write)
1339{
1340	struct mmc_test_area *t = &test->area;
1341
1342	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1343					t->blocks, 512, write);
1344}
1345
1346/*
1347 * Map and transfer bytes.
1348 */
1349static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1350			    unsigned int dev_addr, int write, int max_scatter,
1351			    int timed)
1352{
1353	struct timespec ts1, ts2;
1354	int ret;
1355
1356	/*
1357	 * In the case of a maximally scattered transfer, the maximum transfer
1358	 * size is further limited by using PAGE_SIZE segments.
1359	 */
1360	if (max_scatter) {
1361		struct mmc_test_area *t = &test->area;
1362		unsigned long max_tfr;
1363
1364		if (t->max_seg_sz >= PAGE_SIZE)
1365			max_tfr = t->max_segs * PAGE_SIZE;
1366		else
1367			max_tfr = t->max_segs * t->max_seg_sz;
1368		if (sz > max_tfr)
1369			sz = max_tfr;
1370	}
1371
1372	ret = mmc_test_area_map(test, sz, max_scatter);
1373	if (ret)
1374		return ret;
1375
1376	if (timed)
1377		getnstimeofday(&ts1);
1378
1379	ret = mmc_test_area_transfer(test, dev_addr, write);
1380	if (ret)
1381		return ret;
1382
1383	if (timed)
1384		getnstimeofday(&ts2);
1385
1386	if (timed)
1387		mmc_test_print_rate(test, sz, &ts1, &ts2);
1388
1389	return 0;
1390}
1391
1392/*
1393 * Write the test area entirely.
1394 */
1395static int mmc_test_area_fill(struct mmc_test_card *test)
1396{
1397	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1398				1, 0, 0);
1399}
1400
1401/*
1402 * Erase the test area entirely.
1403 */
1404static int mmc_test_area_erase(struct mmc_test_card *test)
1405{
1406	struct mmc_test_area *t = &test->area;
1407
1408	if (!mmc_can_erase(test->card))
1409		return 0;
1410
1411	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
1412			 MMC_ERASE_ARG);
1413}
1414
1415/*
1416 * Cleanup struct mmc_test_area.
1417 */
1418static int mmc_test_area_cleanup(struct mmc_test_card *test)
1419{
1420	struct mmc_test_area *t = &test->area;
1421
1422	kfree(t->sg);
1423	mmc_test_free_mem(t->mem);
1424
1425	return 0;
1426}
1427
1428/*
1429 * Initialize an area for testing large transfers.  The test area is set to the
1430 * middle of the card because cards may have different charateristics at the
1431 * front (for FAT file system optimization).  Optionally, the area is erased
1432 * (if the card supports it) which may improve write performance.  Optionally,
1433 * the area is filled with data for subsequent read tests.
1434 */
1435static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1436{
1437	struct mmc_test_area *t = &test->area;
1438	unsigned long min_sz = 64 * 1024, sz;
1439	int ret;
1440
1441	ret = mmc_test_set_blksize(test, 512);
1442	if (ret)
1443		return ret;
1444
1445	/* Make the test area size about 4MiB */
1446	sz = (unsigned long)test->card->pref_erase << 9;
1447	t->max_sz = sz;
1448	while (t->max_sz < 4 * 1024 * 1024)
1449		t->max_sz += sz;
1450	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1451		t->max_sz -= sz;
1452
1453	t->max_segs = test->card->host->max_segs;
1454	t->max_seg_sz = test->card->host->max_seg_size;
1455
1456	t->max_tfr = t->max_sz;
1457	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1458		t->max_tfr = test->card->host->max_blk_count << 9;
1459	if (t->max_tfr > test->card->host->max_req_size)
1460		t->max_tfr = test->card->host->max_req_size;
1461	if (t->max_tfr / t->max_seg_sz > t->max_segs)
1462		t->max_tfr = t->max_segs * t->max_seg_sz;
1463
1464	/*
1465	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
1466	 * because the same memory can be mapped into the scatterlist more than
1467	 * once.  Also, take into account the limits imposed on scatterlist
1468	 * segments by the host driver.
1469	 */
1470	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1471				    t->max_seg_sz);
1472	if (!t->mem)
1473		return -ENOMEM;
1474
1475	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1476	if (!t->sg) {
1477		ret = -ENOMEM;
1478		goto out_free;
1479	}
1480
1481	t->dev_addr = mmc_test_capacity(test->card) / 2;
1482	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1483
1484	if (erase) {
1485		ret = mmc_test_area_erase(test);
1486		if (ret)
1487			goto out_free;
1488	}
1489
1490	if (fill) {
1491		ret = mmc_test_area_fill(test);
1492		if (ret)
1493			goto out_free;
1494	}
1495
1496	return 0;
1497
1498out_free:
1499	mmc_test_area_cleanup(test);
1500	return ret;
1501}
1502
1503/*
1504 * Prepare for large transfers.  Do not erase the test area.
1505 */
1506static int mmc_test_area_prepare(struct mmc_test_card *test)
1507{
1508	return mmc_test_area_init(test, 0, 0);
1509}
1510
1511/*
1512 * Prepare for large transfers.  Do erase the test area.
1513 */
1514static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1515{
1516	return mmc_test_area_init(test, 1, 0);
1517}
1518
1519/*
1520 * Prepare for large transfers.  Erase and fill the test area.
1521 */
1522static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1523{
1524	return mmc_test_area_init(test, 1, 1);
1525}
1526
1527/*
1528 * Test best-case performance.  Best-case performance is expected from
1529 * a single large transfer.
1530 *
1531 * An additional option (max_scatter) allows the measurement of the same
1532 * transfer but with no contiguous pages in the scatter list.  This tests
1533 * the efficiency of DMA to handle scattered pages.
1534 */
1535static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1536				     int max_scatter)
1537{
1538	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
1539				write, max_scatter, 1);
1540}
1541
1542/*
1543 * Best-case read performance.
1544 */
1545static int mmc_test_best_read_performance(struct mmc_test_card *test)
1546{
1547	return mmc_test_best_performance(test, 0, 0);
1548}
1549
1550/*
1551 * Best-case write performance.
1552 */
1553static int mmc_test_best_write_performance(struct mmc_test_card *test)
1554{
1555	return mmc_test_best_performance(test, 1, 0);
1556}
1557
1558/*
1559 * Best-case read performance into scattered pages.
1560 */
1561static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1562{
1563	return mmc_test_best_performance(test, 0, 1);
1564}
1565
1566/*
1567 * Best-case write performance from scattered pages.
1568 */
1569static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1570{
1571	return mmc_test_best_performance(test, 1, 1);
1572}
1573
1574/*
1575 * Single read performance by transfer size.
1576 */
1577static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1578{
1579	unsigned long sz;
1580	unsigned int dev_addr;
1581	int ret;
1582
1583	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1584		dev_addr = test->area.dev_addr + (sz >> 9);
1585		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1586		if (ret)
1587			return ret;
1588	}
1589	sz = test->area.max_tfr;
1590	dev_addr = test->area.dev_addr;
1591	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1592}
1593
1594/*
1595 * Single write performance by transfer size.
1596 */
1597static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1598{
1599	unsigned long sz;
1600	unsigned int dev_addr;
1601	int ret;
1602
1603	ret = mmc_test_area_erase(test);
1604	if (ret)
1605		return ret;
1606	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1607		dev_addr = test->area.dev_addr + (sz >> 9);
1608		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1609		if (ret)
1610			return ret;
1611	}
1612	ret = mmc_test_area_erase(test);
1613	if (ret)
1614		return ret;
1615	sz = test->area.max_tfr;
1616	dev_addr = test->area.dev_addr;
1617	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1618}
1619
1620/*
1621 * Single trim performance by transfer size.
1622 */
1623static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1624{
1625	unsigned long sz;
1626	unsigned int dev_addr;
1627	struct timespec ts1, ts2;
1628	int ret;
1629
1630	if (!mmc_can_trim(test->card))
1631		return RESULT_UNSUP_CARD;
1632
1633	if (!mmc_can_erase(test->card))
1634		return RESULT_UNSUP_HOST;
1635
1636	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
1637		dev_addr = test->area.dev_addr + (sz >> 9);
1638		getnstimeofday(&ts1);
1639		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1640		if (ret)
1641			return ret;
1642		getnstimeofday(&ts2);
1643		mmc_test_print_rate(test, sz, &ts1, &ts2);
1644	}
1645	dev_addr = test->area.dev_addr;
1646	getnstimeofday(&ts1);
1647	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1648	if (ret)
1649		return ret;
1650	getnstimeofday(&ts2);
1651	mmc_test_print_rate(test, sz, &ts1, &ts2);
1652	return 0;
1653}
1654
1655static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1656{
1657	unsigned int dev_addr, i, cnt;
1658	struct timespec ts1, ts2;
1659	int ret;
1660
1661	cnt = test->area.max_sz / sz;
1662	dev_addr = test->area.dev_addr;
1663	getnstimeofday(&ts1);
1664	for (i = 0; i < cnt; i++) {
1665		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1666		if (ret)
1667			return ret;
1668		dev_addr += (sz >> 9);
1669	}
1670	getnstimeofday(&ts2);
1671	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1672	return 0;
1673}
1674
1675/*
1676 * Consecutive read performance by transfer size.
1677 */
1678static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1679{
1680	unsigned long sz;
1681	int ret;
1682
1683	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1684		ret = mmc_test_seq_read_perf(test, sz);
1685		if (ret)
1686			return ret;
1687	}
1688	sz = test->area.max_tfr;
1689	return mmc_test_seq_read_perf(test, sz);
1690}
1691
1692static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1693{
1694	unsigned int dev_addr, i, cnt;
1695	struct timespec ts1, ts2;
1696	int ret;
1697
1698	ret = mmc_test_area_erase(test);
1699	if (ret)
1700		return ret;
1701	cnt = test->area.max_sz / sz;
1702	dev_addr = test->area.dev_addr;
1703	getnstimeofday(&ts1);
1704	for (i = 0; i < cnt; i++) {
1705		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1706		if (ret)
1707			return ret;
1708		dev_addr += (sz >> 9);
1709	}
1710	getnstimeofday(&ts2);
1711	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1712	return 0;
1713}
1714
1715/*
1716 * Consecutive write performance by transfer size.
1717 */
1718static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1719{
1720	unsigned long sz;
1721	int ret;
1722
1723	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1724		ret = mmc_test_seq_write_perf(test, sz);
1725		if (ret)
1726			return ret;
1727	}
1728	sz = test->area.max_tfr;
1729	return mmc_test_seq_write_perf(test, sz);
1730}
1731
1732/*
1733 * Consecutive trim performance by transfer size.
1734 */
1735static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1736{
1737	unsigned long sz;
1738	unsigned int dev_addr, i, cnt;
1739	struct timespec ts1, ts2;
1740	int ret;
1741
1742	if (!mmc_can_trim(test->card))
1743		return RESULT_UNSUP_CARD;
1744
1745	if (!mmc_can_erase(test->card))
1746		return RESULT_UNSUP_HOST;
1747
1748	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
1749		ret = mmc_test_area_erase(test);
1750		if (ret)
1751			return ret;
1752		ret = mmc_test_area_fill(test);
1753		if (ret)
1754			return ret;
1755		cnt = test->area.max_sz / sz;
1756		dev_addr = test->area.dev_addr;
1757		getnstimeofday(&ts1);
1758		for (i = 0; i < cnt; i++) {
1759			ret = mmc_erase(test->card, dev_addr, sz >> 9,
1760					MMC_TRIM_ARG);
1761			if (ret)
1762				return ret;
1763			dev_addr += (sz >> 9);
1764		}
1765		getnstimeofday(&ts2);
1766		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1767	}
1768	return 0;
1769}
1770
/*
 * Table of test cases.  A test is selected by writing its 1-based index in
 * this table to the debugfs "test" file (see mmc_test_run()), so entries
 * must not be reordered.
 */
static const struct mmc_test_case mmc_test_cases[] = {
	/* Basic single and multi-block transfers */
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Unusual block sizes and buffer alignments */
	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Deliberately broken transfers, checking reported transfer size */
	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	/* Placeholders keep test numbering stable without CONFIG_HIGHMEM */
	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	/* Performance tests using the large test area */
	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};
2011
/* Serializes test runs and protects the global result and debugfs lists. */
static DEFINE_MUTEX(mmc_test_lock);

/* All recorded test results (struct mmc_test_general_result.link). */
static LIST_HEAD(mmc_test_result);
2015
/*
 * Run one test case (1-based index into mmc_test_cases), or all of them
 * when testcase is 0.  Results are logged via printk and, when memory
 * allows, recorded on mmc_test_result for the debugfs "test" file.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
		struct mmc_test_general_result *gr;

		/* Skip everything but the requested case (0 = run all). */
		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		/* A failed prepare stage skips the test, not the whole run. */
		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		/*
		 * Allocate a result container.  Allocation failure is not
		 * fatal: the test still runs, it just goes unrecorded.
		 */
		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		/* Cleanup failure is only a warning; the result stands. */
		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
2110
2111static void mmc_test_free_result(struct mmc_card *card)
2112{
2113	struct mmc_test_general_result *gr, *grs;
2114
2115	mutex_lock(&mmc_test_lock);
2116
2117	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2118		struct mmc_test_transfer_result *tr, *trs;
2119
2120		if (card && gr->card != card)
2121			continue;
2122
2123		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2124			list_del(&tr->link);
2125			kfree(tr);
2126		}
2127
2128		list_del(&gr->link);
2129		kfree(gr);
2130	}
2131
2132	mutex_unlock(&mmc_test_lock);
2133}
2134
/* All created debugfs "test" files (struct mmc_test_dbgfs_file.link). */
static LIST_HEAD(mmc_test_file_test);
2136
/*
 * seq_file show callback: print this card's results, one
 * "Test <n>: <result>" line per test followed by one
 * "<count> <sectors> <sec>.<nsec> <rate>" line per recorded transfer.
 */
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		/* +1: testcase indices are 1-based in the user interface */
		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}
2165
/* Open the "test" file: the card pointer was stashed in i_private. */
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
2170
2171static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2172	size_t count, loff_t *pos)
2173{
2174	struct seq_file *sf = (struct seq_file *)file->private_data;
2175	struct mmc_card *card = (struct mmc_card *)sf->private;
2176	struct mmc_test_card *test;
2177	char lbuf[12];
2178	long testcase;
2179
2180	if (count >= sizeof(lbuf))
2181		return -EINVAL;
2182
2183	if (copy_from_user(lbuf, buf, count))
2184		return -EFAULT;
2185	lbuf[count] = '\0';
2186
2187	if (strict_strtol(lbuf, 10, &testcase))
2188		return -EINVAL;
2189
2190	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2191	if (!test)
2192		return -ENOMEM;
2193
2194	/*
2195	 * Remove all test cases associated with given card. Thus we have only
2196	 * actual data of the last run.
2197	 */
2198	mmc_test_free_result(card);
2199
2200	test->card = card;
2201
2202	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2203#ifdef CONFIG_HIGHMEM
2204	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2205#endif
2206
2207#ifdef CONFIG_HIGHMEM
2208	if (test->buffer && test->highmem) {
2209#else
2210	if (test->buffer) {
2211#endif
2212		mutex_lock(&mmc_test_lock);
2213		mmc_test_run(test, testcase);
2214		mutex_unlock(&mmc_test_lock);
2215	}
2216
2217#ifdef CONFIG_HIGHMEM
2218	__free_pages(test->highmem, BUFFER_ORDER);
2219#endif
2220	kfree(test->buffer);
2221	kfree(test);
2222
2223	return count;
2224}
2225
/* debugfs "test" file: read = results via seq_file, write = run test N. */
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2233
2234static void mmc_test_free_file_test(struct mmc_card *card)
2235{
2236	struct mmc_test_dbgfs_file *df, *dfs;
2237
2238	mutex_lock(&mmc_test_lock);
2239
2240	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2241		if (card && df->card != card)
2242			continue;
2243		debugfs_remove(df->file);
2244		list_del(&df->link);
2245		kfree(df);
2246	}
2247
2248	mutex_unlock(&mmc_test_lock);
2249}
2250
/*
 * Create the per-card debugfs "test" file and remember it on
 * mmc_test_file_test so it can be removed later.  Returns 0 on success,
 * -ENODEV if the file could not be created, -ENOMEM on allocation failure.
 */
static int mmc_test_register_file_test(struct mmc_card *card)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;
	int ret = 0;

	mutex_lock(&mmc_test_lock);

	if (card->debugfs_root)
		file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
			card->debugfs_root, card, &mmc_test_fops_test);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create file. Perhaps debugfs is disabled.\n");
		ret = -ENODEV;
		goto err;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		/* Undo the file creation so nothing is left dangling. */
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		ret = -ENOMEM;
		goto err;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}
2289
2290static int mmc_test_probe(struct mmc_card *card)
2291{
2292	int ret;
2293
2294	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2295		return -ENODEV;
2296
2297	ret = mmc_test_register_file_test(card);
2298	if (ret)
2299		return ret;
2300
2301	dev_info(&card->dev, "Card claimed for testing.\n");
2302
2303	return 0;
2304}
2305
/* Card removal: drop this card's recorded results and debugfs file. */
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_file_test(card);
}
2311
/* MMC bus driver binding for the test harness. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};
2319
/* Module init: register on the MMC bus. */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
2324
/* Module exit: drop all global state, then unregister from the MMC bus. */
static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_file_test(NULL);

	mmc_unregister_driver(&mmc_driver);
}
2333
2334module_init(mmc_test_init);
2335module_exit(mmc_test_exit);
2336
2337MODULE_LICENSE("GPL");
2338MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
2339MODULE_AUTHOR("Pierre Ossman");
2340