mmci.c revision 019a5f56ec195aceadada18aaaad0f67294bdaef
/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/sizes.h>
#include <asm/mach/mmc.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

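/*
 * Upper bound on the card clock frequency, in Hz; mmci_probe() clamps
 * mmc->f_max to min(host->mclk, fmax).  Overridable at load time via
 * the "fmax" module parameter declared at the bottom of this file.
 */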
static unsigned int fmax = 515633;

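/*
 * Terminate the current request: clear the command register, hand the
 * completed request back to the MMC core, and drop/re-take the host
 * lock around mmc_request_done(), which may re-enter this driver.
 */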
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

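/*
 * Disable the data path state machine and the PIO interrupt mask, and
 * forget the data transfer in progress.
 */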
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

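/*
 * Set up a data transfer: program the data timeout and length, enable
 * the data path state machine with the block size and direction, and
 * unmask the PIO FIFO interrupts appropriate for the transfer.
 */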
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}

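/*
 * Issue a command: load the argument and command registers, enabling
 * the command path state machine and requesting a (long) response as
 * dictated by the command flags.
 */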
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

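/*
 * Handle data path interrupts: account completed blocks, map the
 * hardware error bits onto errno values, and either finish the request
 * or issue the stop command once the transfer has ended.
 */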
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(host->sg_ptr->page);
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

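/*
 * Handle command path interrupts: latch the response registers, record
 * timeout/CRC errors, and either complete the request or kick off the
 * data phase for a write.
 */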
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

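/*
 * Drain the receive FIFO into the current buffer by PIO.  Returns the
 * number of bytes copied.
 */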
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;

	do {
		int count = host->size - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

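/*
 * Fill the transmit FIFO from the current buffer by PIO, writing up to
 * a full or half FIFO at a time depending on the status flags.  Returns
 * the number of bytes copied.
 */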
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(host->sg_ptr->page);

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

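/*
 * MMC core entry point for starting a request.  Rejects block sizes
 * that are not a power of two, starts the data phase ahead of the
 * command for reads, and then issues the command.
 */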
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n",
			mmc_hostname(mmc), mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irq(&host->lock);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irq(&host->lock);
}

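/*
 * Apply bus settings from the MMC core: derive the clock divider (or
 * bypass) from the requested frequency, translate the VDD selection via
 * the platform callback, and program the clock and power registers.
 */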
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;

	if (ios->clock) {
		if (ios->clock >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
			clk = host->mclk / (2 * ios->clock) - 1;
			if (clk > 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		clk |= MCI_CLK_ENABLE;
	}

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		break;
	case MMC_POWER_UP:
		pwr |= MCI_PWR_UP;
		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		pwr |= MCI_ROD;

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
};

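/*
 * Periodic card detect poll: compare the platform status callback's
 * value against the last reading, notify the MMC core on change, and
 * rearm the timer for one second later.
 */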
static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status;

	status = host->plat->status(mmc_dev(host->mmc));
	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}

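/*
 * Probe an MMCI cell: claim the AMBA region, enable the bus clock, map
 * the registers, describe the controller's limits to the MMC core,
 * hook up the command/PIO interrupts and start the card detect timer.
 */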
static int mmci_probe(struct amba_device *dev, void *id)
{
	struct mmc_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->clk = clk_get(&dev->dev, "MCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	host->mmc = mmc;
	host->base = ioremap(dev->res.start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
	mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = MMC_CAP_MULTIWRITE;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

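/*
 * Tear down a previously probed MMCI cell: stop the card detect timer,
 * unregister from the MMC core, quiesce the hardware, and release the
 * interrupts, mapping, clock and bus region claimed in mmci_probe().
 */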
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
