mmci.c revision 34177802001894e064c857cac2759f68119550cd
1/*
2 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
3 *
4 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
5 *  Copyright (C) 2010 ST-Ericsson AB.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/device.h>
16#include <linux/interrupt.h>
17#include <linux/delay.h>
18#include <linux/err.h>
19#include <linux/highmem.h>
20#include <linux/log2.h>
21#include <linux/mmc/host.h>
22#include <linux/mmc/card.h>
23#include <linux/amba/bus.h>
24#include <linux/clk.h>
25#include <linux/scatterlist.h>
26#include <linux/gpio.h>
27#include <linux/amba/mmci.h>
28#include <linux/regulator/consumer.h>
29
30#include <asm/div64.h>
31#include <asm/io.h>
32#include <asm/sizes.h>
33
34#include "mmci.h"
35
#define DRIVER_NAME "mmci-pl18x"

/*
 * Default maximum operating frequency in Hz; overridable via the "fmax"
 * module parameter, and superseded by platform data f_max in mmci_probe().
 */
static unsigned int fmax = 515633;
39
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
 *		and will not work at all.
 * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
 *		using DMA.
 * @sdio: variant supports SDIO
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			broken_blockend;
	bool			broken_blockend_dma;
	bool			sdio;
};
65
/* Original ARM PL180/181: 16-word FIFO, 16-bit data length register */
static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};
71
/* ST Micro U300: adds HW flow control and SDIO; blockend broken in DMA mode */
static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= 1 << 13, /* HWFCEN */
	.datalength_bits	= 16,
	.broken_blockend_dma	= true,
	.sdio			= true,
};
80
/* ST Micro Ux500: deeper FIFO, 24-bit data length, blockend always broken */
static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= 1 << 14, /* HWFCEN */
	.datalength_bits	= 24,
	.broken_blockend	= true,
	.sdio			= true,
};
90/*
91 * This must be called with host->lock held
92 */
93static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
94{
95	struct variant_data *variant = host->variant;
96	u32 clk = variant->clkreg;
97
98	if (desired) {
99		if (desired >= host->mclk) {
100			clk = MCI_CLK_BYPASS;
101			host->cclk = host->mclk;
102		} else {
103			clk = host->mclk / (2 * desired) - 1;
104			if (clk >= 256)
105				clk = 255;
106			host->cclk = host->mclk / (2 * (clk + 1));
107		}
108
109		clk |= variant->clkreg_enable;
110		clk |= MCI_CLK_ENABLE;
111		/* This hasn't proven to be worthwhile */
112		/* clk |= MCI_CLK_PWRSAVE; */
113	}
114
115	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
116		clk |= MCI_4BIT_BUS;
117	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
118		clk |= MCI_ST_8BIT_BUS;
119
120	writel(clk, host->base + MMCICLOCK);
121}
122
/*
 * Conclude the current request: disarm the command state machine,
 * report the number of bytes transferred, and hand the request back
 * to the MMC core.  Called with host->lock held.
 */
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	/* The data phase must already have been torn down (mmci_stop_data) */
	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}
144
145static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
146{
147	void __iomem *base = host->base;
148
149	if (host->singleirq) {
150		unsigned int mask0 = readl(base + MMCIMASK0);
151
152		mask0 &= ~MCI_IRQ1MASK;
153		mask0 |= mask;
154
155		writel(mask0, base + MMCIMASK0);
156	}
157
158	writel(mask, base + MMCIMASK1);
159}
160
/*
 * Tear down the data path: disable the data path state machine and
 * mask all FIFO (PIO) interrupts.  Called with host->lock held.
 */
static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}
167
168static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
169{
170	unsigned int flags = SG_MITER_ATOMIC;
171
172	if (data->flags & MMC_DATA_READ)
173		flags |= SG_MITER_TO_SG;
174	else
175		flags |= SG_MITER_FROM_SG;
176
177	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
178}
179
/*
 * Arm a data transfer: program the timeout, length and data path
 * control registers and unmask the appropriate FIFO interrupts.
 * Called with host->lock held.
 */
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	host->data_xfered = 0;
	host->blockend = false;
	host->dataend = false;

	mmci_init_sg(host, data);

	/* Convert the nanosecond part of the timeout into card clock cycles */
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	/* The hardware encodes the block size as log2, so it must be 2^n */
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < variant->fifosize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/* The ST Micro variants has a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	writel(datactrl, base + MMCIDATACTRL);
	/* Keep the data end IRQ masked until the PIO path has run dry
	 * (it is re-enabled in mmci_pio_irq when host->size hits 0). */
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}
239
/*
 * Issue a command: program MMCIARGUMENT and start the command path
 * state machine via MMCICOMMAND.  Extra CPSM bits (e.g. for a stop
 * command) may be passed in @c.  Called with host->lock held.
 */
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	/*
	 * The CPSM must be disabled (and allowed a moment to settle)
	 * before it can be reprogrammed.
	 */
	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	/* Command-pending (interrupt) mode is deliberately never used */
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}
267
/*
 * Handle data path interrupt status: data errors, MCI_DATABLOCKEND
 * and MCI_DATAEND.  When the transfer is concluded, either sends the
 * stop command or finishes the request.  Called with host->lock held.
 */
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	struct variant_data *variant = host->variant;

	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;

		/* Force-complete the transaction */
		host->blockend = true;
		host->dataend = true;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (data->flags & MMC_DATA_READ) {
			struct sg_mapping_iter *sg_miter = &host->sg_miter;
			unsigned long flags;

			/* ATOMIC sg_miter requires IRQs disabled around use */
			local_irq_save(flags);
			if (sg_miter_next(sg_miter)) {
				flush_dcache_page(sg_miter->page);
				sg_miter_stop(sg_miter);
			}
			local_irq_restore(flags);
		}
	}

	/*
	 * On ARM variants in PIO mode, MCI_DATABLOCKEND
	 * is always sent first, and we increase the
	 * transfered number of bytes for that IRQ. Then
	 * MCI_DATAEND follows and we conclude the transaction.
	 *
	 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
	 * doesn't seem to immediately clear from the status,
	 * so we can't use it keep count when only one irq is
	 * used because the irq will hit for other reasons, and
	 * then the flag is still up. So we use the MCI_DATAEND
	 * IRQ at the end of the entire transfer because
	 * MCI_DATABLOCKEND is broken.
	 *
	 * In the U300, the IRQs can arrive out-of-order,
	 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
	 * so for this case we use the flags "blockend" and
	 * "dataend" to make sure both IRQs have arrived before
	 * concluding the transaction. (This does not apply
	 * to the Ux500 which doesn't fire MCI_DATABLOCKEND
	 * at all.) In DMA mode it suffers from the same problem
	 * as the Ux500.
	 */
	if (status & MCI_DATABLOCKEND) {
		/*
		 * Just being a little over-cautious, we do not
		 * use this progressive update if the hardware blockend
		 * flag is unreliable: since it can stay high between
		 * IRQs it will corrupt the transfer counter.
		 */
		if (!variant->broken_blockend)
			host->data_xfered += data->blksz;
		host->blockend = true;
	}

	if (status & MCI_DATAEND)
		host->dataend = true;

	/*
	 * On variants with broken blockend we shall only wait for dataend,
	 * on others we must sync with the blockend signal since they can
	 * appear out-of-order.
	 */
	if (host->dataend && (host->blockend || variant->broken_blockend)) {
		mmci_stop_data(host);

		/* Reset these flags */
		host->blockend = false;
		host->dataend = false;

		/*
		 * Variants with broken blockend flags need to handle the
		 * end of the entire transfer here.
		 */
		if (variant->broken_blockend && !data->error)
			host->data_xfered += data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
369
/*
 * Handle command path interrupt status: capture the response words
 * and record timeout/CRC errors.  On error, or when the command has
 * no (write) data phase, the request is finished; for writes the
 * data phase is started now (reads are started before the command).
 */
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		/* A read may already have armed the data path; tear it down */
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
397
398static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
399{
400	void __iomem *base = host->base;
401	char *ptr = buffer;
402	u32 status;
403	int host_remain = host->size;
404
405	do {
406		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);
407
408		if (count > remain)
409			count = remain;
410
411		if (count <= 0)
412			break;
413
414		readsl(base + MMCIFIFO, ptr, count >> 2);
415
416		ptr += count;
417		remain -= count;
418		host_remain -= count;
419
420		if (remain == 0)
421			break;
422
423		status = readl(base + MMCISTATUS);
424	} while (status & MCI_RXDATAAVLBL);
425
426	return ptr - buffer;
427}
428
429static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
430{
431	struct variant_data *variant = host->variant;
432	void __iomem *base = host->base;
433	char *ptr = buffer;
434
435	do {
436		unsigned int count, maxcnt;
437
438		maxcnt = status & MCI_TXFIFOEMPTY ?
439			 variant->fifosize : variant->fifohalfsize;
440		count = min(remain, maxcnt);
441
442		/*
443		 * The ST Micro variant for SDIO transfer sizes
444		 * less then 8 bytes should have clock H/W flow
445		 * control disabled.
446		 */
447		if (variant->sdio &&
448		    mmc_card_sdio(host->mmc->card)) {
449			if (count < 8)
450				writel(readl(host->base + MMCICLOCK) &
451					~variant->clkreg_enable,
452					host->base + MMCICLOCK);
453			else
454				writel(readl(host->base + MMCICLOCK) |
455					variant->clkreg_enable,
456					host->base + MMCICLOCK);
457		}
458
459		/*
460		 * SDIO especially may want to send something that is
461		 * not divisible by 4 (as opposed to card sectors
462		 * etc), and the FIFO only accept full 32-bit writes.
463		 * So compensate by adding +3 on the count, a single
464		 * byte become a 32bit write, 7 bytes will be two
465		 * 32bit writes etc.
466		 */
467		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
468
469		ptr += count;
470		remain -= count;
471
472		if (remain == 0)
473			break;
474
475		status = readl(base + MMCISTATUS);
476	} while (status & MCI_TXFIFOHALFEMPTY);
477
478	return ptr - buffer;
479}
480
/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	/* The ATOMIC sg_miter must be used with interrupts disabled */
	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		/* Couldn't finish this sg entry: the FIFO ran dry/full */
		if (remain)
			break;

		/* Keep the cache coherent with the freshly-filled page */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_miter->page);

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifosize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
563
/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		/*
		 * On single-IRQ hardware the PIO sources share this
		 * line: dispatch to the PIO handler if any unmasked
		 * MMCIMASK1 source is pending, then strip those bits
		 * so they are not treated as command/data events.
		 */
		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/* Only act on (and clear) sources we have unmasked */
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		/* At least one pass handled something */
		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
609
610static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
611{
612	struct mmci_host *host = mmc_priv(mmc);
613	unsigned long flags;
614
615	WARN_ON(host->mrq != NULL);
616
617	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
618		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
619			mrq->data->blksz);
620		mrq->cmd->error = -EINVAL;
621		mmc_request_done(mmc, mrq);
622		return;
623	}
624
625	spin_lock_irqsave(&host->lock, flags);
626
627	host->mrq = mrq;
628
629	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
630		mmci_start_data(host, mrq->data);
631
632	mmci_start_command(host, mrq->cmd, 0);
633
634	spin_unlock_irqrestore(&host->lock, flags);
635}
636
/*
 * mmc_host_ops .set_ios hook: apply power mode (via the optional vmmc
 * regulator and/or platform vdd_handler), bus mode and clock settings.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		/* NOTE(review): ret from disabling the regulator is ignored */
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct return void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
		/* fall through */
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant use the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	/* Only touch MMCIPOWER when the desired value actually changed */
	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
699
/*
 * mmc_host_ops .get_ro hook: report the write-protect switch state via
 * the optional WP GPIO.  Returns -ENOSYS when no GPIO is configured.
 */
static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}
709
710static int mmci_get_cd(struct mmc_host *mmc)
711{
712	struct mmci_host *host = mmc_priv(mmc);
713	struct mmci_platform_data *plat = host->plat;
714	unsigned int status;
715
716	if (host->gpio_cd == -ENOSYS) {
717		if (!plat->status)
718			return 1; /* Assume always present */
719
720		status = plat->status(mmc_dev(host->mmc));
721	} else
722		status = !!gpio_get_value_cansleep(host->gpio_cd)
723			^ plat->cd_invert;
724
725	/*
726	 * Use positive logic throughout - status is zero for no card,
727	 * non-zero for card inserted.
728	 */
729	return status;
730}
731
/*
 * Card detect GPIO interrupt: schedule a rescan of the slot with a
 * 500 ms debounce delay.
 */
static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}
740
/* Host controller operations handed to the MMC core */
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
747
/*
 * Probe one AMBA MMCI cell: allocate the mmc host, set up the clock
 * (capped at 100 MHz per the spec), map the registers, pick up the
 * optional vmmc regulator and card-detect/write-protect GPIOs from
 * platform data, request the IRQ(s) and register with the MMC core.
 * Resources are released in reverse order via the goto-cleanup ladder.
 */
static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	unsigned int mask;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* -ENOSYS marks the GPIOs as "not wired up" throughout the driver */
	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/* f_min = mclk / 512 (rounded up), the largest divider */
	mmc->f_min = (host->mclk + 511) / 512;
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		/* NOTE(review): this local shadows the outer "mask" variable */
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	/* Quiesce the block: mask everything, clear any stale status */
	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/* CD IRQ is optional; polling is used if it can't be had */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq, 0,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	/* Card detection exists but cannot interrupt: fall back to polling */
	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	/* Without a second IRQ, PIO events are muxed onto the first line */
	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	mask = MCI_IRQENABLE;
	/* Don't use the datablockend flag if it's broken */
	if (variant->broken_blockend)
		mask &= ~MCI_DATABLOCKEND;

	writel(mask, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}
975
/*
 * Remove hook: unregister from the MMC core, quiesce the hardware and
 * release all resources acquired in mmci_probe() in reverse order.
 */
static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		/* Mask all interrupts and stop both state machines */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
1019
1020#ifdef CONFIG_PM
/*
 * Legacy AMBA bus suspend hook: let the MMC core suspend the host
 * first, then mask all MMCI interrupts.
 */
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}
1036
/*
 * Legacy AMBA bus resume hook: re-enable the interrupt mask before
 * handing the host back to the MMC core.
 */
static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
1052#else
1053#define mmci_suspend	NULL
1054#define mmci_resume	NULL
1055#endif
1056
/* PrimeCell peripheral ID table, mapping each cell to its variant data */
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0x00ffffff,
		.data	= &variant_ux500,
	},
	{ 0, 0 },
};
1086
/* AMBA bus glue binding the driver to the PrimeCell IDs above */
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};
1097
/* Module init: register the AMBA driver */
static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}
1102
/* Module exit: unregister the AMBA driver */
static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}
1107
1108module_init(mmci_init);
1109module_exit(mmci_exit);
1110module_param(fmax, uint, 0444);
1111
1112MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
1113MODULE_LICENSE("GPL");
1114