mmci.c revision 4380c14fd77338bac9d1da4dc5dd9f6eb4966c82
/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson AB.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

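/*
 * Default maximum card clock frequency in Hz; platform data (plat->f_max)
 * takes precedence, and the value can also be overridden via the "fmax"
 * module parameter.
 */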
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
};

static struct variant_data variant_arm = {
};

static struct variant_data variant_u300 = {
	.clkreg_enable		= 1 << 13, /* HWFCEN */
};

static struct variant_data variant_ux500 = {
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= 1 << 14, /* HWFCEN */
};

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
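			/*
			 * The bus clock is derived from mclk through an
			 * 8-bit divider: cclk = mclk / (2 * (div + 1)),
			 * so the lowest reachable rate is mclk / 512.
			 */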
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

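	/*
	 * Convert the nanosecond timeout from the core into card clock
	 * cycles, add the clock-count part, and program MMCIDATATIMER.
	 */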
	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

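	/*
	 * The block size is programmed as a power-of-two exponent in
	 * bits [7:4] of MMCIDATACTRL, hence the power-of-two check.
	 */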
	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

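	/*
	 * The CPSM must be disabled before a new command can be
	 * programmed, so clear the register and give it a moment to
	 * settle if it is still enabled.
	 */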
	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
#ifdef CONFIG_ARCH_U300
		/*
		 * On the U300 some signal or other is
		 * badly routed so that a data write does
		 * not properly terminate with a MCI_DATAEND
		 * status flag. This quirk will make writes
		 * work again.
		 */
		if (data->flags & MMC_DATA_WRITE)
			status |= MCI_DATAEND;
#endif
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition.  Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (data->flags & MMC_DATA_READ) {
			struct sg_mapping_iter *sg_miter = &host->sg_miter;
			unsigned long flags;

			local_irq_save(flags);
			if (sg_miter_next(sg_miter)) {
				flush_dcache_page(sg_miter->page);
				sg_miter_stop(sg_miter);
			}
			local_irq_restore(flags);
		}
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

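	/*
	 * On error, or when no data phase follows, tear down any data
	 * transfer in progress and complete the request.  Write data is
	 * only started here, once the command has gone out; read data is
	 * already set up in mmci_request() before the command is sent.
	 */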
	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
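		/*
		 * MMCIFIFOCNT holds the number of words still to be
		 * transferred, so the bytes currently sitting in the
		 * FIFO are the remaining transfer size minus that count.
		 */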
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

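		/*
		 * If the FIFO is completely empty we can burst a full
		 * FIFO's worth of data, otherwise only refill the half
		 * that the half-empty interrupt guarantees is free.
		 */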
		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

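	/*
	 * The scatterlist iterator was started with SG_MITER_ATOMIC, so
	 * the mappings it hands back use kmap_atomic(); keep local
	 * interrupts disabled for as long as we walk it.
	 */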
	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_miter->page);

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

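		/*
		 * Only consider the interrupt sources we have unmasked,
		 * acknowledge them, and keep looping until no enabled
		 * status bits remain set.
		 */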
		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

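	/*
	 * For reads, set up the data path before the command goes out so
	 * that the DPSM and PIO interrupts are armed by the time the card
	 * starts returning data; writes are started from mmci_cmd_irq().
	 */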
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc &&
		    regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		break;
	case MMC_POWER_UP:
#ifdef CONFIG_REGULATOR
		if (host->vcc)
			/* This implicitly enables the regulator */
			mmc_regulator_set_ocr(host->vcc, ios->vdd);
#endif
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

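	/* Only write MMCIPOWER when the value actually changes. */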
	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned int status;

	if (host->gpio_cd == -ENOSYS)
		status = host->plat->status(mmc_dev(host->mmc));
	else
		status = gpio_get_value(host->gpio_cd);

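	/* A zero reading from the GPIO or platform hook means a card is present. */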
	return !status;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is at most 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
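	/*
	 * The clock divider in mmci_set_clkreg() can divide mclk by at
	 * most 2 * 256, so the lowest bus frequency we can advertise is
	 * mclk / 512, rounded up.
	 */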
	mmc->f_min = (host->mclk + 511) / 512;
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps |= MMC_CAP_NEEDS_POLL;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

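	/*
	 * The block has two interrupt outputs: irq[0] signals command and
	 * data completion (mmci_irq), irq[1] requests PIO FIFO service
	 * (mmci_pio_irq).
	 */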
	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	mmc_add_host(mmc);

	dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		if (host->vcc && regulator_is_enabled(host->vcc))
			regulator_disable(host->vcc);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

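/*
 * Peripheral ID table: the AMBA bus matches these against the primecell
 * ID registers, and probe picks up the matching variant_data via id->data.
 */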
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0x00ffffff,
		.data	= &variant_ux500,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");