mmci.c revision 78f87df2b4f8760954d7d80603d0cfcbd4759683
/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

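/*
 * Default maximum operating frequency, used as f_max when neither DT nor
 * platform data supplies one (see mmci_probe()); settable at module load
 * time through the read-only "fmax" parameter declared at the bottom of
 * this file.
 */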
static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is at bits 16..30 of the
 *		      datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/output direction of bus signals can be indicated
 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if busy detection on DAT0 is supported
 * @pwrreg_nopower: bits in MMCIPOWER don't control the external power supply
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
	bool			pwrreg_clkgate;
	bool			busy_detect;
	bool			pwrreg_nopower;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.pwrreg_nopower		= true,
};

static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return busy;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep ST Micro busy mode if enabled */
	datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
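		/*
		 * Worked example (illustrative numbers): with mclk = 100 MHz
		 * and a desired rate of 400 kHz, the ST divider gives
		 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248 and
		 * cclk = 100000000 / (248 + 2) = 400 kHz, while the PL180
		 * divider gives clkdiv = 100000000 / (2 * 400000) - 1 = 124
		 * and cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
		 */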

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		clk |= MCI_ST_UX500_NEG_EDGE;

	mmci_write_clkreg(host, clk);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (plat && plat->dma_filter) {
		if (!host->dma_rx_channel && plat->dma_rx_param) {
			host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
			/* E.g. if no DMA hardware is present */
			if (!host->dma_rx_channel)
				dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
		}

		if (!host->dma_tx_channel && plat->dma_tx_param) {
			host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
			if (!host->dma_tx_channel)
				dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
		}
	}

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * Inline this so the compiler can discard it in configurations
 * where it ends up unused.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}

static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up on DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request. When that happens, MMCI will
	 * call mmci_data_irq().
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		next->dma_desc = NULL;
		next->dma_chan = NULL;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;
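	/*
	 * Illustrative numbers: with timeout_ns = 100 ms and cclk = 25 MHz,
	 * clks = 100000000 * 25000000 / 1000000000 = 2500000 card clock
	 * cycles, on top of whatever timeout_clks the core requested.
	 */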

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
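	/*
	 * Example: a 512-byte block gives blksz_bits = ffs(512) - 1 = 9,
	 * encoded at bits [7:4] of datactrl, while blksz_datactrl16
	 * variants instead take the block size verbatim at bits [30:16].
	 */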

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			u32 clk;

			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * The ST Micro variant for SDIO small write transfers
			 * needs to have clock H/W flow control disabled,
			 * otherwise the transfer will not start. The threshold
			 * depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size < 8 ||
			     (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		datactrl |= MCI_ST_DPSM_DDRMODE;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc = (cmd == host->mrq->sbc);
	bool busy_resp = host->variant->busy_detect &&
			(cmd->flags & MMC_RSP_BUSY);

	/* Check if we need to wait for busy completion. */
	if (host->busy_status && (status & MCI_ST_CARDBUSY))
		return;

	/* Enable busy completion if needed and supported. */
	if (!host->busy_status && busy_resp &&
		!(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		(readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
		writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
			base + MMCIMASK0);
		host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
		return;
	}

	/* At busy completion, mask the IRQ and complete the request. */
	if (host->busy_status) {
		writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
			base + MMCIMASK0);
		host->busy_status = 0;
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
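		/*
		 * MMCIFIFOCNT holds the remaining number of words in the
		 * current transfer, so subtracting it from what we still
		 * have left to read gives the number of bytes currently
		 * sitting in the FIFO.
		 */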
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 to the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
		 * enabled) since the HW seems to be triggering the IRQ on both
		 * edges while monitoring DAT0 for busy completion.
		 */
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		cmd = host->cmd;
		if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
			MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		/* Don't poll for busy completion in irq context. */
		if (host->busy_status)
			status &= ~MCI_ST_CARDBUSY;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
		host->plat->ios_handler(mmc_dev(mmc), ios))
			dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}

static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		pm_runtime_get_sync(mmc_dev(mmc));

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}

	return ret;
}

static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

static void mmci_dt_populate_generic_pdata(struct device_node *np,
					struct mmci_platform_data *pdata)
{
	if (of_get_property(np, "st,sig-dir-dat0", NULL))
		pdata->sigdir |= MCI_ST_DATA0DIREN;
	if (of_get_property(np, "st,sig-dir-dat2", NULL))
		pdata->sigdir |= MCI_ST_DATA2DIREN;
	if (of_get_property(np, "st,sig-dir-dat31", NULL))
		pdata->sigdir |= MCI_ST_DATA31DIREN;
	if (of_get_property(np, "st,sig-dir-dat74", NULL))
		pdata->sigdir |= MCI_ST_DATA74DIREN;
	if (of_get_property(np, "st,sig-dir-cmd", NULL))
		pdata->sigdir |= MCI_ST_CMDDIREN;
	if (of_get_property(np, "st,sig-pin-fbclk", NULL))
		pdata->sigdir |= MCI_ST_FBCLKEN;
}
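
/*
 * Illustrative device-tree fragment exercising the properties parsed
 * above; the node name, address and interrupt specifier are made up
 * and not taken from any real board:
 *
 *	sdi0: mmc@80126000 {
 *		compatible = "arm,pl18x", "arm,primecell";
 *		reg = <0x80126000 0x1000>;
 *		interrupts = <0 60 4>;
 *		st,sig-dir-dat0;
 *		st,sig-dir-dat2;
 *		st,sig-dir-cmd;
 *		st,sig-pin-fbclk;
 *	};
 */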

static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}

static int mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	if (np)
		mmci_dt_populate_generic_pdata(np, plat);

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
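	/*
	 * Illustrative numbers: with mclk = 100 MHz this yields
	 * f_min = DIV_ROUND_UP(100000000, 257) = 389106 Hz on the ST
	 * variants (max clkdiv 255, divisor 255 + 2) and
	 * DIV_ROUND_UP(100000000, 512) = 195313 Hz on the ARM variants
	 * (divisor 2 * (255 + 1)).
	 */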
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course. Also note that DT takes precedence over
	 * platform data.
	 */
	if (mmc->f_max)
		mmc->f_max = min(host->mclk, mmc->f_max);
	else if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	/* Get regulators and the supported OCR mask */
	mmc_regulator_get_supply(mmc);
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* DT takes precedence over platform data. */
	mmc->caps = np ? mmc->caps : plat->capabilities;
	mmc->caps2 = np ? mmc->caps2 : plat->capabilities2;
	if (!np) {
		if (!plat->cd_invert)
			mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
	}

	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do scatter/gather I/O.
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	/* If DT, cd/wp gpios must be supplied through it. */
	if (!np && gpio_is_valid(plat->gpio_cd)) {
		ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
		if (ret)
			goto clk_disable;
	}
	if (!np && gpio_is_valid(plat->gpio_wp)) {
		ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
		if (ret)
			goto clk_disable;
	}

	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
			DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}

#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x10180180,
		.mask   = 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id     = 0x10480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");