mmci.c revision d2762090153053bca984ce5f8978953f63390401
/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for the MMCICLOCK register
 * @clkreg_enable: enable value for the MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size is at bits 16..30 of the datactrl register
 * @pwrreg_powerup: power up value for the MMCIPOWER register
 * @signal_direction: input/output direction of bus signals can be indicated
 * @pwrreg_clkgate: the MMCIPOWER register must be used to gate the clock
 * @busy_detect: true if busy detection on DAT0 is supported
 * @pwrreg_nopower: bits in the MMCIPOWER register don't control the external power supply
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
	bool			pwrreg_clkgate;
	bool			busy_detect;
	bool			pwrreg_nopower;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.pwrreg_nopower		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.pwrreg_nopower		= true,
};

static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return busy;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	if (!data)
		return 0;

	if (!is_power_of_2(data->blksz)) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	return 0;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * (the feedback clock runs at max 52 MHz) must pass between two
	 * writes to the MMCICLOCK reg, and three MCLK clock cycles must
	 * pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
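	/*
	 * Worked out: 3 cycles / 100 kHz = 30 us and 3 cycles / 25 MHz =
	 * 120 ns; 120 ns also covers three cycles of the 52 MHz feedback
	 * clock (~58 ns).
	 */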
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep ST Micro busy mode if enabled */
	datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}
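
/*
 * Note: the three helpers above write through shadow copies (clk_reg,
 * pwr_reg, datactrl_reg) and skip the MMIO write when the value is
 * unchanged.  Besides avoiding redundant writes (which would each need
 * the settle time described in mmci_reg_delay()), the shadow copies
 * are what mmci_restore() uses to reinstate register state after
 * runtime suspend.
 */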

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
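		/*
		 * Worked example (illustrative numbers): with mclk at
		 * 100 MHz and 400 kHz desired, the ST divider gives
		 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248,
		 * i.e. cclk = 100 MHz / (248 + 2) = 400 kHz; the PL180
		 * formula gives clkdiv = 100000000 / 800000 - 1 = 124,
		 * i.e. cclk = 100 MHz / (2 * 125) = 400 kHz.
		 */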

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		clk |= MCI_ST_UX500_NEG_EDGE;

	mmci_write_clkreg(host, clk);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;
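	/*
	 * SG_MITER_ATOMIC: the iterator is advanced from mmci_pio_irq()
	 * with interrupts disabled, so any highmem pages must be mapped
	 * with kmap_atomic().
	 */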

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	if (plat && plat->dma_filter) {
		if (!host->dma_rx_channel && plat->dma_rx_param) {
			host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
			/* E.g. if no DMA hardware is present */
			if (!host->dma_rx_channel)
				dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
		}

		if (!host->dma_tx_channel && plat->dma_tx_param) {
			host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
			if (!host->dma_tx_channel)
				dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
		}
	}

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used only on teardown and DMA error paths, so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
	host->dma_current = NULL;
	host->dma_desc_current = NULL;
	host->data->host_cookie = 0;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}

static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_data_error(host);
		if (!data->error)
			data->error = -EIO;
	}

	if (!data->host_cookie)
		mmci_dma_unmap(host, data);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_current = NULL;
	host->dma_desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

static inline int mmci_dma_prep_data(struct mmci_host *host,
				     struct mmc_data *data)
{
	/* Check if next job is already prepared. */
	if (host->dma_current && host->dma_desc_current)
		return 0;

	/* No job was prepared, so do it now. */
	return __mmci_dma_prep_data(host, data, &host->dma_current,
				    &host->dma_desc_current);
}

static inline int mmci_dma_prep_next(struct mmci_host *host,
				     struct mmc_data *data)
{
	struct mmci_host_next *nd = &host->next_data;
	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;
	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

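/*
 * Asynchronous request preparation: the MMC core calls .pre_req on the
 * next request while the current one is still in flight.  We map the
 * scatterlist and build the DMA descriptor ahead of time, tagging the
 * data with a non-zero host_cookie.  When the request is actually
 * issued, mmci_get_next_data() moves the prepared channel/descriptor
 * into host->dma_current/dma_desc_current, and .post_req unmaps the
 * buffers (and tears the descriptor down on error).
 */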
static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	BUG_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	if (!mmci_dma_prep_next(host, data))
		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_host_next *next = &host->next_data;
		struct dma_chan *chan;
		if (data->flags & MMC_DATA_READ)
			chan = host->dma_rx_channel;
		else
			chan = host->dma_tx_channel;
		dmaengine_terminate_all(chan);

		next->dma_desc = NULL;
		next->dma_chan = NULL;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_finalize(struct mmci_host *host,
				     struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card)) {
			/*
			 * The ST Micro variants have a special bit
			 * to enable SDIO.
			 */
			u32 clk;

			datactrl |= MCI_ST_DPSM_SDIOEN;

			/*
			 * On the ST Micro variant, small SDIO write
			 * transfers need clock H/W flow control disabled,
			 * otherwise the transfer will not start. The
			 * threshold depends on the rate of MCLK.
			 */
			if (data->flags & MMC_DATA_WRITE &&
			    (host->size < 8 ||
			     (host->size <= 8 && host->mclk > 50000000)))
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		datactrl |= MCI_ST_DPSM_DDRMODE;

	/*
	 * Attempt to use DMA operation mode; if this
	 * fails, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

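/*
 * Busy detection (ST variants) works in stages: when the command
 * response arrives with the card still signalling busy on DAT0, the
 * completion status is parked in host->busy_status and the busy-end
 * IRQ is unmasked; further IRQs are ignored until the busy-end event,
 * at which point the mask is restored and the command is completed
 * with the saved status.
 */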
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc = (cmd == host->mrq->sbc);
	bool busy_resp = host->variant->busy_detect &&
			(cmd->flags & MMC_RSP_BUSY);

	/* Check if we need to wait for busy completion. */
	if (host->busy_status && (status & MCI_ST_CARDBUSY))
		return;

	/* Enable busy completion if needed and supported. */
	if (!host->busy_status && busy_resp &&
		!(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		(readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
		writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
			base + MMCIMASK0);
		host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
		return;
	}

	/* At busy completion, mask the IRQ and complete the request. */
	if (host->busy_status) {
		writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
			base + MMCIMASK0);
		host->busy_status = 0;
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}
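		/*
		 * Example: with count == 6, one full word is read into
		 * the buffer and count is rounded down to 4; the two
		 * trailing bytes are picked up on a later pass through
		 * the loop (count < 4) via the 4-byte bounce buffer.
		 */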

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count: a single
		 * byte becomes one 32-bit write, 7 bytes become two
		 * 32-bit writes, etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/*
		 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
		 * enabled) since the HW seems to be triggering the IRQ on both
		 * edges while monitoring DAT0 for busy completion.
		 */
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		cmd = host->cmd;
		if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
			MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		/* Don't poll for busy completion in irq context. */
		if (host->busy_status)
			status &= ~MCI_ST_CARDBUSY;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	if (mrq->sbc)
		mmci_start_command(host, mrq->sbc, 0);
	else
		mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
		host->plat->ios_handler(mmc_dev(mmc), ios))
			dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
		 * and instead uses MCI_PWR_ON, so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for
			 * something else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER register to
	 * be used for clock gating, clear the MCI_PWR_ON bit.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}

static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret = 0;

	if (!IS_ERR(mmc->supply.vqmmc)) {

		pm_runtime_get_sync(mmc_dev(mmc));

		switch (ios->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						2700000, 3600000);
			break;
		case MMC_SIGNAL_VOLTAGE_180:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1700000, 1950000);
			break;
		case MMC_SIGNAL_VOLTAGE_120:
			ret = regulator_set_voltage(mmc->supply.vqmmc,
						1100000, 1300000);
			break;
		}

		if (ret)
			dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}

	return ret;
}

static struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

#ifdef CONFIG_OF
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					struct mmci_platform_data *pdata)
{
	int bus_width = 0;

	pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
	pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);

	if (of_get_property(np, "cd-inverted", NULL))
		pdata->cd_invert = true;
	else
		pdata->cd_invert = false;

	of_property_read_u32(np, "max-frequency", &pdata->f_max);
	if (!pdata->f_max)
		pr_warn("%s has no 'max-frequency' property\n", np->full_name);

	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
		pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;

	of_property_read_u32(np, "bus-width", &bus_width);
	switch (bus_width) {
	case 0:
		/* No bus-width supplied. */
		break;
	case 4:
		pdata->capabilities |= MMC_CAP_4_BIT_DATA;
		break;
	case 8:
		pdata->capabilities |= MMC_CAP_8_BIT_DATA;
		break;
	default:
		pr_warn("%s: Unsupported bus width\n", np->full_name);
	}
}
#else
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					struct mmci_platform_data *pdata)
{
	return;
}
#endif
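
/*
 * Illustrative device tree fragment consumed by the helper above.  The
 * node name, addresses and GPIO specifiers below are made-up example
 * values, not taken from a real board:
 *
 *	sdi0: mmc@80126000 {
 *		compatible = "arm,pl18x", "arm,primecell";
 *		reg = <0x80126000 0x1000>;
 *		max-frequency = <50000000>;
 *		bus-width = <4>;
 *		mmc-cap-sd-highspeed;
 *		cd-gpios = <&gpio2 31 0>;
 *		cd-inverted;
 *	};
 */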

static int mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	if (np)
		mmci_dt_populate_generic_pdata(np, plat);

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations, which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	/* Get regulators and the supported OCR mask */
	mmc_regulator_get_supply(mmc);
	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;
	if (!plat->cd_invert)
		mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;

	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		mmc->max_busy_timeout = 0;
	}

	mmc->ops = &mmci_ops;

	/* We support these PM capabilities. */
	mmc->pm_caps = MMC_PM_KEEP_POWER;

	/* We can do scatter/gather I/O. */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (plat->gpio_cd == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
		if (ret)
			goto err_gpio_cd;
	}
	if (plat->gpio_wp == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
		if (ret)
			goto err_gpio_cd;
	}

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto err_gpio_cd;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		iounmap(host->base);
		clk_disable_unprepare(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_pm_select_default_state(dev);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x10180180,
		.mask   = 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id     = 0x10480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");