/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#ifdef CONFIG_MMC_DW_IDMAC
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
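	/*
	 * des1[12:0] holds the buffer-1 byte count; the mask below
	 * preserves the buffer-2 size field (des1[25:13]) on update.
	 */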
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

/**
 * struct dw_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @ctype: Card type for this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct dw_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @id: Number of this slot.
 * @last_detect_state: Most recently observed card detect state.
 */
struct dw_mci_slot {
	struct mmc_host		*mmc;
	struct dw_mci		*host;

	u32			ctype;

	struct mmc_request	*mrq;
	struct list_head	queue_node;

	unsigned int		clock;
	unsigned long		flags;
#define DW_MMC_CARD_PRESENT	0
#define DW_MMC_CARD_NEED_INIT	1
	int			id;
	int			last_detect_state;
};

static struct workqueue_struct *dw_mci_card_workqueue;

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	/* Dump the live register contents, not the register offsets */
	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
	else
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(&host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
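	/* Make sure the argument has landed before setting the start bit */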
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	}
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(&host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(&host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

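	/* Descriptors must be visible in memory before the IDMAC runs */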
	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}

static struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

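/*
 * Map the scatterlist for DMA. When called from pre_req() (@next true) the
 * mapping count is cached in data->host_cookie so the request path can pick
 * it up without mapping again; a negative return falls back to PIO.
 */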
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(&host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(&slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(&host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);
	}
}

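/*
 * Send a bare command (used here for clock updates, no response expected),
 * polling until the CIU clears the start bit to show it has latched the
 * new settings.
 */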
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 div;

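	/*
	 * CLKDIV divides bus_hz by 2 * div (div == 0 bypasses the divider).
	 * For example, bus_hz = 100 MHz and a 400 kHz request give
	 * div = 125 and an actual clock of 100 MHz / (2 * 125) = 400 kHz.
	 */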
	if (slot->clock != host->current_speed) {
		if (host->bus_hz % slot->clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div = ((host->bus_hz / slot->clock) >> 1) + 1;
		else
			div = (host->bus_hz / slot->clock) >> 1;

		dev_info(&slot->mmc->class_dev,
			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz"
			 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock */
		mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE |
			   SDMMC_CLKEN_LOW_PWR) << slot->id));

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		host->current_speed = slot->clock;
	}

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 regs;

	/* set default 1 bit mode */
	slot->ctype = SDMMC_CTYPE_1BIT;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		slot->ctype = SDMMC_CTYPE_1BIT;
		break;
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= (0x1 << slot->id) << 16;
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);

	if (ios->clock) {
		/*
		 * Use mirror of ios->clock to prevent race with mmc
		 * core ios update when finding the minimum.
		 */
		slot->clock = ios->clock;
	}

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		break;
	default:
		break;
	}
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present)
		dev_dbg(&mmc->class_dev, "card is present\n");
	else
		dev_dbg(&mmc->class_dev, "card is not present\n");

	return present;
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		mci_writel(host, INTMASK,
			   (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
	}
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(&host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(&host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);

		if (cmd->data) {
			host->data = NULL;
			dw_mci_stop_dma(host);
		}
	}
}

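/*
 * Request state machine: SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP, advanced by the pending_events bits that the interrupt
 * handler sets before scheduling this tasklet.
 */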
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 status, ctrl;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			dw_mci_command_complete(host, cmd);
			if (cmd == host->mrq->sbc && !cmd->error) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       host->mrq->cmd);
				goto unlock;
			}

			if (!host->mrq->data || cmd->error) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop)
					send_stop_cmd(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			status = host->data_status;

			if (status & DW_MCI_DATA_ERROR_FLAGS) {
				if (status & SDMMC_INT_DTO) {
					data->error = -ETIMEDOUT;
				} else if (status & SDMMC_INT_DCRC) {
					data->error = -EILSEQ;
				} else if (status & SDMMC_INT_EBE &&
					   host->dir_status ==
							DW_MCI_SEND_STATUS) {
					/*
					 * No data CRC status was returned.
					 * The number of bytes transferred will
					 * be exaggerated in PIO mode.
					 */
					data->bytes_xfered = 0;
					data->error = -ETIMEDOUT;
				} else {
					dev_err(&host->dev,
						"data FIFO error "
						"(status=%08x)\n",
						status);
					data->error = -EIO;
				}
				/*
				 * After an error, there may be data lingering
				 * in the FIFO, so reset it - doing so
				 * generates a block interrupt, hence setting
				 * the scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;
				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);
			} else {
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
			}

			if (!data->stop) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			if (host->mrq->sbc && !data->error) {
				data->stop->error = 0;
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_STOP;
			if (!data->error)
				send_stop_cmd(host, data);
			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			host->cmd = NULL;
			dw_mci_command_complete(host, host->mrq->stop);
			dw_mci_request_end(host, host->mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}

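/*
 * PIO helpers: the data FIFO is only accessed in full host-data-width units
 * (2, 4 or 8 bytes), so part_buf carries the leftover bytes of a scatterlist
 * entry over to the next one.
 */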
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (!sg_next(host->sg) || host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		if (!sg_next(host->sg))
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (!sg_next(host->sg) || host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		if (!sg_next(host->sg))
			mci_writel(host, DATA(host->data_offset),
						host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (!sg_next(host->sg) || host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		if (!sg_next(host->sg))
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

static void dw_mci_read_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int nbytes = 0, len;
	unsigned int remain, fcnt;

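	/* Drain the FIFO into the scatterlist until it runs dry */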
	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->__sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			offset += len;
			nbytes += len;
			remain -= len;
		} while (remain);
		sg_miter->consumed = offset;

		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
		if (status & DW_MCI_DATA_ERROR_FLAGS) {
			host->data_status = status;
			data->bytes_xfered += nbytes;
			sg_miter_stop(sg_miter);
			host->sg = NULL;
			smp_wmb();

			set_bit(EVENT_DATA_ERROR, &host->pending_events);

			tasklet_schedule(&host->tasklet);
			return;
		}
	} while (status & SDMMC_INT_RXDR); /* if the RXDR is ready read again */
	data->bytes_xfered += nbytes;

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	data->bytes_xfered += nbytes;
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int nbytes = 0, len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

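	/* Keep topping up the FIFO's free space until TXDR deasserts */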
	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->__sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			offset += len;
			nbytes += len;
			remain -= len;
		} while (remain);
		sg_miter->consumed = offset;

		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
		if (status & DW_MCI_DATA_ERROR_FLAGS) {
			host->data_status = status;
			data->bytes_xfered += nbytes;
			sg_miter_stop(sg_miter);
			host->sg = NULL;

			smp_wmb();

			set_bit(EVENT_DATA_ERROR, &host->pending_events);

			tasklet_schedule(&host->tasklet);
			return;
		}
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
	data->bytes_xfered += nbytes;

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	data->bytes_xfered += nbytes;
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 status, pending;
	unsigned int pass_count = 0;
	int i;

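	/*
	 * Re-read and service the status a bounded number of times so that
	 * interrupts arriving mid-handler are caught without looping forever.
	 */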
	do {
		status = mci_readl(host, RINTSTS);
		pending = mci_readl(host, MINTSTS); /* read-only mask reg */

		/*
		 * DTO fix - version 2.10a and below, and only if internal DMA
		 * is configured.
		 */
		if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
			if (!pending &&
			    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
				pending |= SDMMC_INT_DATA_OVER;
		}

		if (!pending)
			break;

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = status;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = status;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC |
					 SDMMC_INT_SBE | SDMMC_INT_EBE)))
				tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = status;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, status);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(dw_mci_card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	} while (pass_count++ < 5);

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}

static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;
		u32 ctrl;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			/* Power up slot (before spin_lock, may sleep) */
			if (present != 0 && host->pdata->setpower)
				host->pdata->setpower(slot->id, mmc->ocr_avail);

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Mark card as present if applicable */
			if (present != 0)
				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						if (!mrq->stop)
							break;
						/* fall through */
					case STATE_SENDING_STOP:
						mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

				/*
				 * Clear down the FIFO - doing so generates a
				 * block interrupt, hence setting the
				 * scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;

				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);

#ifdef CONFIG_MMC_DW_IDMAC
				ctrl = mci_readl(host, BMOD);
				ctrl |= 0x01; /* Software reset of DMA */
				mci_writel(host, BMOD, ctrl);
#endif
			}

			spin_unlock_bh(&host->lock);

			/* Power down slot (after spin_unlock, may sleep) */
			if (present == 0 && host->pdata->setpower)
				host->pdata->setpower(slot->id, 0);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}

static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;

	mmc->ops = &dw_mci_ops;
	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
	mmc->f_max = host->bus_hz;

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 * is detected.
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	if (host->pdata->get_bus_wd)
		if (host->pdata->get_bus_wd(slot->id) >= 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY)
		mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
	else
		mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE;

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
	if (IS_ERR(host->vmmc)) {
		pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
		host->vmmc = NULL;
	} else
		regulator_enable(host->vmmc);

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	host->slot[id] = slot;
	mmc_add_host(mmc);

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	/*
	 * Card may have been plugged in prior to boot so we
	 * need to run the card detect work
	 */
	queue_work(dw_mci_card_workqueue, &host->card_work);

	return 0;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Shutdown detect IRQ */
	if (slot->host->pdata->exit)
		slot->host->pdata->exit(id);

	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}

static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dma_alloc_coherent(&host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(&host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(&host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(&host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(&host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(&host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}

static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int ctrl;

	mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
				SDMMC_CTRL_DMA_RESET));

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
			      SDMMC_CTRL_DMA_RESET)))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);

	return false;
}

int dw_mci_probe(struct dw_mci *host)
{
	int width, i, ret = 0;
	u32 fifo_size;

	if (!host->pdata || !host->pdata->init) {
		dev_err(&host->dev,
			"Platform data must supply init function\n");
		return -ENODEV;
	}

	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
		dev_err(&host->dev,
			"Platform data must supply select_slot function\n");
		return -ENODEV;
	}

	if (!host->pdata->bus_hz) {
		dev_err(&host->dev,
			"Platform data must supply bus speed\n");
		return -ENODEV;
	}

	host->bus_hz = host->pdata->bus_hz;
	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!mci_wait_reset(&host->dev, host)) {
		ret = -ENODEV;
		goto err_dmaunmap;
	}

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings:
	 *   RxMark = fifo_size / 2 - 1
	 *   TxMark = fifo_size / 2
	 *   DMA multiple transaction size = 8 (the 0x2 << 28 below)
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
			((fifo_size/2) << 0));
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
	if (!dw_mci_card_workqueue) {
		ret = -ENOMEM;
		goto err_dmaunmap;
	}
	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
	ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags,
			  "dw-mci", host);
	if (ret)
		goto err_workqueue;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret) {
			ret = -ENODEV;
			goto err_init_slot;
		}
	}

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(&host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;

	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(&host->dev, "DW MMC controller at irq %d, "
		 "%d bit host data width, "
		 "%u deep fifo\n",
		 host->irq, width, fifo_size);
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(&host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

err_init_slot:
	/* De-init any initialized slots (slot i itself failed, so is NULL) */
	while (i > 0) {
		i--;
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}
	free_irq(host->irq, host);

err_workqueue:
	destroy_workqueue(dw_mci_card_workqueue);

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);
	dma_free_coherent(&host->dev, PAGE_SIZE,
			  host->sg_cpu, host->sg_dma);

	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}
	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);

void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(&host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	free_irq(host->irq, host);
	destroy_workqueue(dw_mci_card_workqueue);
	dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}
}
EXPORT_SYMBOL(dw_mci_remove);

#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	int i, ret = 0;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		ret = mmc_suspend_host(slot->mmc);
		if (ret < 0) {
			while (--i >= 0) {
				slot = host->slot[i];
				if (slot)
					mmc_resume_host(host->slot[i]->mmc);
			}
			return ret;
		}
	}

	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);

int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc)
		regulator_enable(host->vmmc);

	if (host->dma_ops->init)
		host->dma_ops->init(host);

	if (!mci_wait_reset(&host->dev, host)) {
		ret = -ENODEV;
		return ret;
	}

	/* Restore the old value at FIFOTH register */
	mci_writel(host, FIFOTH, host->fifoth_val);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		ret = mmc_resume_host(host->slot[i]->mmc);
		if (ret < 0)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */

static int __init dw_mci_init(void)
{
	printk(KERN_INFO "Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");