atmel-mci.c revision c42aa775cc8a8ca558db0cc75979fb8e16667447
1/*
2 * Atmel MultiMedia Card Interface driver
3 *
4 * Copyright (C) 2004-2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/blkdev.h>
11#include <linux/clk.h>
12#include <linux/debugfs.h>
13#include <linux/device.h>
14#include <linux/dmaengine.h>
15#include <linux/dma-mapping.h>
16#include <linux/err.h>
17#include <linux/gpio.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/ioport.h>
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/scatterlist.h>
24#include <linux/seq_file.h>
25#include <linux/stat.h>
26
27#include <linux/mmc/host.h>
28#include <linux/atmel-mci.h>
29
30#include <asm/io.h>
31#include <asm/unaligned.h>
32
33#include <mach/board.h>
34
35#include "atmel-mci-regs.h"
36
37#define ATMCI_DATA_ERROR_FLAGS	(MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE)
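/*
 * Transfers shorter than this many bytes are done by PIO; the DMA
 * setup overhead is not worth it for them.
 */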
38#define ATMCI_DMA_THRESHOLD	16
39
40enum {
41	EVENT_CMD_COMPLETE = 0,
42	EVENT_XFER_COMPLETE,
43	EVENT_DATA_COMPLETE,
44	EVENT_DATA_ERROR,
45};
46
47enum atmel_mci_state {
48	STATE_IDLE = 0,
49	STATE_SENDING_CMD,
50	STATE_SENDING_DATA,
51	STATE_DATA_BUSY,
52	STATE_SENDING_STOP,
53	STATE_DATA_ERROR,
54};
55
56struct atmel_mci_dma {
57#ifdef CONFIG_MMC_ATMELMCI_DMA
58	struct dma_client		client;
59	struct dma_chan			*chan;
60	struct dma_async_tx_descriptor	*data_desc;
61#endif
62};
63
64/**
65 * struct atmel_mci - MMC controller state shared between all slots
66 * @lock: Spinlock protecting the queue and associated data.
67 * @regs: Pointer to MMIO registers.
68 * @sg: Scatterlist entry currently being processed by PIO code, if any.
69 * @pio_offset: Offset into the current scatterlist entry.
70 * @cur_slot: The slot which is currently using the controller.
71 * @mrq: The request currently being processed on @cur_slot,
72 *	or NULL if the controller is idle.
73 * @cmd: The command currently being sent to the card, or NULL.
74 * @data: The data currently being transferred, or NULL if no data
75 *	transfer is in progress.
76 * @dma: DMA client state.
77 * @data_chan: DMA channel being used for the current data transfer.
78 * @cmd_status: Snapshot of SR taken upon completion of the current
79 *	command. Only valid when EVENT_CMD_COMPLETE is pending.
80 * @data_status: Snapshot of SR taken upon completion of the current
81 *	data transfer. Only valid when EVENT_DATA_COMPLETE or
82 *	EVENT_DATA_ERROR is pending.
83 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
84 *	to be sent.
85 * @tasklet: Tasklet running the request state machine.
86 * @pending_events: Bitmask of events flagged by the interrupt handler
87 *	to be processed by the tasklet.
88 * @completed_events: Bitmask of events which the state machine has
89 *	processed.
90 * @state: Tasklet state.
91 * @queue: List of slots waiting for access to the controller.
92 * @need_clock_update: Update the clock rate before the next request.
93 * @need_reset: Reset controller before next request.
94 * @mode_reg: Value of the MR register.
95 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
96 *	rate and timeout calculations.
97 * @mapbase: Physical address of the MMIO registers.
98 * @mck: The peripheral bus clock hooked up to the MMC controller.
99 * @pdev: Platform device associated with the MMC controller.
100 * @slot: Slots sharing this MMC controller.
101 *
102 * Locking
103 * =======
104 *
105 * @lock is a softirq-safe spinlock protecting @queue as well as
106 * @cur_slot, @mrq and @state. These must always be updated
107 * at the same time while holding @lock.
108 *
109 * @lock also protects mode_reg and need_clock_update since these are
110 * used to synchronize mode register updates with the queue
111 * processing.
112 *
113 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
114 * and must always be written at the same time as the slot is added to
115 * @queue.
116 *
117 * @pending_events and @completed_events are accessed using atomic bit
118 * operations, so they don't need any locking.
119 *
120 * None of the fields touched by the interrupt handler need any
121 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
122 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
123 * interrupts must be disabled and @data_status updated with a
124 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
125 * CMDRDY interrupt must be disabled and @cmd_status updated with a
126 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
127 * bytes_xfered field of @data must be written. This is ensured by
128 * using barriers.
129 */
130struct atmel_mci {
131	spinlock_t		lock;
132	void __iomem		*regs;
133
134	struct scatterlist	*sg;
135	unsigned int		pio_offset;
136
137	struct atmel_mci_slot	*cur_slot;
138	struct mmc_request	*mrq;
139	struct mmc_command	*cmd;
140	struct mmc_data		*data;
141
142	struct atmel_mci_dma	dma;
143	struct dma_chan		*data_chan;
144
145	u32			cmd_status;
146	u32			data_status;
147	u32			stop_cmdr;
148
149	struct tasklet_struct	tasklet;
150	unsigned long		pending_events;
151	unsigned long		completed_events;
152	enum atmel_mci_state	state;
153	struct list_head	queue;
154
155	bool			need_clock_update;
156	bool			need_reset;
157	u32			mode_reg;
158	unsigned long		bus_hz;
159	unsigned long		mapbase;
160	struct clk		*mck;
161	struct platform_device	*pdev;
162
163	struct atmel_mci_slot	*slot[ATMEL_MCI_MAX_NR_SLOTS];
164};
165
166/**
167 * struct atmel_mci_slot - MMC slot state
168 * @mmc: The mmc_host representing this slot.
169 * @host: The MMC controller this slot is using.
170 * @sdc_reg: Value of SDCR to be written before using this slot.
171 * @mrq: mmc_request currently being processed or waiting to be
172 *	processed, or NULL when the slot is idle.
173 * @queue_node: List node for placing this node in the @queue list of
174 *	&struct atmel_mci.
175 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
176 * @flags: Random state bits associated with the slot.
177 * @detect_pin: GPIO pin used for card detection, or negative if not
178 *	available.
179 * @wp_pin: GPIO pin used for card write protect sensing, or negative
180 *	if not available.
181 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
182 */
183struct atmel_mci_slot {
184	struct mmc_host		*mmc;
185	struct atmel_mci	*host;
186
187	u32			sdc_reg;
188
189	struct mmc_request	*mrq;
190	struct list_head	queue_node;
191
192	unsigned int		clock;
193	unsigned long		flags;
194#define ATMCI_CARD_PRESENT	0
195#define ATMCI_CARD_NEED_INIT	1
196#define ATMCI_SHUTDOWN		2
197
198	int			detect_pin;
199	int			wp_pin;
200
201	struct timer_list	detect_timer;
202};
203
204#define atmci_test_and_clear_pending(host, event)		\
205	test_and_clear_bit(event, &host->pending_events)
206#define atmci_set_completed(host, event)			\
207	set_bit(event, &host->completed_events)
208#define atmci_set_pending(host, event)				\
209	set_bit(event, &host->pending_events)
210
211/*
212 * The debugfs stuff below is mostly optimized away when
213 * CONFIG_DEBUG_FS is not set.
214 */
215static int atmci_req_show(struct seq_file *s, void *v)
216{
217	struct atmel_mci_slot	*slot = s->private;
218	struct mmc_request	*mrq;
219	struct mmc_command	*cmd;
220	struct mmc_command	*stop;
221	struct mmc_data		*data;
222
223	/* Make sure we get a consistent snapshot */
224	spin_lock_bh(&slot->host->lock);
225	mrq = slot->mrq;
226
227	if (mrq) {
228		cmd = mrq->cmd;
229		data = mrq->data;
230		stop = mrq->stop;
231
232		if (cmd)
233			seq_printf(s,
234				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
235				cmd->opcode, cmd->arg, cmd->flags,
236				cmd->resp[0], cmd->resp[1], cmd->resp[2],
237				cmd->resp[3], cmd->error);
238		if (data)
239			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
240				data->bytes_xfered, data->blocks,
241				data->blksz, data->flags, data->error);
242		if (stop)
243			seq_printf(s,
244				"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
245				stop->opcode, stop->arg, stop->flags,
246				stop->resp[0], stop->resp[1], stop->resp[2],
247				stop->resp[3], stop->error);
248	}
249
250	spin_unlock_bh(&slot->host->lock);
251
252	return 0;
253}
254
255static int atmci_req_open(struct inode *inode, struct file *file)
256{
257	return single_open(file, atmci_req_show, inode->i_private);
258}
259
260static const struct file_operations atmci_req_fops = {
261	.owner		= THIS_MODULE,
262	.open		= atmci_req_open,
263	.read		= seq_read,
264	.llseek		= seq_lseek,
265	.release	= single_release,
266};
267
268static void atmci_show_status_reg(struct seq_file *s,
269		const char *regname, u32 value)
270{
271	static const char	*sr_bit[] = {
272		[0]	= "CMDRDY",
273		[1]	= "RXRDY",
274		[2]	= "TXRDY",
275		[3]	= "BLKE",
276		[4]	= "DTIP",
277		[5]	= "NOTBUSY",
278		[8]	= "SDIOIRQA",
279		[9]	= "SDIOIRQB",
280		[16]	= "RINDE",
281		[17]	= "RDIRE",
282		[18]	= "RCRCE",
283		[19]	= "RENDE",
284		[20]	= "RTOE",
285		[21]	= "DCRCE",
286		[22]	= "DTOE",
287		[30]	= "OVRE",
288		[31]	= "UNRE",
289	};
290	unsigned int		i;
291
292	seq_printf(s, "%s:\t0x%08x", regname, value);
293	for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
294		if (value & (1 << i)) {
295			if (sr_bit[i])
296				seq_printf(s, " %s", sr_bit[i]);
297			else
298				seq_puts(s, " UNKNOWN");
299		}
300	}
301	seq_putc(s, '\n');
302}
303
304static int atmci_regs_show(struct seq_file *s, void *v)
305{
306	struct atmel_mci	*host = s->private;
307	u32			*buf;
308
309	buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL);
310	if (!buf)
311		return -ENOMEM;
312
313	/*
314	 * Grab a more or less consistent snapshot. Note that we're
315	 * not disabling interrupts, so IMR and SR may not be
316	 * consistent.
317	 */
318	spin_lock_bh(&host->lock);
319	clk_enable(host->mck);
320	memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
321	clk_disable(host->mck);
322	spin_unlock_bh(&host->lock);
323
324	seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
325			buf[MCI_MR / 4],
326			buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "",
327			buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "",
328			buf[MCI_MR / 4] & 0xff);
329	seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]);
330	seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]);
331	seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]);
332	seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
333			buf[MCI_BLKR / 4],
334			buf[MCI_BLKR / 4] & 0xffff,
335			(buf[MCI_BLKR / 4] >> 16) & 0xffff);
336
337	/* Don't read RSPR and RDR; doing so would consume the data there */
338
339	atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
340	atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
341
342	kfree(buf);
343
344	return 0;
345}
346
347static int atmci_regs_open(struct inode *inode, struct file *file)
348{
349	return single_open(file, atmci_regs_show, inode->i_private);
350}
351
352static const struct file_operations atmci_regs_fops = {
353	.owner		= THIS_MODULE,
354	.open		= atmci_regs_open,
355	.read		= seq_read,
356	.llseek		= seq_lseek,
357	.release	= single_release,
358};
359
360static void atmci_init_debugfs(struct atmel_mci_slot *slot)
361{
362	struct mmc_host		*mmc = slot->mmc;
363	struct atmel_mci	*host = slot->host;
364	struct dentry		*root;
365	struct dentry		*node;
366
367	root = mmc->debugfs_root;
368	if (!root)
369		return;
370
371	node = debugfs_create_file("regs", S_IRUSR, root, host,
372			&atmci_regs_fops);
373	if (IS_ERR(node))
374		return;
375	if (!node)
376		goto err;
377
378	node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
379	if (!node)
380		goto err;
381
382	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
383	if (!node)
384		goto err;
385
386	node = debugfs_create_x32("pending_events", S_IRUSR, root,
387				     (u32 *)&host->pending_events);
388	if (!node)
389		goto err;
390
391	node = debugfs_create_x32("completed_events", S_IRUSR, root,
392				     (u32 *)&host->completed_events);
393	if (!node)
394		goto err;
395
396	return;
397
398err:
399	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
400}
401
402static inline unsigned int ns_to_clocks(struct atmel_mci *host,
403					unsigned int ns)
404{
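	/*
	 * bus_hz / 1000000 is the clock rate in cycles per microsecond;
	 * the + 999 rounds the cycle count up.
	 */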
405	return (ns * (host->bus_hz / 1000000) + 999) / 1000;
406}
407
408static void atmci_set_timeout(struct atmel_mci *host,
409		struct atmel_mci_slot *slot, struct mmc_data *data)
410{
411	static unsigned	dtomul_to_shift[] = {
412		0, 4, 7, 8, 10, 12, 16, 20
413	};
414	unsigned	timeout;
415	unsigned	dtocyc;
416	unsigned	dtomul;
417
418	timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks;
419
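	/*
	 * DTOR encodes the timeout as DTOCYC scaled by DTOMUL. Pick the
	 * smallest multiplier that keeps DTOCYC within its maximum of 15,
	 * clamping to the longest representable timeout otherwise.
	 */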
420	for (dtomul = 0; dtomul < 8; dtomul++) {
421		unsigned shift = dtomul_to_shift[dtomul];
422		dtocyc = (timeout + (1 << shift) - 1) >> shift;
423		if (dtocyc < 15)
424			break;
425	}
426
427	if (dtomul >= 8) {
428		dtomul = 7;
429		dtocyc = 15;
430	}
431
432	dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
433			dtocyc << dtomul_to_shift[dtomul]);
434	mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc)));
435}
436
437/*
438 * Return mask with command flags to be enabled for this command.
439 */
440static u32 atmci_prepare_command(struct mmc_host *mmc,
441				 struct mmc_command *cmd)
442{
443	struct mmc_data	*data;
444	u32		cmdr;
445
446	cmd->error = -EINPROGRESS;
447
448	cmdr = MCI_CMDR_CMDNB(cmd->opcode);
449
450	if (cmd->flags & MMC_RSP_PRESENT) {
451		if (cmd->flags & MMC_RSP_136)
452			cmdr |= MCI_CMDR_RSPTYP_136BIT;
453		else
454			cmdr |= MCI_CMDR_RSPTYP_48BIT;
455	}
456
457	/*
458	 * This should really be MAXLAT_5 for CMD2 and ACMD41, but
459	 * it's too difficult to determine whether this is an ACMD or
460	 * not. Better make it 64.
461	 */
462	cmdr |= MCI_CMDR_MAXLAT_64CYC;
463
464	if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
465		cmdr |= MCI_CMDR_OPDCMD;
466
467	data = cmd->data;
468	if (data) {
469		cmdr |= MCI_CMDR_START_XFER;
470		if (data->flags & MMC_DATA_STREAM)
471			cmdr |= MCI_CMDR_STREAM;
472		else if (data->blocks > 1)
473			cmdr |= MCI_CMDR_MULTI_BLOCK;
474		else
475			cmdr |= MCI_CMDR_BLOCK;
476
477		if (data->flags & MMC_DATA_READ)
478			cmdr |= MCI_CMDR_TRDIR_READ;
479	}
480
481	return cmdr;
482}
483
484static void atmci_start_command(struct atmel_mci *host,
485		struct mmc_command *cmd, u32 cmd_flags)
486{
487	WARN_ON(host->cmd);
488	host->cmd = cmd;
489
490	dev_vdbg(&host->pdev->dev,
491			"start command: ARGR=0x%08x CMDR=0x%08x\n",
492			cmd->arg, cmd_flags);
493
494	mci_writel(host, ARGR, cmd->arg);
495	mci_writel(host, CMDR, cmd_flags);
496}
497
498static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
499{
500	atmci_start_command(host, data->stop, host->stop_cmdr);
501	mci_writel(host, IER, MCI_CMDRDY);
502}
503
504#ifdef CONFIG_MMC_ATMELMCI_DMA
505static void atmci_dma_cleanup(struct atmel_mci *host)
506{
507	struct mmc_data			*data = host->data;
508
509	dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
510		     ((data->flags & MMC_DATA_WRITE)
511		      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
512}
513
514static void atmci_stop_dma(struct atmel_mci *host)
515{
516	struct dma_chan *chan = host->data_chan;
517
518	if (chan) {
519		chan->device->device_terminate_all(chan);
520		atmci_dma_cleanup(host);
521	} else {
522		/* Data transfer was stopped by the interrupt handler */
523		atmci_set_pending(host, EVENT_XFER_COMPLETE);
524		mci_writel(host, IER, MCI_NOTBUSY);
525	}
526}
527
528/* This function is called by the DMA driver from tasklet context. */
529static void atmci_dma_complete(void *arg)
530{
531	struct atmel_mci	*host = arg;
532	struct mmc_data		*data = host->data;
533
534	dev_vdbg(&host->pdev->dev, "DMA complete\n");
535
536	atmci_dma_cleanup(host);
537
538	/*
539	 * If the card was removed, data will be NULL. No point trying
540	 * to send the stop command or waiting for NOTBUSY in this case.
541	 */
542	if (data) {
543		atmci_set_pending(host, EVENT_XFER_COMPLETE);
544		tasklet_schedule(&host->tasklet);
545
546		/*
547		 * Regardless of what the documentation says, we have
548		 * to wait for NOTBUSY even after block read
549		 * operations.
550		 *
551		 * When the DMA transfer is complete, the controller
552		 * may still be reading the CRC from the card, i.e.
553		 * the data transfer is still in progress and we
554		 * haven't seen all the potential error bits yet.
555		 *
556		 * The interrupt handler will schedule a different
557		 * tasklet to finish things up when the data transfer
558		 * is completely done.
559		 *
560		 * We may not complete the mmc request here anyway
561		 * because the mmc layer may call back and cause us to
562		 * violate the "don't submit new operations from the
563		 * completion callback" rule of the dma engine
564		 * framework.
565		 */
566		mci_writel(host, IER, MCI_NOTBUSY);
567	}
568}
569
570static int
571atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
572{
573	struct dma_chan			*chan;
574	struct dma_async_tx_descriptor	*desc;
575	struct scatterlist		*sg;
576	unsigned int			i;
577	enum dma_data_direction		direction;
578
579	/*
580	 * We don't do DMA on "complex" transfers, i.e. with
581	 * non-word-aligned buffers or lengths. Also, we don't bother
582	 * with all the DMA setup overhead for short transfers.
583	 */
584	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
585		return -EINVAL;
586	if (data->blksz & 3)
587		return -EINVAL;
588
589	for_each_sg(data->sg, sg, data->sg_len, i) {
590		if (sg->offset & 3 || sg->length & 3)
591			return -EINVAL;
592	}
593
594	/* If we don't have a channel, we can't do DMA */
595	chan = host->dma.chan;
596	if (chan) {
597		dma_chan_get(chan);
598		host->data_chan = chan;
599	}
600
601	if (!chan)
602		return -ENODEV;
603
604	if (data->flags & MMC_DATA_READ)
605		direction = DMA_FROM_DEVICE;
606	else
607		direction = DMA_TO_DEVICE;
608
609	desc = chan->device->device_prep_slave_sg(chan,
610			data->sg, data->sg_len, direction,
611			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
612	if (!desc)
613		return -ENOMEM;
614
615	host->dma.data_desc = desc;
616	desc->callback = atmci_dma_complete;
617	desc->callback_param = host;
618	desc->tx_submit(desc);
619
620	/* Go! */
621	chan->device->device_issue_pending(chan);
622
623	return 0;
624}
625
626#else /* CONFIG_MMC_ATMELMCI_DMA */
627
628static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
629{
630	return -ENOSYS;
631}
632
633static void atmci_stop_dma(struct atmel_mci *host)
634{
635	/* Data transfer was stopped by the interrupt handler */
636	atmci_set_pending(host, EVENT_XFER_COMPLETE);
637	mci_writel(host, IER, MCI_NOTBUSY);
638}
639
640#endif /* CONFIG_MMC_ATMELMCI_DMA */
641
642/*
643 * Returns a mask of interrupt flags to be enabled after the whole
644 * request has been prepared.
645 */
646static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
647{
648	u32 iflags;
649
650	data->error = -EINPROGRESS;
651
652	WARN_ON(host->data);
653	host->sg = NULL;
654	host->data = data;
655
656	iflags = ATMCI_DATA_ERROR_FLAGS;
657	if (atmci_submit_data_dma(host, data)) {
658		host->data_chan = NULL;
659
660		/*
661		 * Errata: MMC data write operation with less than 12
662		 * bytes is impossible.
663		 *
664		 * Errata: MCI Transmit Data Register (TDR) FIFO
665		 * corruption when length is not multiple of 4.
666		 */
667		if (data->blocks * data->blksz < 12
668				|| (data->blocks * data->blksz) & 3)
669			host->need_reset = true;
670
671		host->sg = data->sg;
672		host->pio_offset = 0;
673		if (data->flags & MMC_DATA_READ)
674			iflags |= MCI_RXRDY;
675		else
676			iflags |= MCI_TXRDY;
677	}
678
679	return iflags;
680}
681
682static void atmci_start_request(struct atmel_mci *host,
683		struct atmel_mci_slot *slot)
684{
685	struct mmc_request	*mrq;
686	struct mmc_command	*cmd;
687	struct mmc_data		*data;
688	u32			iflags;
689	u32			cmdflags;
690
691	mrq = slot->mrq;
692	host->cur_slot = slot;
693	host->mrq = mrq;
694
695	host->pending_events = 0;
696	host->completed_events = 0;
697	host->data_status = 0;
698
699	if (host->need_reset) {
700		mci_writel(host, CR, MCI_CR_SWRST);
701		mci_writel(host, CR, MCI_CR_MCIEN);
702		mci_writel(host, MR, host->mode_reg);
703		host->need_reset = false;
704	}
705	mci_writel(host, SDCR, slot->sdc_reg);
706
707	iflags = mci_readl(host, IMR);
708	if (iflags)
709		dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
710				iflags);
711
712	if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
713		/* Send init sequence (74 clock cycles) */
714		mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
715		while (!(mci_readl(host, SR) & MCI_CMDRDY))
716			cpu_relax();
717	}
718	data = mrq->data;
719	if (data) {
720		atmci_set_timeout(host, slot, data);
721
722		/* Must set block count/size before sending command */
723		mci_writel(host, BLKR, MCI_BCNT(data->blocks)
724				| MCI_BLKLEN(data->blksz));
725		dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
726			MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
727	}
728
729	iflags = MCI_CMDRDY;
730	cmd = mrq->cmd;
731	cmdflags = atmci_prepare_command(slot->mmc, cmd);
732	atmci_start_command(host, cmd, cmdflags);
733
734	if (data)
735		iflags |= atmci_submit_data(host, data);
736
737	if (mrq->stop) {
738		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
739		host->stop_cmdr |= MCI_CMDR_STOP_XFER;
740		if (!(data->flags & MMC_DATA_WRITE))
741			host->stop_cmdr |= MCI_CMDR_TRDIR_READ;
742		if (data->flags & MMC_DATA_STREAM)
743			host->stop_cmdr |= MCI_CMDR_STREAM;
744		else
745			host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK;
746	}
747
748	/*
749	 * We could have enabled interrupts earlier, but I suspect
750	 * that would open up a nice can of interesting race
751	 * conditions (e.g. command and data complete, but stop not
752	 * prepared yet.)
753	 */
754	mci_writel(host, IER, iflags);
755}
756
757static void atmci_queue_request(struct atmel_mci *host,
758		struct atmel_mci_slot *slot, struct mmc_request *mrq)
759{
760	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
761			host->state);
762
763	spin_lock_bh(&host->lock);
764	slot->mrq = mrq;
765	if (host->state == STATE_IDLE) {
766		host->state = STATE_SENDING_CMD;
767		atmci_start_request(host, slot);
768	} else {
769		list_add_tail(&slot->queue_node, &host->queue);
770	}
771	spin_unlock_bh(&host->lock);
772}
773
774static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
775{
776	struct atmel_mci_slot	*slot = mmc_priv(mmc);
777	struct atmel_mci	*host = slot->host;
778	struct mmc_data		*data;
779
780	WARN_ON(slot->mrq);
781
782	/*
783	 * We may "know" the card is gone even though there's still an
784	 * electrical connection. If so, we really need to communicate
785	 * this to the MMC core since there won't be any more
786	 * interrupts as the card is completely removed. Otherwise,
787	 * the MMC core might believe the card is still there even
788	 * though the card was just removed very slowly.
789	 */
790	if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
791		mrq->cmd->error = -ENOMEDIUM;
792		mmc_request_done(mmc, mrq);
793		return;
794	}
795
796	/* We don't support multiple blocks of weird lengths. */
797	data = mrq->data;
798	if (data && data->blocks > 1 && data->blksz & 3) {
799		mrq->cmd->error = -EINVAL;
800		mmc_request_done(mmc, mrq);
		return;
801	}
802
803	atmci_queue_request(host, slot, mrq);
804}
805
806static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
807{
808	struct atmel_mci_slot	*slot = mmc_priv(mmc);
809	struct atmel_mci	*host = slot->host;
810	unsigned int		i;
811
812	slot->sdc_reg &= ~MCI_SDCBUS_MASK;
813	switch (ios->bus_width) {
814	case MMC_BUS_WIDTH_1:
815		slot->sdc_reg |= MCI_SDCBUS_1BIT;
816		break;
817	case MMC_BUS_WIDTH_4:
818		slot->sdc_reg |= MCI_SDCBUS_4BIT;
819		break;
820	}
821
822	if (ios->clock) {
823		unsigned int clock_min = ~0U;
824		u32 clkdiv;
825
826		spin_lock_bh(&host->lock);
827		if (!host->mode_reg) {
828			clk_enable(host->mck);
829			mci_writel(host, CR, MCI_CR_SWRST);
830			mci_writel(host, CR, MCI_CR_MCIEN);
831		}
832
833		/*
834		 * Use mirror of ios->clock to prevent race with mmc
835		 * core ios update when finding the minimum.
836		 */
837		slot->clock = ios->clock;
838		for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
839			if (host->slot[i] && host->slot[i]->clock
840					&& host->slot[i]->clock < clock_min)
841				clock_min = host->slot[i]->clock;
842		}
843
844	/* Calculate clock divider: CLK = MCK / (2 * (CLKDIV + 1)) */
845		clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
846		if (clkdiv > 255) {
847			dev_warn(&mmc->class_dev,
848				"clock %u too slow; using %lu\n",
849				clock_min, host->bus_hz / (2 * 256));
850			clkdiv = 255;
851		}
852
853		/*
854		 * WRPROOF and RDPROOF prevent overruns/underruns by
855		 * stopping the clock when the FIFO is full/empty.
856		 * This state is not expected to last for long.
857		 */
858		host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF
859					| MCI_MR_RDPROOF;
860
861		if (list_empty(&host->queue))
862			mci_writel(host, MR, host->mode_reg);
863		else
864			host->need_clock_update = true;
865
866		spin_unlock_bh(&host->lock);
867	} else {
868		bool any_slot_active = false;
869
870		spin_lock_bh(&host->lock);
871		slot->clock = 0;
872		for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
873			if (host->slot[i] && host->slot[i]->clock) {
874				any_slot_active = true;
875				break;
876			}
877		}
878		if (!any_slot_active) {
879			mci_writel(host, CR, MCI_CR_MCIDIS);
880			if (host->mode_reg) {
881				mci_readl(host, MR);
882				clk_disable(host->mck);
883			}
884			host->mode_reg = 0;
885		}
886		spin_unlock_bh(&host->lock);
887	}
888
889	switch (ios->power_mode) {
890	case MMC_POWER_UP:
891		set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
892		break;
893	default:
894		/*
895		 * TODO: None of the currently available AVR32-based
896		 * boards allow MMC power to be turned off. Implement
897		 * power control when this can be tested properly.
898		 *
899		 * We also need to hook this into the clock management
900		 * somehow so that newly inserted cards aren't
901		 * subjected to a fast clock before we have a chance
902		 * to figure out what the maximum rate is. Currently,
903		 * there's no way to avoid this, and there never will
904		 * be for boards that don't support power control.
905		 */
906		break;
907	}
908}
909
910static int atmci_get_ro(struct mmc_host *mmc)
911{
912	int			read_only = -ENOSYS;
913	struct atmel_mci_slot	*slot = mmc_priv(mmc);
914
915	if (gpio_is_valid(slot->wp_pin)) {
916		read_only = gpio_get_value(slot->wp_pin);
917		dev_dbg(&mmc->class_dev, "card is %s\n",
918				read_only ? "read-only" : "read-write");
919	}
920
921	return read_only;
922}
923
924static int atmci_get_cd(struct mmc_host *mmc)
925{
926	int			present = -ENOSYS;
927	struct atmel_mci_slot	*slot = mmc_priv(mmc);
928
929	if (gpio_is_valid(slot->detect_pin)) {
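		/* The detect line is active low: 0 means a card is present */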
930		present = !gpio_get_value(slot->detect_pin);
931		dev_dbg(&mmc->class_dev, "card is %spresent\n",
932				present ? "" : "not ");
933	}
934
935	return present;
936}
937
938static const struct mmc_host_ops atmci_ops = {
939	.request	= atmci_request,
940	.set_ios	= atmci_set_ios,
941	.get_ro		= atmci_get_ro,
942	.get_cd		= atmci_get_cd,
943};
944
945/* Called with host->lock held */
946static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
947	__releases(&host->lock)
948	__acquires(&host->lock)
949{
950	struct atmel_mci_slot	*slot = NULL;
951	struct mmc_host		*prev_mmc = host->cur_slot->mmc;
952
953	WARN_ON(host->cmd || host->data);
954
955	/*
956	 * Update the MMC clock rate if necessary. This may be
957	 * necessary if set_ios() is called when a different slot is
958	 * busy transferring data.
959	 */
960	if (host->need_clock_update)
961		mci_writel(host, MR, host->mode_reg);
962
963	host->cur_slot->mrq = NULL;
964	host->mrq = NULL;
965	if (!list_empty(&host->queue)) {
966		slot = list_entry(host->queue.next,
967				struct atmel_mci_slot, queue_node);
968		list_del(&slot->queue_node);
969		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
970				mmc_hostname(slot->mmc));
971		host->state = STATE_SENDING_CMD;
972		atmci_start_request(host, slot);
973	} else {
974		dev_vdbg(&host->pdev->dev, "list empty\n");
975		host->state = STATE_IDLE;
976	}
977
978	spin_unlock(&host->lock);
979	mmc_request_done(prev_mmc, mrq);
980	spin_lock(&host->lock);
981}
982
983static void atmci_command_complete(struct atmel_mci *host,
984			struct mmc_command *cmd)
985{
986	u32		status = host->cmd_status;
987
988	/* Read the response from the card (up to 16 bytes) */
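	/* Each read of RSPR returns the next word of the response */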
989	cmd->resp[0] = mci_readl(host, RSPR);
990	cmd->resp[1] = mci_readl(host, RSPR);
991	cmd->resp[2] = mci_readl(host, RSPR);
992	cmd->resp[3] = mci_readl(host, RSPR);
993
994	if (status & MCI_RTOE)
995		cmd->error = -ETIMEDOUT;
996	else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE))
997		cmd->error = -EILSEQ;
998	else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE))
999		cmd->error = -EIO;
1000	else
1001		cmd->error = 0;
1002
1003	if (cmd->error) {
1004		dev_dbg(&host->pdev->dev,
1005			"command error: status=0x%08x\n", status);
1006
1007		if (cmd->data) {
1008			host->data = NULL;
1009			atmci_stop_dma(host);
1010			mci_writel(host, IDR, MCI_NOTBUSY
1011					| MCI_TXRDY | MCI_RXRDY
1012					| ATMCI_DATA_ERROR_FLAGS);
1013		}
1014	}
1015}
1016
1017static void atmci_detect_change(unsigned long data)
1018{
1019	struct atmel_mci_slot	*slot = (struct atmel_mci_slot *)data;
1020	bool			present;
1021	bool			present_old;
1022
1023	/*
1024	 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
1025	 * freeing the interrupt. We must not re-enable the interrupt
1026	 * if it has been freed, and if we're shutting down, it
1027	 * doesn't really matter whether the card is present or not.
1028	 */
1029	smp_rmb();
1030	if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1031		return;
1032
1033	enable_irq(gpio_to_irq(slot->detect_pin));
1034	present = !gpio_get_value(slot->detect_pin);
1035	present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1036
1037	dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1038			present, present_old);
1039
1040	if (present != present_old) {
1041		struct atmel_mci	*host = slot->host;
1042		struct mmc_request	*mrq;
1043
1044		dev_dbg(&slot->mmc->class_dev, "card %s\n",
1045			present ? "inserted" : "removed");
1046
1047		spin_lock(&host->lock);
1048
1049		if (!present)
1050			clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1051		else
1052			set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1053
1054		/* Clean up queue if present */
1055		mrq = slot->mrq;
1056		if (mrq) {
1057			if (mrq == host->mrq) {
1058				/*
1059				 * Reset controller to terminate any ongoing
1060				 * commands or data transfers.
1061				 */
1062				mci_writel(host, CR, MCI_CR_SWRST);
1063				mci_writel(host, CR, MCI_CR_MCIEN);
1064				mci_writel(host, MR, host->mode_reg);
1065
1066				host->data = NULL;
1067				host->cmd = NULL;
1068
1069				switch (host->state) {
1070				case STATE_IDLE:
1071					break;
1072				case STATE_SENDING_CMD:
1073					mrq->cmd->error = -ENOMEDIUM;
1074					if (!mrq->data)
1075						break;
1076					/* fall through */
1077				case STATE_SENDING_DATA:
1078					mrq->data->error = -ENOMEDIUM;
1079					atmci_stop_dma(host);
1080					break;
1081				case STATE_DATA_BUSY:
1082				case STATE_DATA_ERROR:
1083					if (mrq->data->error == -EINPROGRESS)
1084						mrq->data->error = -ENOMEDIUM;
1085					if (!mrq->stop)
1086						break;
1087					/* fall through */
1088				case STATE_SENDING_STOP:
1089					mrq->stop->error = -ENOMEDIUM;
1090					break;
1091				}
1092
1093				atmci_request_end(host, mrq);
1094			} else {
1095				list_del(&slot->queue_node);
1096				mrq->cmd->error = -ENOMEDIUM;
1097				if (mrq->data)
1098					mrq->data->error = -ENOMEDIUM;
1099				if (mrq->stop)
1100					mrq->stop->error = -ENOMEDIUM;
1101
1102				spin_unlock(&host->lock);
1103				mmc_request_done(slot->mmc, mrq);
1104				spin_lock(&host->lock);
1105			}
1106		}
1107		spin_unlock(&host->lock);
1108
1109		mmc_detect_change(slot->mmc, 0);
1110	}
1111}
1112
1113static void atmci_tasklet_func(unsigned long priv)
1114{
1115	struct atmel_mci	*host = (struct atmel_mci *)priv;
1116	struct mmc_request	*mrq = host->mrq;
1117	struct mmc_data		*data = host->data;
1118	struct mmc_command	*cmd = host->cmd;
1119	enum atmel_mci_state	state = host->state;
1120	enum atmel_mci_state	prev_state;
1121	u32			status;
1122
1123	spin_lock(&host->lock);
1124
1125	state = host->state;
1126
1127	dev_vdbg(&host->pdev->dev,
1128		"tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1129		state, host->pending_events, host->completed_events,
1130		mci_readl(host, IMR));
1131
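	/*
	 * Run the state machine until it stops making progress; each
	 * state consumes the pending event that moves it on to the next.
	 */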
1132	do {
1133		prev_state = state;
1134
1135		switch (state) {
1136		case STATE_IDLE:
1137			break;
1138
1139		case STATE_SENDING_CMD:
1140			if (!atmci_test_and_clear_pending(host,
1141						EVENT_CMD_COMPLETE))
1142				break;
1143
1144			host->cmd = NULL;
1145			atmci_set_completed(host, EVENT_CMD_COMPLETE);
1146			atmci_command_complete(host, mrq->cmd);
1147			if (!mrq->data || cmd->error) {
1148				atmci_request_end(host, host->mrq);
1149				goto unlock;
1150			}
1151
1152			prev_state = state = STATE_SENDING_DATA;
1153			/* fall through */
1154
1155		case STATE_SENDING_DATA:
1156			if (atmci_test_and_clear_pending(host,
1157						EVENT_DATA_ERROR)) {
1158				atmci_stop_dma(host);
1159				if (data->stop)
1160					send_stop_cmd(host, data);
1161				state = STATE_DATA_ERROR;
1162				break;
1163			}
1164
1165			if (!atmci_test_and_clear_pending(host,
1166						EVENT_XFER_COMPLETE))
1167				break;
1168
1169			atmci_set_completed(host, EVENT_XFER_COMPLETE);
1170			prev_state = state = STATE_DATA_BUSY;
1171			/* fall through */
1172
1173		case STATE_DATA_BUSY:
1174			if (!atmci_test_and_clear_pending(host,
1175						EVENT_DATA_COMPLETE))
1176				break;
1177
1178			host->data = NULL;
1179			atmci_set_completed(host, EVENT_DATA_COMPLETE);
1180			status = host->data_status;
1181			if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
1182				if (status & MCI_DTOE) {
1183					dev_dbg(&host->pdev->dev,
1184							"data timeout error\n");
1185					data->error = -ETIMEDOUT;
1186				} else if (status & MCI_DCRCE) {
1187					dev_dbg(&host->pdev->dev,
1188							"data CRC error\n");
1189					data->error = -EILSEQ;
1190				} else {
1191					dev_dbg(&host->pdev->dev,
1192						"data FIFO error (status=%08x)\n",
1193						status);
1194					data->error = -EIO;
1195				}
1196			} else {
1197				data->bytes_xfered = data->blocks * data->blksz;
1198				data->error = 0;
1199			}
1200
1201			if (!data->stop) {
1202				atmci_request_end(host, host->mrq);
1203				goto unlock;
1204			}
1205
1206			prev_state = state = STATE_SENDING_STOP;
1207			if (!data->error)
1208				send_stop_cmd(host, data);
1209			/* fall through */
1210
1211		case STATE_SENDING_STOP:
1212			if (!atmci_test_and_clear_pending(host,
1213						EVENT_CMD_COMPLETE))
1214				break;
1215
1216			host->cmd = NULL;
1217			atmci_command_complete(host, mrq->stop);
1218			atmci_request_end(host, host->mrq);
1219			goto unlock;
1220
1221		case STATE_DATA_ERROR:
1222			if (!atmci_test_and_clear_pending(host,
1223						EVENT_XFER_COMPLETE))
1224				break;
1225
1226			state = STATE_DATA_BUSY;
1227			break;
1228		}
1229	} while (state != prev_state);
1230
1231	host->state = state;
1232
1233unlock:
1234	spin_unlock(&host->lock);
1235}
1236
1237static void atmci_read_data_pio(struct atmel_mci *host)
1238{
1239	struct scatterlist	*sg = host->sg;
1240	void			*buf = sg_virt(sg);
1241	unsigned int		offset = host->pio_offset;
1242	struct mmc_data		*data = host->data;
1243	u32			value;
1244	u32			status;
1245	unsigned int		nbytes = 0;
1246
1247	do {
1248		value = mci_readl(host, RDR);
1249		if (likely(offset + 4 <= sg->length)) {
1250			put_unaligned(value, (u32 *)(buf + offset));
1251
1252			offset += 4;
1253			nbytes += 4;
1254
1255			if (offset == sg->length) {
1256				flush_dcache_page(sg_page(sg));
1257				host->sg = sg = sg_next(sg);
1258				if (!sg)
1259					goto done;
1260
1261				offset = 0;
1262				buf = sg_virt(sg);
1263			}
1264		} else {
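			/*
			 * The 32-bit word straddles two sg entries: store
			 * the tail of this entry, then the head of the next.
			 */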
1265			unsigned int remaining = sg->length - offset;
1266			memcpy(buf + offset, &value, remaining);
1267			nbytes += remaining;
1268
1269			flush_dcache_page(sg_page(sg));
1270			host->sg = sg = sg_next(sg);
1271			if (!sg)
1272				goto done;
1273
1274			offset = 4 - remaining;
1275			buf = sg_virt(sg);
1276			memcpy(buf, (u8 *)&value + remaining, offset);
1277			nbytes += offset;
1278		}
1279
1280		status = mci_readl(host, SR);
1281		if (status & ATMCI_DATA_ERROR_FLAGS) {
1282			mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY
1283						| ATMCI_DATA_ERROR_FLAGS));
1284			host->data_status = status;
1285			data->bytes_xfered += nbytes;
1286			smp_wmb();
1287			atmci_set_pending(host, EVENT_DATA_ERROR);
1288			tasklet_schedule(&host->tasklet);
1289			return;
1290		}
1291	} while (status & MCI_RXRDY);
1292
1293	host->pio_offset = offset;
1294	data->bytes_xfered += nbytes;
1295
1296	return;
1297
1298done:
1299	mci_writel(host, IDR, MCI_RXRDY);
1300	mci_writel(host, IER, MCI_NOTBUSY);
1301	data->bytes_xfered += nbytes;
1302	smp_wmb();
1303	atmci_set_pending(host, EVENT_XFER_COMPLETE);
1304}
1305
1306static void atmci_write_data_pio(struct atmel_mci *host)
1307{
1308	struct scatterlist	*sg = host->sg;
1309	void			*buf = sg_virt(sg);
1310	unsigned int		offset = host->pio_offset;
1311	struct mmc_data		*data = host->data;
1312	u32			value;
1313	u32			status;
1314	unsigned int		nbytes = 0;
1315
1316	do {
1317		if (likely(offset + 4 <= sg->length)) {
1318			value = get_unaligned((u32 *)(buf + offset));
1319			mci_writel(host, TDR, value);
1320
1321			offset += 4;
1322			nbytes += 4;
1323			if (offset == sg->length) {
1324				host->sg = sg = sg_next(sg);
1325				if (!sg)
1326					goto done;
1327
1328				offset = 0;
1329				buf = sg_virt(sg);
1330			}
1331		} else {
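			/*
			 * The 32-bit word straddles two sg entries: gather
			 * the tail of this entry and the head of the next
			 * before writing it out.
			 */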
1332			unsigned int remaining = sg->length - offset;
1333
1334			value = 0;
1335			memcpy(&value, buf + offset, remaining);
1336			nbytes += remaining;
1337
1338			host->sg = sg = sg_next(sg);
1339			if (!sg) {
1340				mci_writel(host, TDR, value);
1341				goto done;
1342			}
1343
1344			offset = 4 - remaining;
1345			buf = sg_virt(sg);
1346			memcpy((u8 *)&value + remaining, buf, offset);
1347			mci_writel(host, TDR, value);
1348			nbytes += offset;
1349		}
1350
1351		status = mci_readl(host, SR);
1352		if (status & ATMCI_DATA_ERROR_FLAGS) {
1353			mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY
1354						| ATMCI_DATA_ERROR_FLAGS));
1355			host->data_status = status;
1356			data->bytes_xfered += nbytes;
1357			smp_wmb();
1358			atmci_set_pending(host, EVENT_DATA_ERROR);
1359			tasklet_schedule(&host->tasklet);
1360			return;
1361		}
1362	} while (status & MCI_TXRDY);
1363
1364	host->pio_offset = offset;
1365	data->bytes_xfered += nbytes;
1366
1367	return;
1368
1369done:
1370	mci_writel(host, IDR, MCI_TXRDY);
1371	mci_writel(host, IER, MCI_NOTBUSY);
1372	data->bytes_xfered += nbytes;
1373	smp_wmb();
1374	atmci_set_pending(host, EVENT_XFER_COMPLETE);
1375}
1376
1377static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
1378{
1379	mci_writel(host, IDR, MCI_CMDRDY);
1380
1381	host->cmd_status = status;
1382	smp_wmb();
1383	atmci_set_pending(host, EVENT_CMD_COMPLETE);
1384	tasklet_schedule(&host->tasklet);
1385}
1386
1387static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1388{
1389	struct atmel_mci	*host = dev_id;
1390	u32			status, mask, pending;
1391	unsigned int		pass_count = 0;
1392
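	/*
	 * Handle whatever is pending, re-reading the status in case new
	 * events arrive while we're here, but give up after a bounded
	 * number of passes so we can't get stuck in the handler.
	 */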
1393	do {
1394		status = mci_readl(host, SR);
1395		mask = mci_readl(host, IMR);
1396		pending = status & mask;
1397		if (!pending)
1398			break;
1399
1400		if (pending & ATMCI_DATA_ERROR_FLAGS) {
1401			mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS
1402					| MCI_RXRDY | MCI_TXRDY);
1403			pending &= mci_readl(host, IMR);
1404
1405			host->data_status = status;
1406			smp_wmb();
1407			atmci_set_pending(host, EVENT_DATA_ERROR);
1408			tasklet_schedule(&host->tasklet);
1409		}
1410		if (pending & MCI_NOTBUSY) {
1411			mci_writel(host, IDR,
1412					ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY);
1413			if (!host->data_status)
1414				host->data_status = status;
1415			smp_wmb();
1416			atmci_set_pending(host, EVENT_DATA_COMPLETE);
1417			tasklet_schedule(&host->tasklet);
1418		}
1419		if (pending & MCI_RXRDY)
1420			atmci_read_data_pio(host);
1421		if (pending & MCI_TXRDY)
1422			atmci_write_data_pio(host);
1423
1424		if (pending & MCI_CMDRDY)
1425			atmci_cmd_interrupt(host, status);
1426	} while (pass_count++ < 5);
1427
1428	return pass_count ? IRQ_HANDLED : IRQ_NONE;
1429}
1430
1431static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
1432{
1433	struct atmel_mci_slot	*slot = dev_id;
1434
1435	/*
1436	 * Disable interrupts until the pin has stabilized and check
1437	 * the state then. Use mod_timer() since we may be in the
1438	 * middle of the timer routine when this interrupt triggers.
1439	 */
1440	disable_irq_nosync(irq);
1441	mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
1442
1443	return IRQ_HANDLED;
1444}
1445
1446#ifdef CONFIG_MMC_ATMELMCI_DMA
1447
1448static inline struct atmel_mci *
1449dma_client_to_atmel_mci(struct dma_client *client)
1450{
1451	return container_of(client, struct atmel_mci, dma.client);
1452}
1453
1454static enum dma_state_client atmci_dma_event(struct dma_client *client,
1455		struct dma_chan *chan, enum dma_state state)
1456{
1457	struct atmel_mci	*host;
1458	enum dma_state_client	ret = DMA_NAK;
1459
1460	host = dma_client_to_atmel_mci(client);
1461
1462	switch (state) {
1463	case DMA_RESOURCE_AVAILABLE:
1464		spin_lock_bh(&host->lock);
1465		if (!host->dma.chan) {
1466			host->dma.chan = chan;
1467			ret = DMA_ACK;
1468		}
1469		spin_unlock_bh(&host->lock);
1470
1471		if (ret == DMA_ACK)
1472			dev_info(&host->pdev->dev,
1473					"Using %s for DMA transfers\n",
1474					chan->dev.bus_id);
1475		break;
1476
1477	case DMA_RESOURCE_REMOVED:
1478		spin_lock_bh(&host->lock);
1479		if (host->dma.chan == chan) {
1480			host->dma.chan = NULL;
1481			ret = DMA_ACK;
1482		}
1483		spin_unlock_bh(&host->lock);
1484
1485		if (ret == DMA_ACK)
1486			dev_info(&host->pdev->dev,
1487					"Lost %s, falling back to PIO\n",
1488					chan->dev.bus_id);
1489		break;
1490
1491	default:
1492		break;
1493	}
1494
1495
1496	return ret;
1497}
1498#endif /* CONFIG_MMC_ATMELMCI_DMA */
1499
1500static int __init atmci_init_slot(struct atmel_mci *host,
1501		struct mci_slot_pdata *slot_data, unsigned int id,
1502		u32 sdc_reg)
1503{
1504	struct mmc_host			*mmc;
1505	struct atmel_mci_slot		*slot;
1506
1507	mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
1508	if (!mmc)
1509		return -ENOMEM;
1510
1511	slot = mmc_priv(mmc);
1512	slot->mmc = mmc;
1513	slot->host = host;
1514	slot->detect_pin = slot_data->detect_pin;
1515	slot->wp_pin = slot_data->wp_pin;
1516	slot->sdc_reg = sdc_reg;
1517
1518	mmc->ops = &atmci_ops;
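	/* With CLKDIV limited to 255, the slowest clock we can generate is bus_hz / 512 */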
1519	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1520	mmc->f_max = host->bus_hz / 2;
1521	mmc->ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34;
1522	if (slot_data->bus_width >= 4)
1523		mmc->caps |= MMC_CAP_4_BIT_DATA;
1524
1525	mmc->max_hw_segs = 64;
1526	mmc->max_phys_segs = 64;
1527	mmc->max_req_size = 32768 * 512;
1528	mmc->max_blk_size = 32768;
1529	mmc->max_blk_count = 512;
1530
1531	/* Assume card is present initially */
1532	set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1533	if (gpio_is_valid(slot->detect_pin)) {
1534		if (gpio_request(slot->detect_pin, "mmc_detect")) {
1535			dev_dbg(&mmc->class_dev, "no detect pin available\n");
1536			slot->detect_pin = -EBUSY;
1537		} else if (gpio_get_value(slot->detect_pin)) {
1538			clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1539		}
1540	}
1541
1542	if (!gpio_is_valid(slot->detect_pin))
1543		mmc->caps |= MMC_CAP_NEEDS_POLL;
1544
1545	if (gpio_is_valid(slot->wp_pin)) {
1546		if (gpio_request(slot->wp_pin, "mmc_wp")) {
1547			dev_dbg(&mmc->class_dev, "no WP pin available\n");
1548			slot->wp_pin = -EBUSY;
1549		}
1550	}
1551
1552	host->slot[id] = slot;
1553	mmc_add_host(mmc);
1554
1555	if (gpio_is_valid(slot->detect_pin)) {
1556		int ret;
1557
1558		setup_timer(&slot->detect_timer, atmci_detect_change,
1559				(unsigned long)slot);
1560
1561		ret = request_irq(gpio_to_irq(slot->detect_pin),
1562				atmci_detect_interrupt,
1563				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
1564				"mmc-detect", slot);
1565		if (ret) {
1566			dev_dbg(&mmc->class_dev,
1567				"could not request IRQ %d for detect pin\n",
1568				gpio_to_irq(slot->detect_pin));
1569			gpio_free(slot->detect_pin);
1570			slot->detect_pin = -EBUSY;
1571		}
1572	}
1573
1574	atmci_init_debugfs(slot);
1575
1576	return 0;
1577}
1578
1579static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
1580		unsigned int id)
1581{
1582	/* Debugfs stuff is cleaned up by mmc core */
1583
1584	set_bit(ATMCI_SHUTDOWN, &slot->flags);
1585	smp_wmb();
1586
1587	mmc_remove_host(slot->mmc);
1588
1589	if (gpio_is_valid(slot->detect_pin)) {
1590		int pin = slot->detect_pin;
1591
1592		free_irq(gpio_to_irq(pin), slot);
1593		del_timer_sync(&slot->detect_timer);
1594		gpio_free(pin);
1595	}
1596	if (gpio_is_valid(slot->wp_pin))
1597		gpio_free(slot->wp_pin);
1598
1599	slot->host->slot[id] = NULL;
1600	mmc_free_host(slot->mmc);
1601}
1602
1603static int __init atmci_probe(struct platform_device *pdev)
1604{
1605	struct mci_platform_data	*pdata;
1606	struct atmel_mci		*host;
1607	struct resource			*regs;
1608	unsigned int			nr_slots;
1609	int				irq;
1610	int				ret;
1611
1612	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1613	if (!regs)
1614		return -ENXIO;
1615	pdata = pdev->dev.platform_data;
1616	if (!pdata)
1617		return -ENXIO;
1618	irq = platform_get_irq(pdev, 0);
1619	if (irq < 0)
1620		return irq;
1621
1622	host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
1623	if (!host)
1624		return -ENOMEM;
1625
1626	host->pdev = pdev;
1627	spin_lock_init(&host->lock);
1628	INIT_LIST_HEAD(&host->queue);
1629
1630	host->mck = clk_get(&pdev->dev, "mci_clk");
1631	if (IS_ERR(host->mck)) {
1632		ret = PTR_ERR(host->mck);
1633		goto err_clk_get;
1634	}
1635
1636	ret = -ENOMEM;
1637	host->regs = ioremap(regs->start, regs->end - regs->start + 1);
1638	if (!host->regs)
1639		goto err_ioremap;
1640
1641	clk_enable(host->mck);
1642	mci_writel(host, CR, MCI_CR_SWRST);
1643	host->bus_hz = clk_get_rate(host->mck);
1644	clk_disable(host->mck);
1645
1646	host->mapbase = regs->start;
1647
1648	tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
1649
1650	ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, host);
1651	if (ret)
1652		goto err_request_irq;
1653
1654#ifdef CONFIG_MMC_ATMELMCI_DMA
1655	if (pdata->dma_slave) {
1656		struct dma_slave *slave = pdata->dma_slave;
1657
1658		slave->tx_reg = regs->start + MCI_TDR;
1659		slave->rx_reg = regs->start + MCI_RDR;
1660
1661		/* Try to grab a DMA channel */
1662		host->dma.client.event_callback = atmci_dma_event;
1663		dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask);
1664		host->dma.client.slave = slave;
1665
1666		dma_async_client_register(&host->dma.client);
1667		dma_async_client_chan_request(&host->dma.client);
1668	} else {
1669		dev_notice(&pdev->dev, "DMA not available, using PIO\n");
1670	}
1671#endif /* CONFIG_MMC_ATMELMCI_DMA */
1672
1673	platform_set_drvdata(pdev, host);
1674
1675	/* We need at least one slot to succeed */
1676	nr_slots = 0;
1677	ret = -ENODEV;
1678	if (pdata->slot[0].bus_width) {
1679		ret = atmci_init_slot(host, &pdata->slot[0],
1680				0, MCI_SDCSEL_SLOT_A);
1681		if (!ret)
1682			nr_slots++;
1683	}
1684	if (pdata->slot[1].bus_width) {
1685		ret = atmci_init_slot(host, &pdata->slot[1],
1686				1, MCI_SDCSEL_SLOT_B);
1687		if (!ret)
1688			nr_slots++;
1689	}
1690
1691	if (!nr_slots)
1692		goto err_init_slot;
1693
1694	dev_info(&pdev->dev,
1695			"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
1696			host->mapbase, irq, nr_slots);
1697
1698	return 0;
1699
1700err_init_slot:
1701#ifdef CONFIG_MMC_ATMELMCI_DMA
1702	if (pdata->dma_slave)
1703		dma_async_client_unregister(&host->dma.client);
1704#endif
1705	free_irq(irq, host);
1706err_request_irq:
1707	iounmap(host->regs);
1708err_ioremap:
1709	clk_put(host->mck);
1710err_clk_get:
1711	kfree(host);
1712	return ret;
1713}
1714
1715static int __exit atmci_remove(struct platform_device *pdev)
1716{
1717	struct atmel_mci	*host = platform_get_drvdata(pdev);
1718	unsigned int		i;
1719
1720	platform_set_drvdata(pdev, NULL);
1721
1722	for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1723		if (host->slot[i])
1724			atmci_cleanup_slot(host->slot[i], i);
1725	}
1726
1727	clk_enable(host->mck);
1728	mci_writel(host, IDR, ~0UL);
1729	mci_writel(host, CR, MCI_CR_MCIDIS);
1730	mci_readl(host, SR);
1731	clk_disable(host->mck);
1732
1733#ifdef CONFIG_MMC_ATMELMCI_DMA
1734	if (host->dma.client.slave)
1735		dma_async_client_unregister(&host->dma.client);
1736#endif
1737
1738	free_irq(platform_get_irq(pdev, 0), host);
1739	iounmap(host->regs);
1740
1741	clk_put(host->mck);
1742	kfree(host);
1743
1744	return 0;
1745}
1746
1747static struct platform_driver atmci_driver = {
1748	.remove		= __exit_p(atmci_remove),
1749	.driver		= {
1750		.name		= "atmel_mci",
1751	},
1752};
1753
1754static int __init atmci_init(void)
1755{
1756	return platform_driver_probe(&atmci_driver, atmci_probe);
1757}
1758
1759static void __exit atmci_exit(void)
1760{
1761	platform_driver_unregister(&atmci_driver);
1762}
1763
1764module_init(atmci_init);
1765module_exit(atmci_exit);
1766
1767MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
1768MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
1769MODULE_LICENSE("GPL v2");
1770