/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems).
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more).  See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(private) ({				\
		struct dw_dma_slave *__slave = (private);	\
		int dms = __slave ? __slave->dst_master : 0;	\
		int sms = __slave ? __slave->src_master : 1;	\
		u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
		u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
								\
		(DWC_CTLL_DST_MSIZE(dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(dms)				\
		 | DWC_CTLL_SMS(sms));				\
	})

/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool.  These
 * descriptors -- and associated data -- are cacheable.  We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

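/* Return the descriptor at the head of the channel's active list */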
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

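/*
 * Grab a descriptor from the channel's free list. Only descriptors that
 * the client has already ACKed may be reused; returns NULL if none is
 * available.
 */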
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

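/*
 * Hand ownership of the hardware LLIs of a descriptor chain back to the
 * CPU before the driver reads or modifies them.
 */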
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

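/*
 * Program the channel CFG registers (from the slave configuration, if
 * any) and unmask its interrupts. Done once per channel until its
 * resources are freed again.
 */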
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

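/*
 * Finish off a descriptor: record its cookie as completed, return it and
 * any children to the free list, unmap the buffers for non-slave
 * transfers (unless the client asked us not to) and, if requested,
 * invoke the client callback.
 */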
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->completed = txd->cookie;
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}

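/*
 * Called when the controller reports the channel idle: hand every
 * descriptor on the active list back to the client, then start the next
 * queued transfer, if any.
 */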
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

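/*
 * Work out how far the controller has progressed (from the LLP register)
 * and complete every descriptor it has already finished with.
 */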
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Check the first descriptor's address */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check the first descriptor's LLP */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

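/*
 * Handle a per-channel error interrupt: complain loudly about the
 * offending descriptor, drop it from the active list, restart the channel
 * with the next descriptor and complete the bad one as if it succeeded.
 */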
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

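/*
 * Bottom half: inspect the raw status registers, dispatch each channel to
 * cyclic, error or normal descriptor handling, then re-enable the
 * interrupts that were masked in the hard IRQ handler.
 */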
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

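/*
 * Hard interrupt handler: mask the controller interrupts and defer the
 * real work to the tasklet.
 */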
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

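/*
 * Assign a cookie to the descriptor and either start it immediately (if
 * the channel is idle) or put it on the software queue.
 */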
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

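/*
 * Prepare a memory-to-memory transfer: pick the widest transfer width the
 * alignment allows and split the copy into a chain of LLIs, each no
 * larger than DWC_MAX_COUNT transfer units.
 */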
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan->private)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

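/*
 * Prepare a slave transfer from a scatterlist, building one or more LLIs
 * per scatterlist entry depending on its length. The peripheral register
 * and flow control come from the slave data in chan->private.
 */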
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC(dws->fc));
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC(dws->fc));

		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
						"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

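/* Implement the dmaengine control operations: pause, resume and terminate */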
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	u32			cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else
		return -ENXIO;

	return 0;
}

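/*
 * Report the completion status of a cookie, rescanning the descriptor
 * lists once if it does not appear to be complete yet.
 */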
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int			ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
				dwc_first_active(dwc)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

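/*
 * Allocate and map the software descriptors for a channel; returns the
 * number of descriptors allocated.
 */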
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

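/*
 * Release a channel: disable its interrupts, then unmap and free all of
 * its software descriptors. The channel must be idle.
 */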
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	struct dw_dma_slave		*dws = chan->private;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC(dws->fc)
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

/*----------------------------------------------------------------------*/

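/*
 * Disable the controller and all of its interrupts, and wait until it is
 * really off.
 */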
static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = 7 - i;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
};

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");