/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/edma.h>

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100  /* 64 registers */
#define CHMAP_EXIST	BIT(24)

#define EDMA_MAX_DMACH           64
#define EDMA_MAX_PARAMENTRY     512
/*****************************************************************************/

static void __iomem *edmacc_regs_base[EDMA_MAX_CC];

static inline unsigned int edma_read(unsigned ctlr, int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}

static inline void edma_write(unsigned ctlr, int offset, int val)
{
	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
}
static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
		unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	val |= or;
	edma_write(ctlr, offset, val);
}
static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	edma_write(ctlr, offset, val);
}
static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val |= or;
	edma_write(ctlr, offset, val);
}
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}
static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
		int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}

/*****************************************************************************/

/* actual number of DMA channels and slots on this silicon */
struct edma {
	/* how many dma resources of each type */
	unsigned	num_channels;
	unsigned	num_region;
	unsigned	num_slots;
	unsigned	num_tc;
	unsigned	num_cc;
	enum dma_event_q	default_queue;

	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/* The edma_unused bit for each channel is set unless that
	 * channel is in use on this platform; the bits for in-use
	 * channels are cleared by a bit of SoC-specific
	 * initialization code.
	 */
	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

	unsigned	irq_res_start;
	unsigned	irq_res_end;

	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

/*****************************************************************************/

static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_cc[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}

static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}

static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
		int priority)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}

/**
 * map_dmach_param - Maps channel numbers to param entry numbers
 *
 * This maps each DMA channel number to a param entry number.  In
 * other words, using the DMA channel mapping registers, a param entry
 * can be mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (e.g. dm646x).
 *
 */
static void __init map_dmach_param(unsigned ctlr)
{
	int i;
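
	/*
	 * Per the EDMA3 CC documentation, the PaRAM entry number lives in
	 * the PAENTRY field (bits 13:5) of DCHMAPn, so writing (i << 5)
	 * maps channel i to PaRAM set i.
	 */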
	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}

static inline void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				BIT(lch & 0x1f));

	edma_cc[ctlr]->intr_data[lch].callback = callback;
	edma_cc[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				BIT(lch & 0x1f));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				BIT(lch & 0x1f));
	}
}

static int irq2ctlr(int irq)
{
	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
		return 0;
	else if (irq >= edma_cc[1]->irq_res_start &&
		irq <= edma_cc[1]->irq_res_end)
		return 1;

	return -1;
}

/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int i;
	int ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_irq_handler\n");

	if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
	    (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
		return IRQ_NONE;

	while (1) {
		int j;
		if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
				edma_shadow0_read_array(ctlr, SH_IER, 0))
			j = 0;
		else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
				edma_shadow0_read_array(ctlr, SH_IER, 1))
			j = 1;
		else
			break;
		dev_dbg(data, "IPR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_IPR, j));
		for (i = 0; i < 32; i++) {
			int k = (j << 5) + i;
			if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
					&& (edma_shadow0_read_array(ctlr,
							SH_IER, j) & BIT(i))) {
				/* Clear the corresponding IPR bits */
				edma_shadow0_write_array(ctlr, SH_ICR, j,
							BIT(i));
				if (edma_cc[ctlr]->intr_data[k].callback)
					edma_cc[ctlr]->intr_data[k].callback(
						k, DMA_COMPLETE,
						edma_cc[ctlr]->intr_data[k].
						data);
			}
		}
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	int ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_ccerr_handler\n");

	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;
		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
					edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;
				if (edma_read_array(ctlr, EDMA_EMR, j) &
							BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
								j, BIT(i));
					if (edma_cc[ctlr]->intr_data[k].
								callback) {
						edma_cc[ctlr]->intr_data[k].
						callback(k,
						DMA_CC_ERROR,
						edma_cc[ctlr]->intr_data
						[k].data);
					}
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding QEMR bits */
					edma_write(ctlr, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ctlr, SH_QSECR,
								BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(ctlr, EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding CCERR bits */
					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		}
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
		    (edma_read(ctlr, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * Transfer controller error interrupt handlers
 *
 *****************************************************************************/

#define tc_errs_handled	false	/* disabled as long as they're NOPs */

static irqreturn_t dma_tc0err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc0err_handler\n");
	return IRQ_HANDLED;
}

static irqreturn_t dma_tc1err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc1err_handler\n");
	return IRQ_HANDLED;
}

static int reserve_contiguous_slots(int ctlr, unsigned int id,
				     unsigned int num_slots,
				     unsigned int start_slot)
{
	int i, j;
	unsigned int count = num_slots;
	int stop_slot = start_slot;
	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
		j = EDMA_CHAN_SLOT(i);
		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
			/* Record our current beginning slot */
			if (count == num_slots)
				stop_slot = i;

			count--;
			set_bit(j, tmp_inuse);

			if (count == 0)
				break;
		} else {
			clear_bit(j, tmp_inuse);

			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
				stop_slot = i;
				break;
			} else {
				count = num_slots;
			}
		}
	}

	/*
	 * We have to clear any bits that we set if we run out of
	 * parameter RAM slots, i.e. we find a set of contiguous
	 * parameter RAM slots but it is smaller than the number
	 * requested because we reach the end of the parameter RAM.
	 */
	if (i == edma_cc[ctlr]->num_slots)
		stop_slot = i;

	for (j = start_slot; j < stop_slot; j++)
		if (test_bit(j, tmp_inuse))
			clear_bit(j, edma_cc[ctlr]->edma_inuse);

	if (count)
		return -EBUSY;

	for (j = i - num_slots + 1; j <= i; ++j)
		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}

static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int i, ctlr;

	for (i = 0; i < pdev->num_resources; i++) {
		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
				(int)pdev->resource[i].start >= 0) {
			ctlr = EDMA_CTLR(pdev->resource[i].start);
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
					edma_cc[ctlr]->edma_unused);
		}
	}

	return 0;
}

/*-----------------------------------------------------------------------*/

static bool unused_chan_list_done;

/* Resource alloc/free:  dma channels, parameter RAM slots */

/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	unsigned i, done = 0, ctlr = 0;
	int ret = 0;

	if (!unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
				prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		unused_chan_list_done = true;
	}

	if (channel >= 0) {
		ctlr = EDMA_CTLR(channel);
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		for (i = 0; i < arch_num_cc; i++) {
			channel = 0;
			for (;;) {
				channel = find_next_bit(edma_cc[i]->edma_unused,
						edma_cc[i]->num_channels,
						channel);
				if (channel == edma_cc[i]->num_channels)
					break;
				if (!test_and_set_bit(channel,
						edma_cc[i]->edma_inuse)) {
					done = 1;
					ctlr = i;
					break;
				}
				channel++;
			}
			if (done)
				break;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= edma_cc[ctlr]->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
					callback, data);

	map_dmach_queue(ctlr, channel, eventq_no);

	return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);
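
/*
 * Example usage (an illustrative sketch only; the callback, the
 * completion variable, and the use of a negative channel number for
 * "any unmapped channel" are assumptions, not part of this file):
 *
 *	static void xfer_done(unsigned channel, u16 ch_status, void *data)
 *	{
 *		if (ch_status == DMA_COMPLETE)
 *			complete(data);		// e.g. a struct completion
 *	}
 *
 *	int ch = edma_alloc_channel(-1, xfer_done, &done, EVENTQ_DEFAULT);
 *	if (ch < 0)
 *		return ch;
 *	// ... program the channel's PaRAM slot, edma_start(ch), wait ...
 *	edma_stop(ch);
 *	edma_free_channel(ch);
 */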


/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		slot = edma_cc[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
					edma_cc[ctlr]->num_slots, slot);
			if (slot == edma_cc[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_cc[ctlr]->num_channels ||
			slot >= edma_cc[ctlr]->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);
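
/*
 * Example (an illustrative sketch; the use of controller 0 and the
 * surrounding error handling are assumptions):  grab a free slot on CC0
 * to serve as a reload/link target for a channel's slot.
 *
 *	int link_slot = edma_alloc_slot(0, EDMA_SLOT_ANY);
 *	if (link_slot < 0)
 *		return link_slot;
 *	// ... fill it with edma_write_slot() or the edma_set_*() helpers,
 *	// then edma_link(channel_slot, link_slot) ...
 *	edma_free_slot(link_slot);
 */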

/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots)
		return;

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);


/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 * The API will return the starting point of a set of
 * contiguous parameter RAM slots that have been requested
 *
 * @ctlr: the EDMA channel controller to allocate from
 * @id: can only be EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT
 * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous parameter RAM slots
 * @slot: the start value of the parameter RAM slots that should be passed
 * if id is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 *
 * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 * contiguous parameter RAM slots from parameter RAM 64 in the case of
 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
 *
 * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 * set of contiguous parameter RAM slots from the "slot" that is passed as an
 * argument to the API.
 *
 * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API starts looking
 * for a set of contiguous parameter RAM slots from the "slot" that is
 * passed as an argument to the API.  On failure the API will try to find
 * a set of contiguous parameter RAM slots from the remaining parameter
 * RAM slots
 */
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
	/*
	 * The start slot requested should be greater than or equal to
	 * the number of channels and less than the total number
	 * of slots
	 */
	if ((id != EDMA_CONT_PARAMS_ANY) &&
		(slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots))
		return -EINVAL;

	/*
	 * The number of parameter RAM slots requested cannot be less than 1
	 * and cannot be more than the number of slots minus the number of
	 * channels
	 */
	if (count < 1 || count >
		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
		return -EINVAL;

	switch (id) {
	case EDMA_CONT_PARAMS_ANY:
		return reserve_contiguous_slots(ctlr, id, count,
						 edma_cc[ctlr]->num_channels);
	case EDMA_CONT_PARAMS_FIXED_EXACT:
	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
		return reserve_contiguous_slots(ctlr, id, count, slot);
	default:
		return -EINVAL;
	}

}
EXPORT_SYMBOL(edma_alloc_cont_slots);
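
/*
 * Example (an illustrative sketch; the controller number and slot count
 * are assumptions, not taken from this file):  reserve four contiguous
 * slots anywhere in CC0's parameter RAM, then release them.
 *
 *	int first = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 4);
 *	if (first < 0)
 *		return first;
 *	// slots first .. first+3 now belong to this driver
 *	edma_free_cont_slots(first, 4);
 */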

/**
 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM slot of the set of contiguous slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots.
 * Callers/applications need to keep track of sets of contiguous
 * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
 * API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
	unsigned ctlr, slot_to_free;
	int i;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots ||
		count < 1)
		return -EINVAL;

	for (i = slot; i < slot + count; ++i) {
		ctlr = EDMA_CTLR(i);
		slot_to_free = EDMA_CHAN_SLOT(i);

		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
			&dummy_paramset, PARM_SIZE);
		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
	}

	return 0;
}
EXPORT_SYMBOL(edma_free_cont_slots);

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */

/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, in which case it specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
				enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set SAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
		} else {
			/* clear SAM */
			i &= ~SAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);

		/* set the source port address
		   in source register of param structure */
		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
	}
}
EXPORT_SYMBOL(edma_set_src);

/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, in which case it specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
				 enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set DAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
		} else {
			/* clear DAM */
			i &= ~DAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);
		/* set the destination port address
		   in dest register of param structure */
		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
	}
}
EXPORT_SYMBOL(edma_set_dest);
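
/*
 * Example (an illustrative sketch; the FIFO address, buffer address,
 * and widths are hypothetical):  configure a slot to drain a 32-bit
 * peripheral FIFO into a contiguous memory buffer.
 *
 *	edma_set_src(slot, fifo_phys_addr, FIFO, W32BIT);
 *	edma_set_dest(slot, buf_dma_addr, INCR, W8BIT);	// width ignored
 */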

/**
 * edma_get_position - returns the current transfer points
 * @slot: parameter RAM slot being examined
 * @src: pointer to source port position
 * @dst: pointer to destination port position
 *
 * Returns current source and destination addresses for a particular
 * parameter RAM slot.  Its channel should not be active when this is called.
 */
void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
{
	struct edmacc_param temp;
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
	if (src != NULL)
		*src = temp.src;
	if (dst != NULL)
		*dst = temp.dst;
}
EXPORT_SYMBOL(edma_get_position);

/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0xffff0000, src_bidx);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0xffff0000, src_cidx);
	}
}
EXPORT_SYMBOL(edma_set_src_index);

/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0x0000ffff, dest_bidx << 16);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0x0000ffff, dest_cidx << 16);
	}
}
EXPORT_SYMBOL(edma_set_dest_index);
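
/*
 * Example (an illustrative sketch; the acnt/bcnt values are whatever the
 * caller chose for edma_set_transfer_params()):  for an AB-synchronized
 * transfer whose source side is one contiguous buffer and whose
 * destination is a fixed peripheral register, advance the source by one
 * array within each frame and by one frame's worth between frames.
 *
 *	edma_set_src_index(slot, acnt, acnt * bcnt);
 *	edma_set_dest_index(slot, 0, 0);
 */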

/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
		u16 acnt, u16 bcnt, u16 ccnt,
		u16 bcnt_rld, enum sync_dimension sync_mode)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
				0x0000ffff, bcnt_rld << 16);
		if (sync_mode == ASYNC)
			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
		else
			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
		/* Set the acount, bcount, ccount registers */
		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
	}
}
EXPORT_SYMBOL(edma_set_transfer_params);
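
/*
 * Example (an illustrative sketch; the FIFO geometry and buffer size are
 * hypothetical):  an AB-synchronized drain of a 32-bit-wide, 8-word-deep
 * FIFO into a 4 KiB buffer, one frame per synchronization event.
 *
 *	u16 acnt = 4;				// FIFO width in bytes
 *	u16 bcnt = 8;				// FIFO depth (arrays per frame)
 *	u16 ccnt = 4096 / (acnt * bcnt);	// frames in the buffer
 *
 *	edma_set_transfer_params(slot, acnt, bcnt, ccnt, bcnt, ABSYNC);
 */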

/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
	unsigned ctlr_from, ctlr_to;

	ctlr_from = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);
	ctlr_to = EDMA_CTLR(to);
	to = EDMA_CHAN_SLOT(to);

	if (from >= edma_cc[ctlr_from]->num_slots)
		return;
	if (to >= edma_cc[ctlr_to]->num_slots)
		return;
	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
				PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);
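
/*
 * Example (an illustrative sketch; "ch" and "reload" are hypothetical
 * slot numbers):  make a transfer repeat by linking the channel's own
 * slot to a spare reload slot whose link field points back at that same
 * reload slot, so the parameters are re-copied after every pass until
 * the channel is stopped or the link is cut with edma_unlink().
 *
 *	edma_link(ch, reload);
 *	edma_link(reload, reload);
 */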

/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);

	if (from >= edma_cc[ctlr]->num_slots)
		return;
	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
EXPORT_SYMBOL(edma_unlink);

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);
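
/*
 * Example (an illustrative sketch; "template_slot", "reload_slot" and
 * the destination address are assumptions):  clone an already-programmed
 * slot, repoint its destination, and write it into a reload slot.
 *
 *	struct edmacc_param p;
 *
 *	edma_read_slot(template_slot, &p);
 *	p.dst = second_buf_dma_addr;
 *	edma_write_slot(reload_slot, &p);
 */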

/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save them as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);

/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */

/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_pause);

/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_resume);

/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_ESR, j));
			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ctlr, EDMA_ECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(edma_start);
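
/*
 * Example (an illustrative sketch; "ch" is assumed to be a channel with
 * no hardware event association, e.g. one allocated with a negative
 * @channel argument):  kick a software-triggered transfer and tear it
 * down when done.
 *
 *	edma_start(ch);		// sets ESR, so the transfer fires at once
 *	// ... wait for the completion callback ...
 *	edma_stop(ch);
 */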

/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * Any active transfer on @channel is paused and
 * all pending hardware events are cleared.  The current transfer
 * may not be resumed, and the channel's Parameter RAM should be
 * reinitialized before being reused.
 */
void edma_stop(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
EXPORT_SYMBOL(edma_stop);

/******************************************************************************
 *
 * It cleans the ParamEntry and brings EDMA back to its initial state if the
 * media has been removed before EDMA has finished.  It is useful for
 * removable media.
 * Arguments:
 *      ch_no     - channel no
 *
 * Return: zero on success, or corresponding error no on failure
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/

void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
EXPORT_SYMBOL(edma_clean_channel);

/*
 * edma_clear_event - clear an outstanding event on the DMA channel
 * Arguments:
 *	channel - channel number
 */
void edma_clear_event(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;
	if (channel < 32)
		edma_write(ctlr, EDMA_ECR, BIT(channel));
	else
		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);

/*-----------------------------------------------------------------------*/

static int __init edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	**info = pdev->dev.platform_data;
	const s8		(*queue_priority_mapping)[2];
	const s8		(*queue_tc_mapping)[2];
	int			i, j, off, ln, found = 0;
	int			status = -1;
	const s16		(*rsv_chans)[2];
	const s16		(*rsv_slots)[2];
	int			irq[EDMA_MAX_CC] = {0, 0};
	int			err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource		*r[EDMA_MAX_CC] = {NULL};
	resource_size_t		len[EDMA_MAX_CC];
	char			res_name[10];
	char			irq_name[10];

	if (!info)
		return -ENODEV;

	for (j = 0; j < EDMA_MAX_CC; j++) {
		sprintf(res_name, "edma_cc%d", j);
		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						res_name);
		if (!r[j] || !info[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else {
			found = 1;
		}

		len[j] = resource_size(r[j]);

		r[j] = request_mem_region(r[j]->start, len[j],
			dev_name(&pdev->dev));
		if (!r[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
		if (!edmacc_regs_base[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edma_cc[j] = kzalloc(sizeof(struct edma), GFP_KERNEL);
		if (!edma_cc[j]) {
			status = -ENOMEM;
			goto fail1;
		}

		edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
							EDMA_MAX_DMACH);
		edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
							EDMA_MAX_PARAMENTRY);
		edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
							EDMA_MAX_CC);

		edma_cc[j]->default_queue = info[j]->default_queue;

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		for (i = 0; i < edma_cc[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
					&dummy_paramset, PARM_SIZE);

		/* Mark all channels as unused */
		memset(edma_cc[j]->edma_unused, 0xff,
			sizeof(edma_cc[j]->edma_unused));

		if (info[j]->rsv) {

			/* Clear the reserved channels in unused list */
			rsv_chans = info[j]->rsv->rsv_chans;
			if (rsv_chans) {
				for (i = 0; rsv_chans[i][0] != -1; i++) {
					off = rsv_chans[i][0];
					ln = rsv_chans[i][1];
					clear_bits(off, ln,
						edma_cc[j]->edma_unused);
				}
			}

			/* Set the reserved slots in inuse list */
			rsv_slots = info[j]->rsv->rsv_slots;
			if (rsv_slots) {
				for (i = 0; rsv_slots[i][0] != -1; i++) {
					off = rsv_slots[i][0];
					ln = rsv_slots[i][1];
					set_bits(off, ln,
						edma_cc[j]->edma_inuse);
				}
			}
		}

		sprintf(irq_name, "edma%d", j);
		irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_start = irq[j];
		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
					&pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				irq[j], status);
			goto fail;
		}

		sprintf(irq_name, "edma%d_err", j);
		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_end = err_irq[j];
		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
					"edma_error", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				err_irq[j], status);
			goto fail;
		}

		for (i = 0; i < edma_cc[j]->num_channels; i++)
			map_dmach_queue(j, i, info[j]->default_queue);

		queue_tc_mapping = info[j]->queue_tc_mapping;
		queue_priority_mapping = info[j]->queue_priority_mapping;

		/* Event queue to TC mapping */
		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
			map_queue_tc(j, queue_tc_mapping[i][0],
					queue_tc_mapping[i][1]);

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						queue_priority_mapping[i][0],
						queue_priority_mapping[i][1]);

		/* Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < info[j]->n_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
		arch_num_cc++;
	}

	if (tc_errs_handled) {
		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
					"edma_tc0", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				IRQ_TCERRINT0, status);
			return status;
		}
		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
					"edma_tc1", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
				IRQ_TCERRINT, status);
			return status;
		}
	}

	return 0;

fail:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (err_irq[i])
			free_irq(err_irq[i], &pdev->dev);
		if (irq[i])
			free_irq(irq[i], &pdev->dev);
	}
fail1:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (r[i])
			release_mem_region(r[i]->start, len[i]);
		if (edmacc_regs_base[i])
			iounmap(edmacc_regs_base[i]);
		kfree(edma_cc[i]);
	}
	return status;
}


static struct platform_driver edma_driver = {
	.driver.name	= "edma",
};

static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);
