sirf-dma.c revision ba07d812f58c0ec65fff981a085529ed88965d23
/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_DESCRIPTORS                 16
#define SIRFSOC_DMA_CHANNELS                    16

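/*
 * Per-channel registers: each channel owns a 0x10-byte register bank
 * (ADDR, XLEN, YLEN, CTRL), addressed as base + cid * 0x10 + offset.
 */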
#define SIRFSOC_DMA_CH_ADDR                     0x00
#define SIRFSOC_DMA_CH_XLEN                     0x04
#define SIRFSOC_DMA_CH_YLEN                     0x08
#define SIRFSOC_DMA_CH_CTRL                     0x0C

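/*
 * Shared registers: SIRFSOC_DMA_WIDTH_0 is the first entry of a per-channel
 * array of width registers (base + 0x100 + cid * 4); the registers below
 * use one bit per channel (CH_LOOP_CTRL uses bits cid and cid + 16).
 */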
#define SIRFSOC_DMA_WIDTH_0                     0x100
#define SIRFSOC_DMA_CH_VALID                    0x140
#define SIRFSOC_DMA_CH_INT                      0x144
#define SIRFSOC_DMA_INT_EN                      0x148
#define SIRFSOC_DMA_INT_EN_CLR                  0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR            0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT               4
#define SIRFSOC_DMA_DIR_CTRL_BIT                5

/* The xlen and dma_width registers are expressed in 4-byte words */
#define SIRFSOC_DMA_WORD_LEN                    4

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int             xlen;           /* DMA xlen */
	int             ylen;           /* DMA ylen */
	int             width;          /* DMA width */
	int             dir;            /* 1: mem to dev, 0: dev to mem */
	bool            cyclic;         /* is loop DMA? */
	u32             addr;           /* DMA buffer address */
};

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma_regs {
	u32				ctrl[SIRFSOC_DMA_CHANNELS];
	u32				interrupt_en;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	bool				is_marco;
	struct sirfsoc_dma_regs		regs_save;
};

#define DRV_NAME	"sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
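	/*
	 * channels[] is embedded in struct sirfsoc_dma, so container_of()
	 * on the channel entry recovers the controller.
	 */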
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * The lock is already held by the callers of this function,
	 * so we don't take it again here.
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to the active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA. The address register
	 * takes a word address, hence the shift by 2.
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
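	/*
	 * "is" has one pending bit per channel: service the highest set bit
	 * first and clear it by writing it back to the CH_INT register.
	 */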
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
			node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/*
			 * for a cyclic channel, the descriptor always stays
			 * on the active list
			 */
			sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
				node);

			if (!sdesc || !sdesc->cyclic) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Queue the descriptor; the transfer is started by issue_pending */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

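	/*
	 * Only the burst size is configurable here: a 4-word burst selects
	 * mode 1, anything else selects mode 0 (programmed via
	 * SIRFSOC_DMA_MODE_CTRL_BIT in the channel control register).
	 */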
	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

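	/*
	 * Marco has dedicated clear registers for the interrupt-enable and
	 * loop-control bits, while prima2 needs a read-modify-write.
	 */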
	if (!sdma->is_marco) {
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	} else {
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
	}

	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			| ((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		return sirfsoc_dma_pause_chan(schan);
	case DMA_RESUME:
		return sirfsoc_dma_resume_chan(schan);
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

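	/* Runtime-resume the controller; this enables the DMA clock */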
	pm_runtime_get_sync(sdma->dma.dev);

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		/* Drop the runtime PM reference taken above */
		pm_runtime_put(sdma->dma.dev);
		return -ENOMEM;
	}

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);

	pm_runtime_put(sdma->dma.dev);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
			node);
	dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
		(sdesc->width * SIRFSOC_DMA_WORD_LEN);

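	/*
	 * The channel address register counts in 32-bit words; shift it back
	 * to a byte address before computing the residue.
	 */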
	ret = dma_cookie_status(chan, cookie, txstate);
	dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR)
		<< 2;
	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2, and ylen
	 * (number of frames - 1) must be at least 0. xlen is programmed as
	 * the chunk size and width as chunk size plus inter-chunk gap, both
	 * in words.
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with exactly two periods.
	 * If the X-length is set to 0, the channel runs in loop mode:
	 * the DMA address keeps increasing until it reaches the end of a
	 * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
	 * then wraps back to the beginning of that area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB, and the controller generates two interrupts per loop:
	 * one when the DMA address reaches the end of BUFA and one at the
	 * end of BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
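	/* xlen = 0 selects loop mode; the loop area spans width * (ylen + 1) words */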
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);

#define SIRFSOC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;

	return 0;
}

static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
		sdma->is_marco = true;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Failed to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	sdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(sdma->clk)) {
		dev_err(dev, "failed to get a clock.\n");
		return PTR_ERR(sdma->clk);
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
	dma->device_slave_caps = sirfsoc_dma_device_slave_caps;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);

	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	pm_runtime_enable(&op->dev);
	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	pm_runtime_disable(&op->dev);
	if (!pm_runtime_status_suspended(&op->dev))
		sirfsoc_dma_runtime_suspend(&op->dev);

	return 0;
}

static int sirfsoc_dma_runtime_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	clk_disable_unprepare(sdma->clk);
	return 0;
}

static int sirfsoc_dma_runtime_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(sdma->clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static int sirfsoc_dma_pm_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;

	/*
	 * If we were runtime-suspended before, resume first to enable the
	 * clock before accessing the registers.
	 */
	if (pm_runtime_status_suspended(dev)) {
		ret = sirfsoc_dma_runtime_resume(dev);
		if (ret < 0)
			return ret;
	}

	/*
	 * The DMA controller loses its register contents while suspended,
	 * so save the registers of every active channel.
	 */
	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc,
			node);
		save->ctrl[ch] = readl_relaxed(sdma->base +
			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
	}
	save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN);

	/* Disable clock */
	sirfsoc_dma_runtime_suspend(dev);

	return 0;
}

static int sirfsoc_dma_pm_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;

	/* Enable the clock before accessing the registers */
	ret = sirfsoc_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

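	/* Restore the interrupt mask and the registers of every active channel */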
	writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN);
	for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc,
			node);
		writel_relaxed(sdesc->width,
			sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4);
		writel_relaxed(sdesc->xlen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(save->ctrl[ch],
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
		writel_relaxed(sdesc->addr >> 2,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
	}

	/* if we were runtime-suspended before, suspend again */
	if (pm_runtime_status_suspended(dev))
		sirfsoc_dma_runtime_suspend(dev);

	return 0;
}

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{ .compatible = "sirf,marco-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.pm = &sirfsoc_dma_pm_ops,
		.of_match_table	= sirfsoc_dma_match,
	},
};

static int __init sirfsoc_dma_init(void)
{
	return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
	platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");