/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can call
 * dma_request_channel().  Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
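
/*
 * Illustrative sketch of the public-channel path described above (a
 * hypothetical client, not code from this file): dmaengine_get() pins the
 * providers and populates the per-cpu channel table, dma_find_channel()
 * returns this cpu's channel for the requested operation type, and
 * dmaengine_put() drops the interest again.
 *
 *	static void example_memcpy_offload(void)
 *	{
 *		struct dma_chan *chan;
 *
 *		dmaengine_get();
 *		chan = dma_find_channel(DMA_MEMCPY);
 *		if (chan) {
 *			// prepare and submit descriptors on chan here
 *			dma_async_issue_pending(chan);
 *		}
 *		dmaengine_put();
 *	}
 */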

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get a specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = private_candidate(&mask, device, NULL, NULL);
	if (chan) {
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
		}
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to filter the available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
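
/*
 * Illustrative sketch only (hypothetical driver code): an exclusive-channel
 * client builds a capability mask and may pass a filter callback, which is
 * run against each free candidate channel.  dma_request_channel() is the
 * wrapper around __dma_request_channel() provided by linux/dmaengine.h.
 *
 *	static bool example_filter(struct dma_chan *chan, void *param)
 *	{
 *		// only accept channels belonging to the device we were given
 *		return chan->device->dev == param;
 *	}
 *
 *	static struct dma_chan *example_request(struct device *wanted)
 *	{
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_MEMCPY, mask);
 *		return dma_request_channel(mask, example_filter, wanted);
 *	}
 */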

/**
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
	if (IS_ERR(ch))
		return NULL;
	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
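
/*
 * Illustrative sketch only (hypothetical slave driver): a peripheral driver
 * normally asks for its channel by the name used in its DT or ACPI
 * description and releases it again on teardown; the "tx" name below is
 * made up.
 *
 *	chan = dma_request_slave_channel_reason(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// lookup error, or -ENODEV from above
 *	...
 *	dma_release_channel(chan);
 */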

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
				       __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - register a DMA device and its channels
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
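
/*
 * Illustrative provider-side sketch (hypothetical "foo" driver, not part of
 * this file): before calling dma_async_device_register() a driver fills in
 * its struct dma_device, including the mandatory callbacks checked above,
 * and links each channel onto the device's channels list.
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_alloc_chan_resources = foo_alloc_chan_resources;
 *	dd->device_free_chan_resources = foo_free_chan_resources;
 *	dd->device_prep_dma_memcpy = foo_prep_dma_memcpy;
 *	dd->device_tx_status = foo_tx_status;
 *	dd->device_issue_pending = foo_issue_pending;
 *	dd->dev = &pdev->dev;
 *	INIT_LIST_HEAD(&dd->channels);
 *	foo_chan->chan.device = dd;
 *	list_add_tail(&foo_chan->chan.device_node, &dd->channels);
 *	ret = dma_async_device_register(dd);
 */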

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
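
/*
 * Illustrative sketch only (hypothetical caller): an offload client that
 * wants the core to undo its mappings bundles them in a dmaengine_unmap_data,
 * attaches it to the descriptor with dma_set_unmap() from linux/dmaengine.h,
 * and then drops its own reference; the last dmaengine_unmap_put() performs
 * the actual unmaps.  The page and descriptor names below are made up.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, 0, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, 0, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
 *						   unmap->addr[0], len, 0);
 *	if (tx)
 *		dma_set_unmap(tx, unmap);
 *	dmaengine_unmap_put(unmap);
 */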

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);