/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#ifdef CONFIG_DMA_ENGINE

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <asm/local.h>

/**
 * enum dma_event - resource PNP/power management events
 * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
 * @DMA_RESOURCE_RESUME: DMA device returning to full power
 * @DMA_RESOURCE_ADDED: DMA device added to the system
 * @DMA_RESOURCE_REMOVED: DMA device removed from the system
 */
enum dma_event {
	DMA_RESOURCE_SUSPEND,
	DMA_RESOURCE_RESUME,
	DMA_RESOURCE_ADDED,
	DMA_RESOURCE_REMOVED,
};

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * A dma_cookie_t value > 0 identifies a DMA request; a value < 0 is an
 * error code.
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
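
/*
 * Example (illustrative only): a negative cookie returned by one of the
 * dma_async_memcpy_*() helpers declared further down is an error code, so
 * callers can test it directly; 'cookie' here is a hypothetical local.
 *
 *	if (dma_submit_error(cookie))
 *		printk(KERN_ERR "copy not submitted: %d\n", cookie);
 */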

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @refcount: local_t used for open-coded "bigref" counting
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	local_t refcount;
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};


/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @client: ptr to the client user of this chan, will be %NULL when unused
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @class_dev: class device for sysfs
 * @refcount: kref, used in "bigref" slow-mode
 * @slow_ref: indicates that the DMA channel is free
 * @rcu: the DMA channel's RCU head
 * @client_node: used to add this to the client chan list
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 */
struct dma_chan {
	struct dma_client *client;
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct class_device class_dev;

	struct kref refcount;
	int slow_ref;
	struct rcu_head rcu;

	struct list_head client_node;
	struct list_head device_node;
	struct dma_chan_percpu *local;
};

void dma_chan_cleanup(struct kref *kref);

static inline void dma_chan_get(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_get(&chan->refcount);
	else {
		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

static inline void dma_chan_put(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_put(&chan->refcount, dma_chan_cleanup);
	else {
		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}
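
/*
 * Illustrative sketch (not part of the API): while a channel is in normal use
 * the fast per-cpu path above is taken, so a client can cheaply bracket its
 * use of a channel; 'chan' is a hypothetical pointer obtained from the
 * client's event callback.
 *
 *	dma_chan_get(chan);
 *	...submit one or more copies on chan...
 *	dma_chan_put(chan);
 */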

/**
 * typedef dma_event_callback - function pointer to a DMA event callback
 */
typedef void (*dma_event_callback) (struct dma_client *client,
		struct dma_chan *chan, enum dma_event event);

/**
 * struct dma_client - info on the entity making use of DMA services
 * @event_callback: func ptr to call on DMA channel events
 * @chan_count: number of chans allocated
 * @chans_desired: number of chans requested. Can be more or less than
 *	chan_count
 * @lock: protects access to the channels list
 * @channels: the list of DMA channels allocated
 * @global_node: list_head for global dma_client_list
 */
struct dma_client {
	dma_event_callback	event_callback;
	unsigned int		chan_count;
	unsigned int		chans_desired;

	spinlock_t		lock;
	struct list_head	channels;
	struct list_head	global_node;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @refcount: reference count
 * @done: IO completion struct
 * @dev_id: unique device ID
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
 * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
 * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
 * @device_memcpy_complete: poll the status of a DMA transaction
 * @device_memcpy_issue_pending: push appended descriptors to hardware
 */
struct dma_device {

	unsigned int chancnt;
	struct list_head channels;
	struct list_head global_node;

	struct kref refcount;
	struct completion done;

	int dev_id;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);
	dma_cookie_t (*device_memcpy_buf_to_buf)(struct dma_chan *chan,
			void *dest, void *src, size_t len);
	dma_cookie_t (*device_memcpy_buf_to_pg)(struct dma_chan *chan,
			struct page *page, unsigned int offset, void *kdata,
			size_t len);
	dma_cookie_t (*device_memcpy_pg_to_pg)(struct dma_chan *chan,
			struct page *dest_pg, unsigned int dest_off,
			struct page *src_pg, unsigned int src_off, size_t len);
	enum dma_status (*device_memcpy_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_memcpy_issue_pending)(struct dma_chan *chan);
};

/* --- public DMA engine API --- */

struct dma_client *dma_async_client_register(dma_event_callback event_callback);
void dma_async_client_unregister(struct dma_client *client);
void dma_async_client_chan_request(struct dma_client *client,
		unsigned int number);
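
/*
 * Illustrative client-side sketch: register a client, ask the core for one
 * channel, and pick channels up in the event callback.  my_dma_event() and
 * my_chan are hypothetical client symbols, not definitions from this header.
 *
 *	static struct dma_chan *my_chan;
 *
 *	static void my_dma_event(struct dma_client *client,
 *			struct dma_chan *chan, enum dma_event event)
 *	{
 *		switch (event) {
 *		case DMA_RESOURCE_ADDED:
 *			my_chan = chan;
 *			break;
 *		case DMA_RESOURCE_REMOVED:
 *			my_chan = NULL;
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 *	client = dma_async_client_register(my_dma_event);
 *	if (client)
 *		dma_async_client_chan_request(client, 1);
 */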

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len)
{
	int cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return chan->device->device_memcpy_buf_to_buf(chan, dest, src, len);
}
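
/*
 * Illustrative sketch (hypothetical 'chan', 'dest', 'src' and 'len'): submit
 * an offloaded copy, fall back to a CPU copy if no descriptor was available,
 * and otherwise kick the hardware with dma_async_memcpy_issue_pending(),
 * declared further down.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_submit_error(cookie))
 *		memcpy(dest, src, len);
 *	else
 *		dma_async_memcpy_issue_pending(chan);
 */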

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len)
{
	int cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return chan->device->device_memcpy_buf_to_pg(chan, page, offset,
	                                             kdata, len);
}

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len)
{
	int cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return chan->device->device_memcpy_pg_to_pg(chan, dest_pg, dest_off,
	                                            src_pg, src_off, len);
}

/**
 * dma_async_memcpy_issue_pending - flush pending copies to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_memcpy_issue_pending(struct dma_chan *chan)
{
	chan->device->device_memcpy_issue_pending(chan);
}

/**
 * dma_async_memcpy_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_memcpy_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_memcpy_complete(chan, cookie, last, used);
}

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used inside dma_async_memcpy_complete(); the
 * test logic is separated out so that several cookies can be checked cheaply
 * against one snapshot of the channel state.
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
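
/*
 * Illustrative polling sketch ('chan', 'cookie' and 'other_cookie' are
 * hypothetical variables holding previously returned values): flush the
 * submitted descriptors to hardware, poll until one cookie completes, then
 * reuse the returned last/used snapshot to test further cookies without
 * touching the hardware again.
 *
 *	dma_cookie_t last, used;
 *	enum dma_status status;
 *
 *	dma_async_memcpy_issue_pending(chan);
 *	do {
 *		status = dma_async_memcpy_complete(chan, cookie, &last, &used);
 *	} while (status == DMA_IN_PROGRESS);
 *
 *	if (dma_async_is_complete(other_cookie, last, used) == DMA_SUCCESS)
 *		...other_cookie's copy has finished as well...
 */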

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
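
/*
 * Illustrative provider-side sketch: a DMA driver fills in the operations of
 * its struct dma_device, adds its channels to the device's channel list, and
 * registers with the core.  "my_dev" and the my_*() callbacks are
 * hypothetical driver symbols (assuming the driver embeds a struct dma_device
 * as ->common in its own device structure), not definitions from this header.
 *
 *	my_dev->common.device_alloc_chan_resources = my_alloc_chan_resources;
 *	my_dev->common.device_free_chan_resources = my_free_chan_resources;
 *	my_dev->common.device_memcpy_buf_to_buf = my_memcpy_buf_to_buf;
 *	my_dev->common.device_memcpy_buf_to_pg = my_memcpy_buf_to_pg;
 *	my_dev->common.device_memcpy_pg_to_pg = my_memcpy_pg_to_pg;
 *	my_dev->common.device_memcpy_complete = my_memcpy_complete;
 *	my_dev->common.device_memcpy_issue_pending = my_memcpy_issue_pending;
 *	err = dma_async_device_register(&my_dev->common);
 */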

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);
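
/*
 * Illustrative sketch of the iovec helpers above (hypothetical 'chan', 'iov',
 * 'kdata' and 'len'): pin the destination pages once, issue one or more
 * copies into the iovec, and unpin only after the copies have completed.
 *
 *	struct dma_pinned_list *pinned;
 *	dma_cookie_t cookie;
 *
 *	pinned = dma_pin_iovec_pages(iov, len);
 *	if (!pinned)
 *		...fall back to copy_to_user()/memcpy()...
 *	cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
 *	...wait for completion with dma_async_memcpy_complete()...
 *	dma_unpin_iovec_pages(pinned);
 */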

#endif /* CONFIG_DMA_ENGINE */
#endif /* DMAENGINE_H */