/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid; instead they
 * are pointed at a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

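/* Release a run of IOMMU pages previously obtained from
 * iommu_range_alloc().  All callers hold the IOMMU lock.
 */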
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	bitmap_clear(arena->map, entry, npages);
}

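/* Set up the software state for one IOMMU: the allocation bitmap,
 * the dummy page that inactive IOPTEs point at, and the IOMMU page
 * table (TSB) itself, all allocated on the requested NUMA node.
 */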
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

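/* Allocate a run of IOMMU pages and return a pointer to the first
 * IOPTE of the run, or NULL on failure.  Called under the IOMMU lock.
 */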
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

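/* Allocate a DMA context number.  Context 0 is reserved to mean
 * "no context" and is what gets returned when the context space is
 * exhausted.
 */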
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

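/* Allocate a DMA-coherent buffer: grab physically contiguous pages,
 * map them with consistent (non-streaming) IOPTEs, and return the
 * resulting IOMMU bus address through *dma_addrp.
 */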
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   struct dma_attrs *attrs)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

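/* Free a buffer obtained from dma_4u_alloc_coherent(): release the
 * IOMMU range and then the underlying pages.
 */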
static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

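/* Map a single page for streaming DMA.  A run of IOPTEs is allocated,
 * a context is assigned when the IOMMU supports context flushing, and
 * the returned bus address preserves the original offset within the
 * page.
 */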
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

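/* Flush the streaming buffer for the given DMA range.  A per-context
 * flush is used when both the streaming buffer and the IOMMU support
 * it, falling back to per-page flushes if the context match register
 * fails to clear.  DMA_TO_DEVICE mappings skip the flush-flag
 * synchronization since the device cannot have dirtied the streaming
 * cache.
 */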
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

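/* Tear down a mapping made by dma_4u_map_page(): flush the streaming
 * buffer if enabled, point the IOPTEs back at the dummy page, and
 * release the range and context.
 */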
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

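/* Map a scatterlist for streaming DMA.  Each entry gets its own run
 * of IOPTEs, and entries whose DMA addresses turn out contiguous are
 * merged into one segment as long as the device's segment size and
 * boundary limits allow.  Returns the number of DMA segments built,
 * or 0 on failure.
 */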
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

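/* Unmap a scatterlist mapped by dma_4u_map_sg(): for each segment,
 * flush the streaming buffer if enabled and restore the dummy IOPTEs,
 * then release the context.
 */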
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

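/* Make device writes to a single streaming mapping visible to the CPU
 * by flushing the streaming buffer over the mapped range.  A no-op when
 * the streaming buffer is disabled.
 */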
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

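/* As above, but for a scatterlist: the flush covers everything from
 * the first entry's bus address to the end of the last entry with a
 * non-zero DMA length.
 */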
static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

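/* DMA operations for sun4u-style hardware IOMMUs, installed as the
 * default dma_ops below.
 */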
static struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

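/* Report whether a device's DMA mask can be satisfied: masks wider
 * than 32 bits are rejected, otherwise the mask must cover the IOMMU's
 * DMA address mask, with a PCI 64-bit fallback check for PCI devices.
 */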
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev_is_pci(dev))
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);