/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 */

#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__

#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/bitops.h>

#include <asm/pci-bridge.h>
#include <sysdev/fsl_pci.h>

#include "fsl_pamu_domain.h"

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain->geometry;

	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
		subwin_iova = iova & ~(subwin_size - 1);
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	if (win_ptr->valid)
		return win_ptr->paddr + (iova & (win_ptr->size - 1));

	return 0;
}
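
/*
 * Worked example for the subwindow arithmetic above (illustrative
 * values, not taken from the driver): assume an aperture starting at
 * 0 with geom_size = 1 GiB (0x40000000) and win_cnt = 4, so each
 * subwindow spans 256 MiB (0x10000000).  For iova = 0x30001000:
 *
 *	subwin_size = 0x40000000 >> ilog2(4)    = 0x10000000
 *	subwin_iova = iova & ~(subwin_size - 1) = 0x30000000
 *	wnd         = (0x30000000 - 0) >> 28    = 3
 *
 * so the translation is served by win_arr[3], and the returned
 * physical address is win_arr[3].paddr plus the iova masked by the
 * window size.
 */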

static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
	struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
	int i, ret = 0;
	unsigned long rpn, flags;

	for (i = 0; i < dma_domain->win_cnt; i++) {
		if (sub_win_ptr[i].valid) {
			rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
						 sub_win_ptr[i].size,
						 ~(u32)0,
						 rpn,
						 dma_domain->snoop_id,
						 dma_domain->stash_id,
						 (i > 0) ? 1 : 0,
						 sub_win_ptr[i].prot);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAMU PAACE configuration failed for liodn %d\n",
			 liodn);

	return ret;
}

/* Map the DMA window corresponding to the LIODN */
static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
	if (dma_domain->win_cnt > 1)
		return map_subwins(liodn, dma_domain);
	else
		return map_win(liodn, dma_domain);
}

/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n",
				 liodn);
	} else {
		phys_addr_t wnd_addr;

		wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n",
				 liodn);
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0, i;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
			 liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < dma_domain->win_cnt; i++) {
		ret = pamu_update_paace_stash(liodn, i, val);
		if (ret) {
			pr_debug("Failed to update SPAACE %d field for liodn %d\n",
				 i, liodn);
			spin_unlock_irqrestore(&iommu_lock, flags);
			return ret;
		}
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = dma_domain->geom_size;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt = %d\n",
			 liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int check_size(u64 size, dma_addr_t iova)
{
	/*
	 * Size must be a power of two and at least be equal
	 * to the PAMU page size.
	 */
	if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
		pr_debug("Size too small or not a power of two\n");
		return -EINVAL;
	}

	/* The iova must be aligned to the window size */
	if (iova & (size - 1)) {
		pr_debug("Address is not aligned with the window size\n");
		return -EINVAL;
	}

	return 0;
}
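
/*
 * Example of the checks above (illustrative numbers, not from the
 * driver): with PAMU_PAGE_SIZE = 4 KiB, size = 0x100000 (1 MiB) and
 * iova = 0x300000 pass both tests; size = 0x180000 fails the
 * power-of-two test, and iova = 0x280000 fails the alignment test
 * for a 1 MiB window because 0x280000 & 0xfffff != 0.
 */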

static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
{
	struct fsl_dma_domain *domain;

	domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->stash_id = ~(u32)0;
	domain->snoop_id = ~(u32)0;
	domain->win_cnt = pamu_get_max_subwin_cnt();
	domain->geom_size = 0;

	INIT_LIST_HEAD(&domain->devices);

	spin_lock_init(&domain->domain_lock);

	return domain;
}

static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	info->dev->archdata.iommu_domain = NULL;
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check here if the device is already attached to a domain.
	 * If it is attached to a different domain, detach it first.
	 */
	old_domain_info = dev->archdata.iommu_domain;
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In case of devices with multiple LIODNs just store
	 * the info for the first LIODN as all
	 * LIODNs share the same domain
	 */
	if (!dev->archdata.iommu_domain)
		dev->archdata.iommu_domain = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	return get_phys_addr(dma_domain, iova);
}

static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	domain->priv = NULL;

	/* Remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kfree(dma_domain->win_arr);
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static int fsl_pamu_domain_init(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain;

	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
		return -ENOMEM;
	}
	domain->priv = dma_domain;
	dma_domain->iommu_domain = domain;
	/* Default geometry: 64 GB, i.e. the maximum system address */
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = (1ULL << 36) - 1;
	domain->geometry.force_aperture = true;

	return 0;
}

/* Configure geometry settings for all LIODNs associated with the domain */
static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
				    struct iommu_domain_geometry *geom_attr,
				    u32 win_cnt)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
				     geom_attr, win_cnt);
		if (ret)
			break;
	}

	return ret;
}

/* Update the stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

/* Update the domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn(info->liodn, dma_domain, wnd_nr);
		if (ret)
			break;
	}

	return ret;
}

static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
			ret = pamu_disable_liodn(info->liodn);
			if (!ret)
				dma_domain->enabled = 0;
		} else {
			ret = pamu_disable_spaace(info->liodn, wnd_nr);
		}
	}

	return ret;
}

static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (dma_domain->win_arr[wnd_nr].valid) {
		ret = disable_domain_win(dma_domain, wnd_nr);
		if (!ret) {
			dma_domain->win_arr[wnd_nr].valid = 0;
			dma_domain->mapped--;
		}
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				  phys_addr_t paddr, u64 size, int prot)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	struct dma_window *wnd;
	int pamu_prot = 0;
	int ret;
	unsigned long flags;
	u64 win_size;

	if (prot & IOMMU_READ)
		pamu_prot |= PAACE_AP_PERMS_QUERY;
	if (prot & IOMMU_WRITE)
		pamu_prot |= PAACE_AP_PERMS_UPDATE;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
	if (size > win_size) {
		pr_debug("Invalid window size\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	if (dma_domain->win_cnt == 1) {
		if (dma_domain->enabled) {
			pr_debug("Disable the window before updating the mapping\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EBUSY;
		}

		ret = check_size(size, domain->geometry.aperture_start);
		if (ret) {
			pr_debug("Aperture start not aligned to the size\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EINVAL;
		}
	}

	wnd = &dma_domain->win_arr[wnd_nr];
	if (!wnd->valid) {
		wnd->paddr = paddr;
		wnd->size = size;
		wnd->prot = pamu_prot;

		ret = update_domain_mapping(dma_domain, wnd_nr);
		if (!ret) {
			wnd->valid = 1;
			dma_domain->mapped++;
		}
	} else {
		pr_debug("Disable the window before updating the mapping\n");
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %s\n",
				 liodn[i], dev->of_node->full_name);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry,
					     win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
		ret = -EINVAL;
	}

	return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use the LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * Make dev point to the PCI controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
}

static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
	struct iommu_domain_geometry *geom_attr = data;
	struct fsl_dma_domain *dma_domain = domain->priv;
	dma_addr_t geom_size;
	unsigned long flags;

	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
	/*
	 * Sanity check the geometry size. Also, we do not support
	 * DMA outside of the geometry.
	 */
	if (check_size(geom_size, geom_attr->aperture_start) ||
	    !geom_attr->force_aperture) {
		pr_debug("Invalid PAMU geometry attributes\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Copy the domain geometry information */
	memcpy(&domain->geometry, geom_attr,
	       sizeof(struct iommu_domain_geometry));
	dma_domain->geom_size = geom_size;

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
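
/*
 * Caller-side sketch for the stash attribute (illustrative, not code
 * from this driver; assumes the definitions in fsl_pamu_stash.h):
 *
 *	struct pamu_stash_attribute stash = {
 *		.cpu	= 0,
 *		.cache	= PAMU_ATTR_CACHE_L1,
 *	};
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_STASH, &stash);
 *
 * The core iommu_domain_set_attr() call reaches configure_domain_stash()
 * above via fsl_pamu_set_domain_attr(), which translates the (cache, cpu)
 * pair into a stash destination id and reprograms every LIODN attached
 * to the domain.
 */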

/* Configure the domain DMA state, i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d\n",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_GEOMETRY:
		ret = configure_domain_geometry(domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		ret = configure_domain_stash(dma_domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		ret = configure_domain_dma_state(dma_domain, *(int *)data);
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		memcpy(data, &dma_domain->dma_stash,
		       sizeof(struct pamu_stash_attribute));
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		*(int *)data = dma_domain->enabled;
		break;
	case DOMAIN_ATTR_FSL_PAMUV1:
		*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If the PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the pci bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = iommu_group_get_for_dev(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity,
		 * so free the controller device's iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static int fsl_pamu_add_device(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	struct pci_dev *pdev;
	const u32 *prop;
	int ret = 0, len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		/* Don't create device groups for virtual PCI bridges */
		if (pdev->subordinate)
			return 0;

		group = get_pci_device_group(pdev);
	} else {
		prop = of_get_property(dev->of_node, "fsl,liodn", &len);
		if (prop)
			group = get_device_iommu_group(dev);
	}

	if (IS_ERR(group))
		return PTR_ERR(group);

	/*
	 * Check if the device has already been added to an iommu group.
	 * A group could already have been created for a PCI device in
	 * the iommu_group_get_for_dev path.
	 */
	if (!dev->iommu_group)
		ret = iommu_group_add_device(group, dev);

	iommu_group_put(group);
	return ret;
}

static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = domain->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set the number of windows as the domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Ensure that the geometry has been set for the domain */
	if (!dma_domain->geom_size) {
		pr_debug("Please configure geometry before setting the number of windows\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Ensure we have a valid window count, i.e. it should be less than
	 * the maximum permissible limit and should be a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       (w_count > 1) ? w_count : 0);
	if (!ret) {
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kcalloc(w_count,
					      sizeof(struct dma_window),
					      GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = domain->priv;

	return dma_domain->win_cnt;
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_init	= fsl_pamu_domain_init,
	.domain_destroy = fsl_pamu_domain_destroy,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.domain_window_disable = fsl_pamu_window_disable,
	.domain_get_windows = fsl_pamu_get_windows,
	.domain_set_windows = fsl_pamu_set_windows,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	.add_device	= fsl_pamu_add_device,
	.remove_device	= fsl_pamu_remove_device,
};
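
/*
 * Caller-side sketch of the setup order the checks in the ops above
 * imply: geometry first, then the window count, then attach, then
 * window mappings, then enable.  This is illustrative only, not code
 * from this driver; "dev" and "phys" are placeholder names, and the
 * aperture below is 1 GiB split into four windows.
 *
 *	struct iommu_domain_geometry geom = {
 *		.aperture_start = 0,
 *		.aperture_end	= (1ULL << 30) - 1,
 *		.force_aperture = true,
 *	};
 *	u32 wins = 4;
 *	int enable = 1;
 *
 *	domain = iommu_domain_alloc(&pci_bus_type);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, &geom);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &wins);
 *	iommu_attach_device(domain, dev);
 *	iommu_domain_window_enable(domain, 0, phys, SZ_256M,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
 */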

int pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}