/* nouveau_sgdma.c, revision b6fd780791e9189b781e27a443d47bd21ce5145f */
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

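/*
 * The GPU's GART page tables use fixed 4KiB pages regardless of the CPU
 * PAGE_SIZE, so a single CPU page may be backed by several ctxdma entries.
 */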
#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

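/*
 * Per-object TTM backend state: the bus addresses of the currently
 * populated pages, plus the first PTE index once they are bound into
 * the GART.
 */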
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	unsigned pte_start;
	bool bound;
};

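/*
 * Map each CPU page for bidirectional DMA and record its bus address.
 * On a mapping failure, clear() unwinds whatever was mapped so far.
 */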
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

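/*
 * Unbind (if bound) and unmap all populated pages, then free the
 * address array.
 */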
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}

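/*
 * Convert a byte offset into the GART aperture into a 32-bit word index
 * within the page-table object.  Pre-NV50 PTEs are one word each and live
 * after the two-word ctxdma header; NV50 PTEs are two words each.
 */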
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}

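/*
 * Write valid PTEs for every populated page at the aperture offset chosen
 * by TTM.  Pre-NV50 entries are a 32-bit bus address ORed with flag bits;
 * NV50 entries carry the high address bits in a second word.
 */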
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50) {
				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
				pte += 1;
			} else {
				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
				pte += 2;
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv50_vm_flush(dev, 5); /* PGRAPH */
		nv50_vm_flush(dev, 0); /* PFIFO */
	}

	nvbe->bound = true;
	return 0;
}

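/*
 * Rewrite the bound PTE range so it no longer references our pages: on
 * pre-NV50 every entry goes back to pointing into the dummy page, on NV50
 * the entries are simply zeroed.
 */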
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50) {
				nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
				pte += 1;
			} else {
				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
				pte += 2;
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv50_vm_flush(dev, 5); /* PGRAPH */
		nv50_vm_flush(dev, 0); /* PFIFO */
	}

	nvbe->bound = false;
	return 0;
}

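/* Release the backend object, clearing any still-populated pages first. */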
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (!nvbe)
		return;

	NV_DEBUG(nvbe->dev, "\n");

	if (nvbe->pages)
		be->func->clear(be);
	kfree(nvbe);
}

static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nouveau_sgdma_bind,
	.unbind			= nouveau_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

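/*
 * Allocate a TTM backend instance.  Requires the page-table object
 * created by nouveau_sgdma_init().
 */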
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = &nouveau_sgdma_backend;

	return &nvbe->backend;
}

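/*
 * Create the GART page-table object and the dummy page that unbound
 * pre-NV50 PTEs point into: a 64MiB ctxdma with 32-bit PTEs before NV50,
 * a 512MiB VM page table with 64-bit PTEs on NV50.
 */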
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev->pdev;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
	if (!dev_priv->gart_info.sg_dummy_page) {
		nouveau_gpuobj_ref(NULL, &gpuobj);
		return -ENOMEM;
	}

	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		nouveau_gpuobj_ref(NULL, &gpuobj);
		return -EFAULT;
	}

	if (dev_priv->card_type < NV_50) {
		/* Special case: this object is allocated from the global
		 * instmem heap, so cinst is normally invalid.  We use it on
		 * all channels though, so cinst needs to be valid; set it
		 * the same as pinst.
		 */
		gpuobj->cinst = gpuobj->pinst;

		/* Maybe use NV_DMA_TARGET_AGP for PCIE?  NVIDIA does this,
		 * and it's confirmed to work on C51.  Perhaps it means
		 * NV_DMA_TARGET_PCIE on those cards?
		 */
		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (NV_DMA_ACCESS_RW  << 14) |
				   (NV_DMA_TARGET_PCI << 16));
		nv_wo32(gpuobj, 4, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(gpuobj, i * 4,
				dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(gpuobj, i + 0, 0x00000000);
			nv_wo32(gpuobj, i + 4, 0x00000000);
		}
	}
	dev_priv->engine.instmem.flush(dev);

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}

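/* Tear down everything nouveau_sgdma_init() created. */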
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		/* unmap with the same size the page was mapped with */
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
}

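/*
 * Read the bus address backing a GART offset straight back out of the
 * page-table object.  Only implemented for pre-NV50 cards.
 */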
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
	if (dev_priv->card_type < NV_50) {
		*page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}
328