/* nouveau_sgdma.c, revision a10e9e1dbb39970f232b1e2b0e4f738e2d77079f */
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

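/*
 * Per-object TTM backend state.  "pages" holds the DMA (bus) address of
 * each backing page once populate() has mapped them; "offset" remembers
 * where in the GART aperture bind() placed the object, so unbind() can
 * locate the same PTEs again.
 */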
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;
	bool unmap_pages;

	u64 offset;
	bool bound;
};

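/*
 * Map the object's backing pages for DMA.  TTM hands us a dma_addrs
 * array to fill; each page is mapped bidirectionally through the PCI
 * DMA API so the GPU can reach it.
 */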
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	int i;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	nvbe->pages = dma_addrs;
	nvbe->nr_pages = num_pages;
	nvbe->unmap_pages = true;

	/* this code path isn't called and is incorrect anyway */
	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
		nvbe->unmap_pages = false;
		return 0;
	}

	for (i = 0; i < num_pages; i++) {
		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
			/* pages 0..i-1 were mapped successfully; record
			 * that count (not i - 1, which would leak one
			 * mapping) so clear() unmaps exactly those pages.
			 */
			nvbe->nr_pages = i;
			be->func->clear(be);
			return -EFAULT;
		}
	}

	return 0;
}

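/* Undo populate(): unbind if still bound, then unmap every page. */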
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	if (nvbe->bound)
		be->func->unbind(be);

	if (nvbe->unmap_pages) {
		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		nvbe->unmap_pages = false;
	}

	nvbe->pages = NULL;
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (!nvbe)
		return;

	NV_DEBUG(nvbe->dev, "\n");

	if (nvbe->pages)
		be->func->clear(be);
	kfree(nvbe);
}

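/*
 * NV04-style GART: the page table lives inside a DMA-in-memory ctxdma
 * object.  Words 0 and 1 of the object hold the descriptor written by
 * nouveau_sgdma_init(), so PTEs start at word 2 -- hence the "+ 2"
 * below.  Each 32-bit PTE maps one 4KiB GART page, or'd with 3 to set
 * the low two flag bits marking the entry valid; when the CPU page
 * size exceeds 4KiB, one CPU page consumes PAGE_SIZE /
 * NV_CTXDMA_PAGE_SIZE consecutive PTEs.
 */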
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
			offset_l += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

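/*
 * Kick the VM flush so the GPU's TLBs pick up PTE changes.  The
 * 0x100810 register and its bits follow nouveau's reverse-engineered
 * understanding of the NV41 memory controller: write the trigger
 * value, poll for the completion bit, then clear the register.
 */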
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100810, 0x00000022);
	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100810));
	nv_wr32(dev, 0x100810, 0x00000000);
}

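/*
 * NV41-style GART: a flat table of 32-bit PTEs.  Each PTE stores the
 * page's bus address shifted right by 7 bits, with bit 0 serving as
 * the present bit.
 */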
static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2;
	u32 cnt = nvbe->nr_pages;

	nvbe->offset = mem->start << PAGE_SHIFT;

	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv41_sgdma_bind,
	.unbind			= nv41_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

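/*
 * Flush the NV44 GART TLB for the range just rewritten: 0x100814 takes
 * the size of the region, 0x100808 the base offset plus a trigger bit,
 * and bit 0 of 0x100808 reads back as the completion flag.  (Register
 * meanings per nouveau's reverse-engineered documentation.)
 */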
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100808));
	nv_wr32(dev, 0x100808, 0x00000000);
}

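/*
 * NV44 packs four 27-bit page-frame numbers into each 16-byte group of
 * the page table:
 *
 *   entry 0: word0[26:0]
 *   entry 1: word0[31:27] + word1[21:0]
 *   entry 2: word1[31:22] + word2[16:0]
 *   entry 3: word2[31:17] + word3[11:0]
 *
 * so a partial group must be read, modified and written back, which is
 * what this helper does.  NV44 PTEs have no present bit (see the lament
 * in nouveau_sgdma_init()), so unused slots are pointed at a mapped
 * dummy page (list == NULL) rather than cleared.
 */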
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
	u32 pte, tmp[4];

	pte   = base >> 2;
	base &= ~0x0000000f;

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	tmp[3] |= 0x40000000;

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3]);
}

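/*
 * Write PTEs in three phases: a read-modify-write fill for a leading
 * partial group, direct writes of whole 4-entry groups, then a fill
 * for any trailing partial group.
 */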
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2, tmp[4];
	u32 cnt = nvbe->nr_pages;
	int i;

	nvbe->offset = mem->start << PAGE_SHIFT;

	if (pte & 0x0000000c) {
		u32  max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, list, pte, part);
		pte  += (part << 2);
		list += part;
		cnt  -= part;
	}

	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte + 0x0, tmp[0] >>  0 | tmp[1] << 27);
		nv_wo32(pgt, pte + 0x4, tmp[1] >>  5 | tmp[2] << 22);
		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
		pte  += 0x10;
		cnt  -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	if (pte & 0x0000000c) {
		u32  max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, NULL, pte, part);
		pte  += (part << 2);
		cnt  -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte + 0x0, 0x00000000);
		nv_wo32(pgt, pte + 0x4, 0x00000000);
		nv_wo32(pgt, pte + 0x8, 0x00000000);
		nv_wo32(pgt, pte + 0xc, 0x00000000);
		pte  += 0x10;
		cnt  -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv44_sgdma_bind,
	.unbind			= nv44_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

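/*
 * On NV50+ the real (un)binding happens in the VM code called from
 * move_notify(); these hooks only stash the nouveau_mem node in
 * nvbe->pages (type-punned) so the dma_addr_t list can be recovered
 * again on unbind.
 */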
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = mem->mm_node;
	/* noop: bound in move_notify() */
	node->pages = nvbe->pages;
	nvbe->pages = (dma_addr_t *)node;
	nvbe->bound = true;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
	/* noop: unbound in move_notify() */
	nvbe->pages = node->pages;
	node->pages = NULL;
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = dev_priv->gart_info.func;
	return &nvbe->backend;
}

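/*
 * Pick a GART backend for the chipset and build whatever page-table
 * object it needs.  A DMA-mapped dummy page is always allocated first;
 * NV44 in particular depends on it, since its PTEs cannot express
 * "not present".
 */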
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	u32 aper_size, align;
	int ret;

	if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 64 * 1024 * 1024;

	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
	 * Christmas.  The cards before it have them, the cards after
	 * it have them, why is NV44 so unloved?
	 */
	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
	if (!dev_priv->gart_info.dummy.page)
		return -ENOMEM;

	dev_priv->gart_info.dummy.addr =
		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
		NV_ERROR(dev, "error mapping dummy page\n");
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
		return -ENOMEM;
	}

	if (dev_priv->card_type >= NV_50) {
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
		/* the "0 &&" above deliberately disables the NV41/NV44
		 * backends at this revision; those chipsets fall through
		 * to the NV04-style ctxdma below instead
		 */
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
		} else {
			dev_priv->gart_info.func = &nv41_sgdma_backend;
			align = 16;
		}

		/* one 32-bit PTE per 4KiB page: aper_size / 1024 bytes */
		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			/* don't leak the dummy page on failure */
			nouveau_sgdma_takedown(dev);
			return ret;
		}

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
	} else {
		/* PTEs plus the 8-byte ctxdma descriptor at the front */
		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			/* don't leak the dummy page on failure */
			nouveau_sgdma_takedown(dev);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
		dev_priv->gart_info.func = &nv04_sgdma_backend;
	}

	return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

	if (dev_priv->gart_info.dummy.page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
	}
}

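/*
 * Translate an offset inside the GART aperture to the bus address the
 * GPU would see, by reading back the NV04-style PTE.  Only meaningful
 * for the pre-NV50 ctxdma backends, hence the BUG_ON.
 */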
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}