nouveau_sgdma.c revision 40b2a687bd92827ca144d3623cf48377d8f7680d
1#include "drmP.h"
2#include "nouveau_drv.h"
3#include <linux/pagemap.h>
4
5#define NV_CTXDMA_PAGE_SHIFT 12
6#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
7#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
8
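/*
 * Per-object TTM backend state.  The GART page table is managed in
 * NV_CTXDMA_PAGE_SIZE (4KiB) units throughout, so a single CPU page
 * may span several PTEs when PAGE_SIZE > 4KiB.
 */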
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	unsigned pte_start;
	bool bound;
};

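/*
 * Map the caller's pages for DMA and record their bus addresses so a
 * later bind() can write them into the GART page table.
 */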
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			/* clear() unmaps the nr_pages mappings made so far */
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

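/*
 * Undo populate(): unbind if still bound, then unmap every page and
 * free the DMA address array.
 */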
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}

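/*
 * Convert a GART byte offset into a PTE index within the ctxdma object.
 * Pre-NV50 the page table is an array of 32-bit PTEs preceded by a
 * two-word ctxdma header, so e.g. offset 0x3000 maps to word 3 + 2 = 5.
 * On NV50 each PTE is two 32-bit words, hence the << 1.
 */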
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}

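/*
 * Write the bus addresses recorded by populate() into the GART page
 * table at the offset TTM assigned to this buffer.  Pre-NV50 PTEs are
 * the low address word or'd with 3; NV50 PTEs carry the low address
 * word or'd with 0x21 plus the high 8 address bits in a second word.
 */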
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

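	/* 0x100c80 appears to trigger a VM/TLB flush on NV50 so the
	 * updated PTEs take effect; the exact register semantics are
	 * undocumented. */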
	if (dev_priv->card_type == NV_50) {
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = true;
	return 0;
}

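/*
 * Point every PTE this buffer occupied back at the dummy page so the
 * GART never contains stale bus addresses.
 */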
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
				nv_wo32(dev, gpuobj, pte++, 0x00000000);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = false;
	return 0;
}

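/*
 * Release a backend instance: clear any remaining mappings, then free
 * the wrapper itself.
 */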
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}

static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nouveau_sgdma_bind,
	.unbind			= nouveau_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

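/*
 * Allocate a TTM backend instance for this device.  Returns NULL if
 * nouveau_sgdma_init() has not created the page table ctxdma yet.
 */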
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func	= &nouveau_sgdma_backend;

	return &nvbe->backend;
}

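/*
 * One-time GART setup: build the page table object (a ctxdma with
 * header on pre-NV50, a bare VM page table on NV50), allocate and map
 * the dummy page, and initially point every entry at it.
 */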
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				      NVOBJ_FLAG_ALLOW_NO_REFS |
				      NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!dev_priv->gart_info.sg_dummy_page) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
		 * on those cards? */
		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				       (1 << 12) /* PT present */ |
				       (0 << 13) /* PT *not* linear */ |
				       (NV_DMA_ACCESS_RW  << 14) |
				       (NV_DMA_TARGET_PCI << 16));
		nv_wo32(dev, gpuobj, 1, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(dev, gpuobj, i,
				    dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(dev, gpuobj, (i+0)/4,
				    dev_priv->gart_info.sg_dummy_bus | 0x21);
			nv_wo32(dev, gpuobj, (i+4)/4, 0);
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}

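/*
 * Inverse of nouveau_sgdma_init(): unmap and free the dummy page, then
 * destroy the page table object.
 */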
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		/* unmap with the same size the page was mapped with */
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}
}

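/*
 * Read back the bus address a GART offset currently points at.  Only
 * implemented for pre-NV50 page tables (one 32-bit PTE per page, after
 * the two-word ctxdma header).
 */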
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		instmem->prepare_access(dev, false);
		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		instmem->finish_access(dev);
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}