nouveau_sgdma.c revision f56cb86f9abd229418f894a8ffedfb9ff465c181
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
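
/*
 * The GART ctxdma object is always built out of 4KiB pages, independent
 * of the CPU's PAGE_SIZE.  On a kernel with 64KiB pages, for example,
 * each CPU page spans PAGE_SIZE / NV_CTXDMA_PAGE_SIZE = 16 ctxdma PTEs;
 * on the usual 4KiB kernel the ratio is 1.
 */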

struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;	/* bus addresses of TTM's backing pages */
	unsigned nr_pages;	/* entries in pages[] */

	unsigned pte_start;	/* first PTE written by the last bind() */
	bool bound;
};

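/*
 * Allocate the DMA address array and map each of TTM's pages for device
 * access.  On a mapping failure, everything mapped so far is torn down
 * again via the backend's clear() hook.
 */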
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

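/*
 * Undo populate(): unbind if still bound, then unmap and free the page
 * array.  Safe to call on an already-cleared backend.
 */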
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}

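/*
 * Convert a byte offset within the GART aperture to a word index into
 * the ctxdma object.  Pre-NV50 the object starts with a two-word
 * header, so PTE n lives at word n + 2; offset 0x3000 -> word 5.  On
 * NV50 each PTE is two 32-bit words with no header, so PTE n starts at
 * word n * 2; offset 0x3000 -> word 6.
 */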
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}

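/*
 * Point the PTEs covering this buffer at its backing pages.  The PTE
 * format written below is inferred from the values used: pre-NV50 a
 * single word holding the bus address with the low flag bits set to 3
 * (presumably present + read/write), NV50 a low word of address | 0x21
 * plus a high word carrying bits 39:32 of the bus address.
 */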
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50) {
				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
			} else {
				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	/* 0x100c80 appears to trigger a VM TLB flush on NV50; poke it
	 * and wait for the busy bit to clear */
	if (dev_priv->card_type == NV_50) {
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = true;
	return 0;
}

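/*
 * The mirror image of bind(): rewrite every PTE the buffer occupied so
 * it points back at the scratch (dummy) page, leaving no stale device
 * mappings behind.
 */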
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50) {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
			} else {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
				nv_wo32(dev, gpuobj, pte++, 0x00000000);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = false;
	return 0;
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (!nvbe)
		return;

	NV_DEBUG(nvbe->dev, "\n");

	if (nvbe->pages)
		be->func->clear(be);
	kfree(nvbe);
}

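/*
 * The vtable TTM uses to drive this backend.  The expected call order,
 * going by how the hooks defend themselves above, is populate() ->
 * bind() -> unbind() -> clear() -> destroy(), with clear() and
 * destroy() each tolerating earlier stages having been skipped.
 */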
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nouveau_sgdma_bind,
	.unbind			= nouveau_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func	= &nouveau_sgdma_backend;

	return &nvbe->backend;
}

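/*
 * Build the ctxdma (page table) object backing the whole GART aperture.
 * Sizing, from the code below: pre-NV50 a 64MiB aperture needs
 * 64MiB / 4KiB = 16384 four-byte PTEs plus an 8-byte ctxdma header,
 * i.e. 65544 bytes; NV50 uses a 512MiB aperture with 131072 eight-byte
 * PTEs, i.e. exactly 1MiB of page table.
 */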
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				      NVOBJ_FLAG_ALLOW_NO_REFS |
				      NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!dev_priv->gart_info.sg_dummy_page) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.sg_dummy_bus)) {
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		nouveau_gpuobj_del(dev, &gpuobj);
		return -EFAULT;
	}

	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
		 * on those cards? */
		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				       (1 << 12) /* PT present */ |
				       (0 << 13) /* PT *not* linear */ |
				       (NV_DMA_ACCESS_RW  << 14) |
				       (NV_DMA_TARGET_PCI << 16));
		nv_wo32(dev, gpuobj, 1, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(dev, gpuobj, i,
				    dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(dev, gpuobj, (i+0)/4,
				    dev_priv->gart_info.sg_dummy_bus | 0x21);
			nv_wo32(dev, gpuobj, (i+4)/4, 0);
		}
	}
	dev_priv->engine.instmem.flush(dev);

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		/* unmap with the same size the page was mapped with */
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}

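/*
 * Translate an offset within the GART aperture back to the bus address
 * the hardware would use, by reading the PTE out of the ctxdma object
 * and masking off the flag bits.  Only implemented for pre-NV50.
 */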
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}