nouveau_sgdma.c revision 5a0e3ad6af8660be21ca98a971cd00f331318c05
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

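/*
 * Per-instance TTM backend state: the bus addresses of the currently
 * populated pages, plus the first PTE written by the most recent bind.
 */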
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	unsigned pte_start;
	bool bound;
};

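/*
 * Map each page of the TTM backing store for PCI DMA and remember the
 * resulting bus addresses; on a mapping failure, undo via ->clear().
 */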
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

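/* Unbind (if necessary) and unmap every page previously populated. */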
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}

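/*
 * Translate a GART byte offset into a PTE index within the sg ctxdma.
 * Pre-NV50 ctxdmas carry a two-word header before the PTE array; on
 * NV50 each page table entry is two 32-bit words.
 */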
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}

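/*
 * Write PTEs for every populated page into the sg ctxdma, starting at
 * the PTE corresponding to the start of the allocated GART range.  On
 * NV50, 0x100c80 is then written and polled to flush the VM.
 */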
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv_wr32(dev, 0x100c80, 0x00050001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}

		nv_wr32(dev, 0x100c80, 0x00000001);
		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
						nv_rd32(dev, 0x100c80));
			return -EBUSY;
		}
	}

	nvbe->bound = true;
	return 0;
}

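/* Point every PTE written by the last bind back at the dummy page. */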
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50)
				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
			else {
				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
				nv_wo32(dev, gpuobj, pte++, 0x00000000);
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.finish_access(nvbe->dev);

	nvbe->bound = false;
	return 0;
}

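/* Release the backend: clear any remaining pages and free the object. */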
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}

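/* TTM backend method table for the SGDMA backend. */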
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nouveau_sgdma_bind,
	.unbind			= nouveau_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

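/*
 * Allocate a TTM backend instance that uses the SGDMA ctxdma set up by
 * nouveau_sgdma_init().
 */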
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func	= &nouveau_sgdma_backend;

	return &nvbe->backend;
}

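/*
 * Create the ctxdma (pre-NV50) or page table (NV50) that covers the
 * GART aperture, and point every entry at a dummy page so that stray
 * accesses hit harmless memory.
 */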
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				      NVOBJ_FLAG_ALLOW_NO_REFS |
				      NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!dev_priv->gart_info.sg_dummy_page) {
		/* alloc_page() can fail; bail out instead of oopsing below */
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	dev_priv->engine.instmem.prepare_access(dev, true);
	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
		 * on those cards? */
		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				       (1 << 12) /* PT present */ |
				       (0 << 13) /* PT *not* linear */ |
				       (NV_DMA_ACCESS_RW  << 14) |
				       (NV_DMA_TARGET_PCI << 16));
		nv_wo32(dev, gpuobj, 1, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(dev, gpuobj, i,
				    dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(dev, gpuobj, (i+0)/4,
				    dev_priv->gart_info.sg_dummy_bus | 0x21);
			nv_wo32(dev, gpuobj, (i+4)/4, 0);
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}

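/* Tear down the dummy page and the sg ctxdma created at init time. */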
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}

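/*
 * Look up the bus address currently mapped at a GART offset by reading
 * the corresponding PTE back out of the ctxdma (pre-NV50 only).
 */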
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		instmem->prepare_access(dev, false);
		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		instmem->finish_access(dev);
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}