nouveau_sgdma.c revision efa58db3de82ab0fdc0774aef69e2dd8a27cc98f
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

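/*
 * Per-backend state: the DMA addresses backing a buffer, a flag per page
 * recording whether TTM already provided the mapping (in which case we
 * must not unmap it ourselves), and the GART offset the pages are bound at.
 */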
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	bool *ttm_alloced;
	unsigned nr_pages;

	u64 offset;
	bool bound;
};

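/*
 * Build the DMA address list for a buffer.  Addresses TTM has already
 * mapped (anything other than DMA_ERROR_CODE) are taken as-is and flagged
 * in ttm_alloced; everything else is mapped here with pci_map_page() and
 * must be unmapped again in clear().
 */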
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
	if (!nvbe->ttm_alloced) {
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		return -ENOMEM;
	}

	nvbe->nr_pages = 0;
	while (num_pages--) {
		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
			nvbe->pages[nvbe->nr_pages] =
					dma_addrs[nvbe->nr_pages];
			nvbe->ttm_alloced[nvbe->nr_pages] = true;
		} else {
			nvbe->pages[nvbe->nr_pages] =
				pci_map_page(dev->pdev, pages[nvbe->nr_pages],
					     0, PAGE_SIZE,
					     PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev,
						  nvbe->pages[nvbe->nr_pages])) {
				be->func->clear(be);
				return -EFAULT;
			}
		}

		nvbe->nr_pages++;
	}

	return 0;
}

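/*
 * Undo populate(): unbind if still bound, unmap only the pages we mapped
 * ourselves (not the TTM-provided ones), then free both arrays.
 */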
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			if (!nvbe->ttm_alloced[nvbe->nr_pages])
				pci_unmap_page(dev->pdev,
					       nvbe->pages[nvbe->nr_pages],
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		kfree(nvbe->ttm_alloced);
		nvbe->pages = NULL;
		nvbe->ttm_alloced = NULL;
		nvbe->nr_pages = 0;
	}
}

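/*
 * Final teardown of a backend: clear any remaining mappings and free the
 * nouveau_sgdma_be itself.
 */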
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}

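/*
 * NV04-class GART: write one 32-bit PTE per 4KiB sub-page into the ctxdma
 * page table.  The first two words of the object are the ctxdma header,
 * hence the "+ 2"; ORing in 3 sets the low flag bits that mark the entry
 * valid.
 */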
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];

		/* Recompute the low 32 bits each iteration; a single value
		 * hoisted out of the loop would go stale on configurations
		 * where PAGE_SIZE > NV_CTXDMA_PAGE_SIZE.
		 */
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0,
				lower_32_bits(dma_offset) | 3);
			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}

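/*
 * Clear the PTEs written by nv04_sgdma_bind().
 */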
static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

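/*
 * NV50+: the GART is a range inside the shared channel VM, so bind and
 * unbind reduce to nouveau_vm_map_sg()/nouveau_vm_unmap_at() on the
 * reserved VMA.
 */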
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	nvbe->offset = mem->start << PAGE_SHIFT;

	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
	nvbe->bound = true;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	if (!nvbe->bound)
		return 0;

	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
			    nvbe->nr_pages << PAGE_SHIFT);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

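/*
 * Allocate a TTM backend and select the bind/unbind implementation for
 * the card generation.
 */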
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	if (dev_priv->card_type >= NV_50)
		nvbe->backend.func = &nv50_sgdma_backend;
	else
		nvbe->backend.func = &nv04_sgdma_backend;
	return &nvbe->backend;
}

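/*
 * Set up the global GART state.  NV50+ reserves a 512MiB window in the
 * channel VM; earlier cards build an NV_CLASS_DMA_IN_MEMORY ctxdma object
 * whose page table is filled in at bind time.  The aperture drops to
 * 64MiB when less than 2MiB of instance memory is reserved, presumably
 * because the page table object must fit in that reserved space.
 */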
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
				     12, NV_MEM_ACCESS_RW,
				     &dev_priv->gart_info.vma);
		if (ret)
			return ret;

		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
	} else {
		if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
			aper_size = 64 * 1024 * 1024;
		else
			aper_size = 512 * 1024 * 1024;

		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */

		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++)
			nv_wo32(gpuobj, i * 4, 0x00000000);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
	}

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	return 0;
}

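/*
 * Tear down the GART state created by nouveau_sgdma_init().
 */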
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
	nouveau_vm_put(&dev_priv->gart_info.vma);
}

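/*
 * Translate a GART offset back to the bus address stored in the ctxdma
 * page table.  Only valid for the pre-NV50 ctxdma path, hence the BUG_ON.
 */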
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}