/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#include <errno.h>

#include "private.h"

#include "nvif/class.h"

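/* Allocate a FIFO channel for pre-Fermi (<NVC0) chipsets via the legacy
 * ABI16 CHANNEL_ALLOC ioctl, passing along the VRAM and GART ctxdma
 * handles supplied by the client.
 */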
static int
abi16_chan_nv04(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct nv04_fifo *nv04 = obj->data;
	struct drm_nouveau_channel_alloc req = {
		.fb_ctxdma_handle = nv04->vram,
		.tt_ctxdma_handle = nv04->gart
	};
	int ret;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nv04->base.channel = req.channel;
	nv04->base.pushbuf = req.pushbuf_domains;
	nv04->notify = req.notifier_handle;
	nv04->base.object->handle = req.channel;
	nv04->base.object->length = sizeof(*nv04);
	return 0;
}

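/* Allocate a FIFO channel for Fermi (NVC0:NVE0) chipsets.  No ctxdma
 * handles are required here, so the request is left zeroed.
 */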
static int
abi16_chan_nvc0(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct drm_nouveau_channel_alloc req = {};
	struct nvc0_fifo *nvc0 = obj->data;
	int ret;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvc0->base.channel = req.channel;
	nvc0->base.pushbuf = req.pushbuf_domains;
	nvc0->notify = req.notifier_handle;
	nvc0->base.object->handle = req.channel;
	nvc0->base.object->length = sizeof(*nvc0);
	return 0;
}

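/* Allocate a FIFO channel for Kepler (NVE0) and newer chipsets.  When the
 * client passes the extended nve0_fifo data, the requested engine is
 * passed to the kernel through the otherwise-unused ctxdma fields.
 */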
static int
abi16_chan_nve0(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct drm_nouveau_channel_alloc req = {};
	struct nve0_fifo *nve0 = obj->data;
	int ret;

	if (obj->length > offsetof(struct nve0_fifo, engine)) {
		req.fb_ctxdma_handle = 0xffffffff;
		req.tt_ctxdma_handle = nve0->engine;
	}

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nve0->base.channel = req.channel;
	nve0->base.pushbuf = req.pushbuf_domains;
	nve0->notify = req.notifier_handle;
	nve0->base.object->handle = req.channel;
	nve0->base.object->length = sizeof(*nve0);
	return 0;
}

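/* Allocate an engine object on the parent channel via the legacy
 * GROBJ_ALLOC ioctl.
 */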
static int
abi16_engobj(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct drm_nouveau_grobj_alloc req = {
		.channel = obj->parent->handle,
		.handle = obj->handle,
		.class = obj->oclass,
	};
	int ret;

	/* Older kernel versions did not have the concept of nouveau-
	 * specific classes and abused some NVIDIA-assigned ones for
	 * a SW class.  The ABI16 layer has compatibility in place to
	 * translate these older identifiers to the newer ones.
	 *
	 * Clients that have been updated to use NVIF are required to
	 * use the newer class identifiers, which means that they'll
	 * break if running on an older kernel.
	 *
	 * To handle this case, when using ABI16, we translate to the
	 * older values which work on any kernel.
	 */
	switch (req.class) {
	case NVIF_CLASS_SW_NV04 : req.class = 0x006e; break;
	case NVIF_CLASS_SW_NV10 : req.class = 0x016e; break;
	case NVIF_CLASS_SW_NV50 : req.class = 0x506e; break;
	case NVIF_CLASS_SW_GF100: req.class = 0x906e; break;
	default:
		break;
	}

	ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GROBJ_ALLOC,
			      &req, sizeof(req));
	if (ret)
		return ret;

	obj->length = sizeof(struct nouveau_object *);
	return 0;
}

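/* Allocate a notifier object on the parent channel and record the offset
 * the kernel assigned to it.
 */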
static int
abi16_ntfy(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct nv04_notify *ntfy = obj->data;
	struct drm_nouveau_notifierobj_alloc req = {
		.channel = obj->parent->handle,
		.handle = ntfy->object->handle,
		.size = ntfy->length,
	};
	int ret;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NOTIFIEROBJ_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	ntfy->offset = req.offset;
	ntfy->object->length = sizeof(*ntfy);
	return 0;
}

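/* Build the list of subclasses exposed for an object when running on the
 * legacy ABI16 interfaces.  The caller owns the returned array; the return
 * value is the number of entries filled in.
 */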
drm_private int
abi16_sclass(struct nouveau_object *obj, struct nouveau_sclass **psclass)
{
	struct nouveau_sclass *sclass;
	struct nouveau_device *dev;

	if (!(sclass = calloc(8, sizeof(*sclass))))
		return -ENOMEM;
	*psclass = sclass;

	switch (obj->oclass) {
	case NOUVEAU_FIFO_CHANNEL_CLASS:
		/* Older kernel versions were exposing the wrong video engine
		 * classes on certain G98:GF100 boards.  This has since been
		 * corrected, but ABI16 has compatibility in place to avoid
		 * breaking older userspace.
		 *
		 * Clients that have been updated to use NVIF are required to
		 * use the correct classes, which means that they'll break if
		 * running on an older kernel.
		 *
		 * To handle this issue, if using the older kernel interfaces,
		 * we'll magic up a list containing the vdec classes that the
		 * kernel will accept for these boards.  Clients should make
		 * use of this information instead of hardcoding classes for
		 * specific chipsets.
		 */
		dev = (struct nouveau_device *)obj->parent;
		if (dev->chipset >= 0x98 &&
		    dev->chipset != 0xa0 &&
		    dev->chipset <  0xc0) {
			*sclass++ = (struct nouveau_sclass){
				GT212_MSVLD, -1, -1
			};
			*sclass++ = (struct nouveau_sclass){
				GT212_MSPDEC, -1, -1
			};
			*sclass++ = (struct nouveau_sclass){
				GT212_MSPPP, -1, -1
			};
		}
		break;
	default:
		break;
	}

	return sclass - *psclass;
}

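/* Destroy an ABI16 object, using the channel-free ioctl for FIFO channels
 * and the gpuobj-free ioctl for everything else.
 */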
drm_private void
abi16_delete(struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
		struct drm_nouveau_channel_free req;
		req.channel = obj->handle;
		drmCommandWrite(drm->fd, DRM_NOUVEAU_CHANNEL_FREE,
				&req, sizeof(req));
	} else {
		struct drm_nouveau_gpuobj_free req;
		req.channel = obj->parent->handle;
		req.handle  = obj->handle;
		drmCommandWrite(drm->fd, DRM_NOUVEAU_GPUOBJ_FREE,
				&req, sizeof(req));
	}
}

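/* Determine whether an object needs to be created through the ABI16
 * interfaces and, if so, which allocation function to use.  Returns true
 * when the object can only be created via ABI16; when false, NVIF (if
 * supported) should be tried before any returned func.
 */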
drm_private bool
abi16_object(struct nouveau_object *obj, int (**func)(struct nouveau_object *))
{
	struct nouveau_object *parent = obj->parent;

	/* nouveau_object::length is (ab)used to determine whether the
	 * object is a legacy object (!=0), or a real NVIF object.
	 */
	if ((parent->length != 0 && parent->oclass == NOUVEAU_DEVICE_CLASS) ||
	    (parent->length == 0 && parent->oclass == NV_DEVICE)) {
		if (obj->oclass == NOUVEAU_FIFO_CHANNEL_CLASS) {
			struct nouveau_device *dev = (void *)parent;
			if (dev->chipset < 0xc0)
				*func = abi16_chan_nv04;
			else
			if (dev->chipset < 0xe0)
				*func = abi16_chan_nvc0;
			else
				*func = abi16_chan_nve0;
			return true;
		}
	} else
	if ((parent->length != 0 &&
	     parent->oclass == NOUVEAU_FIFO_CHANNEL_CLASS)) {
		if (obj->oclass == NOUVEAU_NOTIFIER_CLASS) {
			*func = abi16_ntfy;
			return true;
		}

		*func = abi16_engobj;
		return false; /* try NVIF, if supported, before calling func */
	}

	*func = NULL;
	return false;
}

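/* Fill in a nouveau_bo from the GEM info returned by the kernel,
 * translating domain and tiling information into the per-chipset
 * bo config.
 */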
drm_private void
abi16_bo_info(struct nouveau_bo *bo, struct drm_nouveau_gem_info *info)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	nvbo->map_handle = info->map_handle;
	bo->handle = info->handle;
	bo->size = info->size;
	bo->offset = info->offset;

	bo->flags = 0;
	if (info->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags |= NOUVEAU_BO_VRAM;
	if (info->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags |= NOUVEAU_BO_GART;
	if (!(info->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG))
		bo->flags |= NOUVEAU_BO_CONTIG;
	if (nvbo->map_handle)
		bo->flags |= NOUVEAU_BO_MAP;

	if (bo->device->chipset >= 0xc0) {
		bo->config.nvc0.memtype   = (info->tile_flags & 0xff00) >> 8;
		bo->config.nvc0.tile_mode = info->tile_mode;
	} else
	if (bo->device->chipset >= 0x80 || bo->device->chipset == 0x50) {
		bo->config.nv50.memtype   = (info->tile_flags & 0x07f00) >> 8 |
					    (info->tile_flags & 0x30000) >> 9;
		bo->config.nv50.tile_mode = info->tile_mode << 4;
	} else {
		bo->config.nv04.surf_flags = info->tile_flags & 7;
		bo->config.nv04.surf_pitch = info->tile_mode;
	}
}

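/* Allocate a new GEM buffer object, translating the bo flags and the
 * per-chipset config into the domain and tiling information expected by
 * the GEM_NEW ioctl, then fill in the bo from the kernel's reply.
 */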
drm_private int
abi16_bo_init(struct nouveau_bo *bo, uint32_t alignment,
	      union nouveau_bo_config *config)
{
	struct nouveau_device *dev = bo->device;
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_gem_new req = {};
	struct drm_nouveau_gem_info *info = &req.info;
	int ret;

	if (bo->flags & NOUVEAU_BO_VRAM)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (bo->flags & NOUVEAU_BO_GART)
		info->domain |= NOUVEAU_GEM_DOMAIN_GART;
	if (!info->domain)
		info->domain |= NOUVEAU_GEM_DOMAIN_VRAM |
				NOUVEAU_GEM_DOMAIN_GART;

	if (bo->flags & NOUVEAU_BO_MAP)
		info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;

	if (bo->flags & NOUVEAU_BO_COHERENT)
		info->domain |= NOUVEAU_GEM_DOMAIN_COHERENT;

	if (!(bo->flags & NOUVEAU_BO_CONTIG))
		info->tile_flags = NOUVEAU_GEM_TILE_NONCONTIG;

	info->size = bo->size;
	req.align = alignment;

	if (config) {
		if (dev->chipset >= 0xc0) {
			info->tile_flags = (config->nvc0.memtype & 0xff) << 8;
			info->tile_mode  = config->nvc0.tile_mode;
		} else
		if (dev->chipset >= 0x80 || dev->chipset == 0x50) {
			info->tile_flags = (config->nv50.memtype & 0x07f) << 8 |
					   (config->nv50.memtype & 0x180) << 9;
			info->tile_mode  = config->nv50.tile_mode >> 4;
		} else {
			info->tile_flags = config->nv04.surf_flags & 7;
			info->tile_mode  = config->nv04.surf_pitch;
		}
	}

	if (!nouveau_device(dev)->have_bo_usage)
		info->tile_flags &= 0x0000ff00;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_NEW,
				  &req, sizeof(req));
	if (ret == 0)
		abi16_bo_info(bo, &req.info);
	return ret;
}