/* qxl_ioctl.c revision 970fa986fadb1165cf38b45b70e98302a3bee497 */
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 *          Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29/*
30 * TODO: allocating a new gem(in qxl_bo) for each request.
31 * This is wasteful since bo's are page aligned.
32 */
33static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
34			   struct drm_file *file_priv)
35{
36	struct qxl_device *qdev = dev->dev_private;
37	struct drm_qxl_alloc *qxl_alloc = data;
38	int ret;
39	struct qxl_bo *qobj;
40	uint32_t handle;
41	u32 domain = QXL_GEM_DOMAIN_VRAM;
42
43	if (qxl_alloc->size == 0) {
44		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
45		return -EINVAL;
46	}
47	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
48						domain,
49						qxl_alloc->size,
50						NULL,
51						&qobj, &handle);
52	if (ret) {
53		DRM_ERROR("%s: failed to create gem ret=%d\n",
54			  __func__, ret);
55		return -ENOMEM;
56	}
57	qxl_alloc->handle = handle;
58	return 0;
59}
60
61static int qxl_map_ioctl(struct drm_device *dev, void *data,
62			 struct drm_file *file_priv)
63{
64	struct qxl_device *qdev = dev->dev_private;
65	struct drm_qxl_map *qxl_map = data;
66
67	return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
68				  &qxl_map->offset);
69}
70
71/*
72 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
73 * are on vram).
74 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
75 */
76static void
77apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
78	    struct qxl_bo *src, uint64_t src_off)
79{
80	void *reloc_page;
81
82	reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
83	*(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
84								     src, src_off);
85	qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
86}
87
88static void
89apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
90		 struct qxl_bo *src)
91{
92	uint32_t id = 0;
93	void *reloc_page;
94
95	if (src && !src->is_primary)
96		id = src->surface_id;
97
98	reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
99	*(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
100	qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
101}
102
103/* return holding the reference to this object */
104static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
105					 struct drm_file *file_priv, uint64_t handle,
106					 struct qxl_reloc_list *reloc_list)
107{
108	struct drm_gem_object *gobj;
109	struct qxl_bo *qobj;
110	int ret;
111
112	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
113	if (!gobj) {
114		DRM_ERROR("bad bo handle %lld\n", handle);
115		return NULL;
116	}
117	qobj = gem_to_qxl_bo(gobj);
118
119	ret = qxl_bo_list_add(reloc_list, qobj);
120	if (ret)
121		return NULL;
122
123	return qobj;
124}
125
126/*
127 * Usage of execbuffer:
128 * Relocations need to take into account the full QXLDrawable size.
129 * However, the command as passed from user space must *not* contain the initial
130 * QXLReleaseInfo struct (first XXX bytes)
131 */
132static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
133				struct drm_file *file_priv)
134{
135	struct qxl_device *qdev = dev->dev_private;
136	struct drm_qxl_execbuffer *execbuffer = data;
137	struct drm_qxl_command user_cmd;
138	int cmd_num;
139	struct qxl_bo *reloc_src_bo;
140	struct qxl_bo *reloc_dst_bo;
141	struct drm_qxl_reloc reloc;
142	void *fb_cmd;
143	int i, ret;
144	struct qxl_reloc_list reloc_list;
145	int unwritten;
146	uint32_t reloc_dst_offset;
147	INIT_LIST_HEAD(&reloc_list.bos);
148
149	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
150		struct qxl_release *release;
151		struct qxl_bo *cmd_bo;
152		int release_type;
153		struct drm_qxl_command *commands =
154			(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
155
156		if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
157				       sizeof(user_cmd)))
158			return -EFAULT;
159		switch (user_cmd.type) {
160		case QXL_CMD_DRAW:
161			release_type = QXL_RELEASE_DRAWABLE;
162			break;
163		case QXL_CMD_SURFACE:
164		case QXL_CMD_CURSOR:
165		default:
166			DRM_DEBUG("Only draw commands in execbuffers\n");
167			return -EINVAL;
168			break;
169		}
170
171		if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
172			return -EINVAL;
173
174		ret = qxl_alloc_release_reserved(qdev,
175						 sizeof(union qxl_release_info) +
176						 user_cmd.command_size,
177						 release_type,
178						 &release,
179						 &cmd_bo);
180		if (ret)
181			return ret;
182
183		/* TODO copy slow path code from i915 */
184		fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
185		unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
186		qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
187		if (unwritten) {
188			DRM_ERROR("got unwritten %d\n", unwritten);
189			qxl_release_unreserve(qdev, release);
190			qxl_release_free(qdev, release);
191			return -EFAULT;
192		}
193
194		for (i = 0 ; i < user_cmd.relocs_num; ++i) {
195			if (DRM_COPY_FROM_USER(&reloc,
196					       &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
197					       sizeof(reloc))) {
198				qxl_bo_list_unreserve(&reloc_list, true);
199				qxl_release_unreserve(qdev, release);
200				qxl_release_free(qdev, release);
201				return -EFAULT;
202			}
203
204			/* add the bos to the list of bos to validate -
205			   need to validate first then process relocs? */
206			if (reloc.dst_handle) {
207				reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
208								  reloc.dst_handle, &reloc_list);
209				if (!reloc_dst_bo) {
210					qxl_bo_list_unreserve(&reloc_list, true);
211					qxl_release_unreserve(qdev, release);
212					qxl_release_free(qdev, release);
213					return -EINVAL;
214				}
215				reloc_dst_offset = 0;
216			} else {
217				reloc_dst_bo = cmd_bo;
218				reloc_dst_offset = release->release_offset;
219			}
220
221			/* reserve and validate the reloc dst bo */
222			if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
223				reloc_src_bo =
224					qxlhw_handle_to_bo(qdev, file_priv,
225							   reloc.src_handle, &reloc_list);
226				if (!reloc_src_bo) {
227					if (reloc_dst_bo != cmd_bo)
228						drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
229					qxl_bo_list_unreserve(&reloc_list, true);
230					qxl_release_unreserve(qdev, release);
231					qxl_release_free(qdev, release);
232					return -EINVAL;
233				}
234			} else
235				reloc_src_bo = NULL;
236			if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
237				apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
238					    reloc_src_bo, reloc.src_offset);
239			} else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
240				apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
241			} else {
242				DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
243				return -EINVAL;
244			}
245
246			if (reloc_src_bo && reloc_src_bo != cmd_bo) {
247				qxl_release_add_res(qdev, release, reloc_src_bo);
248				drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
249			}
250
251			if (reloc_dst_bo != cmd_bo)
252				drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
253		}
254		qxl_fence_releaseable(qdev, release);
255
256		ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
257		if (ret == -ERESTARTSYS) {
258			qxl_release_unreserve(qdev, release);
259			qxl_release_free(qdev, release);
260			qxl_bo_list_unreserve(&reloc_list, true);
261			return ret;
262		}
263		qxl_release_unreserve(qdev, release);
264	}
265	qxl_bo_list_unreserve(&reloc_list, 0);
266	return 0;
267}
268
269static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
270				 struct drm_file *file)
271{
272	struct qxl_device *qdev = dev->dev_private;
273	struct drm_qxl_update_area *update_area = data;
274	struct qxl_rect area = {.left = update_area->left,
275				.top = update_area->top,
276				.right = update_area->right,
277				.bottom = update_area->bottom};
278	int ret;
279	struct drm_gem_object *gobj = NULL;
280	struct qxl_bo *qobj = NULL;
281
282	if (update_area->left >= update_area->right ||
283	    update_area->top >= update_area->bottom)
284		return -EINVAL;
285
286	gobj = drm_gem_object_lookup(dev, file, update_area->handle);
287	if (gobj == NULL)
288		return -ENOENT;
289
290	qobj = gem_to_qxl_bo(gobj);
291
292	ret = qxl_bo_reserve(qobj, false);
293	if (ret)
294		goto out;
295
296	if (!qobj->pin_count) {
297		qxl_ttm_placement_from_domain(qobj, qobj->type);
298		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
299				      true, false);
300		if (unlikely(ret))
301			goto out;
302	}
303
304	ret = qxl_bo_check_id(qdev, qobj);
305	if (ret)
306		goto out2;
307	if (!qobj->surface_id)
308		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
309	ret = qxl_io_update_area(qdev, qobj, &area);
310
311out2:
312	qxl_bo_unreserve(qobj);
313
314out:
315	drm_gem_object_unreference_unlocked(gobj);
316	return ret;
317}
318
319static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
320		       struct drm_file *file_priv)
321{
322	struct qxl_device *qdev = dev->dev_private;
323	struct drm_qxl_getparam *param = data;
324
325	switch (param->param) {
326	case QXL_PARAM_NUM_SURFACES:
327		param->value = qdev->rom->n_surfaces;
328		break;
329	case QXL_PARAM_MAX_RELOCS:
330		param->value = QXL_MAX_RES;
331		break;
332	default:
333		return -EINVAL;
334	}
335	return 0;
336}
337
338static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
339				  struct drm_file *file_priv)
340{
341	struct qxl_device *qdev = dev->dev_private;
342	struct drm_qxl_clientcap *param = data;
343	int byte, idx;
344
345	byte = param->index / 8;
346	idx = param->index % 8;
347
348	if (qdev->pdev->revision < 4)
349		return -ENOSYS;
350
351	if (byte >= 58)
352		return -ENOSYS;
353
354	if (qdev->rom->client_capabilities[byte] & (1 << idx))
355		return 0;
356	return -ENOSYS;
357}
358
359static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
360				struct drm_file *file)
361{
362	struct qxl_device *qdev = dev->dev_private;
363	struct drm_qxl_alloc_surf *param = data;
364	struct qxl_bo *qobj;
365	int handle;
366	int ret;
367	int size, actual_stride;
368	struct qxl_surface surf;
369
370	/* work out size allocate bo with handle */
371	actual_stride = param->stride < 0 ? -param->stride : param->stride;
372	size = actual_stride * param->height + actual_stride;
373
374	surf.format = param->format;
375	surf.width = param->width;
376	surf.height = param->height;
377	surf.stride = param->stride;
378	surf.data = 0;
379
380	ret = qxl_gem_object_create_with_handle(qdev, file,
381						QXL_GEM_DOMAIN_SURFACE,
382						size,
383						&surf,
384						&qobj, &handle);
385	if (ret) {
386		DRM_ERROR("%s: failed to create gem ret=%d\n",
387			  __func__, ret);
388		return -ENOMEM;
389	} else
390		param->handle = handle;
391	return ret;
392}
393
/*
 * Driver ioctl table, hooked up via the drm_driver ioctls field.
 * NOTE(review): entry order is assumed to correspond to the DRM_QXL_*
 * ioctl numbers in the uapi header — confirm before reordering.
 */
struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
};

/* number of entries above; consumed by the drm_driver num_ioctls field */
int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
413