/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"
#include "util_math.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
				     uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

drm_private void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
	/* Remove the buffer from the hash tables. */
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_remove(bo->dev->bo_handles,
			       (void*)(uintptr_t)bo->handle);
	if (bo->flink_name) {
		util_hash_table_remove(bo->dev->bo_flink_names,
				       (void*)(uintptr_t)bo->flink_name);
	}
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	/* Release CPU access. */
	if (bo->cpu_map_count > 0) {
		bo->cpu_map_count = 1;
		amdgpu_bo_cpu_unmap(bo);
	}

	amdgpu_close_kms_handle(bo->dev, bo->handle);
	pthread_mutex_destroy(&bo->cpu_access_mutex);
	free(bo);
}

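/*
 * Allocate a buffer object in the requested heap (GTT and/or VRAM) and
 * return an opaque handle for it.
 *
 * A minimal usage sketch (illustrative only; error handling and the device
 * setup via amdgpu_device_initialize() are assumed to exist elsewhere):
 *
 *	struct amdgpu_bo_alloc_request req = {
 *		.alloc_size = 4096,
 *		.phys_alignment = 4096,
 *		.preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
 *	};
 *	amdgpu_bo_handle buf;
 *	int err = amdgpu_bo_alloc(dev, &req, &buf);
 */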
int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    amdgpu_bo_handle *buf_handle)
{
	struct amdgpu_bo *bo;
	union drm_amdgpu_gem_create args;
	unsigned heap = alloc_buffer->preferred_heap;
	int r = 0;

	/* It's an error if the heap is not specified */
	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
		return -EINVAL;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = alloc_buffer->alloc_size;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = heap;
	args.in.domain_flags = alloc_buffer->flags;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r) {
		free(bo);
		return r;
	}

	bo->handle = args.out.handle;

	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	*buf_handle = bo;
	return 0;
}

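/*
 * Attach UMD metadata (tiling info, flags and an opaque UMD blob) to a
 * buffer via the GEM_METADATA ioctl. At most sizeof(args.data.data) bytes
 * of UMD metadata are accepted.
 *
 * Hedged sketch of filling the request (values are placeholders):
 *
 *	struct amdgpu_bo_metadata meta = {0};
 *	meta.tiling_info = my_tiling_bits;		// hypothetical value
 *	meta.size_metadata = sizeof(my_blob);		// hypothetical blob
 *	memcpy(meta.umd_metadata, my_blob, sizeof(my_blob));
 *	amdgpu_bo_set_metadata(bo, &meta);
 */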
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
			   struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}

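/*
 * Query allocation parameters and metadata of a buffer, typically used on
 * an imported buffer to recover its size, alignment, heap and tiling info.
 * Sketch (assumes a previously allocated or imported handle "bo"):
 *
 *	struct amdgpu_bo_info info;
 *	if (!amdgpu_bo_query_info(bo, &info))
 *		printf("size %llu\n", (unsigned long long)info.alloc_size);
 */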
int amdgpu_bo_query_info(amdgpu_bo_handle bo,
			 struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Validate the BO passed in */
	if (!bo->handle)
		return -EINVAL;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}

static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_handles,
			    (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	util_hash_table_set(bo->dev->bo_flink_names,
			    (void*)(uintptr_t)bo->flink_name,
			    bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return 0;
}

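/*
 * Export a buffer as a GEM flink name, a KMS handle or a dma-buf fd.
 * Exporting as KMS handle or dma-buf also records the buffer in the
 * device's handle table so a later import returns the same amdgpu_bo.
 *
 * Sketch of sharing via dma-buf (illustrative; error handling omitted):
 *
 *	uint32_t shared;
 *	amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &shared);
 *	// pass (int)shared to another process/device, then close() it
 */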
int amdgpu_bo_export(amdgpu_bo_handle bo,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
		amdgpu_add_handle_to_table(bo);
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		amdgpu_add_handle_to_table(bo);
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       (int*)shared_handle);
	}
	return -EINVAL;
}

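/*
 * Import a buffer previously exported as a flink name or dma-buf fd.
 * The device keeps <handle, bo> pairs in hash tables, so importing the
 * same underlying buffer twice bumps the refcount instead of creating a
 * second amdgpu_bo. Sketch for the dma-buf case (the fd comes from another
 * process or API; error handling omitted):
 *
 *	struct amdgpu_bo_import_result res;
 *	amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
 *			 (uint32_t)dmabuf_fd, &res);
 *	// res.buf_handle and res.alloc_size are now valid
 */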
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	int r;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		uint32_t handle;
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			pthread_mutex_unlock(&dev->bo_table_mutex);
			amdgpu_close_kms_handle(dev, handle);
			return -errno;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = util_hash_table_get(dev->bo_flink_names,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = util_hash_table_get(dev->bo_handles,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
		/* Importing a KMS handle is not allowed. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EPERM;

	default:
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EINVAL;
	}

	if (bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);

		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);
		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
			amdgpu_close_kms_handle(dev, shared_handle);
		}
		return -ENOMEM;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r) {
			free(bo);
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		bo->handle = open_arg.handle;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

			close(dma_fd);

			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
		}
		bo->flink_name = shared_handle;
		bo->alloc_size = open_arg.size;
		util_hash_table_set(dev->bo_flink_names,
				    (void*)(uintptr_t)bo->flink_name, bo);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo->handle = shared_handle;
		bo->alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	return 0;
}

int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	/* Just drop the reference. */
	amdgpu_bo_reference(&buf_handle, NULL);
	return 0;
}

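/*
 * Map a buffer for CPU access. The mapping is reference counted: repeated
 * calls return the same pointer, and only the last amdgpu_bo_cpu_unmap()
 * actually unmaps it. A short, hedged usage sketch:
 *
 *	void *ptr;
 *	if (!amdgpu_bo_cpu_map(bo, &ptr)) {
 *		memset(ptr, 0, 4096);	// buffer assumed to be >= 4096 bytes
 *		amdgpu_bo_cpu_unmap(bo);
 *	}
 */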
int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}

int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}

int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

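/*
 * Wait until the GPU has finished using a buffer, or until the timeout
 * expires. On success, *busy tells the caller whether the buffer is still
 * in use. Sketch (the timeout value is a placeholder, 1 second here):
 *
 *	bool busy;
 *	if (!amdgpu_bo_wait_for_idle(bo, 1000000000ull, &busy) && !busy)
 *		;	// safe to reuse or free the buffer
 */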
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
			    uint64_t timeout_ns,
			    bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}

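/*
 * Wrap an existing, page-aligned range of anonymous user memory in a
 * buffer object (userptr). Sketch (pointer and size must be page aligned;
 * illustrative only, error handling omitted):
 *
 *	void *ptr;
 *	posix_memalign(&ptr, getpagesize(), 2 * getpagesize());
 *	amdgpu_bo_handle ubo;
 *	amdgpu_create_bo_from_user_mem(dev, ptr, 2 * getpagesize(), &ubo);
 */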
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				    void *cpu,
				    uint64_t size,
				    amdgpu_bo_handle *buf_handle)
{
	int r;
	struct amdgpu_bo *bo;
	struct drm_amdgpu_gem_userptr args;

	args.addr = (uintptr_t)cpu;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
		AMDGPU_GEM_USERPTR_VALIDATE;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		return r;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = args.handle;

	*buf_handle = bo;

	return r;
}

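/*
 * Create a kernel-side list of buffers (with optional per-buffer
 * priorities) that a command submission will reference. Sketch of a
 * single-entry list (illustrative; error handling omitted):
 *
 *	amdgpu_bo_handle bos[] = { bo };
 *	amdgpu_bo_list_handle list;
 *	amdgpu_bo_list_create(dev, 1, bos, NULL, &list);
 *	// ... submit work referencing "list" ...
 *	amdgpu_bo_list_destroy(list);
 */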
int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	*result = malloc(sizeof(struct amdgpu_bo_list));
	if (!*result) {
		free(list);
		return -ENOMEM;
	}

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r) {
		free(*result);
		return r;
	}

	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}

int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}

int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (list == NULL)
		return -ENOMEM;

	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}

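/*
 * Map or unmap a buffer in the GPU virtual address space. Note that the
 * "flags" argument is currently ignored: the mapping is always created
 * readable, writeable and executable, and "size" is rounded up to the
 * page size. Sketch (the GPU address "va" is assumed to come from the
 * application's own VA management; error handling omitted):
 *
 *	amdgpu_bo_va_op(bo, 0, alloc_size, va, 0, AMDGPU_VA_OP_MAP);
 *	// ... use the buffer at GPU address "va" ...
 *	amdgpu_bo_va_op(bo, 0, alloc_size, va, 0, AMDGPU_VA_OP_UNMAP);
 */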
int amdgpu_bo_va_op(amdgpu_bo_handle bo,
		     uint64_t offset,
		     uint64_t size,
		     uint64_t addr,
		     uint64_t flags,
		     uint32_t ops)
{
	amdgpu_device_handle dev = bo->dev;
	struct drm_amdgpu_gem_va va;
	int r;

	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP)
		return -EINVAL;

	memset(&va, 0, sizeof(va));
	va.handle = bo->handle;
	va.operation = ops;
	va.flags = AMDGPU_VM_PAGE_READABLE |
		   AMDGPU_VM_PAGE_WRITEABLE |
		   AMDGPU_VM_PAGE_EXECUTABLE;
	va.va_address = addr;
	va.offset_in_bo = offset;
	va.map_size = ALIGN(size, getpagesize());

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	return r;
}