1#ifndef _MSM_KGSL_H
2#define _MSM_KGSL_H
3
4#include <linux/types.h>
5#include <linux/ioctl.h>
6
7/*
8 * The KGSL version has proven not to be very useful in userspace if features
9 * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
11 * software releases are never linear. Also, I like pie.
12 */
13
14#define KGSL_VERSION_MAJOR        3
15#define KGSL_VERSION_MINOR        14
16
17/*
18 * We have traditionally mixed context and issueibcmds / command batch flags
19 * together into a big flag stew. This worked fine until we started adding a
20 * lot more command batch flags and we started running out of bits. Turns out
21 * we have a bit of room in the context type / priority mask that we could use
22 * for command batches, but that means we need to split out the flags into two
23 * coherent sets.
24 *
25 * If any future definitions are for both context and cmdbatch add both defines
26 * and link the cmdbatch to the context define as we do below. Otherwise feel
27 * free to add exclusive bits to either set.
28 */
29
30/* --- context flags --- */
31#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
32#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
33/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
34#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
35#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
36#define KGSL_CONTEXT_PREAMBLE		0x00000010
37#define KGSL_CONTEXT_TRASH_STATE	0x00000020
38#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
39#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
40/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
41#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
42#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
43/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
44#define KGSL_CONTEXT_SYNC               0x00000400
45#define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800
46#define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
47#define KGSL_CONTEXT_PRIORITY_SHIFT     12
48#define KGSL_CONTEXT_PRIORITY_UNDEF     0
49
50#define KGSL_CONTEXT_IFH_NOP            0x00010000
51#define KGSL_CONTEXT_SECURE             0x00020000
52#define KGSL_CONTEXT_NO_SNAPSHOT        0x00040000
53#define KGSL_CONTEXT_SPARSE             0x00080000
54
55#define KGSL_CONTEXT_PREEMPT_STYLE_MASK       0x0E000000
56#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT      25
57#define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT    0x0
58#define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER 0x1
59#define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN  0x2
60
61#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
62#define KGSL_CONTEXT_TYPE_SHIFT         20
63#define KGSL_CONTEXT_TYPE_ANY		0
64#define KGSL_CONTEXT_TYPE_GL		1
65#define KGSL_CONTEXT_TYPE_CL		2
66#define KGSL_CONTEXT_TYPE_C2D		3
67#define KGSL_CONTEXT_TYPE_RS		4
68#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E
69
70#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
71
72#define KGSL_CONTEXT_INVALID 0xffffffff
73
74/*
75 * --- command batch flags ---
76 * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
77 * definitions or bits that are valid for both contexts and cmdbatches.  To be
78 * safe the other 8 bits that are still available in the context field should be
79 * omitted here in case we need to share - the other bits are available for
80 * cmdbatch only flags as needed
81 */
82#define KGSL_CMDBATCH_MEMLIST		0x00000001
83#define KGSL_CMDBATCH_MARKER		0x00000002
84#define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
85#define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
86#define KGSL_CMDBATCH_PROFILING		0x00000010
87/*
88 * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
89 * to take effect, as the latter only affects the time data returned.
90 */
91#define KGSL_CMDBATCH_PROFILING_KTIME	0x00000020
92#define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
93#define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
94#define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
95#define KGSL_CMDBATCH_SPARSE	    0x1000 /* 0x1000 */
96
97/*
98 * Reserve bits [16:19] and bits [28:31] for possible bits shared between
99 * contexts and command batches.  Update this comment as new flags are added.
100 */
101
102/*
103 * gpu_command_object flags - these flags communicate the type of command or
104 * memory object being submitted for a GPU command
105 */
106
107/* Flags for GPU command objects */
108#define KGSL_CMDLIST_IB                  0x00000001U
109#define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
110#define KGSL_CMDLIST_IB_PREAMBLE         0x00000004U
111
112/* Flags for GPU command memory objects */
113#define KGSL_OBJLIST_MEMOBJ  0x00000008U
114#define KGSL_OBJLIST_PROFILE 0x00000010U
115
116/* Flags for GPU command sync points */
117#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
118#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
119
120/* --- Memory allocation flags --- */
121
122/* General allocation hints */
123#define KGSL_MEMFLAGS_SECURE      0x00000008ULL
124#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
125#define KGSL_MEMFLAGS_GPUWRITEONLY 0x02000000U
126#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL
127
128/* Flag for binding all the virt range to single phys data */
129#define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
130#define KGSL_SPARSE_BIND 0x1ULL
131#define KGSL_SPARSE_UNBIND 0x2ULL
132
133/* Memory caching hints */
134#define KGSL_CACHEMODE_MASK       0x0C000000U
135#define KGSL_CACHEMODE_SHIFT 26
136
137#define KGSL_CACHEMODE_WRITECOMBINE 0
138#define KGSL_CACHEMODE_UNCACHED 1
139#define KGSL_CACHEMODE_WRITETHROUGH 2
140#define KGSL_CACHEMODE_WRITEBACK 3
141
142#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
143#define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
144#define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
145
146/* Memory types for which allocations are made */
147#define KGSL_MEMTYPE_MASK		0x0000FF00
148#define KGSL_MEMTYPE_SHIFT		8
149
150#define KGSL_MEMTYPE_OBJECTANY			0
151#define KGSL_MEMTYPE_FRAMEBUFFER		1
152#define KGSL_MEMTYPE_RENDERBUFFER		2
153#define KGSL_MEMTYPE_ARRAYBUFFER		3
154#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
155#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
156#define KGSL_MEMTYPE_TEXTURE			6
157#define KGSL_MEMTYPE_SURFACE			7
158#define KGSL_MEMTYPE_EGL_SURFACE		8
159#define KGSL_MEMTYPE_GL				9
160#define KGSL_MEMTYPE_CL				10
161#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
162#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
163#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
164#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
165#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
166#define KGSL_MEMTYPE_COMMAND			16
167#define KGSL_MEMTYPE_2D				17
168#define KGSL_MEMTYPE_EGL_IMAGE			18
169#define KGSL_MEMTYPE_EGL_SHADOW			19
170#define KGSL_MEMTYPE_MULTISAMPLE		20
171#define KGSL_MEMTYPE_KERNEL			255
172
173/*
174 * Alignment hint, passed as the power of 2 exponent.
 * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
176 */
177#define KGSL_MEMALIGN_MASK		0x00FF0000
178#define KGSL_MEMALIGN_SHIFT		16
179
/* Source of user memory being mapped into the GPU address space */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000, /* legacy pmem buffer */
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001, /* Android ashmem fd */
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002, /* arbitrary user address */
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	/*
	 * ION type is retained for backwards compatibility but Ion buffers are
	 * dma-bufs so try to use that naming if we can
	 */
	KGSL_USER_MEM_TYPE_DMABUF       = 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
};
192#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
193#define KGSL_MEMFLAGS_USERMEM_SHIFT 5
194
195/*
196 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
197 * leave a good value for allocated memory. In the flags we use
198 * 0 to indicate allocated memory and thus need to add 1 to the enum
199 * values.
200 */
201#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)
202
203#define KGSL_MEMFLAGS_NOT_USERMEM 0
204#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
205#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
206		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
207#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
208#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
209
210/* --- generic KGSL flag values --- */
211
212#define KGSL_FLAGS_NORMALMODE  0x00000000
213#define KGSL_FLAGS_SAFEMODE    0x00000001
214#define KGSL_FLAGS_INITIALIZED0 0x00000002
215#define KGSL_FLAGS_INITIALIZED 0x00000004
216#define KGSL_FLAGS_STARTED     0x00000008
217#define KGSL_FLAGS_ACTIVE      0x00000010
218#define KGSL_FLAGS_RESERVED0   0x00000020
219#define KGSL_FLAGS_RESERVED1   0x00000040
220#define KGSL_FLAGS_RESERVED2   0x00000080
221#define KGSL_FLAGS_SOFT_RESET  0x00000100
222#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
223
224/* Server Side Sync Timeout in milliseconds */
225#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
226
227/*
228 * Reset status values for context
229 */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001, /* this context caused a reset */
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002, /* reset caused by another context */
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003  /* reset cause undetermined */
};
236
/*
 * Convert a bandwidth value given in MBps into Bps.  The argument is
 * parenthesized so that expressions such as KGSL_CONVERT_TO_MBPS(a + b)
 * expand correctly (previously `val*1000*1000U` bound `*` before `+`).
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)
239
/* device id */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0		= 0x00000000, /* the 3D GPU device */
	KGSL_DEVICE_MAX			      /* number of valid device ids */
};
245
/* Device information returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	unsigned int device_id;		/* one of enum kgsl_deviceid */
	/* chip revision id
	* coreid:8 majorrev:8 minorrev:8 patch:8
	*/
	unsigned int chip_id;
	unsigned int mmu_enabled;	/* nonzero if the GPU MMU is enabled */
	unsigned long gmem_gpubaseaddr;	/* GPU base address of on-chip GMEM */
	/*
	* This field contains the adreno revision
	* number 200, 205, 220, etc...
	*/
	unsigned int gpu_id;
	size_t gmem_sizebytes;		/* size of on-chip GMEM in bytes */
};
262
263/*
264 * struct kgsl_devmemstore - this structure defines the region of memory
265 * that can be mmap()ed from this driver. The timestamp fields are __volatile__
266 * because they are written by the GPU
267 * @soptimestamp: Start of pipeline timestamp written by GPU before the
268 * commands in concern are processed
269 * @sbz: Unused, kept for 8 byte alignment
270 * @eoptimestamp: End of pipeline timestamp written by GPU after the
271 * commands in concern are processed
272 * @sbz2: Unused, kept for 8 byte alignment
273 * @preempted: Indicates if the context was preempted
274 * @sbz3: Unused, kept for 8 byte alignment
275 * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
276 * @sbz4: Unused, kept for 8 byte alignment
277 * @current_context: The current context the GPU is working on
278 * @sbz5: Unused, kept for 8 byte alignment
279 */
struct kgsl_devmemstore {
	__volatile__ unsigned int soptimestamp;	/* start-of-pipeline timestamp */
	unsigned int sbz;			/* unused, 8 byte alignment pad */
	__volatile__ unsigned int eoptimestamp;	/* end-of-pipeline timestamp */
	unsigned int sbz2;			/* unused, 8 byte alignment pad */
	__volatile__ unsigned int preempted;	/* set if the context was preempted */
	unsigned int sbz3;			/* unused, 8 byte alignment pad */
	__volatile__ unsigned int ref_wait_ts;	/* interrupt timestamp, unused now */
	unsigned int sbz4;			/* unused, 8 byte alignment pad */
	unsigned int current_context;		/* context the GPU is working on */
	unsigned int sbz5;			/* unused, 8 byte alignment pad */
};
292
293#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
294	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
295	 offsetof(struct kgsl_devmemstore, field))
296
/* timestamp id*/
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp*/
	KGSL_TIMESTAMP_QUEUED   = 0x00000003, /* timestamp of the last queued command */
};
303
304/* property types - used with kgsl_device_getproperty */
305#define KGSL_PROP_DEVICE_INFO		0x1
306#define KGSL_PROP_DEVICE_SHADOW		0x2
307#define KGSL_PROP_DEVICE_POWER		0x3
308#define KGSL_PROP_SHMEM			0x4
309#define KGSL_PROP_SHMEM_APERTURES	0x5
310#define KGSL_PROP_MMU_ENABLE		0x6
311#define KGSL_PROP_INTERRUPT_WAITS	0x7
312#define KGSL_PROP_VERSION		0x8
313#define KGSL_PROP_GPU_RESET_STAT	0x9
314#define KGSL_PROP_PWRCTRL		0xE
315#define KGSL_PROP_PWR_CONSTRAINT	0x12
316#define KGSL_PROP_UCHE_GMEM_VADDR	0x13
317#define KGSL_PROP_SP_GENERIC_MEM	0x14
318#define KGSL_PROP_UCODE_VERSION		0x15
319#define KGSL_PROP_GPMU_VERSION		0x16
320#define KGSL_PROP_HIGHEST_BANK_BIT	0x17
321#define KGSL_PROP_DEVICE_BITNESS	0x18
322#define KGSL_PROP_DEVICE_QDSS_STM	0x19
323#define KGSL_PROP_DEVICE_QTIMER	0x20
324
/* Returned for KGSL_PROP_DEVICE_SHADOW; describes the mmap()able memstore */
struct kgsl_shadowprop {
	unsigned long gpuaddr;	/* mmap() offset of the shadow region */
	size_t size;		/* size of the shadow region in bytes */
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};
330
/* Returned for KGSL_PROP_DEVICE_QDSS_STM */
struct kgsl_qdss_stm_prop {
	uint64_t gpuaddr;	/* GPU address of the QDSS STM region */
	uint64_t size;		/* size of the region in bytes */
};
335
/* Returned for KGSL_PROP_DEVICE_QTIMER */
struct kgsl_qtimer_prop {
	uint64_t gpuaddr;	/* GPU address of the QTIMER region */
	uint64_t size;		/* size of the region in bytes */
};
340
/* Returned for KGSL_PROP_VERSION: driver and device version numbers */
struct kgsl_version {
	unsigned int drv_major;	/* driver (software) major version */
	unsigned int drv_minor;	/* driver (software) minor version */
	unsigned int dev_major;	/* device (hardware) major version */
	unsigned int dev_minor;	/* device (hardware) minor version */
};
347
/* Returned for KGSL_PROP_SP_GENERIC_MEM */
struct kgsl_sp_generic_mem {
	uint64_t local;	/* SP local memory; semantics not visible here - confirm in driver */
	uint64_t pvt;	/* SP private memory; semantics not visible here - confirm in driver */
};
352
/* Returned for KGSL_PROP_UCODE_VERSION: GPU microcode versions */
struct kgsl_ucode_version {
	unsigned int pfp;	/* PFP microcode version */
	unsigned int pm4;	/* PM4 microcode version */
};
357
/* Returned for KGSL_PROP_GPMU_VERSION */
struct kgsl_gpmu_version {
	unsigned int major;	/* GPMU firmware major version */
	unsigned int minor;	/* GPMU firmware minor version */
	unsigned int features;	/* bitmask of GPMU features */
};
363
364/* Performance counter groups */
365
366#define KGSL_PERFCOUNTER_GROUP_CP 0x0
367#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
368#define KGSL_PERFCOUNTER_GROUP_PC 0x2
369#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
370#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
371#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
372#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
373#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
374#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
375#define KGSL_PERFCOUNTER_GROUP_TP 0x9
376#define KGSL_PERFCOUNTER_GROUP_SP 0xA
377#define KGSL_PERFCOUNTER_GROUP_RB 0xB
378#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
379#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
380#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
381#define KGSL_PERFCOUNTER_GROUP_MH 0xF
382#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
383#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
384#define KGSL_PERFCOUNTER_GROUP_SX 0x12
385#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
386#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
387#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
388#define KGSL_PERFCOUNTER_GROUP_L2 0x16
389#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
390#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
391#define KGSL_PERFCOUNTER_GROUP_LRZ 0x19
392#define KGSL_PERFCOUNTER_GROUP_CMP 0x1A
393#define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
394#define KGSL_PERFCOUNTER_GROUP_SP_PWR 0x1C
395#define KGSL_PERFCOUNTER_GROUP_TP_PWR 0x1D
396#define KGSL_PERFCOUNTER_GROUP_RB_PWR 0x1E
397#define KGSL_PERFCOUNTER_GROUP_CCU_PWR 0x1F
398#define KGSL_PERFCOUNTER_GROUP_UCHE_PWR 0x20
399#define KGSL_PERFCOUNTER_GROUP_CP_PWR 0x21
400#define KGSL_PERFCOUNTER_GROUP_GPMU_PWR 0x22
401#define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
402#define KGSL_PERFCOUNTER_GROUP_MAX 0x24
403
404#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
405#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
406
/* structure holds list of ibs */
struct kgsl_ibdesc {
	unsigned long gpuaddr;	/* GPU address of the indirect buffer */
	unsigned long __pad;	/* unused, kept for binary compatibility */
	size_t sizedwords;	/* size of the IB in dwords */
	unsigned int ctrl;	/* IB control flags */
};
414
415/**
416 * struct kgsl_cmdbatch_profiling_buffer
417 * @wall_clock_s: Ringbuffer submission time (seconds).
418 *                If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided
419 *                in kernel clocks, otherwise wall clock time is used.
420 * @wall_clock_ns: Ringbuffer submission time (nanoseconds).
421 *                 If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided
422 *                 in kernel clocks, otherwise wall clock time is used.
423 * @gpu_ticks_queued: GPU ticks at ringbuffer submission
424 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
425 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
426 *
427 * This structure defines the profiling buffer used to measure cmdbatch
428 * execution time
429 */
struct kgsl_cmdbatch_profiling_buffer {
	uint64_t wall_clock_s;		/* ringbuffer submission time, seconds */
	uint64_t wall_clock_ns;		/* ringbuffer submission time, nanoseconds */
	uint64_t gpu_ticks_queued;	/* GPU ticks at ringbuffer submission */
	uint64_t gpu_ticks_submitted;	/* GPU ticks when execution started */
	uint64_t gpu_ticks_retired;	/* GPU ticks when execution finished */
};
437
438/* ioctls */
439#define KGSL_IOC_TYPE 0x09
440
441/* get misc info about the GPU
442   type should be a value from enum kgsl_property_type
443   value points to a structure that varies based on type
444   sizebytes is sizeof() that structure
445   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
   this structure contains hardware versioning info.
447   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
448   this is used to find mmap() offset and sizes for mapping
449   struct kgsl_memstore into userspace.
450*/
struct kgsl_device_getproperty {
	unsigned int type;	/* one of the KGSL_PROP_* values */
	void *value;		/* user pointer to the per-type result struct */
	size_t sizebytes;	/* sizeof() the struct that 'value' points to */
};
456
457#define IOCTL_KGSL_DEVICE_GETPROPERTY \
458	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
459
460/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
461 */
462
463/* block until the GPU has executed past a given timestamp
464 * timeout is in milliseconds.
465 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;	/* timestamp to wait for */
	unsigned int timeout;	/* timeout in milliseconds */
};
470
471#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
472	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)
473
/* Per-context variant of IOCTL_KGSL_DEVICE_WAITTIMESTAMP */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;	/* context whose timestamp to wait on */
	unsigned int timestamp;		/* timestamp to wait for */
	unsigned int timeout;		/* timeout in milliseconds */
};
479
480#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
481	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
482
483/* DEPRECATED: issue indirect commands to the GPU.
484 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
485 * ibaddr and sizedwords must specify a subset of a buffer created
486 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
487 * flags may be a mask of KGSL_CONTEXT_ values
488 * timestamp is a returned counter value which can be passed to
489 * other ioctls to determine when the commands have been executed by
490 * the GPU.
491 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
493 * instead
494 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;	/* context from IOCTL_KGSL_DRAWCTXT_CREATE */
	unsigned long ibdesc_addr;	/* user pointer to an array of kgsl_ibdesc */
	unsigned int numibs;		/* number of entries in the ibdesc array */
	unsigned int timestamp; /*output param */
	unsigned int flags;		/* mask of KGSL_CONTEXT_* values */
};
502
503#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
504	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
505
506/* read the most recently executed timestamp value
507 * type should be a value from enum kgsl_timestamp_type
508 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;	/* one of enum kgsl_timestamp_type */
	unsigned int timestamp; /*output param */
};
513
514#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
515	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
516
517#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
518	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
519
520/* free memory when the GPU reaches a given timestamp.
521 * gpuaddr specify a memory region created by a
522 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
523 * type should be a value from enum kgsl_timestamp_type
524 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned long gpuaddr;	/* GPU address of the memory region to free */
	unsigned int type;	/* one of enum kgsl_timestamp_type */
	unsigned int timestamp;	/* free once the GPU passes this timestamp */
};
530
531#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
532	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
533
534/* Previous versions of this header had incorrectly defined
535   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
   of a write only ioctl.  To ensure binary compatibility, the following
537   #define will be used to intercept the incorrect ioctl
538*/
539
540#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
541	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
542
543/* create a draw context, which is used to preserve GPU state.
544 * The flags field may contain a mask KGSL_CONTEXT_*  values
545 */
struct kgsl_drawctxt_create {
	unsigned int flags;	/* mask of KGSL_CONTEXT_* values */
	unsigned int drawctxt_id; /*output param */
};
550
551#define IOCTL_KGSL_DRAWCTXT_CREATE \
552	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
553
554/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;	/* context to destroy */
};
558
559#define IOCTL_KGSL_DRAWCTXT_DESTROY \
560	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
561
562/* add a block of pmem, fb, ashmem or user allocated address
563 * into the GPU address space */
struct kgsl_map_user_mem {
	int fd;			/* fd of the buffer for fd-backed memtypes */
	unsigned long gpuaddr;   /*output param */
	size_t len;		/* length in bytes to map */
	size_t offset;		/* offset into the buffer */
	unsigned long hostptr;   /*input param */
	enum kgsl_user_mem_type memtype;	/* source of the memory */
	unsigned int flags;	/* mask of KGSL_MEMFLAGS_* values */
};
573
574#define IOCTL_KGSL_MAP_USER_MEM \
575	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
576
/* Per-context variant of IOCTL_KGSL_CMDSTREAM_READTIMESTAMP */
struct kgsl_cmdstream_readtimestamp_ctxtid {
	unsigned int context_id;	/* context whose timestamp to read */
	unsigned int type;		/* one of enum kgsl_timestamp_type */
	unsigned int timestamp; /*output param */
};
582
583#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
584	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
585
/* Per-context variant of IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP */
struct kgsl_cmdstream_freememontimestamp_ctxtid {
	unsigned int context_id;	/* context whose timestamp is tracked */
	unsigned long gpuaddr;		/* GPU address of the memory to free */
	unsigned int type;		/* one of enum kgsl_timestamp_type */
	unsigned int timestamp;		/* free once the GPU passes this timestamp */
};
592
593#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
594	_IOW(KGSL_IOC_TYPE, 0x17, \
595	struct kgsl_cmdstream_freememontimestamp_ctxtid)
596
/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
        int pmem_fd;            /* fd of the pmem region */
        unsigned long gpuaddr;  /*output param */
        unsigned int len;       /* length in bytes to map */
        unsigned int offset;    /* offset into the pmem region */
};
604
605#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
606        _IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)
607
608/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned long gpuaddr;	/* GPU address of the allocation to free */
};
612
613#define IOCTL_KGSL_SHAREDMEM_FREE \
614	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
615
/* User-supplied event to record in the CFF capture stream */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;	/* CFF opcode to record */
	unsigned int op1;		/* opcode-specific operands */
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];		/* unused, kept for binary compatibility */
};
625
626#define IOCTL_KGSL_CFF_USER_EVENT \
627	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
628
/* Describes a rectangular region of GMEM */
struct kgsl_gmem_desc {
	unsigned int x;		/* x origin of the region */
	unsigned int y;		/* y origin of the region */
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};
636
/* Describes a buffer used by IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW */
struct kgsl_buffer_desc {
	void 			*hostptr;	/* CPU address of the buffer */
	unsigned long	gpuaddr;		/* GPU address of the buffer */
	int				size;
	unsigned int	format;
	unsigned int  	pitch;
	unsigned int  	enabled;
};
645
/* Argument to IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW */
struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;		/* context to bind the shadow to */
	struct kgsl_gmem_desc gmem_desc;	/* GMEM region being shadowed */
	unsigned int shadow_x;			/* x offset into the shadow buffer */
	unsigned int shadow_y;			/* y offset into the shadow buffer */
	struct kgsl_buffer_desc shadow_buffer;	/* destination shadow buffer */
	unsigned int buffer_id;
};
654
655#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
656    _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
657
658/* add a block of memory into the GPU address space */
659
660/*
661 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
662 * use IOCTL_KGSL_GPUMEM_ALLOC instead
663 */
664
struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr;	/*output param */
	unsigned int hostptr;	/* user virtual address of the region */
	unsigned int flags;	/* mask of KGSL_MEMFLAGS_* values */
};
670
671#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
672	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
673
674/*
675 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
676 * supports both directions (flush and invalidate). This code will still
677 * work, but by definition it will do a flush of the cache which might not be
678 * what you want to have happen on a buffer following a GPU operation.  It is
679 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
680 */
681
682#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
683	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
684
/* Argument to IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET */
struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;	/* context to modify */
	unsigned int offset;		/* new bin base offset */
};
689
690#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
691	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
692
/* Target windows for IOCTL_KGSL_CMDWINDOW_WRITE */
enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000,
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF,
};
701
/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;	/* which command window to write */
	unsigned int addr;			/* address within the window */
	unsigned int data;			/* value to write */
};
708
709#define IOCTL_KGSL_CMDWINDOW_WRITE \
710	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
711
/* Allocate memory for access by the GPU */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;		/* requested size in bytes */
	unsigned int flags;	/* mask of KGSL_MEMFLAGS_*, KGSL_CACHEMODE_* and KGSL_MEMTYPE_* values */
};
717
718#define IOCTL_KGSL_GPUMEM_ALLOC \
719	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
720
/* Sync a memory range into the CFF capture stream */
struct kgsl_cff_syncmem {
	unsigned long gpuaddr;	/* GPU address of the range */
	size_t len;		/* length of the range in bytes */
	unsigned int __pad[2]; /* For future binary compatibility */
};
726
727#define IOCTL_KGSL_CFF_SYNCMEM \
728	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
729
730/*
731 * A timestamp event allows the user space to register an action following an
732 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
733 * _IOWR to support fences which need to return a fd for the priv parameter.
734 */
735
struct kgsl_timestamp_event {
	int type;                /* Type of event (KGSL_TIMESTAMP_EVENT_*, below) */
	unsigned int timestamp;  /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void *priv;	 /* Pointer to the event specific blob */
	size_t len;              /* Size of the event specific blob */
};
743
744#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
745	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
746
747/* A genlock timestamp event releases an existing lock on timestamp expire */
748
749#define KGSL_TIMESTAMP_EVENT_GENLOCK 1
750
/* Event blob for KGSL_TIMESTAMP_EVENT_GENLOCK */
struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};
754
755/* A fence timestamp event releases an existing lock on timestamp expire */
756
757#define KGSL_TIMESTAMP_EVENT_FENCE 2
758
/* Event blob for KGSL_TIMESTAMP_EVENT_FENCE */
struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};
762
763/*
764 * Set a property within the kernel.  Uses the same structure as
765 * IOCTL_KGSL_GETPROPERTY
766 */
767
768#define IOCTL_KGSL_SETPROPERTY \
769	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)
770
771#define IOCTL_KGSL_TIMESTAMP_EVENT \
772	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
773
774/**
775 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
776 * @id: returned id value for this allocation.
777 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
778 * @size: requested size of the allocation and actual size on return.
779 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
780 * @gpuaddr: returned GPU address for the allocation
781 *
782 * Allocate memory for access by the GPU. The flags and size fields are echoed
783 * back by the kernel, so that the caller can know if the request was
784 * adjusted.
785 *
786 * Supported flags:
787 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
788 * KGSL_MEMTYPE*: usage hint for debugging aid
789 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
790 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
791 * address will be 0. Calling mmap() will set the GPU address.
792 */
struct kgsl_gpumem_alloc_id {
	unsigned int id;	/* output: id of this allocation */
	unsigned int flags;	/* in/out: mask of KGSL_MEM* values */
	size_t size;		/* in/out: size of the allocation */
	size_t mmapsize;	/* output: size to pass to mmap() */
	unsigned long gpuaddr;	/* output: GPU address of the allocation */
/* private: reserved for future use*/
	unsigned long __pad[2];
};
802
803#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
804	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
805
806/**
807 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
808 * @id: GPU allocation id to free
809 *
810 * Free an allocation by id, in case a GPU address has not been assigned or
811 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
812 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
813 */
struct kgsl_gpumem_free_id {
	unsigned int id;	/* id of the allocation to free */
/* private: reserved for future use*/
	unsigned int __pad;
};
819
820#define IOCTL_KGSL_GPUMEM_FREE_ID \
821	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
822
823/**
824 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
825 * @gpuaddr: GPU address to query. Also set on return.
826 * @id: GPU allocation id to query. Also set on return.
827 * @flags: returned mask of KGSL_MEM* values.
828 * @size: returned size of the allocation.
829 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
830 * @useraddr: returned address of the userspace mapping for this buffer
831 *
832 * This ioctl allows querying of all user visible attributes of an existing
833 * allocation, by either the GPU address or the id returned by a previous
834 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
835 * return all attributes so this ioctl can be used to look them up if needed.
836 *
837 */
struct kgsl_gpumem_get_info {
	unsigned long gpuaddr;	/* in/out: GPU address to query */
	unsigned int id;	/* in/out: allocation id to query */
	unsigned int flags;	/* output: mask of KGSL_MEM* values */
	size_t size;		/* output: size of the allocation */
	size_t mmapsize;	/* output: size to pass to mmap() */
	unsigned long useraddr;	/* output: userspace mapping address */
/* private: reserved for future use*/
	unsigned long __pad[4];
};
848
849#define IOCTL_KGSL_GPUMEM_GET_INFO\
850	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
851
852/**
853 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
854 * @gpuaddr: GPU address of the buffer to sync.
855 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
856 * @op: a mask of KGSL_GPUMEM_CACHE_* values
857 * @offset: offset into the buffer
858 * @length: number of bytes starting from offset to perform
859 * the cache operation on
860 *
861 * Sync the L2 cache for memory headed to and from the GPU - this replaces
862 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
863 * directions
864 *
865 */
struct kgsl_gpumem_sync_cache {
	unsigned long gpuaddr;	/* GPU address of the buffer to sync */
	unsigned int id;	/* allocation id; either gpuaddr or id suffices */
	unsigned int op;	/* mask of KGSL_GPUMEM_CACHE_* values */
	size_t offset;		/* offset into the buffer */
	size_t length;		/* bytes from offset to operate on */
};
873
874#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
875#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
876
877#define KGSL_GPUMEM_CACHE_INV (1 << 1)
878#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
879
880#define KGSL_GPUMEM_CACHE_FLUSH \
881	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
882
/* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
/* Shift an unsigned literal: (1 << 31) on a signed int is undefined behavior */
#define KGSL_GPUMEM_CACHE_RANGE (1U << 31)
885
886#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
887	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
888
889/**
890 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
891 * @groupid: Performance counter group ID
892 * @countable: Countable to select within the group
893 * @offset: Return offset of the reserved LO counter
894 * @offset_hi: Return offset of the reserved HI counter
895 *
896 * Get an available performance counter from a specified groupid.  The offset
897 * of the performance counter will be returned after successfully assigning
898 * the countable to the counter for the specified group.  An error will be
899 * returned and an offset of 0 if the groupid is invalid or there are no
900 * more counters left.  After successfully getting a perfcounter, the user
901 * must call kgsl_perfcounter_put(groupid, contable) when finished with
902 * the perfcounter to clear up perfcounter resources.
903 *
904 */
struct kgsl_perfcounter_get {
	unsigned int groupid;	/* one of KGSL_PERFCOUNTER_GROUP_* */
	unsigned int countable;	/* countable to select within the group */
	unsigned int offset;	/* output: offset of the reserved LO counter */
	unsigned int offset_hi;	/* output: offset of the reserved HI counter */
/* private: reserved for future use */
	unsigned int __pad; /* For future binary compatibility */
};
913
914#define IOCTL_KGSL_PERFCOUNTER_GET \
915	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
916
917/**
918 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
919 * @groupid: Performance counter group ID
920 * @countable: Countable to release within the group
921 *
922 * Put an allocated performance counter to allow others to have access to the
923 * resource that was previously taken.  This is only to be called after
924 * successfully getting a performance counter from kgsl_perfcounter_get().
925 *
926 */
927struct kgsl_perfcounter_put {
928	unsigned int groupid;
929	unsigned int countable;
930/* private: reserved for future use */
931	unsigned int __pad[2]; /* For future binary compatibility */
932};
933
934#define IOCTL_KGSL_PERFCOUNTER_PUT \
935	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
936
937/**
938 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
939 * @groupid: Performance counter group ID
940 * @countable: Return active countables array
941 * @size: Size of active countables array
942 * @max_counters: Return total number counters for the group ID
943 *
944 * Query the available performance counters given a groupid.  The array
945 * *countables is used to return the current active countables in counters.
946 * The size of the array is passed in so the kernel will only write at most
947 * size or counter->size for the group id.  The total number of available
948 * counters for the group ID is returned in max_counters.
949 * If the array or size passed in are invalid, then only the maximum number
950 * of counters will be returned, no data will be written to *countables.
951 * If the groupid is invalid an error code will be returned.
952 *
953 */
954struct kgsl_perfcounter_query {
955	unsigned int groupid;
956	/* Array to return the current countable for up to size counters */
957	unsigned int *countables;
958	unsigned int count;
959	unsigned int max_counters;
960/* private: reserved for future use */
961	unsigned int __pad[2]; /* For future binary compatibility */
962};
963
964#define IOCTL_KGSL_PERFCOUNTER_QUERY \
965	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
966
967/**
968 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
969 * @groupid: Performance counter group IDs
970 * @countable: Performance counter countable IDs
971 * @value: Return performance counter reads
972 * @size: Size of all arrays (groupid/countable pair and return value)
973 *
974 * Read in the current value of a performance counter given by the groupid
975 * and countable.
976 *
977 */
978
979struct kgsl_perfcounter_read_group {
980	unsigned int groupid;
981	unsigned int countable;
982	unsigned long long value;
983};
984
985struct kgsl_perfcounter_read {
986	struct kgsl_perfcounter_read_group *reads;
987	unsigned int count;
988/* private: reserved for future use */
989	unsigned int __pad[2]; /* For future binary compatibility */
990};
991
992#define IOCTL_KGSL_PERFCOUNTER_READ \
993	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
994/*
995 * struct kgsl_gpumem_sync_cache_bulk - argument to
996 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
997 * @id_list: list of GPU buffer ids of the buffers to sync
998 * @count: number of GPU buffer ids in id_list
999 * @op: a mask of KGSL_GPUMEM_CACHE_* values
1000 *
1001 * Sync the cache for memory headed to and from the GPU. Certain
1002 * optimizations can be made on the cache operation based on the total
1003 * size of the working set of memory to be managed.
1004 */
1005struct kgsl_gpumem_sync_cache_bulk {
1006	unsigned int *id_list;
1007	unsigned int count;
1008	unsigned int op;
1009/* private: reserved for future use */
1010	unsigned int __pad[2]; /* For future binary compatibility */
1011};
1012
1013#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
1014	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
1015
1016/*
1017 * struct kgsl_cmd_syncpoint_timestamp
1018 * @context_id: ID of a KGSL context
1019 * @timestamp: GPU timestamp
1020 *
1021 * This structure defines a syncpoint comprising a context/timestamp pair. A
1022 * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
1023 * dependencies that must be met before the command can be submitted to the
1024 * hardware
1025 */
1026struct kgsl_cmd_syncpoint_timestamp {
1027	unsigned int context_id;
1028	unsigned int timestamp;
1029};
1030
1031struct kgsl_cmd_syncpoint_fence {
1032	int fd;
1033};
1034
1035/**
1036 * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
1037 * @type: type of sync point defined here
1038 * @priv: Pointer to the type specific buffer
1039 * @size: Size of the type specific buffer
1040 *
1041 * This structure contains pointers defining a specific command sync point.
1042 * The pointer and size should point to a type appropriate structure.
1043 */
1044struct kgsl_cmd_syncpoint {
1045	int type;
1046	void *priv;
1047	size_t size;
1048};
1049
/* Flag to indicate that the cmdlist may contain memlists */
#define KGSL_IBDESC_MEMLIST 0x1

/* Flag to point out the cmdbatch profiling buffer in the memlist */
/* NOTE(review): these appear to be per-kgsl_ibdesc ctrl flags — confirm
 * against the kgsl_ibdesc definition earlier in this header.
 */
#define KGSL_IBDESC_PROFILING_BUFFER 0x2
1055
1056/**
1057 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
1058 * @context_id: KGSL context ID that owns the commands
1059 * @flags:
1060 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
1061 * @numcmds: Number of commands listed in cmdlist
1062 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
1063 * @numsyncs: Number of sync points listed in synclist
1064 * @timestamp: On entry the a user defined timestamp, on exist the timestamp
1065 * assigned to the command batch
1066 *
1067 * This structure specifies a command to send to the GPU hardware.  This is
1068 * similar to kgsl_issueibcmds expect that it doesn't support the legacy way to
1069 * submit IB lists and it adds sync points to block the IB until the
1070 * dependencies are satisified.  This entry point is the new and preferred way
1071 * to submit commands to the GPU. The memory list can be used to specify all
1072 * memory that is referrenced in the current set of commands.
1073 */
1074
1075struct kgsl_submit_commands {
1076	unsigned int context_id;
1077	unsigned int flags;
1078	struct kgsl_ibdesc *cmdlist;
1079	unsigned int numcmds;
1080	struct kgsl_cmd_syncpoint *synclist;
1081	unsigned int numsyncs;
1082	unsigned int timestamp;
1083/* private: reserved for future use */
1084	unsigned int __pad[4];
1085};
1086
1087#define IOCTL_KGSL_SUBMIT_COMMANDS \
1088	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
1089
1090/**
1091 * struct kgsl_device_constraint - device constraint argument
1092 * @context_id: KGSL context ID
1093 * @type: type of constraint i.e pwrlevel/none
1094 * @data: constraint data
1095 * @size: size of the constraint data
1096 */
1097struct kgsl_device_constraint {
1098	unsigned int type;
1099	unsigned int context_id;
1100	void *data;
1101	size_t size;
1102};
1103
1104/* Constraint Type*/
1105#define KGSL_CONSTRAINT_NONE 0
1106#define KGSL_CONSTRAINT_PWRLEVEL 1
1107
1108/* PWRLEVEL constraint level*/
1109/* set to min frequency */
1110#define KGSL_CONSTRAINT_PWR_MIN    0
1111/* set to max frequency */
1112#define KGSL_CONSTRAINT_PWR_MAX    1
1113
1114struct kgsl_device_constraint_pwrlevel {
1115	unsigned int level;
1116};
1117
1118/**
1119 * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
1120 * @id: returned id for the syncsource that was created.
1121 *
1122 * This ioctl creates a userspace sync timeline.
1123 */
1124
1125struct kgsl_syncsource_create {
1126	unsigned int id;
1127/* private: reserved for future use */
1128	unsigned int __pad[3];
1129};
1130
1131#define IOCTL_KGSL_SYNCSOURCE_CREATE \
1132	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
1133
1134/**
1135 * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
1136 * @id: syncsource id to destroy
1137 *
1138 * This ioctl creates a userspace sync timeline.
1139 */
1140
1141struct kgsl_syncsource_destroy {
1142	unsigned int id;
1143/* private: reserved for future use */
1144	unsigned int __pad[3];
1145};
1146
1147#define IOCTL_KGSL_SYNCSOURCE_DESTROY \
1148	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
1149
1150/**
1151 * struct kgsl_syncsource_create_fence - Argument to
1152 *     IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1153 * @id: syncsource id
1154 * @fence_fd: returned sync_fence fd
1155 *
1156 * Create a fence that may be signaled by userspace by calling
1157 * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
1158 * these fences.
1159 */
1160struct kgsl_syncsource_create_fence {
1161	unsigned int id;
1162	int fence_fd;
1163/* private: reserved for future use */
1164	unsigned int __pad[4];
1165};
1166
1167/**
1168 * struct kgsl_syncsource_signal_fence - Argument to
1169 *     IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
1170 * @id: syncsource id
1171 * @fence_fd: sync_fence fd to signal
1172 *
1173 * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1174 * call using the same syncsource id. This allows a fence to be shared
1175 * to other processes but only signaled by the process owning the fd
1176 * used to create the fence.
1177 */
1178#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
1179	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)
1180
1181struct kgsl_syncsource_signal_fence {
1182	unsigned int id;
1183	int fence_fd;
1184/* private: reserved for future use */
1185	unsigned int __pad[4];
1186};
1187
1188#define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
1189	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1190
1191/**
1192 * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
1193 * @offset: Offset into the GPU object to sync
1194 * @length: Number of bytes to sync
1195 * @id: ID of the GPU object to sync
1196 */
1197struct kgsl_cff_sync_gpuobj {
1198	uint64_t offset;
1199	uint64_t length;
1200	unsigned int id;
1201};
1202
1203#define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
1204	_IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)
1205
1206/**
1207 * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
1208 * @size: Size in bytes of the object to allocate
1209 * @flags: mask of KGSL_MEMFLAG_* bits
1210 * @va_len: Size in bytes of the virtual region to allocate
1211 * @mmapsize: Returns the mmap() size of the object
1212 * @id: Returns the GPU object ID of the new object
1213 * @metadata_len: Length of the metdata to copy from the user
1214 * @metadata: Pointer to the user specified metadata to store for the object
1215 */
1216struct kgsl_gpuobj_alloc {
1217	uint64_t size;
1218	uint64_t flags;
1219	uint64_t va_len;
1220	uint64_t mmapsize;
1221	unsigned int id;
1222	unsigned int metadata_len;
1223	uint64_t metadata;
1224};
1225
1226/* Let the user know that this header supports the gpuobj metadata */
1227#define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64
1228
1229#define IOCTL_KGSL_GPUOBJ_ALLOC \
1230	_IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)
1231
1232/**
1233 * struct kgsl_gpuobj_free - Argument to IOCTL_KGLS_GPUOBJ_FREE
1234 * @flags: Mask of: KGSL_GUPOBJ_FREE_ON_EVENT
1235 * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
1236 * specified
1237 * @id: ID of the GPU object to free
1238 * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
1239 * event to free on
1240 * @len: Length of the data passed in priv
1241 */
1242struct kgsl_gpuobj_free {
1243	uint64_t flags;
1244	uint64_t priv;
1245	unsigned int id;
1246	unsigned int type;
1247	unsigned int len;
1248};
1249
1250#define KGSL_GPUOBJ_FREE_ON_EVENT 1
1251
1252#define KGSL_GPU_EVENT_TIMESTAMP 1
1253#define KGSL_GPU_EVENT_FENCE     2
1254
1255/**
1256 * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
1257 * object on
1258 * @context_id: ID of the timestamp event to wait for
1259 * @timestamp: Timestamp of the timestamp event to wait for
1260 */
1261struct kgsl_gpu_event_timestamp {
1262	unsigned int context_id;
1263	unsigned int timestamp;
1264};
1265
1266/**
1267 * struct kgsl_gpu_event_fence - Specifies a fence ID to to free a GPU object on
1268 * @fd: File descriptor for the fence
1269 */
1270struct kgsl_gpu_event_fence {
1271	int fd;
1272};
1273
1274#define IOCTL_KGSL_GPUOBJ_FREE \
1275	_IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
1276
1277/**
1278 * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
1279 * @gpuaddr: GPU address of the object
1280 * @flags: Current flags for the object
1281 * @size: Size of the object
1282 * @va_len: VA size of the object
1283 * @va_addr: Virtual address of the object (if it is mapped)
1284 * id - GPU object ID of the object to query
1285 */
1286struct kgsl_gpuobj_info {
1287	uint64_t gpuaddr;
1288	uint64_t flags;
1289	uint64_t size;
1290	uint64_t va_len;
1291	uint64_t va_addr;
1292	unsigned int id;
1293};
1294
1295#define IOCTL_KGSL_GPUOBJ_INFO \
1296	_IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)
1297
1298/**
1299 * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
1300 * @priv: Pointer to the private data for the import type
1301 * @priv_len: Length of the private data
1302 * @flags: Mask of KGSL_MEMFLAG_ flags
1303 * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
1304 * @id: Returns the ID of the new GPU object
1305 */
1306struct kgsl_gpuobj_import {
1307	uint64_t priv;
1308	uint64_t priv_len;
1309	uint64_t flags;
1310	unsigned int type;
1311	unsigned int id;
1312};
1313
1314/**
1315 * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
1316 * @fd: File descriptor for the dma-buf object
1317 */
1318struct kgsl_gpuobj_import_dma_buf {
1319	int fd;
1320};
1321
1322/**
1323 * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
1324 * @virtaddr: Virtual address of the object to import
1325 */
1326struct kgsl_gpuobj_import_useraddr {
1327	uint64_t virtaddr;
1328};
1329
1330#define IOCTL_KGSL_GPUOBJ_IMPORT \
1331	_IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)
1332
1333/**
1334 * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
1335 * @offset: Offset within the GPU object to sync
1336 * @length: Number of bytes to sync
1337 * @id: ID of the GPU object to sync
1338 * @op: Cache operation to execute
1339 */
1340
1341struct kgsl_gpuobj_sync_obj {
1342	uint64_t offset;
1343	uint64_t length;
1344	unsigned int id;
1345	unsigned int op;
1346};
1347
1348/**
1349 * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
1350 * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
1351 * @obj_len: Size of each item in the array
1352 * @count: Number of items in the array
1353 */
1354
1355struct kgsl_gpuobj_sync {
1356	uint64_t objs;
1357	unsigned int obj_len;
1358	unsigned int count;
1359};
1360
1361#define IOCTL_KGSL_GPUOBJ_SYNC \
1362	_IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)
1363
1364/**
1365 * struct kgsl_command_object - GPU command object
1366 * @offset: GPU address offset of the object
1367 * @gpuaddr: GPU address of the object
1368 * @size: Size of the object
1369 * @flags: Current flags for the object
1370 * @id - GPU command object ID
1371 */
1372struct kgsl_command_object {
1373	uint64_t offset;
1374	uint64_t gpuaddr;
1375	uint64_t size;
1376	unsigned int flags;
1377	unsigned int id;
1378};
1379
1380/**
1381 * struct kgsl_command_syncpoint - GPU syncpoint object
1382 * @priv: Pointer to the type specific buffer
1383 * @size: Size of the type specific buffer
1384 * @type: type of sync point defined here
1385 */
1386struct kgsl_command_syncpoint {
1387	uint64_t priv;
1388	uint64_t size;
1389	unsigned int type;
1390};
1391
1392/**
1393 * struct kgsl_command_object - Argument for IOCTL_KGSL_GPU_COMMAND
1394 * @flags: Current flags for the object
1395 * @cmdlist: List of kgsl_command_objects for submission
1396 * @cmd_size: Size of kgsl_command_objects structure
1397 * @numcmds: Number of kgsl_command_objects in command list
1398 * @objlist: List of kgsl_command_objects for tracking
1399 * @obj_size: Size of kgsl_command_objects structure
1400 * @numobjs: Number of kgsl_command_objects in object list
1401 * @synclist: List of kgsl_command_syncpoints
1402 * @sync_size: Size of kgsl_command_syncpoint structure
1403 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1404 * @context_id: Context ID submittin ghte kgsl_gpu_command
1405 * @timestamp: Timestamp for the submitted commands
1406 */
1407struct kgsl_gpu_command {
1408	uint64_t flags;
1409	uint64_t cmdlist;
1410	unsigned int cmdsize;
1411	unsigned int numcmds;
1412	uint64_t objlist;
1413	unsigned int objsize;
1414	unsigned int numobjs;
1415	uint64_t synclist;
1416	unsigned int syncsize;
1417	unsigned int numsyncs;
1418	unsigned int context_id;
1419	unsigned int timestamp;
1420};
1421
1422#define IOCTL_KGSL_GPU_COMMAND \
1423	_IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
1424
1425/**
1426 * struct kgsl_preemption_counters_query - argument to
1427 * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
1428 * @counters: Return preemption counters array
1429 * @size_user: Size allocated by userspace
1430 * @size_priority_level: Size of preemption counters for each
1431 * priority level
1432 * @max_priority_level: Return max number of priority levels
1433 *
1434 * Query the available preemption counters. The array counters
1435 * is used to return preemption counters. The size of the array
1436 * is passed in so the kernel will only write at most size_user
1437 * or max available preemption counters.  The total number of
1438 * preemption counters is returned in max_priority_level. If the
1439 * array or size passed in are invalid, then an error is
1440 * returned back.
1441 */
1442struct kgsl_preemption_counters_query {
1443	uint64_t counters;
1444	unsigned int size_user;
1445	unsigned int size_priority_level;
1446	unsigned int max_priority_level;
1447};
1448
1449#define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
1450	_IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)
1451
1452/**
1453 * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
1454 * @flags: Flags to indicate which paramaters to change
1455 * @metadata:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
1456 * metadata
1457 * @id: GPU memory object ID to change
1458 * @metadata_len:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
1459 * new metadata string
1460 * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
1461 */
1462
1463#define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
1464#define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)
1465
1466struct kgsl_gpuobj_set_info {
1467	uint64_t flags;
1468	uint64_t metadata;
1469	unsigned int id;
1470	unsigned int metadata_len;
1471	unsigned int type;
1472};
1473
1474#define IOCTL_KGSL_GPUOBJ_SET_INFO \
1475	_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
1476
1477/**
1478 * struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
1479 * @size: Size in bytes to back
1480 * @pagesize: Pagesize alignment required
1481 * @flags: Flags for this allocation
1482 * @id: Returned ID for this allocation
1483 */
1484struct kgsl_sparse_phys_alloc {
1485	uint64_t size;
1486	uint64_t pagesize;
1487	uint64_t flags;
1488	unsigned int id;
1489};
1490
1491#define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
1492	_IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)
1493
1494/**
1495 * struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
1496 * @id: ID to free
1497 */
1498struct kgsl_sparse_phys_free {
1499	unsigned int id;
1500};
1501
1502#define IOCTL_KGSL_SPARSE_PHYS_FREE \
1503	_IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)
1504
1505/**
1506 * struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
1507 * @size: Size in bytes to reserve
1508 * @pagesize: Pagesize alignment required
1509 * @flags: Flags for this allocation
1510 * @id: Returned ID for this allocation
1511 * @gpuaddr: Returned GPU address for this allocation
1512 */
1513struct kgsl_sparse_virt_alloc {
1514	uint64_t size;
1515	uint64_t pagesize;
1516	uint64_t flags;
1517	uint64_t gpuaddr;
1518	unsigned int id;
1519};
1520
1521#define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
1522	_IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)
1523
1524/**
1525 * struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
1526 * @id: ID to free
1527 */
1528struct kgsl_sparse_virt_free {
1529	unsigned int id;
1530};
1531
1532#define IOCTL_KGSL_SPARSE_VIRT_FREE \
1533	_IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)
1534
1535/**
1536 * struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
1537 * @virtoffset: Offset into the virtual ID
1538 * @physoffset: Offset into the physical ID (bind only)
1539 * @size: Size in bytes to reserve
1540 * @flags: Flags for this kgsl_sparse_binding_object
1541 * @id: Physical ID to bind (bind only)
1542 */
1543struct kgsl_sparse_binding_object {
1544	uint64_t virtoffset;
1545	uint64_t physoffset;
1546	uint64_t size;
1547	uint64_t flags;
1548	unsigned int id;
1549};
1550
1551/**
1552 * struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
1553 * @list: List of kgsl_sparse_bind_objects to bind/unbind
1554 * @id: Virtual ID to bind/unbind
1555 * @size: Size of kgsl_sparse_bind_object
1556 * @count: Number of elements in list
1557 *
1558 */
1559struct kgsl_sparse_bind {
1560	uint64_t list;
1561	unsigned int id;
1562	unsigned int size;
1563	unsigned int count;
1564};
1565
1566#define IOCTL_KGSL_SPARSE_BIND \
1567	_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)
1568
1569/**
1570 * struct kgsl_gpu_sparse_command - Argument for
1571 * IOCTL_KGSL_GPU_SPARSE_COMMAND
1572 * @flags: Current flags for the object
1573 * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
1574 * @synclist: List of kgsl_command_syncpoints
1575 * @sparsesize: Size of kgsl_sparse_binding_object
1576 * @numsparse: Number of elements in list
1577 * @sync_size: Size of kgsl_command_syncpoint structure
1578 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
1579 * @context_id: Context ID submitting the kgsl_gpu_command
1580 * @timestamp: Timestamp for the submitted commands
1581 * @id: Virtual ID to bind/unbind
1582 */
1583struct kgsl_gpu_sparse_command {
1584	uint64_t flags;
1585	uint64_t sparselist;
1586	uint64_t synclist;
1587	unsigned int sparsesize;
1588	unsigned int numsparse;
1589	unsigned int syncsize;
1590	unsigned int numsyncs;
1591	unsigned int context_id;
1592	unsigned int timestamp;
1593	unsigned int id;
1594};
1595
1596#define IOCTL_KGSL_GPU_SPARSE_COMMAND \
1597	_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
1598
1599#endif /* _MSM_KGSL_H */
1600