1#ifndef _UAPI_MSM_KGSL_H
2#define _UAPI_MSM_KGSL_H
3
4#include <linux/types.h>
5#include <linux/ioctl.h>
6
7/*
 * The KGSL version has proven not to be very useful in userspace if features
 * are cherry-picked into other trees out of order, so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
11 * software releases are never linear. Also, I like pie.
12 */
13
14#define KGSL_VERSION_MAJOR        3
15#define KGSL_VERSION_MINOR        14
16
17/*
18 * We have traditionally mixed context and issueibcmds / command batch flags
19 * together into a big flag stew. This worked fine until we started adding a
20 * lot more command batch flags and we started running out of bits. Turns out
21 * we have a bit of room in the context type / priority mask that we could use
22 * for command batches, but that means we need to split out the flags into two
23 * coherent sets.
24 *
 * If any future definitions are for both context and cmdbatch, add both
 * defines and link the cmdbatch define to the context define as we do below.
 * Otherwise feel free to add exclusive bits to either set.
28 */
29
30/* --- context flags --- */
31#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
32#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
33/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
34#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
35#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
36#define KGSL_CONTEXT_PREAMBLE		0x00000010
37#define KGSL_CONTEXT_TRASH_STATE	0x00000020
38#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
39#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
40/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
41#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
42#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
43/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
44#define KGSL_CONTEXT_SYNC               0x00000400
45#define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800
46#define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
47#define KGSL_CONTEXT_PRIORITY_SHIFT     12
48#define KGSL_CONTEXT_PRIORITY_UNDEF     0
49
50#define KGSL_CONTEXT_IFH_NOP            0x00010000
51#define KGSL_CONTEXT_SECURE             0x00020000
52#define KGSL_CONTEXT_NO_SNAPSHOT        0x00040000
53
54#define KGSL_CONTEXT_PREEMPT_STYLE_MASK       0x0E000000
55#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT      25
56#define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT    0x0
57#define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER 0x1
58#define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN  0x2
59
60#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
61#define KGSL_CONTEXT_TYPE_SHIFT         20
62#define KGSL_CONTEXT_TYPE_ANY		0
63#define KGSL_CONTEXT_TYPE_GL		1
64#define KGSL_CONTEXT_TYPE_CL		2
65#define KGSL_CONTEXT_TYPE_C2D		3
66#define KGSL_CONTEXT_TYPE_RS		4
67#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E
68
69#define KGSL_CONTEXT_INVALID 0xffffffff
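
/*
 * Illustrative sketch (not part of the UAPI): packing a context type and
 * priority into the flags word with the masks above before calling
 * IOCTL_KGSL_DRAWCTXT_CREATE.  The helper name is hypothetical.
 *
 *	static unsigned int kgsl_make_ctxt_flags(unsigned int type,
 *						 unsigned int priority)
 *	{
 *		unsigned int flags = 0;
 *
 *		flags |= (type << KGSL_CONTEXT_TYPE_SHIFT) &
 *			 KGSL_CONTEXT_TYPE_MASK;
 *		flags |= (priority << KGSL_CONTEXT_PRIORITY_SHIFT) &
 *			 KGSL_CONTEXT_PRIORITY_MASK;
 *		return flags;
 *	}
 *
 * For example, kgsl_make_ctxt_flags(KGSL_CONTEXT_TYPE_GL, 1) requests a GL
 * context at priority level 1.
 */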
70
71/*
72 * --- command batch flags ---
73 * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
 * definitions or bits that are valid for both contexts and cmdbatches.  To be
 * safe, the other 8 bits that are still available in the context field should
 * be omitted here in case we need to share them - the remaining bits are
 * available for cmdbatch-only flags as needed.
78 */
79#define KGSL_CMDBATCH_MEMLIST		0x00000001
80#define KGSL_CMDBATCH_MARKER		0x00000002
81#define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
82#define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
83#define KGSL_CMDBATCH_PROFILING		0x00000010
84/*
85 * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
86 * to take effect, as the latter only affects the time data returned.
87 */
88#define KGSL_CMDBATCH_PROFILING_KTIME	0x00000020
89#define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
90#define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
91#define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
92
93/*
94 * Reserve bits [16:19] and bits [28:31] for possible bits shared between
95 * contexts and command batches.  Update this comment as new flags are added.
96 */
97
98/*
99 * gpu_command_object flags - these flags communicate the type of command or
100 * memory object being submitted for a GPU command
101 */
102
103/* Flags for GPU command objects */
104#define KGSL_CMDLIST_IB                  0x00000001U
105#define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
106#define KGSL_CMDLIST_IB_PREAMBLE         0x00000004U
107
108/* Flags for GPU command memory objects */
109#define KGSL_OBJLIST_MEMOBJ  0x00000008U
110#define KGSL_OBJLIST_PROFILE 0x00000010U
111
112/* Flags for GPU command sync points */
113#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
114#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
115
116/* --- Memory allocation flags --- */
117
118/* General allocation hints */
119#define KGSL_MEMFLAGS_SECURE      0x00000008ULL
120#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
121#define KGSL_MEMFLAGS_GPUWRITEONLY 0x02000000U
122#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL
123
124/* Memory caching hints */
125#define KGSL_CACHEMODE_MASK       0x0C000000U
126#define KGSL_CACHEMODE_SHIFT 26
127
128#define KGSL_CACHEMODE_WRITECOMBINE 0
129#define KGSL_CACHEMODE_UNCACHED 1
130#define KGSL_CACHEMODE_WRITETHROUGH 2
131#define KGSL_CACHEMODE_WRITEBACK 3
132
133#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
134
135/* Memory types for which allocations are made */
136#define KGSL_MEMTYPE_MASK		0x0000FF00
137#define KGSL_MEMTYPE_SHIFT		8
138
139#define KGSL_MEMTYPE_OBJECTANY			0
140#define KGSL_MEMTYPE_FRAMEBUFFER		1
141#define KGSL_MEMTYPE_RENDERBUFFER		2
142#define KGSL_MEMTYPE_ARRAYBUFFER		3
143#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
144#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
145#define KGSL_MEMTYPE_TEXTURE			6
146#define KGSL_MEMTYPE_SURFACE			7
147#define KGSL_MEMTYPE_EGL_SURFACE		8
148#define KGSL_MEMTYPE_GL				9
149#define KGSL_MEMTYPE_CL				10
150#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
151#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
152#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
153#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
154#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
155#define KGSL_MEMTYPE_COMMAND			16
156#define KGSL_MEMTYPE_2D				17
157#define KGSL_MEMTYPE_EGL_IMAGE			18
158#define KGSL_MEMTYPE_EGL_SHADOW			19
159#define KGSL_MEMTYPE_MULTISAMPLE		20
160#define KGSL_MEMTYPE_KERNEL			255
161
162/*
 * Alignment hint, passed as the power-of-2 exponent,
 * e.g. 4k (2^12) would be 12 and 64k (2^16) would be 16.
165 */
166#define KGSL_MEMALIGN_MASK		0x00FF0000
167#define KGSL_MEMALIGN_SHIFT		16
168
169enum kgsl_user_mem_type {
170	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
171	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
172	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
173	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
174	/*
	 * The ION type is retained for backwards compatibility, but Ion buffers
	 * are dma-bufs, so prefer that naming where we can
177	 */
178	KGSL_USER_MEM_TYPE_DMABUF       = 0x00000003,
179	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
180};
181#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
182#define KGSL_MEMFLAGS_USERMEM_SHIFT 5
183
184/*
185 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
186 * leave a good value for allocated memory. In the flags we use
187 * 0 to indicate allocated memory and thus need to add 1 to the enum
188 * values.
189 */
190#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)
191
192#define KGSL_MEMFLAGS_NOT_USERMEM 0
193#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
194#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
195		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
196#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
197#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
198
199/* --- generic KGSL flag values --- */
200
201#define KGSL_FLAGS_NORMALMODE  0x00000000
202#define KGSL_FLAGS_SAFEMODE    0x00000001
203#define KGSL_FLAGS_INITIALIZED0 0x00000002
204#define KGSL_FLAGS_INITIALIZED 0x00000004
205#define KGSL_FLAGS_STARTED     0x00000008
206#define KGSL_FLAGS_ACTIVE      0x00000010
207#define KGSL_FLAGS_RESERVED0   0x00000020
208#define KGSL_FLAGS_RESERVED1   0x00000040
209#define KGSL_FLAGS_RESERVED2   0x00000080
210#define KGSL_FLAGS_SOFT_RESET  0x00000100
211#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
212
213/* Server Side Sync Timeout in milliseconds */
214#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
215
216/*
217 * Reset status values for context
218 */
219enum kgsl_ctx_reset_stat {
220	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
221	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
222	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
223	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
224};
225
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)
228
229/* device id */
230enum kgsl_deviceid {
231	KGSL_DEVICE_3D0		= 0x00000000,
232	KGSL_DEVICE_MAX
233};
234
235struct kgsl_devinfo {
	unsigned int device_id;
	/*
	 * chip revision id
	 * coreid:8 majorrev:8 minorrev:8 patch:8
	 */
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned long gmem_gpubaseaddr;
	/*
	 * This field contains the adreno revision
	 * number 200, 205, 220, etc...
	 */
248	unsigned int gpu_id;
249	size_t gmem_sizebytes;
250};
251
252/*
253 * struct kgsl_devmemstore - this structure defines the region of memory
254 * that can be mmap()ed from this driver. The timestamp fields are volatile
255 * because they are written by the GPU
 * @soptimestamp: Start of pipeline timestamp written by the GPU before the
 * commands in question are processed
 * @sbz: Unused, kept for 8 byte alignment
 * @eoptimestamp: End of pipeline timestamp written by the GPU after the
 * commands in question are processed
261 * @sbz2: Unused, kept for 8 byte alignment
262 * @preempted: Indicates if the context was preempted
263 * @sbz3: Unused, kept for 8 byte alignment
264 * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
265 * @sbz4: Unused, kept for 8 byte alignment
266 * @current_context: The current context the GPU is working on
267 * @sbz5: Unused, kept for 8 byte alignment
268 */
269struct kgsl_devmemstore {
270	volatile unsigned int soptimestamp;
271	unsigned int sbz;
272	volatile unsigned int eoptimestamp;
273	unsigned int sbz2;
274	volatile unsigned int preempted;
275	unsigned int sbz3;
276	volatile unsigned int ref_wait_ts;
277	unsigned int sbz4;
278	unsigned int current_context;
279	unsigned int sbz5;
280};
281
282#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
283	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
284	 offsetof(struct kgsl_devmemstore, field))
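
/*
 * Illustrative sketch (not part of the UAPI): once the memstore region has
 * been mmap()ed (see KGSL_PROP_DEVICE_SHADOW below), KGSL_MEMSTORE_OFFSET()
 * locates a per-context field inside that mapping.  The variable names are
 * assumptions made for the example.
 *
 *	void *memstore;		// base of the mmap()ed memstore region
 *	unsigned int ctxt_id;	// id from IOCTL_KGSL_DRAWCTXT_CREATE
 *
 *	volatile unsigned int *eop = (volatile unsigned int *)
 *		((char *)memstore +
 *		 KGSL_MEMSTORE_OFFSET(ctxt_id, eoptimestamp));
 *
 *	// *eop now tracks the last timestamp retired for this context
 */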
285
/* timestamp id */
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp */
290	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
291};
292
293/* property types - used with kgsl_device_getproperty */
294#define KGSL_PROP_DEVICE_INFO		0x1
295#define KGSL_PROP_DEVICE_SHADOW		0x2
296#define KGSL_PROP_DEVICE_POWER		0x3
297#define KGSL_PROP_SHMEM			0x4
298#define KGSL_PROP_SHMEM_APERTURES	0x5
299#define KGSL_PROP_MMU_ENABLE		0x6
300#define KGSL_PROP_INTERRUPT_WAITS	0x7
301#define KGSL_PROP_VERSION		0x8
302#define KGSL_PROP_GPU_RESET_STAT	0x9
303#define KGSL_PROP_PWRCTRL		0xE
304#define KGSL_PROP_PWR_CONSTRAINT	0x12
305#define KGSL_PROP_UCHE_GMEM_VADDR	0x13
306#define KGSL_PROP_SP_GENERIC_MEM	0x14
307#define KGSL_PROP_UCODE_VERSION		0x15
308#define KGSL_PROP_GPMU_VERSION		0x16
309#define KGSL_PROP_HIGHEST_BANK_BIT	0x17
310#define KGSL_PROP_DEVICE_BITNESS	0x18
311
312struct kgsl_shadowprop {
313	unsigned long gpuaddr;
314	size_t size;
315	unsigned int flags; /* contains KGSL_FLAGS_ values */
316};
317
318struct kgsl_version {
319	unsigned int drv_major;
320	unsigned int drv_minor;
321	unsigned int dev_major;
322	unsigned int dev_minor;
323};
324
325struct kgsl_sp_generic_mem {
326	uint64_t local;
327	uint64_t pvt;
328};
329
330struct kgsl_ucode_version {
331	unsigned int pfp;
332	unsigned int pm4;
333};
334
335struct kgsl_gpmu_version {
336	unsigned int major;
337	unsigned int minor;
338	unsigned int features;
339};
340
341/* Performance counter groups */
342
343#define KGSL_PERFCOUNTER_GROUP_CP 0x0
344#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
345#define KGSL_PERFCOUNTER_GROUP_PC 0x2
346#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
347#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
348#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
349#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
350#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
351#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
352#define KGSL_PERFCOUNTER_GROUP_TP 0x9
353#define KGSL_PERFCOUNTER_GROUP_SP 0xA
354#define KGSL_PERFCOUNTER_GROUP_RB 0xB
355#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
356#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
357#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
358#define KGSL_PERFCOUNTER_GROUP_MH 0xF
359#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
360#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
361#define KGSL_PERFCOUNTER_GROUP_SX 0x12
362#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
363#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
364#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
365#define KGSL_PERFCOUNTER_GROUP_L2 0x16
366#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
367#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
368#define KGSL_PERFCOUNTER_GROUP_LRZ 0x19
369#define KGSL_PERFCOUNTER_GROUP_CMP 0x1A
370#define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
371#define KGSL_PERFCOUNTER_GROUP_SP_PWR 0x1C
372#define KGSL_PERFCOUNTER_GROUP_TP_PWR 0x1D
373#define KGSL_PERFCOUNTER_GROUP_RB_PWR 0x1E
374#define KGSL_PERFCOUNTER_GROUP_CCU_PWR 0x1F
375#define KGSL_PERFCOUNTER_GROUP_UCHE_PWR 0x20
376#define KGSL_PERFCOUNTER_GROUP_CP_PWR 0x21
377#define KGSL_PERFCOUNTER_GROUP_GPMU_PWR 0x22
378#define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
379#define KGSL_PERFCOUNTER_GROUP_MAX 0x24
380
381#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
382#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
383
384/* structure holds list of ibs */
385struct kgsl_ibdesc {
386	unsigned long gpuaddr;
387	unsigned long __pad;
388	size_t sizedwords;
389	unsigned int ctrl;
390};
391
392/**
393 * struct kgsl_cmdbatch_profiling_buffer
394 * @wall_clock_s: Ringbuffer submission time (seconds).
395 *                If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided
396 *                in kernel clocks, otherwise wall clock time is used.
397 * @wall_clock_ns: Ringbuffer submission time (nanoseconds).
398 *                 If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided
399 *                 in kernel clocks, otherwise wall clock time is used.
400 * @gpu_ticks_queued: GPU ticks at ringbuffer submission
401 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
402 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
403 *
404 * This structure defines the profiling buffer used to measure cmdbatch
405 * execution time
406 */
407struct kgsl_cmdbatch_profiling_buffer {
408	uint64_t wall_clock_s;
409	uint64_t wall_clock_ns;
410	uint64_t gpu_ticks_queued;
411	uint64_t gpu_ticks_submitted;
412	uint64_t gpu_ticks_retired;
413};
414
415/* ioctls */
416#define KGSL_IOC_TYPE 0x09
417
/* get misc info about the GPU
   type should be one of the KGSL_PROP_* values above
   value points to a structure that varies based on type
   sizebytes is sizeof() that structure
   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
   this structure contains hardware versioning info.
   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
   this is used to find the mmap() offset and size for mapping
   struct kgsl_devmemstore into userspace.
*/
428struct kgsl_device_getproperty {
429	unsigned int type;
430	void __user *value;
431	size_t sizebytes;
432};
433
434#define IOCTL_KGSL_DEVICE_GETPROPERTY \
435	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
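
/*
 * Illustrative sketch (not part of the UAPI): querying device information.
 * The device node path is an assumption; on most targets the 3D device is
 * exposed as /dev/kgsl-3d0.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	int fd = open("/dev/kgsl-3d0", O_RDWR);
 *	struct kgsl_devinfo info = { 0 };
 *	struct kgsl_device_getproperty prop = {
 *		.type = KGSL_PROP_DEVICE_INFO,
 *		.value = &info,
 *		.sizebytes = sizeof(info),
 *	};
 *
 *	if (fd >= 0 && ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop) == 0) {
 *		// info.chip_id and info.gpu_id now describe the GPU
 *	}
 */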
436
437/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
438 */
439
440/* block until the GPU has executed past a given timestamp
441 * timeout is in milliseconds.
442 */
443struct kgsl_device_waittimestamp {
444	unsigned int timestamp;
445	unsigned int timeout;
446};
447
448#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
449	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)
450
451struct kgsl_device_waittimestamp_ctxtid {
452	unsigned int context_id;
453	unsigned int timestamp;
454	unsigned int timeout;
455};
456
457#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
458	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
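
/*
 * Illustrative sketch (not part of the UAPI): blocking until a per-context
 * timestamp retires.  'fd', 'ctxt' and 'ts' are assumptions for the example.
 *
 *	struct kgsl_device_waittimestamp_ctxtid wait = {
 *		.context_id = ctxt,
 *		.timestamp = ts,
 *		.timeout = 1000,	// milliseconds
 *	};
 *
 *	ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait);
 */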
459
460/* DEPRECATED: issue indirect commands to the GPU.
461 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
462 * ibaddr and sizedwords must specify a subset of a buffer created
463 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
464 * flags may be a mask of KGSL_CONTEXT_ values
465 * timestamp is a returned counter value which can be passed to
466 * other ioctls to determine when the commands have been executed by
467 * the GPU.
468 *
 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
470 * instead
471 */
472struct kgsl_ringbuffer_issueibcmds {
473	unsigned int drawctxt_id;
474	unsigned long ibdesc_addr;
475	unsigned int numibs;
476	unsigned int timestamp; /*output param */
477	unsigned int flags;
478};
479
480#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
481	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
482
483/* read the most recently executed timestamp value
484 * type should be a value from enum kgsl_timestamp_type
485 */
486struct kgsl_cmdstream_readtimestamp {
487	unsigned int type;
488	unsigned int timestamp; /*output param */
489};
490
491#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
492	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
493
494#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
495	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
496
/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specifies a memory region created by a
499 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
500 * type should be a value from enum kgsl_timestamp_type
501 */
502struct kgsl_cmdstream_freememontimestamp {
503	unsigned long gpuaddr;
504	unsigned int type;
505	unsigned int timestamp;
506};
507
508#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
509	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
510
/* Previous versions of this header had incorrectly defined
   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
   of a write-only ioctl.  To ensure binary compatibility, the following
   #define will be used to intercept the incorrect ioctl.
*/
516
517#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
518	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
519
/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask of KGSL_CONTEXT_* values
522 */
523struct kgsl_drawctxt_create {
524	unsigned int flags;
525	unsigned int drawctxt_id; /*output param */
526};
527
528#define IOCTL_KGSL_DRAWCTXT_CREATE \
529	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
530
531/* destroy a draw context */
532struct kgsl_drawctxt_destroy {
533	unsigned int drawctxt_id;
534};
535
536#define IOCTL_KGSL_DRAWCTXT_DESTROY \
537	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
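
/*
 * Illustrative sketch (not part of the UAPI): creating and destroying a draw
 * context.  The flags request a preamble-based GL context with per-context
 * timestamps; 'fd' is assumed to be an open handle to the KGSL device node.
 *
 *	struct kgsl_drawctxt_create create = {
 *		.flags = KGSL_CONTEXT_PREAMBLE | KGSL_CONTEXT_PER_CONTEXT_TS |
 *			 (KGSL_CONTEXT_TYPE_GL << KGSL_CONTEXT_TYPE_SHIFT),
 *	};
 *	struct kgsl_drawctxt_destroy destroy;
 *
 *	if (ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &create) == 0) {
 *		// create.drawctxt_id identifies the new context
 *		destroy.drawctxt_id = create.drawctxt_id;
 *		ioctl(fd, IOCTL_KGSL_DRAWCTXT_DESTROY, &destroy);
 *	}
 */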
538
539/* add a block of pmem, fb, ashmem or user allocated address
540 * into the GPU address space */
541struct kgsl_map_user_mem {
542	int fd;
543	unsigned long gpuaddr;   /*output param */
544	size_t len;
545	size_t offset;
546	unsigned long hostptr;   /*input param */
547	enum kgsl_user_mem_type memtype;
548	unsigned int flags;
549};
550
551#define IOCTL_KGSL_MAP_USER_MEM \
552	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
553
554struct kgsl_cmdstream_readtimestamp_ctxtid {
555	unsigned int context_id;
556	unsigned int type;
557	unsigned int timestamp; /*output param */
558};
559
560#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
561	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
562
563struct kgsl_cmdstream_freememontimestamp_ctxtid {
564	unsigned int context_id;
565	unsigned long gpuaddr;
566	unsigned int type;
567	unsigned int timestamp;
568};
569
570#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
571	_IOW(KGSL_IOC_TYPE, 0x17, \
572	struct kgsl_cmdstream_freememontimestamp_ctxtid)
573
574/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned long gpuaddr;	/* output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)
584
585/* remove memory from the GPU's address space */
586struct kgsl_sharedmem_free {
587	unsigned long gpuaddr;
588};
589
590#define IOCTL_KGSL_SHAREDMEM_FREE \
591	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
592
593struct kgsl_cff_user_event {
594	unsigned char cff_opcode;
595	unsigned int op1;
596	unsigned int op2;
597	unsigned int op3;
598	unsigned int op4;
599	unsigned int op5;
600	unsigned int __pad[2];
601};
602
603#define IOCTL_KGSL_CFF_USER_EVENT \
604	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
605
606struct kgsl_gmem_desc {
607	unsigned int x;
608	unsigned int y;
609	unsigned int width;
610	unsigned int height;
611	unsigned int pitch;
612};
613
struct kgsl_buffer_desc {
	void *hostptr;
	unsigned long gpuaddr;
	int size;
	unsigned int format;
	unsigned int pitch;
	unsigned int enabled;
};
622
623struct kgsl_bind_gmem_shadow {
624	unsigned int drawctxt_id;
625	struct kgsl_gmem_desc gmem_desc;
626	unsigned int shadow_x;
627	unsigned int shadow_y;
628	struct kgsl_buffer_desc shadow_buffer;
629	unsigned int buffer_id;
630};
631
#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
634
635/* add a block of memory into the GPU address space */
636
637/*
638 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
639 * use IOCTL_KGSL_GPUMEM_ALLOC instead
640 */
641
642struct kgsl_sharedmem_from_vmalloc {
643	unsigned long gpuaddr;	/*output param */
644	unsigned int hostptr;
645	unsigned int flags;
646};
647
648#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
649	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
650
/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_SYNC_CACHE, which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache, which might not be
 * what you want to have happen on a buffer following a GPU operation.  It is
 * safer to go with IOCTL_KGSL_GPUMEM_SYNC_CACHE.
 */
658
659#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
660	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
661
662struct kgsl_drawctxt_set_bin_base_offset {
663	unsigned int drawctxt_id;
664	unsigned int offset;
665};
666
667#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
668	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
669
670enum kgsl_cmdwindow_type {
671	KGSL_CMDWINDOW_MIN     = 0x00000000,
672	KGSL_CMDWINDOW_2D      = 0x00000000,
673	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
674	KGSL_CMDWINDOW_MMU     = 0x00000002,
675	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
676	KGSL_CMDWINDOW_MAX     = 0x000000FF,
677};
678
679/* write to the command window */
680struct kgsl_cmdwindow_write {
681	enum kgsl_cmdwindow_type target;
682	unsigned int addr;
683	unsigned int data;
684};
685
686#define IOCTL_KGSL_CMDWINDOW_WRITE \
687	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
688
689struct kgsl_gpumem_alloc {
690	unsigned long gpuaddr; /* output param */
691	size_t size;
692	unsigned int flags;
693};
694
695#define IOCTL_KGSL_GPUMEM_ALLOC \
696	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
697
698struct kgsl_cff_syncmem {
699	unsigned long gpuaddr;
700	size_t len;
701	unsigned int __pad[2]; /* For future binary compatibility */
702};
703
704#define IOCTL_KGSL_CFF_SYNCMEM \
705	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
706
707/*
 * A timestamp event allows userspace to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences, which need to return an fd for the priv parameter.
711 */
712
713struct kgsl_timestamp_event {
714	int type;                /* Type of event (see list below) */
715	unsigned int timestamp;  /* Timestamp to trigger event on */
716	unsigned int context_id; /* Context for the timestamp */
717	void __user *priv;	 /* Pointer to the event specific blob */
718	size_t len;              /* Size of the event specific blob */
719};
720
721#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
722	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
723
/* A genlock timestamp event releases an existing genlock lock when the
 * timestamp expires */
725
726#define KGSL_TIMESTAMP_EVENT_GENLOCK 1
727
728struct kgsl_timestamp_event_genlock {
729	int handle; /* Handle of the genlock lock to release */
730};
731
/* A fence timestamp event signals a sync fence when the timestamp expires */
733
734#define KGSL_TIMESTAMP_EVENT_FENCE 2
735
736struct kgsl_timestamp_event_fence {
737	int fence_fd; /* Fence to signal */
738};
739
740/*
 * Set a property within the kernel.  Uses the same structure as
 * IOCTL_KGSL_DEVICE_GETPROPERTY
743 */
744
745#define IOCTL_KGSL_SETPROPERTY \
746	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)
747
748#define IOCTL_KGSL_TIMESTAMP_EVENT \
749	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
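
/*
 * Illustrative sketch (not part of the UAPI): asking the kernel for a sync
 * fence fd that signals when a context reaches a timestamp.  'fd', 'ctxt'
 * and 'ts' are assumptions for the example.
 *
 *	struct kgsl_timestamp_event_fence fence = { .fence_fd = -1 };
 *	struct kgsl_timestamp_event event = {
 *		.type = KGSL_TIMESTAMP_EVENT_FENCE,
 *		.timestamp = ts,
 *		.context_id = ctxt,
 *		.priv = &fence,
 *		.len = sizeof(fence),
 *	};
 *
 *	if (ioctl(fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event) == 0) {
 *		// fence.fence_fd signals when 'ts' retires on 'ctxt'
 *	}
 */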
750
751/**
752 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
753 * @id: returned id value for this allocation.
754 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
755 * @size: requested size of the allocation and actual size on return.
756 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
757 * @gpuaddr: returned GPU address for the allocation
758 *
759 * Allocate memory for access by the GPU. The flags and size fields are echoed
760 * back by the kernel, so that the caller can know if the request was
761 * adjusted.
762 *
763 * Supported flags:
764 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
765 * KGSL_MEMTYPE*: usage hint for debugging aid
766 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
767 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
768 * address will be 0. Calling mmap() will set the GPU address.
769 */
770struct kgsl_gpumem_alloc_id {
771	unsigned int id;
772	unsigned int flags;
773	size_t size;
774	size_t mmapsize;
775	unsigned long gpuaddr;
776/* private: reserved for future use*/
777	unsigned long __pad[2];
778};
779
780#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
781	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
782
783/**
784 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
785 * @id: GPU allocation id to free
786 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl and freeing it by
 * GPU address with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
790 */
791struct kgsl_gpumem_free_id {
792	unsigned int id;
793/* private: reserved for future use*/
794	unsigned int __pad;
795};
796
797#define IOCTL_KGSL_GPUMEM_FREE_ID \
798	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
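
/*
 * Illustrative sketch (not part of the UAPI): allocating and freeing GPU
 * memory by id.  'fd' is assumed to be an open handle to the KGSL device
 * node; the mmap() offset convention used to map the buffer is not shown.
 *
 *	struct kgsl_gpumem_alloc_id alloc = {
 *		.size = 4096,
 *		.flags = KGSL_MEMFLAGS_GPUREADONLY |
 *			 (KGSL_MEMTYPE_TEXTURE << KGSL_MEMTYPE_SHIFT) |
 *			 (12 << KGSL_MEMALIGN_SHIFT),	// 4k alignment hint
 *	};
 *	struct kgsl_gpumem_free_id free_id;
 *
 *	if (ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &alloc) == 0) {
 *		// alloc.id, alloc.gpuaddr, alloc.size and alloc.mmapsize
 *		// have been filled in by the kernel
 *		free_id.id = alloc.id;
 *		ioctl(fd, IOCTL_KGSL_GPUMEM_FREE_ID, &free_id);
 *	}
 */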
799
800/**
801 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
802 * @gpuaddr: GPU address to query. Also set on return.
803 * @id: GPU allocation id to query. Also set on return.
804 * @flags: returned mask of KGSL_MEM* values.
805 * @size: returned size of the allocation.
 * @mmapsize: returned size to pass to mmap(), which may be larger than 'size'
807 * @useraddr: returned address of the userspace mapping for this buffer
808 *
809 * This ioctl allows querying of all user visible attributes of an existing
810 * allocation, by either the GPU address or the id returned by a previous
811 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
812 * return all attributes so this ioctl can be used to look them up if needed.
813 *
814 */
815struct kgsl_gpumem_get_info {
816	unsigned long gpuaddr;
817	unsigned int id;
818	unsigned int flags;
819	size_t size;
820	size_t mmapsize;
821	unsigned long useraddr;
822/* private: reserved for future use*/
823	unsigned long __pad[4];
824};
825
826#define IOCTL_KGSL_GPUMEM_GET_INFO\
827	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
828
829/**
830 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
831 * @gpuaddr: GPU address of the buffer to sync.
832 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
833 * @op: a mask of KGSL_GPUMEM_CACHE_* values
834 * @offset: offset into the buffer
835 * @length: number of bytes starting from offset to perform
836 * the cache operation on
837 *
 * Sync the L2 cache for memory headed to and from the GPU - this replaces
 * IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for
 * both directions.
841 *
842 */
843struct kgsl_gpumem_sync_cache {
844	unsigned long gpuaddr;
845	unsigned int id;
846	unsigned int op;
847	size_t offset;
848	size_t length;
849};
850
851#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
852#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
853
854#define KGSL_GPUMEM_CACHE_INV (1 << 1)
855#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
856
857#define KGSL_GPUMEM_CACHE_FLUSH \
858	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
859
860/* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
#define KGSL_GPUMEM_CACHE_RANGE (1U << 31)
862
863#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
864	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
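
/*
 * Illustrative sketch (not part of the UAPI): cleaning part of a cached
 * buffer before the GPU reads it.  'fd' and 'buf_id' are assumptions for
 * the example; KGSL_GPUMEM_CACHE_RANGE is assumed to request a ranged
 * operation using offset/length (see the compatibility note above).
 *
 *	struct kgsl_gpumem_sync_cache sync = {
 *		.gpuaddr = 0,		// identify the buffer by id instead
 *		.id = buf_id,
 *		.op = KGSL_GPUMEM_CACHE_TO_GPU | KGSL_GPUMEM_CACHE_RANGE,
 *		.offset = 0,
 *		.length = 4096,
 *	};
 *
 *	ioctl(fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &sync);
 */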
865
866/**
867 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
868 * @groupid: Performance counter group ID
869 * @countable: Countable to select within the group
870 * @offset: Return offset of the reserved LO counter
871 * @offset_hi: Return offset of the reserved HI counter
872 *
873 * Get an available performance counter from a specified groupid.  The offset
874 * of the performance counter will be returned after successfully assigning
875 * the countable to the counter for the specified group.  An error will be
876 * returned and an offset of 0 if the groupid is invalid or there are no
 * more counters left.  After successfully getting a perfcounter, the user
 * must call IOCTL_KGSL_PERFCOUNTER_PUT with the same groupid and countable
 * when finished with the perfcounter to release perfcounter resources.
880 *
881 */
882struct kgsl_perfcounter_get {
883	unsigned int groupid;
884	unsigned int countable;
885	unsigned int offset;
886	unsigned int offset_hi;
887/* private: reserved for future use */
888	unsigned int __pad; /* For future binary compatibility */
889};
890
891#define IOCTL_KGSL_PERFCOUNTER_GET \
892	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
893
894/**
895 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
896 * @groupid: Performance counter group ID
897 * @countable: Countable to release within the group
898 *
 * Put an allocated performance counter to allow others to have access to the
 * resource that was previously taken.  This is only to be called after
 * successfully getting a performance counter with IOCTL_KGSL_PERFCOUNTER_GET.
902 *
903 */
904struct kgsl_perfcounter_put {
905	unsigned int groupid;
906	unsigned int countable;
907/* private: reserved for future use */
908	unsigned int __pad[2]; /* For future binary compatibility */
909};
910
911#define IOCTL_KGSL_PERFCOUNTER_PUT \
912	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
913
914/**
915 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
 * @groupid: Performance counter group ID
 * @countables: Return array of the currently active countables
 * @count: Number of entries in the countables array
 * @max_counters: Return total number of counters for the group ID
 *
 * Query the available performance counters given a groupid.  The array
 * *countables is used to return the currently active countables for the
 * group's counters.  The array size is passed in count, so the kernel will
 * write at most count entries for the group id.  The total number of
 * available counters for the group ID is returned in max_counters.
 * If the array or count passed in are invalid, then only the maximum number
 * of counters will be returned and no data will be written to *countables.
 * If the groupid is invalid an error code will be returned.
929 *
930 */
931struct kgsl_perfcounter_query {
932	unsigned int groupid;
933	/* Array to return the current countable for up to size counters */
934	unsigned int __user *countables;
935	unsigned int count;
936	unsigned int max_counters;
937/* private: reserved for future use */
938	unsigned int __pad[2]; /* For future binary compatibility */
939};
940
941#define IOCTL_KGSL_PERFCOUNTER_QUERY \
942	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
943
/**
 * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
 * @reads: Array of kgsl_perfcounter_read_group entries; each entry specifies
 * a groupid/countable pair and returns the counter value read
 * @count: Number of entries in the reads array
 *
 * Read the current value of a performance counter for each groupid and
 * countable pair given in the reads array.
 *
 */
955
956struct kgsl_perfcounter_read_group {
957	unsigned int groupid;
958	unsigned int countable;
959	unsigned long long value;
960};
961
962struct kgsl_perfcounter_read {
963	struct kgsl_perfcounter_read_group __user *reads;
964	unsigned int count;
965/* private: reserved for future use */
966	unsigned int __pad[2]; /* For future binary compatibility */
967};
968
969#define IOCTL_KGSL_PERFCOUNTER_READ \
970	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
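
/*
 * Illustrative sketch (not part of the UAPI): reserving, sampling and
 * releasing a performance counter.  'fd' is assumed to be an open KGSL
 * device handle and countable 0 is an arbitrary example value.
 *
 *	struct kgsl_perfcounter_get get = {
 *		.groupid = KGSL_PERFCOUNTER_GROUP_SP,
 *		.countable = 0,
 *	};
 *	struct kgsl_perfcounter_read_group sample = {
 *		.groupid = KGSL_PERFCOUNTER_GROUP_SP,
 *		.countable = 0,
 *	};
 *	struct kgsl_perfcounter_read read = {
 *		.reads = &sample,
 *		.count = 1,
 *	};
 *	struct kgsl_perfcounter_put put = {
 *		.groupid = KGSL_PERFCOUNTER_GROUP_SP,
 *		.countable = 0,
 *	};
 *
 *	if (ioctl(fd, IOCTL_KGSL_PERFCOUNTER_GET, &get) == 0) {
 *		ioctl(fd, IOCTL_KGSL_PERFCOUNTER_READ, &read);
 *		// sample.value now holds the 64-bit counter value
 *		ioctl(fd, IOCTL_KGSL_PERFCOUNTER_PUT, &put);
 *	}
 */
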
971/*
972 * struct kgsl_gpumem_sync_cache_bulk - argument to
973 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
974 * @id_list: list of GPU buffer ids of the buffers to sync
975 * @count: number of GPU buffer ids in id_list
976 * @op: a mask of KGSL_GPUMEM_CACHE_* values
977 *
978 * Sync the cache for memory headed to and from the GPU. Certain
979 * optimizations can be made on the cache operation based on the total
980 * size of the working set of memory to be managed.
981 */
982struct kgsl_gpumem_sync_cache_bulk {
983	unsigned int __user *id_list;
984	unsigned int count;
985	unsigned int op;
986/* private: reserved for future use */
987	unsigned int __pad[2]; /* For future binary compatibility */
988};
989
990#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
991	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
992
993/*
994 * struct kgsl_cmd_syncpoint_timestamp
995 * @context_id: ID of a KGSL context
996 * @timestamp: GPU timestamp
997 *
998 * This structure defines a syncpoint comprising a context/timestamp pair. A
 * list of these may be passed to IOCTL_KGSL_SUBMIT_COMMANDS to define
 * dependencies that must be met before the command can be submitted to the
 * hardware.
1002 */
1003struct kgsl_cmd_syncpoint_timestamp {
1004	unsigned int context_id;
1005	unsigned int timestamp;
1006};
1007
1008struct kgsl_cmd_syncpoint_fence {
1009	int fd;
1010};
1011
1012/**
1013 * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
1014 * @type: type of sync point defined here
1015 * @priv: Pointer to the type specific buffer
1016 * @size: Size of the type specific buffer
1017 *
1018 * This structure contains pointers defining a specific command sync point.
1019 * The pointer and size should point to a type appropriate structure.
1020 */
1021struct kgsl_cmd_syncpoint {
1022	int type;
1023	void __user *priv;
1024	size_t size;
1025};
1026
1027/* Flag to indicate that the cmdlist may contain memlists */
1028#define KGSL_IBDESC_MEMLIST 0x1
1029
/* Flag to identify the cmdbatch profiling buffer in the memlist */
1031#define KGSL_IBDESC_PROFILING_BUFFER 0x2
1032
1033/**
1034 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
 * @context_id: KGSL context ID that owns the commands
 * @flags: Mask of KGSL_CMDBATCH_* flags for this submission
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry, a user defined timestamp; on exit, the timestamp
 * assigned to the command batch
 *
 * This structure specifies a command to send to the GPU hardware.  This is
 * similar to IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS except that it doesn't support
 * the legacy way to submit IB lists and it adds sync points to block the IB
 * until the dependencies are satisfied.  This entry point is the new and
 * preferred way to submit commands to the GPU. The memory list can be used to
 * specify all memory that is referenced in the current set of commands.
1050 */
1051
1052struct kgsl_submit_commands {
1053	unsigned int context_id;
1054	unsigned int flags;
1055	struct kgsl_ibdesc __user *cmdlist;
1056	unsigned int numcmds;
1057	struct kgsl_cmd_syncpoint __user *synclist;
1058	unsigned int numsyncs;
1059	unsigned int timestamp;
1060/* private: reserved for future use */
1061	unsigned int __pad[4];
1062};
1063
1064#define IOCTL_KGSL_SUBMIT_COMMANDS \
1065	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
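
/*
 * Illustrative sketch (not part of the UAPI): submitting a single IB that
 * waits on a sync fence before executing.  'fd', 'ctxt', 'ib_gpuaddr',
 * 'ib_dwords' and 'in_fence_fd' are assumptions for the example.
 *
 *	struct kgsl_ibdesc ib = {
 *		.gpuaddr = ib_gpuaddr,
 *		.sizedwords = ib_dwords,
 *	};
 *	struct kgsl_cmd_syncpoint_fence fence = { .fd = in_fence_fd };
 *	struct kgsl_cmd_syncpoint sync = {
 *		.type = KGSL_CMD_SYNCPOINT_TYPE_FENCE,
 *		.priv = &fence,
 *		.size = sizeof(fence),
 *	};
 *	struct kgsl_submit_commands submit = {
 *		.context_id = ctxt,
 *		.flags = 0,
 *		.cmdlist = &ib,
 *		.numcmds = 1,
 *		.synclist = &sync,
 *		.numsyncs = 1,
 *	};
 *
 *	if (ioctl(fd, IOCTL_KGSL_SUBMIT_COMMANDS, &submit) == 0) {
 *		// submit.timestamp is the timestamp assigned to this batch
 *	}
 */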
1066
1067/**
1068 * struct kgsl_device_constraint - device constraint argument
 * @type: type of constraint, e.g. pwrlevel/none
 * @context_id: KGSL context ID
 * @data: constraint data
1072 * @size: size of the constraint data
1073 */
1074struct kgsl_device_constraint {
1075	unsigned int type;
1076	unsigned int context_id;
1077	void __user *data;
1078	size_t size;
1079};
1080
1081/* Constraint Type*/
1082#define KGSL_CONSTRAINT_NONE 0
1083#define KGSL_CONSTRAINT_PWRLEVEL 1
1084
1085/* PWRLEVEL constraint level*/
1086/* set to min frequency */
1087#define KGSL_CONSTRAINT_PWR_MIN    0
1088/* set to max frequency */
1089#define KGSL_CONSTRAINT_PWR_MAX    1
1090
1091struct kgsl_device_constraint_pwrlevel {
1092	unsigned int level;
1093};
1094
1095/**
1096 * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
1097 * @id: returned id for the syncsource that was created.
1098 *
1099 * This ioctl creates a userspace sync timeline.
1100 */
1101
1102struct kgsl_syncsource_create {
1103	unsigned int id;
1104/* private: reserved for future use */
1105	unsigned int __pad[3];
1106};
1107
1108#define IOCTL_KGSL_SYNCSOURCE_CREATE \
1109	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
1110
1111/**
1112 * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
1113 * @id: syncsource id to destroy
1114 *
 * This ioctl destroys a userspace sync timeline.
1116 */
1117
1118struct kgsl_syncsource_destroy {
1119	unsigned int id;
1120/* private: reserved for future use */
1121	unsigned int __pad[3];
1122};
1123
1124#define IOCTL_KGSL_SYNCSOURCE_DESTROY \
1125	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)
1126
1127/**
1128 * struct kgsl_syncsource_create_fence - Argument to
1129 *     IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
1130 * @id: syncsource id
1131 * @fence_fd: returned sync_fence fd
1132 *
1133 * Create a fence that may be signaled by userspace by calling
1134 * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
1135 * these fences.
1136 */
1137struct kgsl_syncsource_create_fence {
1138	unsigned int id;
1139	int fence_fd;
1140/* private: reserved for future use */
1141	unsigned int __pad[4];
1142};
1143
1144/**
1145 * struct kgsl_syncsource_signal_fence - Argument to
1146 *     IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
1147 * @id: syncsource id
1148 * @fence_fd: sync_fence fd to signal
1149 *
1150 * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
 * call using the same syncsource id. This allows a fence to be shared
 * with other processes but only signaled by the process owning the fd
 * used to create the fence.
1154 */
1155#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
1156	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)
1157
1158struct kgsl_syncsource_signal_fence {
1159	unsigned int id;
1160	int fence_fd;
1161/* private: reserved for future use */
1162	unsigned int __pad[4];
1163};
1164
1165#define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
1166	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1167
1168/**
1169 * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
1170 * @offset: Offset into the GPU object to sync
1171 * @length: Number of bytes to sync
1172 * @id: ID of the GPU object to sync
1173 */
1174struct kgsl_cff_sync_gpuobj {
1175	uint64_t offset;
1176	uint64_t length;
1177	unsigned int id;
1178};
1179
1180#define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
1181	_IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)
1182
1183/**
1184 * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
1185 * @size: Size in bytes of the object to allocate
 * @flags: mask of KGSL_MEMFLAGS_* bits
1187 * @va_len: Size in bytes of the virtual region to allocate
1188 * @mmapsize: Returns the mmap() size of the object
1189 * @id: Returns the GPU object ID of the new object
 * @metadata_len: Length of the metadata to copy from the user
1191 * @metadata: Pointer to the user specified metadata to store for the object
1192 */
1193struct kgsl_gpuobj_alloc {
1194	uint64_t size;
1195	uint64_t flags;
1196	uint64_t va_len;
1197	uint64_t mmapsize;
1198	unsigned int id;
1199	unsigned int metadata_len;
1200	uint64_t metadata;
1201};
1202
1203/* Let the user know that this header supports the gpuobj metadata */
1204#define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64
1205
1206#define IOCTL_KGSL_GPUOBJ_ALLOC \
1207	_IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)
1208
1209/**
 * struct kgsl_gpuobj_free - Argument to IOCTL_KGSL_GPUOBJ_FREE
 * @flags: Mask of KGSL_GPUOBJ_FREE_ON_EVENT
1212 * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
1213 * specified
1214 * @id: ID of the GPU object to free
1215 * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
1216 * event to free on
1217 * @len: Length of the data passed in priv
1218 */
1219struct kgsl_gpuobj_free {
1220	uint64_t flags;
1221	uint64_t __user priv;
1222	unsigned int id;
1223	unsigned int type;
1224	unsigned int len;
1225};
1226
1227#define KGSL_GPUOBJ_FREE_ON_EVENT 1
1228
1229#define KGSL_GPU_EVENT_TIMESTAMP 1
1230#define KGSL_GPU_EVENT_FENCE     2
1231
1232/**
1233 * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
1234 * object on
 * @context_id: ID of the context whose timestamp to wait for
 * @timestamp: Timestamp to wait for on the given context
1237 */
1238struct kgsl_gpu_event_timestamp {
1239	unsigned int context_id;
1240	unsigned int timestamp;
1241};
1242
1243/**
 * struct kgsl_gpu_event_fence - Specifies a fence ID to free a GPU object on
1245 * @fd: File descriptor for the fence
1246 */
1247struct kgsl_gpu_event_fence {
1248	int fd;
1249};
1250
1251#define IOCTL_KGSL_GPUOBJ_FREE \
1252	_IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
1253
1254/**
1255 * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
1256 * @gpuaddr: GPU address of the object
1257 * @flags: Current flags for the object
1258 * @size: Size of the object
1259 * @va_len: VA size of the object
1260 * @va_addr: Virtual address of the object (if it is mapped)
 * @id: GPU object ID of the object to query
1262 */
1263struct kgsl_gpuobj_info {
1264	uint64_t gpuaddr;
1265	uint64_t flags;
1266	uint64_t size;
1267	uint64_t va_len;
1268	uint64_t va_addr;
1269	unsigned int id;
1270};
1271
1272#define IOCTL_KGSL_GPUOBJ_INFO \
1273	_IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)
1274
1275/**
1276 * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
1277 * @priv: Pointer to the private data for the import type
1278 * @priv_len: Length of the private data
1279 * @flags: Mask of KGSL_MEMFLAG_ flags
1280 * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
1281 * @id: Returns the ID of the new GPU object
1282 */
1283struct kgsl_gpuobj_import {
1284	uint64_t __user priv;
1285	uint64_t priv_len;
1286	uint64_t flags;
1287	unsigned int type;
1288	unsigned int id;
1289};
1290
1291/**
1292 * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
1293 * @fd: File descriptor for the dma-buf object
1294 */
1295struct kgsl_gpuobj_import_dma_buf {
1296	int fd;
1297};
1298
1299/**
1300 * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
1301 * @virtaddr: Virtual address of the object to import
1302 */
1303struct kgsl_gpuobj_import_useraddr {
1304	uint64_t virtaddr;
1305};
1306
1307#define IOCTL_KGSL_GPUOBJ_IMPORT \
1308	_IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)
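
/*
 * Illustrative sketch (not part of the UAPI): importing a dma-buf into the
 * GPU address space with the 64-bit safe object API.  'fd' is an open KGSL
 * device handle and 'dmabuf_fd' is a dma-buf file descriptor exported by
 * another driver; both are assumptions for the example.
 *
 *	struct kgsl_gpuobj_import_dma_buf buf = { .fd = dmabuf_fd };
 *	struct kgsl_gpuobj_import import = {
 *		.priv = (uint64_t)(uintptr_t)&buf,
 *		.priv_len = sizeof(buf),
 *		.flags = 0,
 *		.type = KGSL_USER_MEM_TYPE_DMABUF,
 *	};
 *
 *	if (ioctl(fd, IOCTL_KGSL_GPUOBJ_IMPORT, &import) == 0) {
 *		// import.id identifies the new GPU object; it can be
 *		// queried with IOCTL_KGSL_GPUOBJ_INFO or released with
 *		// IOCTL_KGSL_GPUOBJ_FREE
 *	}
 */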
1309
1310/**
1311 * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
1312 * @offset: Offset within the GPU object to sync
1313 * @length: Number of bytes to sync
1314 * @id: ID of the GPU object to sync
1315 * @op: Cache operation to execute
1316 */
1317
1318struct kgsl_gpuobj_sync_obj {
1319	uint64_t offset;
1320	uint64_t length;
1321	unsigned int id;
1322	unsigned int op;
1323};
1324
1325/**
1326 * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
1327 * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
1328 * @obj_len: Size of each item in the array
1329 * @count: Number of items in the array
1330 */
1331
1332struct kgsl_gpuobj_sync {
1333	uint64_t __user objs;
1334	unsigned int obj_len;
1335	unsigned int count;
1336};
1337
1338#define IOCTL_KGSL_GPUOBJ_SYNC \
1339	_IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)
1340
1341/**
1342 * struct kgsl_command_object - GPU command object
1343 * @offset: GPU address offset of the object
1344 * @gpuaddr: GPU address of the object
1345 * @size: Size of the object
1346 * @flags: Current flags for the object
 * @id: GPU command object ID
1348 */
1349struct kgsl_command_object {
1350	uint64_t offset;
1351	uint64_t gpuaddr;
1352	uint64_t size;
1353	unsigned int flags;
1354	unsigned int id;
1355};
1356
1357/**
1358 * struct kgsl_command_syncpoint - GPU syncpoint object
1359 * @priv: Pointer to the type specific buffer
1360 * @size: Size of the type specific buffer
1361 * @type: type of sync point defined here
1362 */
1363struct kgsl_command_syncpoint {
1364	uint64_t __user priv;
1365	uint64_t size;
1366	unsigned int type;
1367};
1368
1369/**
 * struct kgsl_gpu_command - Argument for IOCTL_KGSL_GPU_COMMAND
 * @flags: Current flags for the submission
 * @cmdlist: List of kgsl_command_objects for submission
 * @cmdsize: Size of each kgsl_command_object structure in cmdlist
 * @numcmds: Number of kgsl_command_objects in the command list
 * @objlist: List of kgsl_command_objects for tracking
 * @objsize: Size of each kgsl_command_object structure in objlist
 * @numobjs: Number of kgsl_command_objects in the object list
 * @synclist: List of kgsl_command_syncpoints
 * @syncsize: Size of each kgsl_command_syncpoint structure
 * @numsyncs: Number of kgsl_command_syncpoints in the syncpoint list
 * @context_id: Context ID submitting the kgsl_gpu_command
1382 * @timestamp: Timestamp for the submitted commands
1383 */
1384struct kgsl_gpu_command {
1385	uint64_t flags;
1386	uint64_t __user cmdlist;
1387	unsigned int cmdsize;
1388	unsigned int numcmds;
1389	uint64_t __user objlist;
1390	unsigned int objsize;
1391	unsigned int numobjs;
1392	uint64_t __user synclist;
1393	unsigned int syncsize;
1394	unsigned int numsyncs;
1395	unsigned int context_id;
1396	unsigned int timestamp;
1397};
1398
1399#define IOCTL_KGSL_GPU_COMMAND \
1400	_IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
1401
1402/**
1403 * struct kgsl_preemption_counters_query - argument to
1404 * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
1405 * @counters: Return preemption counters array
1406 * @size_user: Size allocated by userspace
1407 * @size_priority_level: Size of preemption counters for each
1408 * priority level
1409 * @max_priority_level: Return max number of priority levels
1410 *
1411 * Query the available preemption counters. The array counters
1412 * is used to return preemption counters. The size of the array
1413 * is passed in so the kernel will only write at most size_user
1414 * or max available preemption counters.  The total number of
1415 * preemption counters is returned in max_priority_level. If the
 * array or size passed in are invalid, then an error is
 * returned.
1418 */
1419struct kgsl_preemption_counters_query {
1420	uint64_t __user counters;
1421	unsigned int size_user;
1422	unsigned int size_priority_level;
1423	unsigned int max_priority_level;
1424};
1425
1426#define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
1427	_IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)
1428
1429/**
1430 * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
 * @flags: Flags to indicate which parameters to change
1432 * @metadata:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
1433 * metadata
1434 * @id: GPU memory object ID to change
1435 * @metadata_len:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
1436 * new metadata string
1437 * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
1438 */
1439
1440#define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
1441#define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)
1442
1443struct kgsl_gpuobj_set_info {
1444	uint64_t flags;
1445	uint64_t metadata;
1446	unsigned int id;
1447	unsigned int metadata_len;
1448	unsigned int type;
1449};
1450
1451#define IOCTL_KGSL_GPUOBJ_SET_INFO \
1452	_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
1453
1454#endif /* _UAPI_MSM_KGSL_H */
1455