1#ifndef _MSM_KGSL_H
2#define _MSM_KGSL_H
3
4/*
5 * The KGSL version has proven not to be very useful in userspace if features
6 * are cherry picked into other trees out of order so it is frozen as of 3.14.
7 * It is left here for backwards compatabilty and as a reminder that
8 * software releases are never linear. Also, I like pie.
9 */
10
11#define KGSL_VERSION_MAJOR        3
12#define KGSL_VERSION_MINOR        14
13
14/*context flags */
15#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
16#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
17#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
18#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
19#define KGSL_CONTEXT_PREAMBLE		0x00000010
20#define KGSL_CONTEXT_TRASH_STATE	0x00000020
21#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
22#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
23#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
24
25#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
26#define KGSL_CONTEXT_SYNC               0x00000400
27/* bits [12:15] are reserved for future use */
28#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
29#define KGSL_CONTEXT_TYPE_SHIFT         20
30
31#define KGSL_CONTEXT_TYPE_ANY		0
32#define KGSL_CONTEXT_TYPE_GL		1
33#define KGSL_CONTEXT_TYPE_CL		2
34#define KGSL_CONTEXT_TYPE_C2D		3
35#define KGSL_CONTEXT_TYPE_RS		4
36
37#define KGSL_CONTEXT_INVALID 0xffffffff
38
39/* --- Memory allocation flags --- */
40
41/* General allocation hints */
42#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
43#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000
44
45/* Memory caching hints */
46#define KGSL_CACHEMODE_MASK 0x0C000000
47#define KGSL_CACHEMODE_SHIFT 26
48
49#define KGSL_CACHEMODE_WRITECOMBINE 0
50#define KGSL_CACHEMODE_UNCACHED 1
51#define KGSL_CACHEMODE_WRITETHROUGH 2
52#define KGSL_CACHEMODE_WRITEBACK 3
53
54/* Memory types for which allocations are made */
55#define KGSL_MEMTYPE_MASK		0x0000FF00
56#define KGSL_MEMTYPE_SHIFT		8
57
58#define KGSL_MEMTYPE_OBJECTANY			0
59#define KGSL_MEMTYPE_FRAMEBUFFER		1
60#define KGSL_MEMTYPE_RENDERBUFFER		2
61#define KGSL_MEMTYPE_ARRAYBUFFER		3
62#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
63#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
64#define KGSL_MEMTYPE_TEXTURE			6
65#define KGSL_MEMTYPE_SURFACE			7
66#define KGSL_MEMTYPE_EGL_SURFACE		8
67#define KGSL_MEMTYPE_GL				9
68#define KGSL_MEMTYPE_CL				10
69#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
70#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
71#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
72#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
73#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
74#define KGSL_MEMTYPE_COMMAND			16
75#define KGSL_MEMTYPE_2D				17
76#define KGSL_MEMTYPE_EGL_IMAGE			18
77#define KGSL_MEMTYPE_EGL_SHADOW			19
78#define KGSL_MEMTYPE_MULTISAMPLE		20
79#define KGSL_MEMTYPE_KERNEL			255
80
81/*
82 * Alignment hint, passed as the power of 2 exponent.
83 * i.e 4k (2^12) would be 12, 64k (2^16)would be 16.
84 */
85#define KGSL_MEMALIGN_MASK		0x00FF0000
86#define KGSL_MEMALIGN_SHIFT		16
87
88/* --- generic KGSL flag values --- */
89
90#define KGSL_FLAGS_NORMALMODE  0x00000000
91#define KGSL_FLAGS_SAFEMODE    0x00000001
92#define KGSL_FLAGS_INITIALIZED0 0x00000002
93#define KGSL_FLAGS_INITIALIZED 0x00000004
94#define KGSL_FLAGS_STARTED     0x00000008
95#define KGSL_FLAGS_ACTIVE      0x00000010
96#define KGSL_FLAGS_RESERVED0   0x00000020
97#define KGSL_FLAGS_RESERVED1   0x00000040
98#define KGSL_FLAGS_RESERVED2   0x00000080
99#define KGSL_FLAGS_SOFT_RESET  0x00000100
100#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
101
102/* Clock flags to show which clocks should be controled by a given platform */
103#define KGSL_CLK_SRC	0x00000001
104#define KGSL_CLK_CORE	0x00000002
105#define KGSL_CLK_IFACE	0x00000004
106#define KGSL_CLK_MEM	0x00000008
107#define KGSL_CLK_MEM_IFACE 0x00000010
108#define KGSL_CLK_AXI	0x00000020
109
110/* Server Side Sync Timeout in milliseconds */
111#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
112
113/*
114 * Reset status values for context
115 */
116enum kgsl_ctx_reset_stat {
117	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
118	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
119	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
120	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
121};
122
123#define KGSL_CONVERT_TO_MBPS(val) \
124	(val*1000*1000U)
125
126/* device id */
127enum kgsl_deviceid {
128	KGSL_DEVICE_3D0		= 0x00000000,
129	KGSL_DEVICE_2D0		= 0x00000001,
130	KGSL_DEVICE_2D1		= 0x00000002,
131	KGSL_DEVICE_MAX		= 0x00000003
132};
133
134enum kgsl_user_mem_type {
135	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
136	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
137	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
138	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
139	KGSL_USER_MEM_TYPE_MAX		= 0x00000004,
140};
141
142struct kgsl_devinfo {
143
144	unsigned int device_id;
145	/* chip revision id
146	* coreid:8 majorrev:8 minorrev:8 patch:8
147	*/
148	unsigned int chip_id;
149	unsigned int mmu_enabled;
150	unsigned int gmem_gpubaseaddr;
151	/*
152	* This field contains the adreno revision
153	* number 200, 205, 220, etc...
154	*/
155	unsigned int gpu_id;
156	unsigned int gmem_sizebytes;
157};
158
159/* this structure defines the region of memory that can be mmap()ed from this
160   driver. The timestamp fields are volatile because they are written by the
161   GPU
162*/
163struct kgsl_devmemstore {
164	volatile unsigned int soptimestamp;
165	unsigned int sbz;
166	volatile unsigned int eoptimestamp;
167	unsigned int sbz2;
168	volatile unsigned int ts_cmp_enable;
169	unsigned int sbz3;
170	volatile unsigned int ref_wait_ts;
171	unsigned int sbz4;
172	unsigned int current_context;
173	unsigned int sbz5;
174};
175
176#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
177	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
178	 offsetof(struct kgsl_devmemstore, field))
179
180/* timestamp id*/
181enum kgsl_timestamp_type {
182	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
183	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp*/
184	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
185};
186
187/* property types - used with kgsl_device_getproperty */
188enum kgsl_property_type {
189	KGSL_PROP_DEVICE_INFO     = 0x00000001,
190	KGSL_PROP_DEVICE_SHADOW   = 0x00000002,
191	KGSL_PROP_DEVICE_POWER    = 0x00000003,
192	KGSL_PROP_SHMEM           = 0x00000004,
193	KGSL_PROP_SHMEM_APERTURES = 0x00000005,
194	KGSL_PROP_MMU_ENABLE 	  = 0x00000006,
195	KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
196	KGSL_PROP_VERSION         = 0x00000008,
197	KGSL_PROP_GPU_RESET_STAT  = 0x00000009,
198	KGSL_PROP_PWRCTRL         = 0x0000000E,
199};
200
201struct kgsl_shadowprop {
202	unsigned int gpuaddr;
203	unsigned int size;
204	unsigned int flags; /* contains KGSL_FLAGS_ values */
205};
206
207struct kgsl_version {
208	unsigned int drv_major;
209	unsigned int drv_minor;
210	unsigned int dev_major;
211	unsigned int dev_minor;
212};
213
214/* Performance counter groups */
215
216#define KGSL_PERFCOUNTER_GROUP_CP 0x0
217#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
218#define KGSL_PERFCOUNTER_GROUP_PC 0x2
219#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
220#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
221#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
222#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
223#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
224#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
225#define KGSL_PERFCOUNTER_GROUP_TP 0x9
226#define KGSL_PERFCOUNTER_GROUP_SP 0xA
227#define KGSL_PERFCOUNTER_GROUP_RB 0xB
228#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
229#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
230#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
231
232#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
233#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
234
235/* structure holds list of ibs */
236struct kgsl_ibdesc {
237	unsigned int gpuaddr;
238	void *hostptr;
239	unsigned int sizedwords;
240	unsigned int ctrl;
241};
242
243/* ioctls */
244#define KGSL_IOC_TYPE 0x09
245
246/* get misc info about the GPU
247   type should be a value from enum kgsl_property_type
248   value points to a structure that varies based on type
249   sizebytes is sizeof() that structure
250   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
251   this structure contaings hardware versioning info.
252   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
253   this is used to find mmap() offset and sizes for mapping
254   struct kgsl_memstore into userspace.
255*/
256struct kgsl_device_getproperty {
257	unsigned int type;
258	void  *value;
259	unsigned int sizebytes;
260};
261
262#define IOCTL_KGSL_DEVICE_GETPROPERTY \
263	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
264
265/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
266 */
267
268/* block until the GPU has executed past a given timestamp
269 * timeout is in milliseconds.
270 */
271struct kgsl_device_waittimestamp {
272	unsigned int timestamp;
273	unsigned int timeout;
274};
275
276#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
277	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)
278
279struct kgsl_device_waittimestamp_ctxtid {
280	unsigned int context_id;
281	unsigned int timestamp;
282	unsigned int timeout;
283};
284
285#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
286	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
287
288/* DEPRECATED: issue indirect commands to the GPU.
289 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
290 * ibaddr and sizedwords must specify a subset of a buffer created
291 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
292 * flags may be a mask of KGSL_CONTEXT_ values
293 * timestamp is a returned counter value which can be passed to
294 * other ioctls to determine when the commands have been executed by
295 * the GPU.
296 *
297 * This fucntion is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
298 * instead
299 */
300struct kgsl_ringbuffer_issueibcmds {
301	unsigned int drawctxt_id;
302	unsigned int ibdesc_addr;
303	unsigned int numibs;
304	unsigned int timestamp; /*output param */
305	unsigned int flags;
306};
307
308#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
309	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
310
311/* read the most recently executed timestamp value
312 * type should be a value from enum kgsl_timestamp_type
313 */
314struct kgsl_cmdstream_readtimestamp {
315	unsigned int type;
316	unsigned int timestamp; /*output param */
317};
318
319#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
320	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
321
322#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
323	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
324
325/* free memory when the GPU reaches a given timestamp.
326 * gpuaddr specify a memory region created by a
327 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
328 * type should be a value from enum kgsl_timestamp_type
329 */
330struct kgsl_cmdstream_freememontimestamp {
331	unsigned int gpuaddr;
332	unsigned int type;
333	unsigned int timestamp;
334};
335
336#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
337	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
338
339/* Previous versions of this header had incorrectly defined
340   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
341   of a write only ioctl.  To ensure binary compatability, the following
342   #define will be used to intercept the incorrect ioctl
343*/
344
345#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
346	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
347
348/* create a draw context, which is used to preserve GPU state.
349 * The flags field may contain a mask KGSL_CONTEXT_*  values
350 */
351struct kgsl_drawctxt_create {
352	unsigned int flags;
353	unsigned int drawctxt_id; /*output param */
354};
355
356#define IOCTL_KGSL_DRAWCTXT_CREATE \
357	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
358
359/* destroy a draw context */
360struct kgsl_drawctxt_destroy {
361	unsigned int drawctxt_id;
362};
363
364#define IOCTL_KGSL_DRAWCTXT_DESTROY \
365	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
366
367/* add a block of pmem, fb, ashmem or user allocated address
368 * into the GPU address space */
369struct kgsl_map_user_mem {
370	int fd;
371	unsigned int gpuaddr;   /*output param */
372	unsigned int len;
373	unsigned int offset;
374	unsigned int hostptr;   /*input param */
375	enum kgsl_user_mem_type memtype;
376	unsigned int flags;
377};
378
379#define IOCTL_KGSL_MAP_USER_MEM \
380	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
381
382struct kgsl_cmdstream_readtimestamp_ctxtid {
383	unsigned int context_id;
384	unsigned int type;
385	unsigned int timestamp; /*output param */
386};
387
388#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
389	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
390
391struct kgsl_cmdstream_freememontimestamp_ctxtid {
392	unsigned int context_id;
393	unsigned int gpuaddr;
394	unsigned int type;
395	unsigned int timestamp;
396};
397
398#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
399	_IOW(KGSL_IOC_TYPE, 0x17, \
400	struct kgsl_cmdstream_freememontimestamp_ctxtid)
401
402/* add a block of pmem or fb into the GPU address space */
403struct kgsl_sharedmem_from_pmem {
404	int pmem_fd;
405	unsigned int gpuaddr;	/*output param */
406	unsigned int len;
407	unsigned int offset;
408};
409
410#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
411	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)
412
413/* remove memory from the GPU's address space */
414struct kgsl_sharedmem_free {
415	unsigned int gpuaddr;
416};
417
418#define IOCTL_KGSL_SHAREDMEM_FREE \
419	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
420
421struct kgsl_cff_user_event {
422	unsigned char cff_opcode;
423	unsigned int op1;
424	unsigned int op2;
425	unsigned int op3;
426	unsigned int op4;
427	unsigned int op5;
428	unsigned int __pad[2];
429};
430
431#define IOCTL_KGSL_CFF_USER_EVENT \
432	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
433
434struct kgsl_gmem_desc {
435	unsigned int x;
436	unsigned int y;
437	unsigned int width;
438	unsigned int height;
439	unsigned int pitch;
440};
441
442struct kgsl_buffer_desc {
443	void 			*hostptr;
444	unsigned int	gpuaddr;
445	int				size;
446	unsigned int	format;
447	unsigned int  	pitch;
448	unsigned int  	enabled;
449};
450
451struct kgsl_bind_gmem_shadow {
452	unsigned int drawctxt_id;
453	struct kgsl_gmem_desc gmem_desc;
454	unsigned int shadow_x;
455	unsigned int shadow_y;
456	struct kgsl_buffer_desc shadow_buffer;
457	unsigned int buffer_id;
458};
459
460#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
461    _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
462
463/* add a block of memory into the GPU address space */
464
465/*
466 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
467 * use IOCTL_KGSL_GPUMEM_ALLOC instead
468 */
469
470struct kgsl_sharedmem_from_vmalloc {
471	unsigned int gpuaddr;	/*output param */
472	unsigned int hostptr;
473	unsigned int flags;
474};
475
476#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
477	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
478
479/*
480 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
481 * supports both directions (flush and invalidate). This code will still
482 * work, but by definition it will do a flush of the cache which might not be
483 * what you want to have happen on a buffer following a GPU operation.  It is
484 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
485 */
486
487#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
488	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
489
490struct kgsl_drawctxt_set_bin_base_offset {
491	unsigned int drawctxt_id;
492	unsigned int offset;
493};
494
495#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
496	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
497
498enum kgsl_cmdwindow_type {
499	KGSL_CMDWINDOW_MIN     = 0x00000000,
500	KGSL_CMDWINDOW_2D      = 0x00000000,
501	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
502	KGSL_CMDWINDOW_MMU     = 0x00000002,
503	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
504	KGSL_CMDWINDOW_MAX     = 0x000000FF,
505};
506
507/* write to the command window */
508struct kgsl_cmdwindow_write {
509	enum kgsl_cmdwindow_type target;
510	unsigned int addr;
511	unsigned int data;
512};
513
514#define IOCTL_KGSL_CMDWINDOW_WRITE \
515	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
516
517struct kgsl_gpumem_alloc {
518	unsigned long gpuaddr;
519	size_t size;
520	unsigned int flags;
521};
522
523#define IOCTL_KGSL_GPUMEM_ALLOC \
524	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
525
526struct kgsl_cff_syncmem {
527	unsigned int gpuaddr;
528	unsigned int len;
529	unsigned int __pad[2]; /* For future binary compatibility */
530};
531
532#define IOCTL_KGSL_CFF_SYNCMEM \
533	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
534
535/*
536 * A timestamp event allows the user space to register an action following an
537 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
538 * _IOWR to support fences which need to return a fd for the priv parameter.
539 */
540
541struct kgsl_timestamp_event {
542	int type;                /* Type of event (see list below) */
543	unsigned int timestamp;  /* Timestamp to trigger event on */
544	unsigned int context_id; /* Context for the timestamp */
545	void *priv;              /* Pointer to the event specific blob */
546	size_t len;              /* Size of the event specific blob */
547};
548
549#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
550	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
551
552/* A genlock timestamp event releases an existing lock on timestamp expire */
553
554#define KGSL_TIMESTAMP_EVENT_GENLOCK 1
555
556struct kgsl_timestamp_event_genlock {
557	int handle; /* Handle of the genlock lock to release */
558};
559
560/* A fence timestamp event releases an existing lock on timestamp expire */
561
562#define KGSL_TIMESTAMP_EVENT_FENCE 2
563
564struct kgsl_timestamp_event_fence {
565	int fence_fd; /* Fence to signal */
566};
567
568/*
569 * Set a property within the kernel.  Uses the same structure as
570 * IOCTL_KGSL_GETPROPERTY
571 */
572
573#define IOCTL_KGSL_SETPROPERTY \
574	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)
575
576#define IOCTL_KGSL_TIMESTAMP_EVENT \
577	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
578
579/**
580 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
581 * @id: returned id value for this allocation.
582 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
583 * @size: requested size of the allocation and actual size on return.
584 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
585 * @gpuaddr: returned GPU address for the allocation
586 *
587 * Allocate memory for access by the GPU. The flags and size fields are echoed
588 * back by the kernel, so that the caller can know if the request was
589 * adjusted.
590 *
591 * Supported flags:
592 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
593 * KGSL_MEMTYPE*: usage hint for debugging aid
594 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
595 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
596 * address will be 0. Calling mmap() will set the GPU address.
597 */
598struct kgsl_gpumem_alloc_id {
599	unsigned int id;
600	unsigned int flags;
601	unsigned int size;
602	unsigned int mmapsize;
603	unsigned long gpuaddr;
604/* private: reserved for future use*/
605	unsigned int __pad[2];
606};
607
608#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
609	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
610
611/**
612 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
613 * @id: GPU allocation id to free
614 *
615 * Free an allocation by id, in case a GPU address has not been assigned or
616 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
617 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
618 */
619struct kgsl_gpumem_free_id {
620	unsigned int id;
621/* private: reserved for future use*/
622	unsigned int __pad;
623};
624
625#define IOCTL_KGSL_GPUMEM_FREE_ID \
626	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
627
628/**
629 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
630 * @gpuaddr: GPU address to query. Also set on return.
631 * @id: GPU allocation id to query. Also set on return.
632 * @flags: returned mask of KGSL_MEM* values.
633 * @size: returned size of the allocation.
634 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
635 * @useraddr: returned address of the userspace mapping for this buffer
636 *
637 * This ioctl allows querying of all user visible attributes of an existing
638 * allocation, by either the GPU address or the id returned by a previous
639 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
640 * return all attributes so this ioctl can be used to look them up if needed.
641 *
642 */
643struct kgsl_gpumem_get_info {
644	unsigned long gpuaddr;
645	unsigned int id;
646	unsigned int flags;
647	unsigned int size;
648	unsigned int mmapsize;
649	unsigned long useraddr;
650/* private: reserved for future use*/
651	unsigned int __pad[4];
652};
653
654#define IOCTL_KGSL_GPUMEM_GET_INFO\
655	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
656
657/**
658 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
659 * @gpuaddr: GPU address of the buffer to sync.
660 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
661 * @op: a mask of KGSL_GPUMEM_CACHE_* values
662 *
663 * Sync the L2 cache for memory headed to and from the GPU - this replaces
664 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
665 * directions
666 *
667 */
668struct kgsl_gpumem_sync_cache {
669	unsigned int gpuaddr;
670	unsigned int id;
671	unsigned int op;
672/* private: reserved for future use*/
673	unsigned int __pad[2]; /* For future binary compatibility */
674};
675
676#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
677#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
678
679#define KGSL_GPUMEM_CACHE_INV (1 << 1)
680#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
681
682#define KGSL_GPUMEM_CACHE_FLUSH \
683	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
684
685#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
686	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
687
688/**
689 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
690 * @groupid: Performance counter group ID
691 * @countable: Countable to select within the group
692 * @offset: Return offset of the reserved counter
693 *
694 * Get an available performance counter from a specified groupid.  The offset
695 * of the performance counter will be returned after successfully assigning
696 * the countable to the counter for the specified group.  An error will be
697 * returned and an offset of 0 if the groupid is invalid or there are no
698 * more counters left.  After successfully getting a perfcounter, the user
699 * must call kgsl_perfcounter_put(groupid, contable) when finished with
700 * the perfcounter to clear up perfcounter resources.
701 *
702 */
703struct kgsl_perfcounter_get {
704	unsigned int groupid;
705	unsigned int countable;
706	unsigned int offset;
707/* private: reserved for future use */
708	unsigned int __pad[2]; /* For future binary compatibility */
709};
710
711#define IOCTL_KGSL_PERFCOUNTER_GET \
712	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
713
714/**
715 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
716 * @groupid: Performance counter group ID
717 * @countable: Countable to release within the group
718 *
719 * Put an allocated performance counter to allow others to have access to the
720 * resource that was previously taken.  This is only to be called after
721 * successfully getting a performance counter from kgsl_perfcounter_get().
722 *
723 */
724struct kgsl_perfcounter_put {
725	unsigned int groupid;
726	unsigned int countable;
727/* private: reserved for future use */
728	unsigned int __pad[2]; /* For future binary compatibility */
729};
730
731#define IOCTL_KGSL_PERFCOUNTER_PUT \
732	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
733
734/**
735 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
736 * @groupid: Performance counter group ID
737 * @countable: Return active countables array
738 * @size: Size of active countables array
739 * @max_counters: Return total number counters for the group ID
740 *
741 * Query the available performance counters given a groupid.  The array
742 * *countables is used to return the current active countables in counters.
743 * The size of the array is passed in so the kernel will only write at most
744 * size or counter->size for the group id.  The total number of available
745 * counters for the group ID is returned in max_counters.
746 * If the array or size passed in are invalid, then only the maximum number
747 * of counters will be returned, no data will be written to *countables.
748 * If the groupid is invalid an error code will be returned.
749 *
750 */
751struct kgsl_perfcounter_query {
752	unsigned int groupid;
753	/* Array to return the current countable for up to size counters */
754	unsigned int *countables;
755	unsigned int count;
756	unsigned int max_counters;
757/* private: reserved for future use */
758	unsigned int __pad[2]; /* For future binary compatibility */
759};
760
761#define IOCTL_KGSL_PERFCOUNTER_QUERY \
762	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
763
764/**
765 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
766 * @groupid: Performance counter group IDs
767 * @countable: Performance counter countable IDs
768 * @value: Return performance counter reads
769 * @size: Size of all arrays (groupid/countable pair and return value)
770 *
771 * Read in the current value of a performance counter given by the groupid
772 * and countable.
773 *
774 */
775
776struct kgsl_perfcounter_read_group {
777	unsigned int groupid;
778	unsigned int countable;
779	unsigned long long value;
780};
781
782struct kgsl_perfcounter_read {
783	struct kgsl_perfcounter_read_group *reads;
784	unsigned int count;
785/* private: reserved for future use */
786	unsigned int __pad[2]; /* For future binary compatibility */
787};
788
789#define IOCTL_KGSL_PERFCOUNTER_READ \
790	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
791/*
792 * struct kgsl_gpumem_sync_cache_bulk - argument to
793 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
794 * @id_list: list of GPU buffer ids of the buffers to sync
795 * @count: number of GPU buffer ids in id_list
796 * @op: a mask of KGSL_GPUMEM_CACHE_* values
797 *
798 * Sync the cache for memory headed to and from the GPU. Certain
799 * optimizations can be made on the cache operation based on the total
800 * size of the working set of memory to be managed.
801 */
802struct kgsl_gpumem_sync_cache_bulk {
803	unsigned int *id_list;
804	unsigned int count;
805	unsigned int op;
806/* private: reserved for future use */
807	unsigned int __pad[2]; /* For future binary compatibility */
808};
809
810#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
811	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
812
813/*
814 * struct kgsl_cmd_syncpoint_timestamp
815 * @context_id: ID of a KGSL context
816 * @timestamp: GPU timestamp
817 *
818 * This structure defines a syncpoint comprising a context/timestamp pair. A
819 * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
820 * dependencies that must be met before the command can be submitted to the
821 * hardware
822 */
823struct kgsl_cmd_syncpoint_timestamp {
824	unsigned int context_id;
825	unsigned int timestamp;
826};
827
828#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
829
830struct kgsl_cmd_syncpoint_fence {
831	int fd;
832};
833
834#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
835
836/**
837 * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
838 * @type: type of sync point defined here
839 * @priv: Pointer to the type specific buffer
840 * @size: Size of the type specific buffer
841 *
842 * This structure contains pointers defining a specific command sync point.
843 * The pointer and size should point to a type appropriate structure.
844 */
845struct kgsl_cmd_syncpoint {
846	int type;
847	void __user *priv;
848	unsigned int size;
849};
850
851/**
852 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
853 * @context_id: KGSL context ID that owns the commands
854 * @flags:
855 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
856 * @numcmds: Number of commands listed in cmdlist
857 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
858 * @numsyncs: Number of sync points listed in synclist
859 * @timestamp: On entry the a user defined timestamp, on exist the timestamp
860 * assigned to the command batch
861 *
862 * This structure specifies a command to send to the GPU hardware.  This is
863 * similar to kgsl_issueibcmds expect that it doesn't support the legacy way to
864 * submit IB lists and it adds sync points to block the IB until the
865 * dependencies are satisified.  This entry point is the new and preferred way
866 * to submit commands to the GPU.
867 */
868
869struct kgsl_submit_commands {
870	unsigned int context_id;
871	unsigned int flags;
872	struct kgsl_ibdesc __user *cmdlist;
873	unsigned int numcmds;
874	struct kgsl_cmd_syncpoint __user *synclist;
875	unsigned int numsyncs;
876	unsigned int timestamp;
877/* private: reserved for future use */
878	unsigned int __pad[4];
879};
880
881#define IOCTL_KGSL_SUBMIT_COMMANDS \
882	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
883
884#ifdef __KERNEL__
885#ifdef CONFIG_MSM_KGSL_DRM
886int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
887			unsigned long *len);
888#else
889#define kgsl_gem_obj_addr(...) 0
890#endif
891#endif
892#endif /* _MSM_KGSL_H */
893