/* msm_kgsl.h revision a8dacd55667c4b495110f04338067794a6d50ee2 */
1#ifndef _MSM_KGSL_H
2#define _MSM_KGSL_H
3
4/*
5 * The KGSL version has proven not to be very useful in userspace if features
6 * are cherry picked into other trees out of order so it is frozen as of 3.14.
7 * It is left here for backwards compatabilty and as a reminder that
8 * software releases are never linear. Also, I like pie.
9 */
10
11#define KGSL_VERSION_MAJOR        3
12#define KGSL_VERSION_MINOR        14
13
14/*context flags */
15#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
16#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
17#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
18#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
19#define KGSL_CONTEXT_PREAMBLE		0x00000010
20#define KGSL_CONTEXT_TRASH_STATE	0x00000020
21#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
22#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
23#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
24/* bits [12:15] are reserved for future use */
25#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
26#define KGSL_CONTEXT_TYPE_SHIFT         20
27
28#define KGSL_CONTEXT_TYPE_ANY		0
29#define KGSL_CONTEXT_TYPE_GL		1
30#define KGSL_CONTEXT_TYPE_CL		2
31#define KGSL_CONTEXT_TYPE_C2D		3
32#define KGSL_CONTEXT_TYPE_RS		4
33
34#define KGSL_CONTEXT_INVALID 0xffffffff
35
/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000

/* Memory caching hints */
#define KGSL_CACHEMODE_MASK 0x0C000000
#define KGSL_CACHEMODE_SHIFT 26

#define KGSL_CACHEMODE_WRITECOMBINE 0
#define KGSL_CACHEMODE_UNCACHED 1
#define KGSL_CACHEMODE_WRITETHROUGH 2
#define KGSL_CACHEMODE_WRITEBACK 3

/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_MASK		0x0000FF00
#define KGSL_MEMTYPE_SHIFT		8

#define KGSL_MEMTYPE_OBJECTANY			0
#define KGSL_MEMTYPE_FRAMEBUFFER		1
#define KGSL_MEMTYPE_RENDERBUFFER		2
#define KGSL_MEMTYPE_ARRAYBUFFER		3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
#define KGSL_MEMTYPE_TEXTURE			6
#define KGSL_MEMTYPE_SURFACE			7
#define KGSL_MEMTYPE_EGL_SURFACE		8
#define KGSL_MEMTYPE_GL				9
#define KGSL_MEMTYPE_CL				10
#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
#define KGSL_MEMTYPE_COMMAND			16
#define KGSL_MEMTYPE_2D				17
#define KGSL_MEMTYPE_EGL_IMAGE			18
#define KGSL_MEMTYPE_EGL_SHADOW			19
#define KGSL_MEMTYPE_MULTISAMPLE		20
#define KGSL_MEMTYPE_KERNEL			255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
 */
#define KGSL_MEMALIGN_MASK		0x00FF0000
#define KGSL_MEMALIGN_SHIFT		16

/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE  0x00000000
#define KGSL_FLAGS_SAFEMODE    0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED 0x00000004
#define KGSL_FLAGS_STARTED     0x00000008
#define KGSL_FLAGS_ACTIVE      0x00000010
#define KGSL_FLAGS_RESERVED0   0x00000020
#define KGSL_FLAGS_RESERVED1   0x00000040
#define KGSL_FLAGS_RESERVED2   0x00000080
#define KGSL_FLAGS_SOFT_RESET  0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Clock flags to show which clocks should be controlled by a given platform */
#define KGSL_CLK_SRC	0x00000001
#define KGSL_CLK_CORE	0x00000002
#define KGSL_CLK_IFACE	0x00000004
#define KGSL_CLK_MEM	0x00000008
#define KGSL_CLK_MEM_IFACE 0x00000010
#define KGSL_CLK_AXI	0x00000020

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000

/*
 * Reset status values for context, reported via KGSL_PROP_GPU_RESET_STAT
 */
enum kgsl_ctx_reset_stat {
	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
};

/*
 * Scale a bandwidth value by 1000*1000 (presumably MBps -> bytes/sec;
 * confirm against callers). The argument is parenthesized so that
 * expressions such as KGSL_CONVERT_TO_MBPS(a + b) expand correctly.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)

/* device id */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0		= 0x00000000,
	KGSL_DEVICE_2D0		= 0x00000001,
	KGSL_DEVICE_2D1		= 0x00000002,
	KGSL_DEVICE_MAX		= 0x00000003
};

/* source of a user memory mapping (see struct kgsl_map_user_mem) */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000,
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001,
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002,
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000004,
};

/* hardware description returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	unsigned int device_id;
	/* chip revision id
	* coreid:8 majorrev:8 minorrev:8 patch:8
	*/
	unsigned int chip_id;
	unsigned int mmu_enabled;
	unsigned int gmem_gpubaseaddr;
	/*
	* This field contains the adreno revision
	* number 200, 205, 220, etc...
	*/
	unsigned int gpu_id;
	unsigned int gmem_sizebytes;
};

/* this structure defines the region of memory that can be mmap()ed from this
   driver. The timestamp fields are volatile because they are written by the
   GPU
*/
struct kgsl_devmemstore {
	volatile unsigned int soptimestamp;
	unsigned int sbz;
	volatile unsigned int eoptimestamp;
	unsigned int sbz2;
	volatile unsigned int ts_cmp_enable;
	unsigned int sbz3;
	volatile unsigned int ref_wait_ts;
	unsigned int sbz4;
	unsigned int current_context;
	unsigned int sbz5;
};

/*
 * Byte offset of 'field' within the memstore slot for context 'ctxt_id'.
 * NOTE: uses offsetof(), so <stddef.h> (or the kernel equivalent) must be
 * in scope at the point of use.
 */
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
	 offsetof(struct kgsl_devmemstore, field))

/* timestamp id */
enum kgsl_timestamp_type {
	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp */
	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};

/* property types - used with kgsl_device_getproperty */
enum kgsl_property_type {
	KGSL_PROP_DEVICE_INFO     = 0x00000001,
	KGSL_PROP_DEVICE_SHADOW   = 0x00000002,
	KGSL_PROP_DEVICE_POWER    = 0x00000003,
	KGSL_PROP_SHMEM           = 0x00000004,
	KGSL_PROP_SHMEM_APERTURES = 0x00000005,
	KGSL_PROP_MMU_ENABLE 	  = 0x00000006,
	KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
	KGSL_PROP_VERSION         = 0x00000008,
	KGSL_PROP_GPU_RESET_STAT  = 0x00000009,
	KGSL_PROP_PWRCTRL         = 0x0000000E,
};

/* mmap() offset/size info returned for KGSL_PROP_DEVICE_SHADOW */
struct kgsl_shadowprop {
	unsigned int gpuaddr;
	unsigned int size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};

/* driver/device version info returned for KGSL_PROP_VERSION */
struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};

/* Performance counter groups */

#define KGSL_PERFCOUNTER_GROUP_CP 0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
#define KGSL_PERFCOUNTER_GROUP_PC 0x2
#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
#define KGSL_PERFCOUNTER_GROUP_TP 0x9
#define KGSL_PERFCOUNTER_GROUP_SP 0xA
#define KGSL_PERFCOUNTER_GROUP_RB 0xB
#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE

#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF

/* structure holds list of ibs */
struct kgsl_ibdesc {
	unsigned int gpuaddr;
	void *hostptr;
	unsigned int sizedwords;
	unsigned int ctrl;
};

/* ioctls */
#define KGSL_IOC_TYPE 0x09

/* get misc info about the GPU
   type should be a value from enum kgsl_property_type
   value points to a structure that varies based on type
   sizebytes is sizeof() that structure
   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
   this structure contains hardware versioning info.
   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
   this is used to find mmap() offset and sizes for mapping
   struct kgsl_memstore into userspace.
*/
struct kgsl_device_getproperty {
	unsigned int type;
	void  *value;
	unsigned int sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)

/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

/* as above, but waits on the timestamp of a specific context */
struct kgsl_device_waittimestamp_ctxtid {
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)

/* issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 */
struct kgsl_ringbuffer_issueibcmds {
	unsigned int drawctxt_id;
	unsigned int ibdesc_addr;
	unsigned int numibs;
	unsigned int timestamp; /*output param */
	unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)

/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
	unsigned int type;
	unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specify a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
	unsigned int gpuaddr;
	unsigned int type;
	unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* Previous versions of this header had incorrectly defined
   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
   of a write only ioctl.  To ensure binary compatibility, the following
   #define will be used to intercept the incorrect ioctl
*/

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask KGSL_CONTEXT_*  values
 */
struct kgsl_drawctxt_create {
	unsigned int flags;
	unsigned int drawctxt_id; /*output param */
};

#define IOCTL_KGSL_DRAWCTXT_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)

/* destroy a draw context */
struct kgsl_drawctxt_destroy {
	unsigned int drawctxt_id;
};

#define IOCTL_KGSL_DRAWCTXT_DESTROY \
	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)

360/* add a block of pmem, fb, ashmem or user allocated address
361 * into the GPU address space */
362struct kgsl_map_user_mem {
363	int fd;
364	unsigned int gpuaddr;   /*output param */
365	unsigned int len;
366	unsigned int offset;
367	unsigned int hostptr;   /*input param */
368	enum kgsl_user_mem_type memtype;
369	unsigned int flags;
370};
371
372#define IOCTL_KGSL_MAP_USER_MEM \
373	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
374
375struct kgsl_cmdstream_readtimestamp_ctxtid {
376	unsigned int context_id;
377	unsigned int type;
378	unsigned int timestamp; /*output param */
379};
380
381#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
382	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
383
384struct kgsl_cmdstream_freememontimestamp_ctxtid {
385	unsigned int context_id;
386	unsigned int gpuaddr;
387	unsigned int type;
388	unsigned int timestamp;
389};
390
391#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
392	_IOW(KGSL_IOC_TYPE, 0x17, \
393	struct kgsl_cmdstream_freememontimestamp_ctxtid)
394
/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
	int pmem_fd;
	unsigned int gpuaddr;	/*output param */
	unsigned int len;
	unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
	unsigned int gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)

/* inject a user event into the CFF capture stream */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];
};

#define IOCTL_KGSL_CFF_USER_EVENT \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)

/* rectangular region of GMEM */
struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

/* description of a shadow buffer backing GMEM contents */
struct kgsl_buffer_desc {
	void		*hostptr;
	unsigned int	gpuaddr;
	int		size;
	unsigned int	format;
	unsigned int	pitch;
	unsigned int	enabled;
};

/* bind a shadow buffer to a region of GMEM for a draw context */
struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)

/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
	unsigned int gpuaddr;	/*output param */
	unsigned int hostptr;
	unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation.  It is
 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

/* set the bin base offset for a draw context */
struct kgsl_drawctxt_set_bin_base_offset {
	unsigned int drawctxt_id;
	unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)

enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000,
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
	enum kgsl_cmdwindow_type target;
	unsigned int addr;
	unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)

/* allocate GPU-accessible memory; gpuaddr is returned.
 * NOTE: uses size_t, so <stddef.h> (or kernel equivalent) must be in scope.
 */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr;
	size_t size;
	unsigned int flags;
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

/* sync a memory range into the CFF capture stream */
struct kgsl_cff_syncmem {
	unsigned int gpuaddr;
	unsigned int len;
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)

/*
 * A timestamp event allows the user space to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences which need to return a fd for the priv parameter.
 * NOTE: uses size_t, so <stddef.h> (or kernel equivalent) must be in scope.
 */

struct kgsl_timestamp_event {
	int type;                /* Type of event (see list below) */
	unsigned int timestamp;  /* Timestamp to trigger event on */
	unsigned int context_id; /* Context for the timestamp */
	void *priv;              /* Pointer to the event specific blob */
	size_t len;              /* Size of the event specific blob */
};

#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)

/* A genlock timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_GENLOCK 1

struct kgsl_timestamp_event_genlock {
	int handle; /* Handle of the genlock lock to release */
};

/* A fence timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_FENCE 2

struct kgsl_timestamp_event_fence {
	int fence_fd; /* Fence to signal */
};

/*
 * Set a property within the kernel.  Uses the same structure as
 * IOCTL_KGSL_GETPROPERTY
 */

#define IOCTL_KGSL_SETPROPERTY \
	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#define IOCTL_KGSL_TIMESTAMP_EVENT \
	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)

/**
 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
 * @id: returned id value for this allocation.
 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
 * @size: requested size of the allocation and actual size on return.
 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
 * @gpuaddr: returned GPU address for the allocation
 *
 * Allocate memory for access by the GPU. The flags and size fields are echoed
 * back by the kernel, so that the caller can know if the request was
 * adjusted.
 *
 * Supported flags:
 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
 * KGSL_MEMTYPE*: usage hint for debugging aid
 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
 * address will be 0. Calling mmap() will set the GPU address.
 */
struct kgsl_gpumem_alloc_id {
	unsigned int id;
	unsigned int flags;
	unsigned int size;
	unsigned int mmapsize;
	unsigned long gpuaddr;
/* private: reserved for future use*/
	unsigned int __pad[2];
};

#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)

/**
 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
 * @id: GPU allocation id to free
 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
 */
struct kgsl_gpumem_free_id {
	unsigned int id;
/* private: reserved for future use*/
	unsigned int __pad;
};

#define IOCTL_KGSL_GPUMEM_FREE_ID \
	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)

/**
 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
 * @gpuaddr: GPU address to query. Also set on return.
 * @id: GPU allocation id to query. Also set on return.
 * @flags: returned mask of KGSL_MEM* values.
 * @size: returned size of the allocation.
 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
 * @useraddr: returned address of the userspace mapping for this buffer
 *
 * This ioctl allows querying of all user visible attributes of an existing
 * allocation, by either the GPU address or the id returned by a previous
 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
 * return all attributes so this ioctl can be used to look them up if needed.
 *
 */
struct kgsl_gpumem_get_info {
	unsigned long gpuaddr;
	unsigned int id;
	unsigned int flags;
	unsigned int size;
	unsigned int mmapsize;
	unsigned long useraddr;
/* private: reserved for future use*/
	unsigned int __pad[4];
};

#define IOCTL_KGSL_GPUMEM_GET_INFO \
	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)

/**
 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
 * @gpuaddr: GPU address of the buffer to sync.
 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 *
 * Sync the L2 cache for memory headed to and from the GPU - this replaces
 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
 * directions
 *
 */
struct kgsl_gpumem_sync_cache {
	unsigned int gpuaddr;
	unsigned int id;
	unsigned int op;
/* private: reserved for future use*/
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN

#define KGSL_GPUMEM_CACHE_INV (1 << 1)
#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV

#define KGSL_GPUMEM_CACHE_FLUSH \
	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)

/**
 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
 * @groupid: Performance counter group ID
 * @countable: Countable to select within the group
 * @offset: Return offset of the reserved counter
 *
 * Get an available performance counter from a specified groupid.  The offset
 * of the performance counter will be returned after successfully assigning
 * the countable to the counter for the specified group.  An error will be
 * returned and an offset of 0 if the groupid is invalid or there are no
 * more counters left.  After successfully getting a perfcounter, the user
 * must call kgsl_perfcounter_put(groupid, countable) when finished with
 * the perfcounter to clear up perfcounter resources.
 *
 */
struct kgsl_perfcounter_get {
	unsigned int groupid;
	unsigned int countable;
	unsigned int offset;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_GET \
	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)

/**
 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
 * @groupid: Performance counter group ID
 * @countable: Countable to release within the group
 *
 * Put an allocated performance counter to allow others to have access to the
 * resource that was previously taken.  This is only to be called after
 * successfully getting a performance counter from kgsl_perfcounter_get().
 *
 */
struct kgsl_perfcounter_put {
	unsigned int groupid;
	unsigned int countable;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_PUT \
	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)

/**
 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
 * @groupid: Performance counter group ID
 * @countables: Return active countables array
 * @count: Size of active countables array
 * @max_counters: Return total number counters for the group ID
 *
 * Query the available performance counters given a groupid.  The array
 * *countables is used to return the current active countables in counters.
 * The size of the array is passed in so the kernel will only write at most
 * size or counter->size for the group id.  The total number of available
 * counters for the group ID is returned in max_counters.
 * If the array or size passed in are invalid, then only the maximum number
 * of counters will be returned, no data will be written to *countables.
 * If the groupid is invalid an error code will be returned.
 *
 */
struct kgsl_perfcounter_query {
	unsigned int groupid;
	/* Array to return the current countable for up to count counters */
	unsigned int *countables;
	unsigned int count;
	unsigned int max_counters;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)

/**
 * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
 * @reads: array of groupid/countable pairs to read; values are returned
 *         in each element's 'value' field
 * @count: number of entries in the reads array
 *
 * Read in the current value of a performance counter given by the groupid
 * and countable.
 *
 */

struct kgsl_perfcounter_read_group {
	unsigned int groupid;
	unsigned int countable;
	unsigned long long value;
};

struct kgsl_perfcounter_read {
	struct kgsl_perfcounter_read_group *reads;
	unsigned int count;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_READ \
	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
/*
 * struct kgsl_gpumem_sync_cache_bulk - argument to
 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
 * @id_list: list of GPU buffer ids of the buffers to sync
 * @count: number of GPU buffer ids in id_list
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 *
 * Sync the cache for memory headed to and from the GPU. Certain
 * optimizations can be made on the cache operation based on the total
 * size of the working set of memory to be managed.
 */
struct kgsl_gpumem_sync_cache_bulk {
	unsigned int *id_list;
	unsigned int count;
	unsigned int op;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)

806#ifdef __KERNEL__
807#ifdef CONFIG_MSM_KGSL_DRM
808int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
809			unsigned long *len);
810#else
811#define kgsl_gem_obj_addr(...) 0
812#endif
813#endif
814#endif /* _MSM_KGSL_H */
815