i915_drm.h revision ccfaccd726a369b7df72e251710755233d176e5a
1/*
2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef _UAPI_I915_DRM_H_
28#define _UAPI_I915_DRM_H_
29
30#include "drm.h"
31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
36/* Please note that modifications to all structs defined here are
37 * subject to backwards-compatibility constraints.
38 */
39
40/**
41 * DOC: uevents generated by i915 on its device node
42 *
43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
44 *	event from the gpu l3 cache. Additional information supplied is ROW,
45 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
46 *	track of these events and if a specific cache-line seems to have a
47 *	persistent error remap it with the l3 remapping tool supplied in
48 *	intel-gpu-tools.  The value supplied with the event is always 1.
49 *
50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
51 *	hangcheck. The error detection event is a good indicator of when things
52 *	began to go badly. The value supplied with the event is a 1 upon error
53 *	detection, and a 0 upon reset completion, signifying no more error
54 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
55 *	cause the related events to not be seen.
56 *
57 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
58 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
59 *	reset via module parameter will cause this event to not be seen.
60 */
61#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
62#define I915_ERROR_UEVENT		"ERROR"
63#define I915_RESET_UEVENT		"RESET"
64
65/* Each region is a minimum of 16k, and there are at most 255 of them.
66 */
67#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
68				 * of chars for next/prev indices */
69#define I915_LOG_MIN_TEX_REGION_SIZE 14
70
71typedef struct _drm_i915_init {
72	enum {
73		I915_INIT_DMA = 0x01,
74		I915_CLEANUP_DMA = 0x02,
75		I915_RESUME_DMA = 0x03
76	} func;
77	unsigned int mmio_offset;
78	int sarea_priv_offset;
79	unsigned int ring_start;
80	unsigned int ring_end;
81	unsigned int ring_size;
82	unsigned int front_offset;
83	unsigned int back_offset;
84	unsigned int depth_offset;
85	unsigned int w;
86	unsigned int h;
87	unsigned int pitch;
88	unsigned int pitch_bits;
89	unsigned int back_pitch;
90	unsigned int depth_pitch;
91	unsigned int cpp;
92	unsigned int chipset;
93} drm_i915_init_t;
94
95typedef struct _drm_i915_sarea {
96	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
97	int last_upload;	/* last time texture was uploaded */
98	int last_enqueue;	/* last time a buffer was enqueued */
99	int last_dispatch;	/* age of the most recently dispatched buffer */
100	int ctxOwner;		/* last context to upload state */
101	int texAge;
102	int pf_enabled;		/* is pageflipping allowed? */
103	int pf_active;
104	int pf_current_page;	/* which buffer is being displayed? */
105	int perf_boxes;		/* performance boxes to be displayed */
106	int width, height;      /* screen size in pixels */
107
108	drm_handle_t front_handle;
109	int front_offset;
110	int front_size;
111
112	drm_handle_t back_handle;
113	int back_offset;
114	int back_size;
115
116	drm_handle_t depth_handle;
117	int depth_offset;
118	int depth_size;
119
120	drm_handle_t tex_handle;
121	int tex_offset;
122	int tex_size;
123	int log_tex_granularity;
124	int pitch;
125	int rotation;           /* 0, 90, 180 or 270 */
126	int rotated_offset;
127	int rotated_size;
128	int rotated_pitch;
129	int virtualX, virtualY;
130
131	unsigned int front_tiled;
132	unsigned int back_tiled;
133	unsigned int depth_tiled;
134	unsigned int rotated_tiled;
135	unsigned int rotated2_tiled;
136
137	int pipeA_x;
138	int pipeA_y;
139	int pipeA_w;
140	int pipeA_h;
141	int pipeB_x;
142	int pipeB_y;
143	int pipeB_w;
144	int pipeB_h;
145
146	/* fill out some space for old userspace triple buffer */
147	drm_handle_t unused_handle;
148	__u32 unused1, unused2, unused3;
149
150	/* buffer object handles for static buffers. May change
151	 * over the lifetime of the client.
152	 */
153	__u32 front_bo_handle;
154	__u32 back_bo_handle;
155	__u32 unused_bo_handle;
156	__u32 depth_bo_handle;
157
158} drm_i915_sarea_t;
159
160/* due to userspace building against these headers we need some compat here */
161#define planeA_x pipeA_x
162#define planeA_y pipeA_y
163#define planeA_w pipeA_w
164#define planeA_h pipeA_h
165#define planeB_x pipeB_x
166#define planeB_y pipeB_y
167#define planeB_w pipeB_w
168#define planeB_h pipeB_h
169
170/* Flags for perf_boxes
171 */
172#define I915_BOX_RING_EMPTY    0x1
173#define I915_BOX_FLIP          0x2
174#define I915_BOX_WAIT          0x4
175#define I915_BOX_TEXTURE_LOAD  0x8
176#define I915_BOX_LOST_CONTEXT  0x10
177
178/*
179 * i915 specific ioctls.
180 *
181 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
182 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
183 * against DRM_COMMAND_BASE and should be in the range [0x0, 0x60).
184 */
185#define DRM_I915_INIT		0x00
186#define DRM_I915_FLUSH		0x01
187#define DRM_I915_FLIP		0x02
188#define DRM_I915_BATCHBUFFER	0x03
189#define DRM_I915_IRQ_EMIT	0x04
190#define DRM_I915_IRQ_WAIT	0x05
191#define DRM_I915_GETPARAM	0x06
192#define DRM_I915_SETPARAM	0x07
193#define DRM_I915_ALLOC		0x08
194#define DRM_I915_FREE		0x09
195#define DRM_I915_INIT_HEAP	0x0a
196#define DRM_I915_CMDBUFFER	0x0b
197#define DRM_I915_DESTROY_HEAP	0x0c
198#define DRM_I915_SET_VBLANK_PIPE	0x0d
199#define DRM_I915_GET_VBLANK_PIPE	0x0e
200#define DRM_I915_VBLANK_SWAP	0x0f
201#define DRM_I915_HWS_ADDR	0x11
202#define DRM_I915_GEM_INIT	0x13
203#define DRM_I915_GEM_EXECBUFFER	0x14
204#define DRM_I915_GEM_PIN	0x15
205#define DRM_I915_GEM_UNPIN	0x16
206#define DRM_I915_GEM_BUSY	0x17
207#define DRM_I915_GEM_THROTTLE	0x18
208#define DRM_I915_GEM_ENTERVT	0x19
209#define DRM_I915_GEM_LEAVEVT	0x1a
210#define DRM_I915_GEM_CREATE	0x1b
211#define DRM_I915_GEM_PREAD	0x1c
212#define DRM_I915_GEM_PWRITE	0x1d
213#define DRM_I915_GEM_MMAP	0x1e
214#define DRM_I915_GEM_SET_DOMAIN	0x1f
215#define DRM_I915_GEM_SW_FINISH	0x20
216#define DRM_I915_GEM_SET_TILING	0x21
217#define DRM_I915_GEM_GET_TILING	0x22
218#define DRM_I915_GEM_GET_APERTURE 0x23
219#define DRM_I915_GEM_MMAP_GTT	0x24
220#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
221#define DRM_I915_GEM_MADVISE	0x26
222#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
223#define DRM_I915_OVERLAY_ATTRS	0x28
224#define DRM_I915_GEM_EXECBUFFER2	0x29
225#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
226#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
227#define DRM_I915_GEM_WAIT	0x2c
228#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
229#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
230#define DRM_I915_GEM_SET_CACHING	0x2f
231#define DRM_I915_GEM_GET_CACHING	0x30
232#define DRM_I915_REG_READ		0x31
233#define DRM_I915_GET_RESET_STATS	0x32
234#define DRM_I915_GEM_USERPTR		0x33
235#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
236#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
237
238#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
239#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
240#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
241#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
242#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
243#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
244#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
245#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
246#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
247#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
248#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
249#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
250#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
251#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
252#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
253#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
254#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
255#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
256#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
257#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
258#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
259#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
260#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
261#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
262#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
263#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
264#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
265#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
266#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
267#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
268#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
269#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
270#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
271#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
272#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
273#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
274#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
275#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
276#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
277#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
278#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
279#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
280#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
281#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
282#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
283#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
284#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
285#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
286#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
287#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
288#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
289#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
290
291/* Allow drivers to submit batchbuffers directly to hardware, relying
292 * on the security mechanisms provided by hardware.
293 */
294typedef struct drm_i915_batchbuffer {
295	int start;		/* agp offset */
296	int used;		/* nr bytes in use */
297	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
298	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
299	int num_cliprects;	/* multipass with multiple cliprects? */
300	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
301} drm_i915_batchbuffer_t;
302
303/* As above, but pass a pointer to userspace buffer which can be
304 * validated by the kernel prior to sending to hardware.
305 */
306typedef struct _drm_i915_cmdbuffer {
307	char __user *buf;	/* pointer to userspace command buffer */
308	int sz;			/* nr bytes in buf */
309	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
310	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
311	int num_cliprects;	/* multipass with multiple cliprects? */
312	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
313} drm_i915_cmdbuffer_t;
314
315/* Userspace can request & wait on irq's:
316 */
317typedef struct drm_i915_irq_emit {
318	int __user *irq_seq;
319} drm_i915_irq_emit_t;
320
321typedef struct drm_i915_irq_wait {
322	int irq_seq;
323} drm_i915_irq_wait_t;
324
325/* Ioctl to query kernel params:
326 */
327#define I915_PARAM_IRQ_ACTIVE            1
328#define I915_PARAM_ALLOW_BATCHBUFFER     2
329#define I915_PARAM_LAST_DISPATCH         3
330#define I915_PARAM_CHIPSET_ID            4
331#define I915_PARAM_HAS_GEM               5
332#define I915_PARAM_NUM_FENCES_AVAIL      6
333#define I915_PARAM_HAS_OVERLAY           7
334#define I915_PARAM_HAS_PAGEFLIPPING	 8
335#define I915_PARAM_HAS_EXECBUF2          9
336#define I915_PARAM_HAS_BSD		 10
337#define I915_PARAM_HAS_BLT		 11
338#define I915_PARAM_HAS_RELAXED_FENCING	 12
339#define I915_PARAM_HAS_COHERENT_RINGS	 13
340#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
341#define I915_PARAM_HAS_RELAXED_DELTA	 15
342#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
343#define I915_PARAM_HAS_LLC     	 	 17
344#define I915_PARAM_HAS_ALIASING_PPGTT	 18
345#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
346#define I915_PARAM_HAS_SEMAPHORES	 20
347#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
348#define I915_PARAM_HAS_VEBOX		 22
349#define I915_PARAM_HAS_SECURE_BATCHES	 23
350#define I915_PARAM_HAS_PINNED_BATCHES	 24
351#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
352#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
353#define I915_PARAM_HAS_WT     	 	 27
354#define I915_PARAM_CMD_PARSER_VERSION	 28
355#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
356#define I915_PARAM_MMAP_VERSION          30
357#define I915_PARAM_HAS_BSD2		 31
358#define I915_PARAM_REVISION              32
359#define I915_PARAM_SUBSLICE_TOTAL	 33
360#define I915_PARAM_EU_TOTAL		 34
361#define I915_PARAM_HAS_GPU_RESET	 35
362#define I915_PARAM_HAS_RESOURCE_STREAMER 36
363#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
364
365typedef struct drm_i915_getparam {
366	__s32 param;
367	/*
368	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
369	 * compat32 code. Don't repeat this mistake.
370	 */
371	int __user *value;
372} drm_i915_getparam_t;
373
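/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * querying a driver parameter with DRM_IOCTL_I915_GETPARAM. Assumes "fd" is an
 * already-open i915 DRM file descriptor and that <sys/ioctl.h> and <stdio.h>
 * are included; error handling is omitted for brevity.
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &value,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		printf("PCI device id: 0x%04x\n", value);
 */
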
374/* Ioctl to set kernel params:
375 */
376#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
377#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
378#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
379#define I915_SETPARAM_NUM_USED_FENCES                     4
380
381typedef struct drm_i915_setparam {
382	int param;
383	int value;
384} drm_i915_setparam_t;
385
386/* A memory manager for regions of shared memory:
387 */
388#define I915_MEM_REGION_AGP 1
389
390typedef struct drm_i915_mem_alloc {
391	int region;
392	int alignment;
393	int size;
394	int __user *region_offset;	/* offset from start of fb or agp */
395} drm_i915_mem_alloc_t;
396
397typedef struct drm_i915_mem_free {
398	int region;
399	int region_offset;
400} drm_i915_mem_free_t;
401
402typedef struct drm_i915_mem_init_heap {
403	int region;
404	int size;
405	int start;
406} drm_i915_mem_init_heap_t;
407
408/* Allow memory manager to be torn down and re-initialized (eg on
409 * rotate):
410 */
411typedef struct drm_i915_mem_destroy_heap {
412	int region;
413} drm_i915_mem_destroy_heap_t;
414
415/* Allow X server to configure which pipes to monitor for vblank signals
416 */
417#define	DRM_I915_VBLANK_PIPE_A	1
418#define	DRM_I915_VBLANK_PIPE_B	2
419
420typedef struct drm_i915_vblank_pipe {
421	int pipe;
422} drm_i915_vblank_pipe_t;
423
424/* Schedule buffer swap at given vertical blank:
425 */
426typedef struct drm_i915_vblank_swap {
427	drm_drawable_t drawable;
428	enum drm_vblank_seq_type seqtype;
429	unsigned int sequence;
430} drm_i915_vblank_swap_t;
431
432typedef struct drm_i915_hws_addr {
433	__u64 addr;
434} drm_i915_hws_addr_t;
435
436struct drm_i915_gem_init {
437	/**
438	 * Beginning offset in the GTT to be managed by the DRM memory
439	 * manager.
440	 */
441	__u64 gtt_start;
442	/**
443	 * Ending offset in the GTT to be managed by the DRM memory
444	 * manager.
445	 */
446	__u64 gtt_end;
447};
448
449struct drm_i915_gem_create {
450	/**
451	 * Requested size for the object.
452	 *
453	 * The (page-aligned) allocated size for the object will be returned.
454	 */
455	__u64 size;
456	/**
457	 * Returned handle for the object.
458	 *
459	 * Object handles are nonzero.
460	 */
461	__u32 handle;
462	__u32 pad;
463};
464
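/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * creating a 4 KiB GEM object. "fd" is assumed to be an open i915 DRM fd.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 * On success create.handle names the new object and create.size holds the
 * page-aligned size actually allocated.
 */
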
465struct drm_i915_gem_pread {
466	/** Handle for the object being read. */
467	__u32 handle;
468	__u32 pad;
469	/** Offset into the object to read from */
470	__u64 offset;
471	/** Length of data to read */
472	__u64 size;
473	/**
474	 * Pointer to write the data into.
475	 *
476	 * This is a fixed-size type for 32/64 compatibility.
477	 */
478	__u64 data_ptr;
479};
480
481struct drm_i915_gem_pwrite {
482	/** Handle for the object being written to. */
483	__u32 handle;
484	__u32 pad;
485	/** Offset into the object to write to */
486	__u64 offset;
487	/** Length of data to write */
488	__u64 size;
489	/**
490	 * Pointer to read the data from.
491	 *
492	 * This is a fixed-size type for 32/64 compatibility.
493	 */
494	__u64 data_ptr;
495};
496
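/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * uploading CPU data into an object with DRM_IOCTL_I915_GEM_PWRITE. "fd" and
 * "handle" (from DRM_IOCTL_I915_GEM_CREATE) are assumed; <stdint.h> provides
 * uintptr_t for the pointer-to-u64 conversion.
 *
 *	uint32_t batch[2] = { 0x0A << 23, 0 };   // MI_BATCH_BUFFER_END, noop
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(batch),
 *		.data_ptr = (__u64)(uintptr_t)batch,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */
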
497struct drm_i915_gem_mmap {
498	/** Handle for the object being mapped. */
499	__u32 handle;
500	__u32 pad;
501	/** Offset in the object to map. */
502	__u64 offset;
503	/**
504	 * Length of data to map.
505	 *
506	 * The value will be page-aligned.
507	 */
508	__u64 size;
509	/**
510	 * Returned pointer the data was mapped at.
511	 *
512	 * This is a fixed-size type for 32/64 compatibility.
513	 */
514	__u64 addr_ptr;
515
516	/**
517	 * Flags for extended behaviour.
518	 *
519	 * Added in version 2.
520	 */
521	__u64 flags;
522#define I915_MMAP_WC 0x1
523};
524
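/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * mapping an object into the CPU address space with DRM_IOCTL_I915_GEM_MMAP.
 * The kernel performs the mmap on the caller's behalf and returns the CPU
 * address in addr_ptr; I915_MMAP_WC needs a kernel reporting
 * I915_PARAM_MMAP_VERSION >= 1. "fd", "handle" and "obj_size" are assumed, and
 * <string.h>/<sys/mman.h> are needed for memset/munmap.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size   = obj_size,
 *		.flags  = I915_MMAP_WC,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0) {
 *		void *ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 *		memset(ptr, 0, obj_size);     // CPU access through the WC map
 *		munmap(ptr, obj_size);
 *	}
 */
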
525struct drm_i915_gem_mmap_gtt {
526	/** Handle for the object being mapped. */
527	__u32 handle;
528	__u32 pad;
529	/**
530	 * Fake offset to use for subsequent mmap call
531	 *
532	 * This is a fixed-size type for 32/64 compatibility.
533	 */
534	__u64 offset;
535};
536
537struct drm_i915_gem_set_domain {
538	/** Handle for the object */
539	__u32 handle;
540
541	/** New read domains */
542	__u32 read_domains;
543
544	/** New write domain */
545	__u32 write_domain;
546};
547
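/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * moving an object to the CPU domain before direct CPU reads/writes, so the
 * kernel can flush or invalidate caches as needed. "fd" and "handle" are
 * assumed; the domain flags are defined further down in this header.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */
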
548struct drm_i915_gem_sw_finish {
549	/** Handle for the object */
550	__u32 handle;
551};
552
553struct drm_i915_gem_relocation_entry {
554	/**
555	 * Handle of the buffer being pointed to by this relocation entry.
556	 *
557	 * It's appealing to make this be an index into the mm_validate_entry
558	 * list to refer to the buffer, but this allows the driver to create
559	 * a relocation list for state buffers and not re-write it per
560	 * exec using the buffer.
561	 */
562	__u32 target_handle;
563
564	/**
565	 * Value to be added to the offset of the target buffer to make up
566	 * the relocation entry.
567	 */
568	__u32 delta;
569
570	/** Offset in the buffer the relocation entry will be written into */
571	__u64 offset;
572
573	/**
574	 * Offset value of the target buffer that the relocation entry was last
575	 * written as.
576	 *
577	 * If the buffer has the same offset as last time, we can skip syncing
578	 * and writing the relocation.  This value is written back out by
579	 * the execbuffer ioctl when the relocation is written.
580	 */
581	__u64 presumed_offset;
582
583	/**
584	 * Target memory domains read by this operation.
585	 */
586	__u32 read_domains;
587
588	/**
589	 * Target memory domains written by this operation.
590	 *
591	 * Note that only one domain may be written by the whole
592	 * execbuffer operation, so that where there are conflicts,
593	 * the application will get -EINVAL back.
594	 */
595	__u32 write_domain;
596};
597
598/** @{
599 * Intel memory domains
600 *
601 * Most of these just align with the various caches in
602 * the system and are used to flush and invalidate as
603 * objects end up cached in different domains.
604 */
605/** CPU cache */
606#define I915_GEM_DOMAIN_CPU		0x00000001
607/** Render cache, used by 2D and 3D drawing */
608#define I915_GEM_DOMAIN_RENDER		0x00000002
609/** Sampler cache, used by texture engine */
610#define I915_GEM_DOMAIN_SAMPLER		0x00000004
611/** Command queue, used to load batch buffers */
612#define I915_GEM_DOMAIN_COMMAND		0x00000008
613/** Instruction cache, used by shader programs */
614#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
615/** Vertex address cache */
616#define I915_GEM_DOMAIN_VERTEX		0x00000020
617/** GTT domain - aperture and scanout */
618#define I915_GEM_DOMAIN_GTT		0x00000040
619/** @} */
620
621struct drm_i915_gem_exec_object {
622	/**
623	 * User's handle for a buffer to be bound into the GTT for this
624	 * operation.
625	 */
626	__u32 handle;
627
628	/** Number of relocations to be performed on this buffer */
629	__u32 relocation_count;
630	/**
631	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
632	 * the relocations to be performed in this buffer.
633	 */
634	__u64 relocs_ptr;
635
636	/** Required alignment in graphics aperture */
637	__u64 alignment;
638
639	/**
640	 * Returned value of the updated offset of the object, for future
641	 * presumed_offset writes.
642	 */
643	__u64 offset;
644};
645
646struct drm_i915_gem_execbuffer {
647	/**
648	 * List of buffers to be validated with their relocations to be
649	 * performed on them.
650	 *
651	 * This is a pointer to an array of struct drm_i915_gem_exec_object.
652	 *
653	 * These buffers must be listed in an order such that all relocations
654	 * a buffer is performing refer to buffers that have already appeared
655	 * in the validate list.
656	 */
657	__u64 buffers_ptr;
658	__u32 buffer_count;
659
660	/** Offset in the batchbuffer to start execution from. */
661	__u32 batch_start_offset;
662	/** Bytes used in batchbuffer from batch_start_offset */
663	__u32 batch_len;
664	__u32 DR1;
665	__u32 DR4;
666	__u32 num_cliprects;
667	/** This is a struct drm_clip_rect *cliprects */
668	__u64 cliprects_ptr;
669};
670
671struct drm_i915_gem_exec_object2 {
672	/**
673	 * User's handle for a buffer to be bound into the GTT for this
674	 * operation.
675	 */
676	__u32 handle;
677
678	/** Number of relocations to be performed on this buffer */
679	__u32 relocation_count;
680	/**
681	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
682	 * the relocations to be performed in this buffer.
683	 */
684	__u64 relocs_ptr;
685
686	/** Required alignment in graphics aperture */
687	__u64 alignment;
688
689	/**
690	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
691	 * the user with the GTT offset at which this object will be pinned.
692	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
693	 * presumed_offset of the object.
694	 * During execbuffer2 the kernel populates it with the value of the
695	 * current GTT offset of the object, for future presumed_offset writes.
696	 */
697	__u64 offset;
698
699#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
700#define EXEC_OBJECT_NEEDS_GTT	(1<<1)
701#define EXEC_OBJECT_WRITE	(1<<2)
702#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
703#define EXEC_OBJECT_PINNED	(1<<4)
704#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
705	__u64 flags;
706
707	__u64 rsvd1;
708	__u64 rsvd2;
709};
710
711struct drm_i915_gem_execbuffer2 {
712	/**
713	 * List of gem_exec_object2 structs
714	 */
715	__u64 buffers_ptr;
716	__u32 buffer_count;
717
718	/** Offset in the batchbuffer to start execution from. */
719	__u32 batch_start_offset;
720	/** Bytes used in batchbuffer from batch_start_offset */
721	__u32 batch_len;
722	__u32 DR1;
723	__u32 DR4;
724	__u32 num_cliprects;
725	/** This is a struct drm_clip_rect *cliprects */
726	__u64 cliprects_ptr;
727#define I915_EXEC_RING_MASK              (7<<0)
728#define I915_EXEC_DEFAULT                (0<<0)
729#define I915_EXEC_RENDER                 (1<<0)
730#define I915_EXEC_BSD                    (2<<0)
731#define I915_EXEC_BLT                    (3<<0)
732#define I915_EXEC_VEBOX                  (4<<0)
733
734/* Used for switching the constants addressing mode on gen4+ RENDER ring.
735 * Gen6+ only supports relative addressing to dynamic state (default) and
736 * absolute addressing.
737 *
738 * These flags are ignored for the BSD and BLT rings.
739 */
740#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
741#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
742#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
743#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
744	__u64 flags;
745	__u64 rsvd1; /* now used for context info */
746	__u64 rsvd2;
747};
748
749/** Resets the SO write offset registers for transform feedback on gen7. */
750#define I915_EXEC_GEN7_SOL_RESET	(1<<8)
751
752/** Request a privileged ("secure") batch buffer. Note only available for
753 * DRM_ROOT_ONLY | DRM_MASTER processes.
754 */
755#define I915_EXEC_SECURE		(1<<9)
756
757/** Inform the kernel that the batch is and will always be pinned. This
758 * negates the requirement for a workaround to be performed to avoid
759 * an incoherent CS (such as can be found on 830/845). If this flag is
760 * not passed, the kernel will endeavour to make sure the batch is
761 * coherent with the CS before execution. If this flag is passed,
762 * userspace assumes the responsibility for ensuring the same.
763 */
764#define I915_EXEC_IS_PINNED		(1<<10)
765
766/** Provide a hint to the kernel that the command stream and auxiliary
767 * state buffers already hold the correct presumed addresses and so the
768 * relocation process may be skipped if no buffers need to be moved in
769 * preparation for the execbuffer.
770 */
771#define I915_EXEC_NO_RELOC		(1<<11)
772
773/** Use the reloc.handle as an index into the exec object array rather
774 * than as the per-file handle.
775 */
776#define I915_EXEC_HANDLE_LUT		(1<<12)
777
778/** Used for switching BSD rings on the platforms with two BSD rings */
779#define I915_EXEC_BSD_SHIFT	 (13)
780#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
781/* default ping-pong mode */
782#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
783#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
784#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
785
786/** Tell the kernel that the batchbuffer is processed by
787 *  the resource streamer.
788 */
789#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
790
791#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
792
793#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
794#define i915_execbuffer2_set_context_id(eb2, context) \
795	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
796#define i915_execbuffer2_get_context_id(eb2) \
797	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
798
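/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * submitting a single batch buffer with DRM_IOCTL_I915_GEM_EXECBUFFER2. The
 * batch object carries no relocations here; "fd", "batch_handle",
 * "batch_bytes" and "ctx_id" are assumed.
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = batch_handle,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr        = (__u64)(uintptr_t)&obj,
 *		.buffer_count       = 1,
 *		.batch_start_offset = 0,
 *		.batch_len          = batch_bytes,
 *		.flags              = I915_EXEC_RENDER,
 *	};
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * The batch object must be the last entry in the buffers_ptr array; on return
 * each object's offset field holds its current GTT offset.
 */
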
799struct drm_i915_gem_pin {
800	/** Handle of the buffer to be pinned. */
801	__u32 handle;
802	__u32 pad;
803
804	/** alignment required within the aperture */
805	__u64 alignment;
806
807	/** Returned GTT offset of the buffer. */
808	__u64 offset;
809};
810
811struct drm_i915_gem_unpin {
812	/** Handle of the buffer to be unpinned. */
813	__u32 handle;
814	__u32 pad;
815};
816
817struct drm_i915_gem_busy {
818	/** Handle of the buffer to check for busy */
819	__u32 handle;
820
821	/** Return busy status
822	 *
823	 * A return of 0 implies that the object is idle (after
824	 * having flushed any pending activity), and a non-zero return that
825	 * the object is still in-flight on the GPU. (The GPU has not yet
826	 * signaled completion for all pending requests that reference the
827	 * object.)
828	 *
829	 * The returned dword is split into two fields to indicate both
830	 * the engines on which the object is being read, and the
831	 * engine on which it is currently being written (if any).
832	 *
833	 * The low word (bits 0:15) indicates if the object is being written
834	 * to by any engine (there can only be one, as the GEM implicit
835	 * synchronisation rules force writes to be serialised). Only the
836	 * engine for the last write is reported.
837	 *
838	 * The high word (bits 16:31) is a bitmask of which engines are
839	 * currently reading from the object. Multiple engines may be
840	 * reading from the object simultaneously.
841	 *
842	 * The value of each engine is the same as specified in the
843	 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
844	 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
845	 * the I915_EXEC_RENDER engine for execution, and so it is never
846	 * reported as active itself. Some hardware may have parallel
847	 * execution engines, e.g. multiple media engines, which are
848	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
849	 * so are not separately reported for busyness.
850	 */
851	__u32 busy;
852};
853
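/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * splitting the busy field into the two halves described above. "fd" and
 * "handle" are assumed.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	unsigned int write_engine = busy.busy & 0xffff;   // 0 if not being written
 *	unsigned int read_engines = busy.busy >> 16;      // bitmask of readers
 */
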
854/**
855 * I915_CACHING_NONE
856 *
857 * GPU access is not coherent with cpu caches. Default for machines without an
858 * LLC.
859 */
860#define I915_CACHING_NONE		0
861/**
862 * I915_CACHING_CACHED
863 *
864 * GPU access is coherent with cpu caches and furthermore the data is cached in
865 * last-level caches shared between cpu cores and the gpu GT. Default on
866 * machines with HAS_LLC.
867 */
868#define I915_CACHING_CACHED		1
869/**
870 * I915_CACHING_DISPLAY
871 *
872 * Special GPU caching mode which is coherent with the scanout engines.
873 * Transparently falls back to I915_CACHING_NONE on platforms where no special
874 * cache mode (like write-through or gfdt flushing) is available. The kernel
875 * automatically sets this mode when using a buffer as a scanout target.
876 * Userspace can manually set this mode to avoid a costly stall and clflush in
877 * the hotpath of drawing the first frame.
878 */
879#define I915_CACHING_DISPLAY		2
880
881struct drm_i915_gem_caching {
882	/**
883	 * Handle of the buffer to set/get the caching level of. */
884	__u32 handle;
885
886	/**
887	 * Caching level to apply or the value returned
888	 *
889	 * bits 0-15 are for generic caching control (i.e. the above defined
890	 * values). bits 16-31 are reserved for platform-specific variations
891	 * (e.g. l3$ caching on gen7). */
892	__u32 caching;
893};
894
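/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * requesting LLC caching for an object; the call may fail on platforms that
 * do not support the requested mode. "fd" and "handle" are assumed.
 *
 *	struct drm_i915_gem_caching caching = {
 *		.handle  = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &caching);
 */
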
895#define I915_TILING_NONE	0
896#define I915_TILING_X		1
897#define I915_TILING_Y		2
898
899#define I915_BIT_6_SWIZZLE_NONE		0
900#define I915_BIT_6_SWIZZLE_9		1
901#define I915_BIT_6_SWIZZLE_9_10		2
902#define I915_BIT_6_SWIZZLE_9_11		3
903#define I915_BIT_6_SWIZZLE_9_10_11	4
904/* Not seen by userland */
905#define I915_BIT_6_SWIZZLE_UNKNOWN	5
906/* Seen by userland. */
907#define I915_BIT_6_SWIZZLE_9_17		6
908#define I915_BIT_6_SWIZZLE_9_10_17	7
909
910struct drm_i915_gem_set_tiling {
911	/** Handle of the buffer to have its tiling state updated */
912	__u32 handle;
913
914	/**
915	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
916	 * I915_TILING_Y).
917	 *
918	 * This value is to be set on request, and will be updated by the
919	 * kernel on successful return with the actual chosen tiling layout.
920	 *
921	 * The tiling mode may be demoted to I915_TILING_NONE when the system
922	 * has bit 6 swizzling that can't be managed correctly by GEM.
923	 *
924	 * Buffer contents become undefined when changing tiling_mode.
925	 */
926	__u32 tiling_mode;
927
928	/**
929	 * Stride in bytes for the object when in I915_TILING_X or
930	 * I915_TILING_Y.
931	 */
932	__u32 stride;
933
934	/**
935	 * Returned address bit 6 swizzling required for CPU access through
936	 * mmap mapping.
937	 */
938	__u32 swizzle_mode;
939};
940
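/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * requesting X tiling with a 512-byte stride. The kernel may demote the mode,
 * so tiling_mode and swizzle_mode should be read back after the call. "fd"
 * and "handle" are assumed.
 *
 *	struct drm_i915_gem_set_tiling tiling = {
 *		.handle      = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride      = 512,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &tiling);
 *	// tiling.tiling_mode and tiling.swizzle_mode now hold the chosen values
 */
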
941struct drm_i915_gem_get_tiling {
942	/** Handle of the buffer to get tiling state for. */
943	__u32 handle;
944
945	/**
946	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
947	 * I915_TILING_Y).
948	 */
949	__u32 tiling_mode;
950
951	/**
952	 * Returned address bit 6 swizzling required for CPU access through
953	 * mmap mapping.
954	 */
955	__u32 swizzle_mode;
956
957	/**
958	 * Returned address bit 6 swizzling required for CPU access through
959	 * mmap mapping whilst bound.
960	 */
961	__u32 phys_swizzle_mode;
962};
963
964struct drm_i915_gem_get_aperture {
965	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
966	__u64 aper_size;
967
968	/**
969	 * Available space in the aperture used by i915_gem_execbuffer, in
970	 * bytes
971	 */
972	__u64 aper_available_size;
973};
974
975struct drm_i915_get_pipe_from_crtc_id {
976	/** ID of CRTC being requested **/
977	__u32 crtc_id;
978
979	/** pipe of requested CRTC **/
980	__u32 pipe;
981};
982
983#define I915_MADV_WILLNEED 0
984#define I915_MADV_DONTNEED 1
985#define __I915_MADV_PURGED 2 /* internal state */
986
987struct drm_i915_gem_madvise {
988	/** Handle of the buffer to change the backing store advice */
989	__u32 handle;
990
991	/* Advice: either the buffer will be needed again in the near future,
992	 *         or won't be and could be discarded under memory pressure.
993	 */
994	__u32 madv;
995
996	/** Whether the backing store still exists. */
997	__u32 retained;
998};
999
1000/* flags */
1001#define I915_OVERLAY_TYPE_MASK 		0xff
1002#define I915_OVERLAY_YUV_PLANAR 	0x01
1003#define I915_OVERLAY_YUV_PACKED 	0x02
1004#define I915_OVERLAY_RGB		0x03
1005
1006#define I915_OVERLAY_DEPTH_MASK		0xff00
1007#define I915_OVERLAY_RGB24		0x1000
1008#define I915_OVERLAY_RGB16		0x2000
1009#define I915_OVERLAY_RGB15		0x3000
1010#define I915_OVERLAY_YUV422		0x0100
1011#define I915_OVERLAY_YUV411		0x0200
1012#define I915_OVERLAY_YUV420		0x0300
1013#define I915_OVERLAY_YUV410		0x0400
1014
1015#define I915_OVERLAY_SWAP_MASK		0xff0000
1016#define I915_OVERLAY_NO_SWAP		0x000000
1017#define I915_OVERLAY_UV_SWAP		0x010000
1018#define I915_OVERLAY_Y_SWAP		0x020000
1019#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
1020
1021#define I915_OVERLAY_FLAGS_MASK		0xff000000
1022#define I915_OVERLAY_ENABLE		0x01000000
1023
1024struct drm_intel_overlay_put_image {
1025	/* various flags and src format description */
1026	__u32 flags;
1027	/* source picture description */
1028	__u32 bo_handle;
1029	/* stride values and offsets are in bytes, buffer relative */
1030	__u16 stride_Y; /* stride for packed formats */
1031	__u16 stride_UV;
1032	__u32 offset_Y; /* offset for packed formats */
1033	__u32 offset_U;
1034	__u32 offset_V;
1035	/* in pixels */
1036	__u16 src_width;
1037	__u16 src_height;
1038	/* to compensate the scaling factors for partially covered surfaces */
1039	__u16 src_scan_width;
1040	__u16 src_scan_height;
1041	/* output crtc description */
1042	__u32 crtc_id;
1043	__u16 dst_x;
1044	__u16 dst_y;
1045	__u16 dst_width;
1046	__u16 dst_height;
1047};
1048
1049/* flags */
1050#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
1051#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1052#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
1053struct drm_intel_overlay_attrs {
1054	__u32 flags;
1055	__u32 color_key;
1056	__s32 brightness;
1057	__u32 contrast;
1058	__u32 saturation;
1059	__u32 gamma0;
1060	__u32 gamma1;
1061	__u32 gamma2;
1062	__u32 gamma3;
1063	__u32 gamma4;
1064	__u32 gamma5;
1065};
1066
1067/*
1068 * Intel sprite handling
1069 *
1070 * Color keying works with a min/mask/max tuple.  Both source and destination
1071 * color keying is allowed.
1072 *
1073 * Source keying:
1074 * Sprite pixels within the min & max values, masked against the color channels
1075 * specified in the mask field, will be transparent.  All other pixels will
1076 * be displayed on top of the primary plane.  For RGB surfaces, only the min
1077 * and mask fields will be used; ranged compares are not allowed.
1078 *
1079 * Destination keying:
1080 * Primary plane pixels that match the min value, masked against the color
1081 * channels specified in the mask field, will be replaced by corresponding
1082 * pixels from the sprite plane.
1083 *
1084 * Note that source & destination keying are exclusive; only one can be
1085 * active on a given plane.
1086 */
1087
1088#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
1089#define I915_SET_COLORKEY_DESTINATION	(1<<1)
1090#define I915_SET_COLORKEY_SOURCE	(1<<2)
1091struct drm_intel_sprite_colorkey {
1092	__u32 plane_id;
1093	__u32 min_value;
1094	__u32 channel_mask;
1095	__u32 max_value;
1096	__u32 flags;
1097};
1098
1099struct drm_i915_gem_wait {
1100	/** Handle of BO we shall wait on */
1101	__u32 bo_handle;
1102	__u32 flags;
1103	/** Number of nanoseconds to wait; returns the time remaining. */
1104	__s64 timeout_ns;
1105};
1106
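/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * waiting up to one second for a buffer to become idle; a negative timeout
 * waits indefinitely. "fd" and "handle" are assumed; <errno.h> for ETIME.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle  = handle,
 *		.timeout_ns = 1000000000ll,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *	// ret == 0: idle; ret == -1 with errno == ETIME: still busy, and
 *	// wait.timeout_ns has been updated with the remaining time
 */
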
1107struct drm_i915_gem_context_create {
1108	/* output: id of new context */
1109	__u32 ctx_id;
1110	__u32 pad;
1111};
1112
1113struct drm_i915_gem_context_destroy {
1114	__u32 ctx_id;
1115	__u32 pad;
1116};
1117
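/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * creating a hardware context and destroying it again. The returned ctx_id is
 * what i915_execbuffer2_set_context_id() expects. "fd" is assumed.
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *
 *	struct drm_i915_gem_context_destroy destroy = { .ctx_id = create.ctx_id };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */
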
1118struct drm_i915_reg_read {
1119	/*
1120	 * Register offset.
1121	 * For 64bit wide registers where the upper 32bits don't immediately
1122	 * follow the lower 32bits, the offset of the lower 32bits must
1123	 * be specified
1124	 */
1125	__u64 offset;
1126	__u64 val; /* Return value */
1127};
1128/* Known registers:
1129 *
1130 * Render engine timestamp - 0x2358 + 64bit - gen7+
1131 * - Note this register returns an invalid value if read using the default
1132 *   single-instruction 8-byte read; to work around that, use
1133 *   offset (0x2358 | 1) instead.
1134 *
1135 */
1136
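/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * sampling the render engine timestamp register listed above, using the
 * (offset | 1) form that selects the workaround read. "fd" is assumed and
 * <stdio.h> is needed for printf.
 *
 *	struct drm_i915_reg_read reg = { .offset = 0x2358 | 1 };
 *	if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
 *		printf("render timestamp: %llu\n", (unsigned long long)reg.val);
 */
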
1137struct drm_i915_reset_stats {
1138	__u32 ctx_id;
1139	__u32 flags;
1140
1141	/* All resets since boot/module reload, for all contexts */
1142	__u32 reset_count;
1143
1144	/* Number of batches lost when active in GPU, for this context */
1145	__u32 batch_active;
1146
1147	/* Number of batches lost pending for execution, for this context */
1148	__u32 batch_pending;
1149
1150	__u32 pad;
1151};
1152
1153struct drm_i915_gem_userptr {
1154	__u64 user_ptr;
1155	__u64 user_size;
1156	__u32 flags;
1157#define I915_USERPTR_READ_ONLY 0x1
1158#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1159	/**
1160	 * Returned handle for the object.
1161	 *
1162	 * Object handles are nonzero.
1163	 */
1164	__u32 handle;
1165};
1166
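/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * wrapping 64 KiB of page-aligned, malloc'ed memory as a GEM object. "fd" is
 * assumed; <stdlib.h> provides posix_memalign. The memory must stay valid for
 * the lifetime of the handle.
 *
 *	void *mem = NULL;
 *	posix_memalign(&mem, 4096, 65536);
 *	struct drm_i915_gem_userptr userptr = {
 *		.user_ptr  = (__u64)(uintptr_t)mem,
 *		.user_size = 65536,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
 *	// userptr.handle can now be used like any other GEM handle
 */
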
1167struct drm_i915_gem_context_param {
1168	__u32 ctx_id;
1169	__u32 size;
1170	__u64 param;
1171#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
1172#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1173#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
1174	__u64 value;
1175};
1176
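/*
 * Illustrative sketch (editor's addition, not part of this header revision):
 * reading the GTT size through the context parameter interface; ctx_id 0
 * addresses the default context. "fd" is assumed and <stdio.h> is needed for
 * printf.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = 0,
 *		.param  = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0)
 *		printf("GTT size: %llu bytes\n", (unsigned long long)p.value);
 */
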
1177#if defined(__cplusplus)
1178}
1179#endif
1180
1181#endif /* _UAPI_I915_DRM_H_ */
1182