vmwgfx_drm.h revision ccfaccd726a369b7df72e251710755233d176e5a
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24

#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};
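
/*
 * Example (illustrative sketch, not part of the ABI): querying whether the
 * device supports 3D. The request macro below is an assumption, built the
 * standard DRM way from the ioctl offset above; the driver defines the
 * canonical DRM_IOCTL_VMW_* macros in its private headers. "fd" is an open
 * vmwgfx DRM file descriptor.
 *
 *	#define DRM_IOCTL_VMW_GET_PARAM \
 *		DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
 *			 struct drm_vmw_getparam_arg)
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	int have_3d = ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) == 0 &&
 *		      arg.value != 0;
 */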

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: mip level width
 * @height: mip level height
 * @depth: mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
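
/*
 * Example (illustrative sketch): creating a one-face, one-mip-level surface.
 * DRM_IOCTL_VMW_CREATE_SURFACE is assumed to be
 * DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,
 * union drm_vmw_surface_create_arg); "format" is an SVGA3d surface format
 * value taken from the SVGA device headers, not from this file.
 *
 *	struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = format;
 *	arg.req.mip_levels[0] = 1;	// unused faces stay 0
 *	arg.req.size_addr = (__u64)(unsigned long)&size;
 *	if (ioctl(fd, DRM_IOCTL_VMW_CREATE_SURFACE, &arg) == 0)
 *		use_sid(arg.rep.sid);	// hypothetical consumer
 */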

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags. None currently.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__u32 pad64;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__u32 pad64;
	__s32 error;
};
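
/*
 * Example (illustrative sketch): submitting a command buffer and picking up
 * the resulting fence. DRM_IOCTL_VMW_EXECBUF is assumed to be built as
 * DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, struct drm_vmw_execbuf_arg);
 * "cmd" and "cmd_size" describe a buffer of SVGA commands prepared by the
 * caller.
 *
 *	struct drm_vmw_fence_rep fence;
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.commands = (__u64)(unsigned long)cmd;
 *	arg.command_size = cmd_size;
 *	arg.fence_rep = (__u64)(unsigned long)&fence;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	if (ioctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg) == 0 && fence.error == 0)
 *		wait_on_fence(fence.handle);	// hypothetical; see FENCE_WAIT
 */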

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_dmabuf_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_alloc_dmabuf_req {
	__u32 size;
	__u32 pad64;
};

/**
 * struct drm_vmw_dmabuf_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_dmabuf_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};

/**
 * union drm_vmw_alloc_dmabuf_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

union drm_vmw_alloc_dmabuf_arg {
	struct drm_vmw_alloc_dmabuf_req req;
	struct drm_vmw_dmabuf_rep rep;
};
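
/*
 * Example (illustrative sketch): allocating a 64 KiB buffer and mapping it
 * into user space. DRM_IOCTL_VMW_ALLOC_DMABUF is assumed to be
 * DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,
 * union drm_vmw_alloc_dmabuf_arg).
 *
 *	union drm_vmw_alloc_dmabuf_arg arg;
 *	void *ptr = MAP_FAILED;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.size = 65536;
 *	if (ioctl(fd, DRM_IOCTL_VMW_ALLOC_DMABUF, &arg) == 0)
 *		ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, arg.rep.map_handle);
 */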

/*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 */

/**
 * struct drm_vmw_unref_dmabuf_arg
 *
 * @handle: Handle indicating what buffer to free. Obtained from the
 * DRM_VMW_ALLOC_DMABUF Ioctl.
 *
 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
 */

struct drm_vmw_unref_dmabuf_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control.
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches; the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO.
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64.
 * @max_size: Max size to copy.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};
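
/*
 * Example (illustrative sketch): reading the 3D capability block. The
 * required buffer size can first be queried with DRM_VMW_PARAM_3D_CAPS_SIZE;
 * DRM_IOCTL_VMW_GET_3D_CAP is assumed to be
 * DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,
 * struct drm_vmw_get_3d_cap_arg).
 *
 *	__u32 caps[1024];
 *	struct drm_vmw_get_3d_cap_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.buffer = (__u64)(unsigned long)caps;
 *	arg.max_size = sizeof(caps);
 *	if (ioctl(fd, DRM_IOCTL_VMW_GET_3D_CAP, &arg) == 0)
 *		parse_caps(caps);	// hypothetical consumer
 */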

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
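
/*
 * Example (illustrative sketch): waiting up to one second for command
 * execution and dropping the fence reference on success.
 * DRM_IOCTL_VMW_FENCE_WAIT is assumed to be
 * DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,
 * struct drm_vmw_fence_wait_arg); "fence" is the struct drm_vmw_fence_rep
 * filled in by DRM_VMW_EXECBUF.
 *
 *	struct drm_vmw_fence_wait_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));	// cookie_valid starts at 0
 *	arg.handle = fence.handle;
 *	arg.timeout_us = 1000000;
 *	arg.lazy = 1;
 *	arg.flags = DRM_VMW_FENCE_FLAG_EXEC;
 *	arg.wait_options = DRM_VMW_WAIT_OPTION_UNREF;
 *	if (ioctl(fd, DRM_IOCTL_VMW_FENCE_WAIT, &arg) == 0)
 *		;	// signaled, and the fence object is now unreferenced
 */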

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to the DRM_VMW_FENCE_SIGNALED ioctl.
 * @signaled: Out: Whether the fence has signaled the flags in @flags.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};
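
/*
 * Example (illustrative sketch): requesting a timestamped event when a fence
 * signals, then reading it off the drm fd. DRM_IOCTL_VMW_FENCE_EVENT is
 * assumed to be DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,
 * struct drm_vmw_fence_event_arg); error handling and partial reads of the
 * event stream are elided for brevity.
 *
 *	struct drm_vmw_fence_event_arg arg;
 *	struct drm_vmw_event_fence ev;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.handle = fence.handle;
 *	arg.user_data = 0xc0ffee;
 *	arg.flags = DRM_VMW_FE_FLAG_REQ_TIME;
 *	ioctl(fd, DRM_IOCTL_VMW_FENCE_EVENT, &arg);
 *	// later, when the fd polls readable:
 *	if (read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
 *	    ev.base.type == DRM_VMW_EVENT_FENCE_SIGNALED)
 *		;	// ev.user_data and ev.tv_sec/ev.tv_usec are valid
 */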


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
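
/*
 * Example (illustrative sketch): laying out two 1024x768 outputs side by
 * side. DRM_IOCTL_VMW_UPDATE_LAYOUT is assumed to be
 * DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,
 * struct drm_vmw_update_layout_arg).
 *
 *	struct drm_vmw_rect rects[] = {
 *		{ .x = 0,    .y = 0, .w = 1024, .h = 768 },
 *		{ .x = 1024, .y = 0, .w = 1024, .h = 768 },
 *	};
 *	struct drm_vmw_update_layout_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.num_outputs = 2;
 *	arg.rects = (__u64)(unsigned long)rects;
 *	ioctl(fd, DRM_IOCTL_VMW_UPDATE_LAYOUT, &arg);
 */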


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code.
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Drops a user-space reference to a shader, destroying the shader itself
 * when the last reference is gone.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable:     Whether the surface is shareable.
 * @drm_vmw_surface_flag_scanout:       Whether the surface is a scanout
 *                                      surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 *                                      given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags:      SVGA3d surface flags for the device.
 * @format:            SVGA3d format.
 * @mip_levels:        Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter:    Future use. Set to 0.
 * @buffer_handle:     Buffer handle of backup buffer. SVGA3D_INVALID_ID
 *                     if none.
 * @base_size:         Size of the base mip level for all faces.
 * @array_size:        Must be zero for non-DX hardware, and if non-zero
 *                     svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle:            Surface handle.
 * @backup_size:       Size of backup buffers for this surface.
 * @buffer_handle:     Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size:       Actual size of the buffer identified by
 *                     @buffer_handle.
 * @buffer_map_handle: Offset into device address space for the buffer
 *                     identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
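
/*
 * Example (illustrative sketch): creating a guest-backed surface together
 * with its backup buffer. DRM_IOCTL_VMW_GB_SURFACE_CREATE is assumed to be
 * DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,
 * union drm_vmw_gb_surface_create_arg); "svga3d_flags", "format" and
 * SVGA3D_INVALID_ID are device values taken from the SVGA3D headers, not
 * from this file.
 *
 *	union drm_vmw_gb_surface_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.svga3d_flags = svga3d_flags;
 *	arg.req.format = format;
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;	// no existing backup
 *	arg.req.base_size.width = 256;
 *	arg.req.base_size.height = 256;
 *	arg.req.base_size.depth = 1;
 *	if (ioctl(fd, DRM_IOCTL_VMW_GB_SURFACE_CREATE, &arg) == 0)
 *		;	// arg.rep.handle and arg.rep.buffer_* are now valid
 */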

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab:    Grab the buffer for CPU operations.
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op:     The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags:  Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
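
/*
 * Example (illustrative sketch): grabbing a buffer for CPU access around a
 * write to its mmap()ed contents, then releasing it with the same flags.
 * DRM_IOCTL_VMW_SYNCCPU is assumed to be
 * DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, struct drm_vmw_synccpu_arg).
 *
 *	struct drm_vmw_synccpu_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.op = drm_vmw_synccpu_grab;
 *	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
 *	arg.handle = buf_handle;	// from DRM_VMW_ALLOC_DMABUF
 *	if (ioctl(fd, DRM_IOCTL_VMW_SYNCCPU, &arg) == 0) {
 *		// CPU reads/writes of the mapped buffer go here
 *		arg.op = drm_vmw_synccpu_release;
 *		ioctl(fd, DRM_IOCTL_VMW_SYNCCPU, &arg);
 *	}
 */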

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

#if defined(__cplusplus)
}
#endif

#endif