/* indirect_glx.c — revision 559e4f8ebcb186b491d7d687ac43f22a62448fc1 */
1/* 2 * Copyright © 2010 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Soft- 6 * ware"), to deal in the Software without restriction, including without 7 * limitation the rights to use, copy, modify, merge, publish, distribute, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, provided that the above copyright 10 * notice(s) and this permission notice appear in all copies of the Soft- 11 * ware and that both the above copyright notice(s) and this permission 12 * notice appear in supporting documentation. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- 16 * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY 17 * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN 18 * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE- 19 * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, 20 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 21 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR- 22 * MANCE OF THIS SOFTWARE. 23 * 24 * Except as contained in this notice, the name of a copyright holder shall 25 * not be used in advertising or otherwise to promote the sale, use or 26 * other dealings in this Software without prior written authorization of 27 * the copyright holder. 28 * 29 * Authors: 30 * Kristian Høgsberg (krh@bitplanet.net) 31 */ 32 33#include "glapi.h" 34#include "glxclient.h" 35 36extern struct _glapi_table *__glXNewIndirectAPI(void); 37 38/* 39** All indirect rendering contexts will share the same indirect dispatch table. 
40*/ 41static struct _glapi_table *IndirectAPI = NULL; 42 43static void 44indirect_destroy_context(struct glx_context *gc) 45{ 46 if (!gc->imported && gc->xid) 47 glx_send_destroy_context(gc->psc->dpy, gc->xid); 48 49 __glXFreeVertexArrayState(gc); 50 51 if (gc->vendor) 52 XFree((char *) gc->vendor); 53 if (gc->renderer) 54 XFree((char *) gc->renderer); 55 if (gc->version) 56 XFree((char *) gc->version); 57 if (gc->extensions) 58 XFree((char *) gc->extensions); 59 __glFreeAttributeState(gc); 60 XFree((char *) gc->buf); 61 Xfree((char *) gc->client_state_private); 62 XFree((char *) gc); 63} 64 65static Bool 66SendMakeCurrentRequest(Display * dpy, CARD8 opcode, 67 GLXContextID gc_id, GLXContextTag gc_tag, 68 GLXDrawable draw, GLXDrawable read, 69 xGLXMakeCurrentReply * reply) 70{ 71 Bool ret; 72 73 LockDisplay(dpy); 74 75 if (draw == read) { 76 xGLXMakeCurrentReq *req; 77 78 GetReq(GLXMakeCurrent, req); 79 req->reqType = opcode; 80 req->glxCode = X_GLXMakeCurrent; 81 req->drawable = draw; 82 req->context = gc_id; 83 req->oldContextTag = gc_tag; 84 } 85 else { 86 struct glx_display *priv = __glXInitialize(dpy); 87 88 /* If the server can support the GLX 1.3 version, we should 89 * perfer that. Not only that, some servers support GLX 1.3 but 90 * not the SGI extension. 
91 */ 92 93 if ((priv->majorVersion > 1) || (priv->minorVersion >= 3)) { 94 xGLXMakeContextCurrentReq *req; 95 96 GetReq(GLXMakeContextCurrent, req); 97 req->reqType = opcode; 98 req->glxCode = X_GLXMakeContextCurrent; 99 req->drawable = draw; 100 req->readdrawable = read; 101 req->context = gc_id; 102 req->oldContextTag = gc_tag; 103 } 104 else { 105 xGLXVendorPrivateWithReplyReq *vpreq; 106 xGLXMakeCurrentReadSGIReq *req; 107 108 GetReqExtra(GLXVendorPrivateWithReply, 109 sz_xGLXMakeCurrentReadSGIReq - 110 sz_xGLXVendorPrivateWithReplyReq, vpreq); 111 req = (xGLXMakeCurrentReadSGIReq *) vpreq; 112 req->reqType = opcode; 113 req->glxCode = X_GLXVendorPrivateWithReply; 114 req->vendorCode = X_GLXvop_MakeCurrentReadSGI; 115 req->drawable = draw; 116 req->readable = read; 117 req->context = gc_id; 118 req->oldContextTag = gc_tag; 119 } 120 } 121 122 ret = _XReply(dpy, (xReply *) reply, 0, False); 123 124 UnlockDisplay(dpy); 125 SyncHandle(); 126 127 return ret; 128} 129 130static int 131indirect_bind_context(struct glx_context *gc, struct glx_context *old, 132 GLXDrawable draw, GLXDrawable read) 133{ 134 xGLXMakeCurrentReply reply; 135 GLXContextTag tag; 136 __GLXattribute *state; 137 Display *dpy = gc->psc->dpy; 138 int opcode = __glXSetupForCommand(dpy); 139 140 if (old != &dummyContext && !old->isDirect && old->psc->dpy == dpy) { 141 tag = old->currentContextTag; 142 old->currentContextTag = 0; 143 } else { 144 tag = 0; 145 } 146 147 SendMakeCurrentRequest(dpy, opcode, gc->xid, tag, draw, read, &reply); 148 149 if (!IndirectAPI) 150 IndirectAPI = __glXNewIndirectAPI(); 151 _glapi_set_dispatch(IndirectAPI); 152 153 gc->currentContextTag = reply.contextTag; 154 state = gc->client_state_private; 155 if (state->array_state == NULL) { 156 glGetString(GL_EXTENSIONS); 157 glGetString(GL_VERSION); 158 __glXInitVertexArrayState(gc); 159 } 160 161 return Success; 162} 163 164static void 165indirect_unbind_context(struct glx_context *gc, struct glx_context *new) 166{ 167 
Display *dpy = gc->psc->dpy; 168 int opcode = __glXSetupForCommand(dpy); 169 xGLXMakeCurrentReply reply; 170 171 if (gc == new) 172 return; 173 174 /* We are either switching to no context, away from a indirect 175 * context to a direct context or from one dpy to another and have 176 * to send a request to the dpy to unbind the previous context. 177 */ 178 if (!new || new->isDirect || new->psc->dpy != dpy) { 179 SendMakeCurrentRequest(dpy, opcode, None, 180 gc->currentContextTag, None, None, &reply); 181 gc->currentContextTag = 0; 182 } 183} 184 185static void 186indirect_wait_gl(struct glx_context *gc) 187{ 188 xGLXWaitGLReq *req; 189 Display *dpy = gc->currentDpy; 190 191 /* Flush any pending commands out */ 192 __glXFlushRenderBuffer(gc, gc->pc); 193 194 /* Send the glXWaitGL request */ 195 LockDisplay(dpy); 196 GetReq(GLXWaitGL, req); 197 req->reqType = gc->majorOpcode; 198 req->glxCode = X_GLXWaitGL; 199 req->contextTag = gc->currentContextTag; 200 UnlockDisplay(dpy); 201 SyncHandle(); 202} 203 204static void 205indirect_wait_x(struct glx_context *gc) 206{ 207 xGLXWaitXReq *req; 208 Display *dpy = gc->currentDpy; 209 210 /* Flush any pending commands out */ 211 __glXFlushRenderBuffer(gc, gc->pc); 212 213 LockDisplay(dpy); 214 GetReq(GLXWaitX, req); 215 req->reqType = gc->majorOpcode; 216 req->glxCode = X_GLXWaitX; 217 req->contextTag = gc->currentContextTag; 218 UnlockDisplay(dpy); 219 SyncHandle(); 220} 221 222static void 223indirect_use_x_font(struct glx_context *gc, 224 Font font, int first, int count, int listBase) 225{ 226 xGLXUseXFontReq *req; 227 Display *dpy = gc->currentDpy; 228 229 /* Flush any pending commands out */ 230 __glXFlushRenderBuffer(gc, gc->pc); 231 232 /* Send the glXUseFont request */ 233 LockDisplay(dpy); 234 GetReq(GLXUseXFont, req); 235 req->reqType = gc->majorOpcode; 236 req->glxCode = X_GLXUseXFont; 237 req->contextTag = gc->currentContextTag; 238 req->font = font; 239 req->first = first; 240 req->count = count; 241 req->listBase = 
listBase; 242 UnlockDisplay(dpy); 243 SyncHandle(); 244} 245 246static void 247indirect_bind_tex_image(Display * dpy, 248 GLXDrawable drawable, 249 int buffer, const int *attrib_list) 250{ 251 xGLXVendorPrivateReq *req; 252 struct glx_context *gc = __glXGetCurrentContext(); 253 CARD32 *drawable_ptr; 254 INT32 *buffer_ptr; 255 CARD32 *num_attrib_ptr; 256 CARD32 *attrib_ptr; 257 CARD8 opcode; 258 unsigned int i; 259 260 i = 0; 261 if (attrib_list) { 262 while (attrib_list[i * 2] != None) 263 i++; 264 } 265 266 opcode = __glXSetupForCommand(dpy); 267 if (!opcode) 268 return; 269 270 LockDisplay(dpy); 271 GetReqExtra(GLXVendorPrivate, 12 + 8 * i, req); 272 req->reqType = opcode; 273 req->glxCode = X_GLXVendorPrivate; 274 req->vendorCode = X_GLXvop_BindTexImageEXT; 275 req->contextTag = gc->currentContextTag; 276 277 drawable_ptr = (CARD32 *) (req + 1); 278 buffer_ptr = (INT32 *) (drawable_ptr + 1); 279 num_attrib_ptr = (CARD32 *) (buffer_ptr + 1); 280 attrib_ptr = (CARD32 *) (num_attrib_ptr + 1); 281 282 *drawable_ptr = drawable; 283 *buffer_ptr = buffer; 284 *num_attrib_ptr = (CARD32) i; 285 286 i = 0; 287 if (attrib_list) { 288 while (attrib_list[i * 2] != None) { 289 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 0]; 290 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 1]; 291 i++; 292 } 293 } 294 295 UnlockDisplay(dpy); 296 SyncHandle(); 297} 298 299static void 300indirect_release_tex_image(Display * dpy, GLXDrawable drawable, int buffer) 301{ 302 xGLXVendorPrivateReq *req; 303 struct glx_context *gc = __glXGetCurrentContext(); 304 CARD32 *drawable_ptr; 305 INT32 *buffer_ptr; 306 CARD8 opcode; 307 308 opcode = __glXSetupForCommand(dpy); 309 if (!opcode) 310 return; 311 312 LockDisplay(dpy); 313 GetReqExtra(GLXVendorPrivate, sizeof(CARD32) + sizeof(INT32), req); 314 req->reqType = opcode; 315 req->glxCode = X_GLXVendorPrivate; 316 req->vendorCode = X_GLXvop_ReleaseTexImageEXT; 317 req->contextTag = gc->currentContextTag; 318 319 drawable_ptr = (CARD32 *) (req + 1); 320 
buffer_ptr = (INT32 *) (drawable_ptr + 1); 321 322 *drawable_ptr = drawable; 323 *buffer_ptr = buffer; 324 325 UnlockDisplay(dpy); 326 SyncHandle(); 327} 328 329static const struct glx_context_vtable indirect_context_vtable = { 330 indirect_destroy_context, 331 indirect_bind_context, 332 indirect_unbind_context, 333 indirect_wait_gl, 334 indirect_wait_x, 335 indirect_use_x_font, 336 indirect_bind_tex_image, 337 indirect_release_tex_image, 338 NULL, /* get_proc_address */ 339}; 340 341/** 342 * \todo Eliminate \c __glXInitVertexArrayState. Replace it with a new 343 * function called \c __glXAllocateClientState that allocates the memory and 344 * does all the initialization (including the pixel pack / unpack). 345 */ 346_X_HIDDEN struct glx_context * 347indirect_create_context(struct glx_screen *psc, 348 struct glx_config *mode, 349 struct glx_context *shareList, int renderType) 350{ 351 struct glx_context *gc; 352 int bufSize; 353 CARD8 opcode; 354 __GLXattribute *state; 355 356 opcode = __glXSetupForCommand(psc->dpy); 357 if (!opcode) { 358 return NULL; 359 } 360 361 /* Allocate our context record */ 362 gc = Xmalloc(sizeof *gc); 363 if (!gc) { 364 /* Out of memory */ 365 return NULL; 366 } 367 memset(gc, 0, sizeof *gc); 368 369 glx_context_init(gc, psc, mode); 370 gc->isDirect = GL_FALSE; 371 gc->vtable = &indirect_context_vtable; 372 state = Xmalloc(sizeof(struct __GLXattributeRec)); 373 if (state == NULL) { 374 /* Out of memory */ 375 Xfree(gc); 376 return NULL; 377 } 378 gc->client_state_private = state; 379 memset(gc->client_state_private, 0, sizeof(struct __GLXattributeRec)); 380 state->NoDrawArraysProtocol = (getenv("LIBGL_NO_DRAWARRAYS") != NULL); 381 382 /* 383 ** Create a temporary buffer to hold GLX rendering commands. The size 384 ** of the buffer is selected so that the maximum number of GLX rendering 385 ** commands can fit in a single X packet and still have room in the X 386 ** packet for the GLXRenderReq header. 
387 */ 388 389 bufSize = (XMaxRequestSize(psc->dpy) * 4) - sz_xGLXRenderReq; 390 gc->buf = (GLubyte *) Xmalloc(bufSize); 391 if (!gc->buf) { 392 Xfree(gc->client_state_private); 393 Xfree(gc); 394 return NULL; 395 } 396 gc->bufSize = bufSize; 397 398 /* Fill in the new context */ 399 gc->renderMode = GL_RENDER; 400 401 state->storePack.alignment = 4; 402 state->storeUnpack.alignment = 4; 403 404 gc->attributes.stackPointer = &gc->attributes.stack[0]; 405 406 /* 407 ** PERFORMANCE NOTE: A mode dependent fill image can speed things up. 408 */ 409 gc->fillImage = __glFillImage; 410 gc->pc = gc->buf; 411 gc->bufEnd = gc->buf + bufSize; 412 gc->isDirect = GL_FALSE; 413 if (__glXDebug) { 414 /* 415 ** Set limit register so that there will be one command per packet 416 */ 417 gc->limit = gc->buf; 418 } 419 else { 420 gc->limit = gc->buf + bufSize - __GLX_BUFFER_LIMIT_SIZE; 421 } 422 gc->majorOpcode = opcode; 423 424 /* 425 ** Constrain the maximum drawing command size allowed to be 426 ** transfered using the X_GLXRender protocol request. First 427 ** constrain by a software limit, then constrain by the protocl 428 ** limit. 429 */ 430 if (bufSize > __GLX_RENDER_CMD_SIZE_LIMIT) { 431 bufSize = __GLX_RENDER_CMD_SIZE_LIMIT; 432 } 433 if (bufSize > __GLX_MAX_RENDER_CMD_SIZE) { 434 bufSize = __GLX_MAX_RENDER_CMD_SIZE; 435 } 436 gc->maxSmallRenderCommandSize = bufSize; 437 438 439 return gc; 440} 441 442struct glx_screen_vtable indirect_screen_vtable = { 443 indirect_create_context 444}; 445 446_X_HIDDEN struct glx_screen * 447indirect_create_screen(int screen, struct glx_display * priv) 448{ 449 struct glx_screen *psc; 450 451 psc = Xmalloc(sizeof *psc); 452 if (psc == NULL) 453 return NULL; 454 455 memset(psc, 0, sizeof *psc); 456 glx_screen_init(psc, screen, priv); 457 psc->vtable = &indirect_screen_vtable; 458 459 return psc; 460} 461