/* indirect_glx.c revision 23e2dec1a1cae26430a988e0c74b1f2b13eb0f6f */
1/* 2 * Copyright © 2010 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Soft- 6 * ware"), to deal in the Software without restriction, including without 7 * limitation the rights to use, copy, modify, merge, publish, distribute, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, provided that the above copyright 10 * notice(s) and this permission notice appear in all copies of the Soft- 11 * ware and that both the above copyright notice(s) and this permission 12 * notice appear in supporting documentation. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- 16 * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY 17 * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN 18 * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE- 19 * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, 20 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 21 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR- 22 * MANCE OF THIS SOFTWARE. 23 * 24 * Except as contained in this notice, the name of a copyright holder shall 25 * not be used in advertising or otherwise to promote the sale, use or 26 * other dealings in this Software without prior written authorization of 27 * the copyright holder. 28 * 29 * Authors: 30 * Kristian Høgsberg (krh@bitplanet.net) 31 */ 32 33#include "glapi.h" 34#include "glxclient.h" 35 36extern struct _glapi_table *__glXNewIndirectAPI(void); 37 38/* 39** All indirect rendering contexts will share the same indirect dispatch table. 
40*/ 41static struct _glapi_table *IndirectAPI = NULL; 42 43static void 44indirect_destroy_context(struct glx_context *gc) 45{ 46 if (!gc->imported && gc->xid) 47 glx_send_destroy_context(gc->psc->dpy, gc->xid); 48 49 __glXFreeVertexArrayState(gc); 50 51 if (gc->vendor) 52 XFree((char *) gc->vendor); 53 if (gc->renderer) 54 XFree((char *) gc->renderer); 55 if (gc->version) 56 XFree((char *) gc->version); 57 if (gc->extensions) 58 XFree((char *) gc->extensions); 59 __glFreeAttributeState(gc); 60 XFree((char *) gc->buf); 61 Xfree((char *) gc->client_state_private); 62 XFree((char *) gc); 63} 64 65static Bool 66SendMakeCurrentRequest(Display * dpy, CARD8 opcode, 67 GLXContextID gc_id, GLXContextTag gc_tag, 68 GLXDrawable draw, GLXDrawable read, 69 xGLXMakeCurrentReply * reply) 70{ 71 Bool ret; 72 73 LockDisplay(dpy); 74 75 if (draw == read) { 76 xGLXMakeCurrentReq *req; 77 78 GetReq(GLXMakeCurrent, req); 79 req->reqType = opcode; 80 req->glxCode = X_GLXMakeCurrent; 81 req->drawable = draw; 82 req->context = gc_id; 83 req->oldContextTag = gc_tag; 84 } 85 else { 86 struct glx_display *priv = __glXInitialize(dpy); 87 88 /* If the server can support the GLX 1.3 version, we should 89 * perfer that. Not only that, some servers support GLX 1.3 but 90 * not the SGI extension. 
91 */ 92 93 if ((priv->majorVersion > 1) || (priv->minorVersion >= 3)) { 94 xGLXMakeContextCurrentReq *req; 95 96 GetReq(GLXMakeContextCurrent, req); 97 req->reqType = opcode; 98 req->glxCode = X_GLXMakeContextCurrent; 99 req->drawable = draw; 100 req->readdrawable = read; 101 req->context = gc_id; 102 req->oldContextTag = gc_tag; 103 } 104 else { 105 xGLXVendorPrivateWithReplyReq *vpreq; 106 xGLXMakeCurrentReadSGIReq *req; 107 108 GetReqExtra(GLXVendorPrivateWithReply, 109 sz_xGLXMakeCurrentReadSGIReq - 110 sz_xGLXVendorPrivateWithReplyReq, vpreq); 111 req = (xGLXMakeCurrentReadSGIReq *) vpreq; 112 req->reqType = opcode; 113 req->glxCode = X_GLXVendorPrivateWithReply; 114 req->vendorCode = X_GLXvop_MakeCurrentReadSGI; 115 req->drawable = draw; 116 req->readable = read; 117 req->context = gc_id; 118 req->oldContextTag = gc_tag; 119 } 120 } 121 122 ret = _XReply(dpy, (xReply *) reply, 0, False); 123 124 UnlockDisplay(dpy); 125 SyncHandle(); 126 127 return ret; 128} 129 130static int 131indirect_bind_context(struct glx_context *gc, struct glx_context *old, 132 GLXDrawable draw, GLXDrawable read) 133{ 134 xGLXMakeCurrentReply reply; 135 GLXContextTag tag; 136 __GLXattribute *state; 137 Display *dpy = gc->psc->dpy; 138 int opcode = __glXSetupForCommand(dpy); 139 140 if (old != &dummyContext && !old->isDirect && old->psc->dpy == dpy) { 141 tag = old->currentContextTag; 142 old->currentContextTag = 0; 143 } else { 144 tag = 0; 145 } 146 147 SendMakeCurrentRequest(dpy, opcode, gc->xid, tag, draw, read, &reply); 148 149 if (!IndirectAPI) 150 IndirectAPI = __glXNewIndirectAPI(); 151 _glapi_set_dispatch(IndirectAPI); 152 153 gc->currentContextTag = reply.contextTag; 154 state = gc->client_state_private; 155 if (state->array_state == NULL) { 156 glGetString(GL_EXTENSIONS); 157 glGetString(GL_VERSION); 158 __glXInitVertexArrayState(gc); 159 } 160 161 return Success; 162} 163 164static void 165indirect_unbind_context(struct glx_context *gc, struct glx_context *new) 166{ 167 
Display *dpy = gc->psc->dpy; 168 int opcode = __glXSetupForCommand(dpy); 169 xGLXMakeCurrentReply reply; 170 171 /* We are either switching to no context, away from a indirect 172 * context to a direct context or from one dpy to another and have 173 * to send a request to the dpy to unbind the previous context. 174 */ 175 if (!new || new->isDirect || new->psc->dpy != dpy) { 176 SendMakeCurrentRequest(dpy, opcode, None, 177 gc->currentContextTag, None, None, &reply); 178 gc->currentContextTag = 0; 179 } 180} 181 182static void 183indirect_wait_gl(struct glx_context *gc) 184{ 185 xGLXWaitGLReq *req; 186 Display *dpy = gc->currentDpy; 187 188 /* Flush any pending commands out */ 189 __glXFlushRenderBuffer(gc, gc->pc); 190 191 /* Send the glXWaitGL request */ 192 LockDisplay(dpy); 193 GetReq(GLXWaitGL, req); 194 req->reqType = gc->majorOpcode; 195 req->glxCode = X_GLXWaitGL; 196 req->contextTag = gc->currentContextTag; 197 UnlockDisplay(dpy); 198 SyncHandle(); 199} 200 201static void 202indirect_wait_x(struct glx_context *gc) 203{ 204 xGLXWaitXReq *req; 205 Display *dpy = gc->currentDpy; 206 207 /* Flush any pending commands out */ 208 __glXFlushRenderBuffer(gc, gc->pc); 209 210 LockDisplay(dpy); 211 GetReq(GLXWaitX, req); 212 req->reqType = gc->majorOpcode; 213 req->glxCode = X_GLXWaitX; 214 req->contextTag = gc->currentContextTag; 215 UnlockDisplay(dpy); 216 SyncHandle(); 217} 218 219static void 220indirect_use_x_font(struct glx_context *gc, 221 Font font, int first, int count, int listBase) 222{ 223 xGLXUseXFontReq *req; 224 Display *dpy = gc->currentDpy; 225 226 /* Flush any pending commands out */ 227 __glXFlushRenderBuffer(gc, gc->pc); 228 229 /* Send the glXUseFont request */ 230 LockDisplay(dpy); 231 GetReq(GLXUseXFont, req); 232 req->reqType = gc->majorOpcode; 233 req->glxCode = X_GLXUseXFont; 234 req->contextTag = gc->currentContextTag; 235 req->font = font; 236 req->first = first; 237 req->count = count; 238 req->listBase = listBase; 239 UnlockDisplay(dpy); 
240 SyncHandle(); 241} 242 243static void 244indirect_bind_tex_image(Display * dpy, 245 GLXDrawable drawable, 246 int buffer, const int *attrib_list) 247{ 248 xGLXVendorPrivateReq *req; 249 struct glx_context *gc = __glXGetCurrentContext(); 250 CARD32 *drawable_ptr; 251 INT32 *buffer_ptr; 252 CARD32 *num_attrib_ptr; 253 CARD32 *attrib_ptr; 254 CARD8 opcode; 255 unsigned int i; 256 257 i = 0; 258 if (attrib_list) { 259 while (attrib_list[i * 2] != None) 260 i++; 261 } 262 263 opcode = __glXSetupForCommand(dpy); 264 if (!opcode) 265 return; 266 267 LockDisplay(dpy); 268 GetReqExtra(GLXVendorPrivate, 12 + 8 * i, req); 269 req->reqType = opcode; 270 req->glxCode = X_GLXVendorPrivate; 271 req->vendorCode = X_GLXvop_BindTexImageEXT; 272 req->contextTag = gc->currentContextTag; 273 274 drawable_ptr = (CARD32 *) (req + 1); 275 buffer_ptr = (INT32 *) (drawable_ptr + 1); 276 num_attrib_ptr = (CARD32 *) (buffer_ptr + 1); 277 attrib_ptr = (CARD32 *) (num_attrib_ptr + 1); 278 279 *drawable_ptr = drawable; 280 *buffer_ptr = buffer; 281 *num_attrib_ptr = (CARD32) i; 282 283 i = 0; 284 if (attrib_list) { 285 while (attrib_list[i * 2] != None) { 286 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 0]; 287 *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 1]; 288 i++; 289 } 290 } 291 292 UnlockDisplay(dpy); 293 SyncHandle(); 294} 295 296static void 297indirect_release_tex_image(Display * dpy, GLXDrawable drawable, int buffer) 298{ 299 xGLXVendorPrivateReq *req; 300 struct glx_context *gc = __glXGetCurrentContext(); 301 CARD32 *drawable_ptr; 302 INT32 *buffer_ptr; 303 CARD8 opcode; 304 305 opcode = __glXSetupForCommand(dpy); 306 if (!opcode) 307 return; 308 309 LockDisplay(dpy); 310 GetReqExtra(GLXVendorPrivate, sizeof(CARD32) + sizeof(INT32), req); 311 req->reqType = opcode; 312 req->glxCode = X_GLXVendorPrivate; 313 req->vendorCode = X_GLXvop_ReleaseTexImageEXT; 314 req->contextTag = gc->currentContextTag; 315 316 drawable_ptr = (CARD32 *) (req + 1); 317 buffer_ptr = (INT32 *) (drawable_ptr 
+ 1); 318 319 *drawable_ptr = drawable; 320 *buffer_ptr = buffer; 321 322 UnlockDisplay(dpy); 323 SyncHandle(); 324} 325 326static const struct glx_context_vtable indirect_context_vtable = { 327 indirect_destroy_context, 328 indirect_bind_context, 329 indirect_unbind_context, 330 indirect_wait_gl, 331 indirect_wait_x, 332 indirect_use_x_font, 333 indirect_bind_tex_image, 334 indirect_release_tex_image, 335}; 336 337/** 338 * \todo Eliminate \c __glXInitVertexArrayState. Replace it with a new 339 * function called \c __glXAllocateClientState that allocates the memory and 340 * does all the initialization (including the pixel pack / unpack). 341 */ 342_X_HIDDEN struct glx_context * 343indirect_create_context(struct glx_screen *psc, 344 struct glx_config *mode, 345 struct glx_context *shareList, int renderType) 346{ 347 struct glx_context *gc; 348 int bufSize; 349 CARD8 opcode; 350 __GLXattribute *state; 351 352 opcode = __glXSetupForCommand(psc->dpy); 353 if (!opcode) { 354 return NULL; 355 } 356 357 /* Allocate our context record */ 358 gc = Xmalloc(sizeof *gc); 359 if (!gc) { 360 /* Out of memory */ 361 return NULL; 362 } 363 memset(gc, 0, sizeof *gc); 364 365 glx_context_init(gc, psc, mode); 366 gc->isDirect = GL_FALSE; 367 gc->vtable = &indirect_context_vtable; 368 state = Xmalloc(sizeof(struct __GLXattributeRec)); 369 if (state == NULL) { 370 /* Out of memory */ 371 Xfree(gc); 372 return NULL; 373 } 374 gc->client_state_private = state; 375 memset(gc->client_state_private, 0, sizeof(struct __GLXattributeRec)); 376 state->NoDrawArraysProtocol = (getenv("LIBGL_NO_DRAWARRAYS") != NULL); 377 378 /* 379 ** Create a temporary buffer to hold GLX rendering commands. The size 380 ** of the buffer is selected so that the maximum number of GLX rendering 381 ** commands can fit in a single X packet and still have room in the X 382 ** packet for the GLXRenderReq header. 
383 */ 384 385 bufSize = (XMaxRequestSize(psc->dpy) * 4) - sz_xGLXRenderReq; 386 gc->buf = (GLubyte *) Xmalloc(bufSize); 387 if (!gc->buf) { 388 Xfree(gc->client_state_private); 389 Xfree(gc); 390 return NULL; 391 } 392 gc->bufSize = bufSize; 393 394 /* Fill in the new context */ 395 gc->renderMode = GL_RENDER; 396 397 state->storePack.alignment = 4; 398 state->storeUnpack.alignment = 4; 399 400 gc->attributes.stackPointer = &gc->attributes.stack[0]; 401 402 /* 403 ** PERFORMANCE NOTE: A mode dependent fill image can speed things up. 404 ** Other code uses the fastImageUnpack bit, but it is never set 405 ** to GL_TRUE. 406 */ 407 gc->fastImageUnpack = GL_FALSE; 408 gc->fillImage = __glFillImage; 409 gc->pc = gc->buf; 410 gc->bufEnd = gc->buf + bufSize; 411 gc->isDirect = GL_FALSE; 412 if (__glXDebug) { 413 /* 414 ** Set limit register so that there will be one command per packet 415 */ 416 gc->limit = gc->buf; 417 } 418 else { 419 gc->limit = gc->buf + bufSize - __GLX_BUFFER_LIMIT_SIZE; 420 } 421 gc->majorOpcode = opcode; 422 423 /* 424 ** Constrain the maximum drawing command size allowed to be 425 ** transfered using the X_GLXRender protocol request. First 426 ** constrain by a software limit, then constrain by the protocl 427 ** limit. 428 */ 429 if (bufSize > __GLX_RENDER_CMD_SIZE_LIMIT) { 430 bufSize = __GLX_RENDER_CMD_SIZE_LIMIT; 431 } 432 if (bufSize > __GLX_MAX_RENDER_CMD_SIZE) { 433 bufSize = __GLX_MAX_RENDER_CMD_SIZE; 434 } 435 gc->maxSmallRenderCommandSize = bufSize; 436 437 438 return gc; 439} 440 441struct glx_screen_vtable indirect_screen_vtable = { 442 indirect_create_context 443}; 444 445_X_HIDDEN struct glx_screen * 446indirect_create_screen(int screen, struct glx_display * priv) 447{ 448 struct glx_screen *psc; 449 450 psc = Xmalloc(sizeof *psc); 451 if (psc == NULL) 452 return NULL; 453 454 memset(psc, 0, sizeof *psc); 455 glx_screen_init(psc, screen, priv); 456 psc->vtable = &indirect_screen_vtable; 457 458 return psc; 459} 460