radeon_ioctl.c revision 5562fe653cf88454bbf2c50f77a8b56b0dafe01b
1/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v 1.11 2003/01/29 22:04:59 dawes Exp $ */ 2/************************************************************************** 3 4Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and 5 VA Linux Systems Inc., Fremont, California. 6 7All Rights Reserved. 8 9Permission is hereby granted, free of charge, to any person obtaining 10a copy of this software and associated documentation files (the 11"Software"), to deal in the Software without restriction, including 12without limitation the rights to use, copy, modify, merge, publish, 13distribute, sublicense, and/or sell copies of the Software, and to 14permit persons to whom the Software is furnished to do so, subject to 15the following conditions: 16 17The above copyright notice and this permission notice (including the 18next paragraph) shall be included in all copies or substantial 19portions of the Software. 20 21THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 22EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 23MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 24IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 25LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 26OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 27WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 28 29**************************************************************************/ 30 31/* 32 * Authors: 33 * Kevin E. Martin <martin@valinux.com> 34 * Gareth Hughes <gareth@valinux.com> 35 * Keith Whitwell <keith@tungstengraphics.com> 36 */ 37 38#include <sched.h> 39#include <errno.h> 40 41#include "glheader.h" 42#include "imports.h" 43#include "simple_list.h" 44#include "swrast/swrast.h" 45 46#include "radeon_context.h" 47#include "radeon_state.h" 48#include "radeon_ioctl.h" 49#include "radeon_tcl.h" 50#include "radeon_sanity.h" 51 52#define STANDALONE_MMIO 53#include "radeon_macros.h" /* for INREG() */ 54 55#include "vblank.h" 56 57#define RADEON_TIMEOUT 512 58#define RADEON_IDLE_RETRY 16 59 60 61static void radeonWaitForIdle( radeonContextPtr rmesa ); 62static int radeonFlushCmdBufLocked( radeonContextPtr rmesa, 63 const char * caller ); 64 65void radeonSaveHwState( radeonContextPtr rmesa ) 66{ 67 struct radeon_state_atom *atom; 68 69 foreach(atom, &rmesa->hw.atomlist) 70 memcpy(atom->savedcmd, atom->cmd, atom->cmd_size * 4); 71} 72 73static void radeonSwapHwState( radeonContextPtr rmesa ) 74{ 75 int *temp; 76 struct radeon_state_atom *atom; 77 78 foreach(atom, &rmesa->hw.atomlist) { 79 temp = atom->cmd; 80 atom->cmd = atom->savedcmd; 81 atom->savedcmd = temp; 82 } 83} 84 85/* At this point we were in FlushCmdBufLocked but we had lost our context, so 86 * we need to unwire our current cmdbuf and hook a new one in, emit that, then 87 * wire the old cmdbuf back in so that FlushCmdBufLocked can continue and the 88 * buffer can depend on the state not being lost across lock/unlock. 89 */ 90static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa ) 91{ 92 GLuint nr_released_bufs; 93 struct radeon_store store; 94 95 rmesa->lost_context = GL_FALSE; 96 97 nr_released_bufs = rmesa->dma.nr_released_bufs; 98 store = rmesa->store; 99 rmesa->store.statenr = 0; 100 rmesa->store.primnr = 0; 101 rmesa->store.cmd_used = 0; 102 rmesa->store.elts_start = 0; 103 rmesa->hw.all_dirty = GL_TRUE; 104 radeonSwapHwState( rmesa ); 105 /* In this case it's okay to EmitState while locked because we won't exhaust 106 * our (empty) cmdbuf. 107 */ 108 radeonEmitState(rmesa); 109 radeonFlushCmdBufLocked(rmesa, __FUNCTION__); 110 111 radeonSwapHwState(rmesa); 112 /* We've just cleared out the dirty flags, so we don't remember what 113 * actually needed to be emitted for the next state emit. 114 */ 115 rmesa->hw.all_dirty = GL_TRUE; 116 rmesa->dma.nr_released_bufs = nr_released_bufs; 117 rmesa->store = store; 118} 119 120/* ============================================================= 121 * Kernel command buffer handling 122 */ 123 124static void print_state_atom( struct radeon_state_atom *state ) 125{ 126 int i; 127 128 fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size); 129 130 if (RADEON_DEBUG & DEBUG_VERBOSE) 131 for (i = 0 ; i < state->cmd_size ; i++) 132 fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]); 133 134} 135 136/* The state atoms will be emitted in the order they appear in the atom list, 137 * so this step is important. 138 */ 139void radeonSetUpAtomList( radeonContextPtr rmesa ) 140{ 141 int i, mtu = rmesa->glCtx->Const.MaxTextureUnits; 142 143 make_empty_list(&rmesa->hw.atomlist); 144 rmesa->hw.atomlist.name = "atom-list"; 145 146 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx); 147 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set); 148 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin); 149 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk); 150 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt); 151 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl); 152 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc); 153 for (i = 0; i < mtu; ++i) { 154 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]); 155 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]); 156 } 157 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs); 158 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl); 159 for (i = 0; i < 3 + mtu; ++i) 160 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]); 161 for (i = 0; i < 8; ++i) 162 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]); 163 for (i = 0; i < 6; ++i) 164 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]); 165 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye); 166 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd); 167 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog); 168 insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt); 169} 170 171void radeonEmitState( radeonContextPtr rmesa ) 172{ 173 struct radeon_state_atom *atom; 174 char *dest; 175 176 if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS)) 177 fprintf(stderr, "%s\n", __FUNCTION__); 178 179 if (!rmesa->hw.is_dirty && !rmesa->hw.all_dirty) 180 return; 181 182 /* To avoid going across the entire set of states multiple times, just check 183 * for enough space for the case of emitting all state, and inline the 184 * radeonAllocCmdBuf code here without all the checks. 185 */ 186 radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size); 187 dest = rmesa->store.cmd_buf + rmesa->store.cmd_used; 188 189 if (RADEON_DEBUG & DEBUG_STATE) { 190 foreach(atom, &rmesa->hw.atomlist) { 191 if (atom->dirty || rmesa->hw.all_dirty) { 192 if (atom->check(rmesa->glCtx)) 193 print_state_atom(atom); 194 else 195 fprintf(stderr, "skip state %s\n", atom->name); 196 } 197 } 198 } 199 200 foreach(atom, &rmesa->hw.atomlist) { 201 if (rmesa->hw.all_dirty) 202 atom->dirty = GL_TRUE; 203 if (!(rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL) && 204 atom->is_tcl) 205 atom->dirty = GL_FALSE; 206 if (atom->dirty) { 207 if (atom->check(rmesa->glCtx)) { 208 int size = atom->cmd_size * 4; 209 memcpy(dest, atom->cmd, size); 210 dest += size; 211 rmesa->store.cmd_used += size; 212 atom->dirty = GL_FALSE; 213 } 214 } 215 } 216 217 assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ); 218 219 rmesa->hw.is_dirty = GL_FALSE; 220 rmesa->hw.all_dirty = GL_FALSE; 221} 222 223/* Fire a section of the retained (indexed_verts) buffer as a regular 224 * primtive. 225 */ 226extern void radeonEmitVbufPrim( radeonContextPtr rmesa, 227 GLuint vertex_format, 228 GLuint primitive, 229 GLuint vertex_nr ) 230{ 231 drm_radeon_cmd_header_t *cmd; 232 233 234 assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND)); 235 236 radeonEmitState( rmesa ); 237 238 if (RADEON_DEBUG & DEBUG_IOCTL) 239 fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__, 240 rmesa->store.cmd_used/4); 241 242 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ, 243 __FUNCTION__ ); 244#if RADEON_OLD_PACKETS 245 cmd[0].i = 0; 246 cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP; 247 cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16); 248 cmd[2].i = rmesa->ioctl.vertex_offset; 249 cmd[3].i = vertex_nr; 250 cmd[4].i = vertex_format; 251 cmd[5].i = (primitive | 252 RADEON_CP_VC_CNTL_PRIM_WALK_LIST | 253 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA | 254 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE | 255 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT)); 256 257 if (RADEON_DEBUG & DEBUG_PRIMS) 258 fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n", 259 __FUNCTION__, 260 cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i); 261#else 262 cmd[0].i = 0; 263 cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP; 264 cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16); 265 cmd[2].i = vertex_format; 266 cmd[3].i = (primitive | 267 RADEON_CP_VC_CNTL_PRIM_WALK_LIST | 268 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA | 269 RADEON_CP_VC_CNTL_MAOS_ENABLE | 270 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE | 271 (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT)); 272 273 274 if (RADEON_DEBUG & DEBUG_PRIMS) 275 fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n", 276 __FUNCTION__, 277 cmd[1].i, cmd[2].i, cmd[3].i); 278#endif 279} 280 281 282void radeonFlushElts( radeonContextPtr rmesa ) 283{ 284 int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start); 285 int dwords; 286#if RADEON_OLD_PACKETS 287 int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2; 288#else 289 int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2; 290#endif 291 292 if (RADEON_DEBUG & DEBUG_IOCTL) 293 fprintf(stderr, "%s\n", __FUNCTION__); 294 295 assert( rmesa->dma.flush == radeonFlushElts ); 296 rmesa->dma.flush = 0; 297 298 /* Cope with odd number of elts: 299 */ 300 rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2; 301 dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4; 302 303#if RADEON_OLD_PACKETS 304 cmd[1] |= (dwords - 3) << 16; 305 cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT; 306#else 307 cmd[1] |= (dwords - 3) << 16; 308 cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT; 309#endif 310} 311 312 313GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa, 314 GLuint vertex_format, 315 GLuint primitive, 316 GLuint min_nr ) 317{ 318 drm_radeon_cmd_header_t *cmd; 319 GLushort *retval; 320 321 if (RADEON_DEBUG & DEBUG_IOCTL) 322 fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr); 323 324 assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND)); 325 326 radeonEmitState( rmesa ); 327 328 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 329 ELTS_BUFSZ(min_nr), 330 __FUNCTION__ ); 331#if RADEON_OLD_PACKETS 332 cmd[0].i = 0; 333 cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP; 334 cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM; 335 cmd[2].i = rmesa->ioctl.vertex_offset; 336 cmd[3].i = 0xffff; 337 cmd[4].i = vertex_format; 338 cmd[5].i = (primitive | 339 RADEON_CP_VC_CNTL_PRIM_WALK_IND | 340 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA | 341 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE); 342 343 retval = (GLushort *)(cmd+6); 344#else 345 cmd[0].i = 0; 346 cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP; 347 cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX; 348 cmd[2].i = vertex_format; 349 cmd[3].i = (primitive | 350 RADEON_CP_VC_CNTL_PRIM_WALK_IND | 351 RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA | 352 RADEON_CP_VC_CNTL_MAOS_ENABLE | 353 RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE); 354 355 retval = (GLushort *)(cmd+4); 356#endif 357 358 if (RADEON_DEBUG & DEBUG_PRIMS) 359 fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n", 360 __FUNCTION__, 361 cmd[1].i, vertex_format, primitive); 362 363 assert(!rmesa->dma.flush); 364 rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES; 365 rmesa->dma.flush = radeonFlushElts; 366 367 rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf; 368 369 return retval; 370} 371 372 373 374void radeonEmitVertexAOS( radeonContextPtr rmesa, 375 GLuint vertex_size, 376 GLuint offset ) 377{ 378#if RADEON_OLD_PACKETS 379 rmesa->ioctl.vertex_size = vertex_size; 380 rmesa->ioctl.vertex_offset = offset; 381#else 382 drm_radeon_cmd_header_t *cmd; 383 384 if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL)) 385 fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n", 386 __FUNCTION__, vertex_size, offset); 387 388 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ, 389 __FUNCTION__ ); 390 391 cmd[0].i = 0; 392 cmd[0].header.cmd_type = RADEON_CMD_PACKET3; 393 cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16); 394 cmd[2].i = 1; 395 cmd[3].i = vertex_size | (vertex_size << 8); 396 cmd[4].i = offset; 397#endif 398} 399 400 401void radeonEmitAOS( radeonContextPtr rmesa, 402 struct radeon_dma_region **component, 403 GLuint nr, 404 GLuint offset ) 405{ 406#if RADEON_OLD_PACKETS 407 assert( nr == 1 ); 408 assert( component[0]->aos_size == component[0]->aos_stride ); 409 rmesa->ioctl.vertex_size = component[0]->aos_size; 410 rmesa->ioctl.vertex_offset = 411 (component[0]->aos_start + offset * component[0]->aos_stride * 4); 412#else 413 drm_radeon_cmd_header_t *cmd; 414 int sz = AOS_BUFSZ(nr); 415 int i; 416 int *tmp; 417 418 if (RADEON_DEBUG & DEBUG_IOCTL) 419 fprintf(stderr, "%s\n", __FUNCTION__); 420 421 422 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz, 423 __FUNCTION__ ); 424 cmd[0].i = 0; 425 cmd[0].header.cmd_type = RADEON_CMD_PACKET3; 426 cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16); 427 cmd[2].i = nr; 428 tmp = &cmd[0].i; 429 cmd += 3; 430 431 for (i = 0 ; i < nr ; i++) { 432 if (i & 1) { 433 cmd[0].i |= ((component[i]->aos_stride << 24) | 434 (component[i]->aos_size << 16)); 435 cmd[2].i = (component[i]->aos_start + 436 offset * component[i]->aos_stride * 4); 437 cmd += 3; 438 } 439 else { 440 cmd[0].i = ((component[i]->aos_stride << 8) | 441 (component[i]->aos_size << 0)); 442 cmd[1].i = (component[i]->aos_start + 443 offset * component[i]->aos_stride * 4); 444 } 445 } 446 447 if (RADEON_DEBUG & DEBUG_VERTS) { 448 fprintf(stderr, "%s:\n", __FUNCTION__); 449 for (i = 0 ; i < sz ; i++) 450 fprintf(stderr, " %d: %x\n", i, tmp[i]); 451 } 452#endif 453} 454 455/* using already shifted color_fmt! */ 456void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */ 457 GLuint color_fmt, 458 GLuint src_pitch, 459 GLuint src_offset, 460 GLuint dst_pitch, 461 GLuint dst_offset, 462 GLint srcx, GLint srcy, 463 GLint dstx, GLint dsty, 464 GLuint w, GLuint h ) 465{ 466 drm_radeon_cmd_header_t *cmd; 467 468 if (RADEON_DEBUG & DEBUG_IOCTL) 469 fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n", 470 __FUNCTION__, 471 src_pitch, src_offset, srcx, srcy, 472 dst_pitch, dst_offset, dstx, dsty, 473 w, h); 474 475 assert( (src_pitch & 63) == 0 ); 476 assert( (dst_pitch & 63) == 0 ); 477 assert( (src_offset & 1023) == 0 ); 478 assert( (dst_offset & 1023) == 0 ); 479 assert( w < (1<<16) ); 480 assert( h < (1<<16) ); 481 482 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int), 483 __FUNCTION__ ); 484 485 486 cmd[0].i = 0; 487 cmd[0].header.cmd_type = RADEON_CMD_PACKET3; 488 cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16); 489 cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | 490 RADEON_GMC_DST_PITCH_OFFSET_CNTL | 491 RADEON_GMC_BRUSH_NONE | 492 color_fmt | 493 RADEON_GMC_SRC_DATATYPE_COLOR | 494 RADEON_ROP3_S | 495 RADEON_DP_SRC_SOURCE_MEMORY | 496 RADEON_GMC_CLR_CMP_CNTL_DIS | 497 RADEON_GMC_WR_MSK_DIS ); 498 499 cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10); 500 cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10); 501 cmd[5].i = (srcx << 16) | srcy; 502 cmd[6].i = (dstx << 16) | dsty; /* dst */ 503 cmd[7].i = (w << 16) | h; 504} 505 506 507void radeonEmitWait( radeonContextPtr rmesa, GLuint flags ) 508{ 509 if (rmesa->dri.drmMinor >= 6) { 510 drm_radeon_cmd_header_t *cmd; 511 512 assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) ); 513 514 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int), 515 __FUNCTION__ ); 516 cmd[0].i = 0; 517 cmd[0].wait.cmd_type = RADEON_CMD_WAIT; 518 cmd[0].wait.flags = flags; 519 } 520} 521 522 523static int radeonFlushCmdBufLocked( radeonContextPtr rmesa, 524 const char * caller ) 525{ 526 int ret, i; 527 drm_radeon_cmd_buffer_t cmd; 528 529 if (rmesa->lost_context) 530 radeonBackUpAndEmitLostStateLocked(rmesa); 531 532 if (RADEON_DEBUG & DEBUG_IOCTL) { 533 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller); 534 535 if (RADEON_DEBUG & DEBUG_VERBOSE) 536 for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 ) 537 fprintf(stderr, "%d: %x\n", i/4, 538 *(int *)(&rmesa->store.cmd_buf[i])); 539 } 540 541 if (RADEON_DEBUG & DEBUG_DMA) 542 fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__, 543 rmesa->dma.nr_released_bufs); 544 545 546 if (RADEON_DEBUG & DEBUG_SANITY) { 547 if (rmesa->state.scissor.enabled) 548 ret = radeonSanityCmdBuffer( rmesa, 549 rmesa->state.scissor.numClipRects, 550 rmesa->state.scissor.pClipRects); 551 else 552 ret = radeonSanityCmdBuffer( rmesa, 553 rmesa->numClipRects, 554 rmesa->pClipRects); 555 if (ret) { 556 fprintf(stderr, "drmSanityCommandWrite: %d\n", ret); 557 goto out; 558 } 559 } 560 561 562 cmd.bufsz = rmesa->store.cmd_used; 563 cmd.buf = rmesa->store.cmd_buf; 564 565 if (rmesa->state.scissor.enabled) { 566 cmd.nbox = rmesa->state.scissor.numClipRects; 567 cmd.boxes = rmesa->state.scissor.pClipRects; 568 } else { 569 cmd.nbox = rmesa->numClipRects; 570 cmd.boxes = rmesa->pClipRects; 571 } 572 573 ret = drmCommandWrite( rmesa->dri.fd, 574 DRM_RADEON_CMDBUF, 575 &cmd, sizeof(cmd) ); 576 577 if (ret) 578 fprintf(stderr, "drmCommandWrite: %d\n", ret); 579 580 out: 581 rmesa->store.primnr = 0; 582 rmesa->store.statenr = 0; 583 rmesa->store.cmd_used = 0; 584 rmesa->dma.nr_released_bufs = 0; 585 rmesa->save_on_next_unlock = 1; 586 587 return ret; 588} 589 590 591/* Note: does not emit any commands to avoid recursion on 592 * radeonAllocCmdBuf. 593 */ 594void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller ) 595{ 596 int ret; 597 598 599 LOCK_HARDWARE( rmesa ); 600 601 ret = radeonFlushCmdBufLocked( rmesa, caller ); 602 603 UNLOCK_HARDWARE( rmesa ); 604 605 if (ret) { 606 fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret); 607 exit(ret); 608 } 609} 610 611/* ============================================================= 612 * Hardware vertex buffer handling 613 */ 614 615 616void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa ) 617{ 618 struct radeon_dma_buffer *dmabuf; 619 int fd = rmesa->dri.fd; 620 int index = 0; 621 int size = 0; 622 drmDMAReq dma; 623 int ret; 624 625 if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA)) 626 fprintf(stderr, "%s\n", __FUNCTION__); 627 628 if (rmesa->dma.flush) { 629 rmesa->dma.flush( rmesa ); 630 } 631 632 if (rmesa->dma.current.buf) 633 radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ ); 634 635 if (rmesa->dma.nr_released_bufs > 4) 636 radeonFlushCmdBuf( rmesa, __FUNCTION__ ); 637 638 dma.context = rmesa->dri.hwContext; 639 dma.send_count = 0; 640 dma.send_list = NULL; 641 dma.send_sizes = NULL; 642 dma.flags = 0; 643 dma.request_count = 1; 644 dma.request_size = RADEON_BUFFER_SIZE; 645 dma.request_list = &index; 646 dma.request_sizes = &size; 647 dma.granted_count = 0; 648 649 LOCK_HARDWARE(rmesa); /* no need to validate */ 650 651 ret = drmDMA( fd, &dma ); 652 653 if (ret != 0) { 654 /* Free some up this way? 655 */ 656 if (rmesa->dma.nr_released_bufs) { 657 radeonFlushCmdBufLocked( rmesa, __FUNCTION__ ); 658 } 659 660 if (RADEON_DEBUG & DEBUG_DMA) 661 fprintf(stderr, "Waiting for buffers\n"); 662 663 radeonWaitForIdleLocked( rmesa ); 664 ret = drmDMA( fd, &dma ); 665 666 if ( ret != 0 ) { 667 UNLOCK_HARDWARE( rmesa ); 668 fprintf( stderr, "Error: Could not get dma buffer... exiting\n" ); 669 exit( -1 ); 670 } 671 } 672 673 UNLOCK_HARDWARE(rmesa); 674 675 if (RADEON_DEBUG & DEBUG_DMA) 676 fprintf(stderr, "Allocated buffer %d\n", index); 677 678 dmabuf = CALLOC_STRUCT( radeon_dma_buffer ); 679 dmabuf->buf = &rmesa->radeonScreen->buffers->list[index]; 680 dmabuf->refcount = 1; 681 682 rmesa->dma.current.buf = dmabuf; 683 rmesa->dma.current.address = dmabuf->buf->address; 684 rmesa->dma.current.end = dmabuf->buf->total; 685 rmesa->dma.current.start = 0; 686 rmesa->dma.current.ptr = 0; 687 688 rmesa->c_vertexBuffers++; 689} 690 691void radeonReleaseDmaRegion( radeonContextPtr rmesa, 692 struct radeon_dma_region *region, 693 const char *caller ) 694{ 695 if (RADEON_DEBUG & DEBUG_IOCTL) 696 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller); 697 698 if (!region->buf) 699 return; 700 701 if (rmesa->dma.flush) 702 rmesa->dma.flush( rmesa ); 703 704 if (--region->buf->refcount == 0) { 705 drm_radeon_cmd_header_t *cmd; 706 707 if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA)) 708 fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__, 709 region->buf->buf->idx); 710 711 cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd), 712 __FUNCTION__ ); 713 cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD; 714 cmd->dma.buf_idx = region->buf->buf->idx; 715 FREE(region->buf); 716 rmesa->dma.nr_released_bufs++; 717 } 718 719 region->buf = 0; 720 region->start = 0; 721} 722 723/* Allocates a region from rmesa->dma.current. If there isn't enough 724 * space in current, grab a new buffer (and discard what was left of current) 725 */ 726void radeonAllocDmaRegion( radeonContextPtr rmesa, 727 struct radeon_dma_region *region, 728 int bytes, 729 int alignment ) 730{ 731 if (RADEON_DEBUG & DEBUG_IOCTL) 732 fprintf(stderr, "%s %d\n", __FUNCTION__, bytes); 733 734 if (rmesa->dma.flush) 735 rmesa->dma.flush( rmesa ); 736 737 if (region->buf) 738 radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ ); 739 740 alignment--; 741 rmesa->dma.current.start = rmesa->dma.current.ptr = 742 (rmesa->dma.current.ptr + alignment) & ~alignment; 743 744 if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end ) 745 radeonRefillCurrentDmaRegion( rmesa ); 746 747 region->start = rmesa->dma.current.start; 748 region->ptr = rmesa->dma.current.start; 749 region->end = rmesa->dma.current.start + bytes; 750 region->address = rmesa->dma.current.address; 751 region->buf = rmesa->dma.current.buf; 752 region->buf->refcount++; 753 754 rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */ 755 rmesa->dma.current.start = 756 rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7; 757} 758 759void radeonAllocDmaRegionVerts( radeonContextPtr rmesa, 760 struct radeon_dma_region *region, 761 int numverts, 762 int vertsize, 763 int alignment ) 764{ 765 radeonAllocDmaRegion( rmesa, region, vertsize * numverts, alignment ); 766} 767 768/* ================================================================ 769 * SwapBuffers with client-side throttling 770 */ 771 772static uint32_t radeonGetLastFrame (radeonContextPtr rmesa) 773{ 774 unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map; 775 int ret; 776 uint32_t frame; 777 778 if (rmesa->dri.screen->drmMinor >= 4) { 779 drm_radeon_getparam_t gp; 780 781 gp.param = RADEON_PARAM_LAST_FRAME; 782 gp.value = (int *)&frame; 783 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM, 784 &gp, sizeof(gp) ); 785 } 786 else 787 ret = -EINVAL; 788 789 if ( ret == -EINVAL ) { 790 frame = INREG( RADEON_LAST_FRAME_REG ); 791 ret = 0; 792 } 793 if ( ret ) { 794 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret ); 795 exit(1); 796 } 797 798 return frame; 799} 800 801static void radeonEmitIrqLocked( radeonContextPtr rmesa ) 802{ 803 drm_radeon_irq_emit_t ie; 804 int ret; 805 806 ie.irq_seq = &rmesa->iw.irq_seq; 807 ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT, 808 &ie, sizeof(ie) ); 809 if ( ret ) { 810 fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret ); 811 exit(1); 812 } 813} 814 815 816static void radeonWaitIrq( radeonContextPtr rmesa ) 817{ 818 int ret; 819 820 do { 821 ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT, 822 &rmesa->iw, sizeof(rmesa->iw) ); 823 } while (ret && (errno == EINTR || errno == EAGAIN)); 824 825 if ( ret ) { 826 fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret ); 827 exit(1); 828 } 829} 830 831 832static void radeonWaitForFrameCompletion( radeonContextPtr rmesa ) 833{ 834 drm_radeon_sarea_t *sarea = rmesa->sarea; 835 836 if (rmesa->do_irqs) { 837 if (radeonGetLastFrame(rmesa) < sarea->last_frame) { 838 if (!rmesa->irqsEmitted) { 839 while (radeonGetLastFrame (rmesa) < sarea->last_frame) 840 ; 841 } 842 else { 843 UNLOCK_HARDWARE( rmesa ); 844 radeonWaitIrq( rmesa ); 845 LOCK_HARDWARE( rmesa ); 846 } 847 rmesa->irqsEmitted = 10; 848 } 849 850 if (rmesa->irqsEmitted) { 851 radeonEmitIrqLocked( rmesa ); 852 rmesa->irqsEmitted--; 853 } 854 } 855 else { 856 while (radeonGetLastFrame (rmesa) < sarea->last_frame) { 857 UNLOCK_HARDWARE( rmesa ); 858 if (rmesa->do_usleeps) 859 DO_USLEEP( 1 ); 860 LOCK_HARDWARE( rmesa ); 861 } 862 } 863} 864 865/* Copy the back color buffer to the front color buffer. 866 */ 867void radeonCopyBuffer( const __DRIdrawablePrivate *dPriv ) 868{ 869 radeonContextPtr rmesa; 870 GLint nbox, i, ret; 871 GLboolean missed_target; 872 int64_t ust; 873 874 assert(dPriv); 875 assert(dPriv->driContextPriv); 876 assert(dPriv->driContextPriv->driverPrivate); 877 878 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate; 879 880 if ( RADEON_DEBUG & DEBUG_IOCTL ) { 881 fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx ); 882 } 883 884 RADEON_FIREVERTICES( rmesa ); 885 LOCK_HARDWARE( rmesa ); 886 887 /* Throttle the frame rate -- only allow one pending swap buffers 888 * request at a time. 889 */ 890 radeonWaitForFrameCompletion( rmesa ); 891 UNLOCK_HARDWARE( rmesa ); 892 driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target ); 893 LOCK_HARDWARE( rmesa ); 894 895 nbox = dPriv->numClipRects; /* must be in locked region */ 896 897 for ( i = 0 ; i < nbox ; ) { 898 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox ); 899 drm_clip_rect_t *box = dPriv->pClipRects; 900 drm_clip_rect_t *b = rmesa->sarea->boxes; 901 GLint n = 0; 902 903 for ( ; i < nr ; i++ ) { 904 *b++ = box[i]; 905 n++; 906 } 907 rmesa->sarea->nbox = n; 908 909 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP ); 910 911 if ( ret ) { 912 fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret ); 913 UNLOCK_HARDWARE( rmesa ); 914 exit( 1 ); 915 } 916 } 917 918 UNLOCK_HARDWARE( rmesa ); 919 rmesa->swap_count++; 920 (*rmesa->get_ust)( & ust ); 921 if ( missed_target ) { 922 rmesa->swap_missed_count++; 923 rmesa->swap_missed_ust = ust - rmesa->swap_ust; 924 } 925 926 rmesa->swap_ust = ust; 927 rmesa->hw.all_dirty = GL_TRUE; 928} 929 930void radeonPageFlip( const __DRIdrawablePrivate *dPriv ) 931{ 932 radeonContextPtr rmesa; 933 GLint ret; 934 GLboolean missed_target; 935 936 assert(dPriv); 937 assert(dPriv->driContextPriv); 938 assert(dPriv->driContextPriv->driverPrivate); 939 940 rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate; 941 942 if ( RADEON_DEBUG & DEBUG_IOCTL ) { 943 fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__, 944 rmesa->sarea->pfCurrentPage); 945 } 946 947 RADEON_FIREVERTICES( rmesa ); 948 LOCK_HARDWARE( rmesa ); 949 950 /* Need to do this for the perf box placement: 951 */ 952 if (dPriv->numClipRects) 953 { 954 drm_clip_rect_t *box = dPriv->pClipRects; 955 drm_clip_rect_t *b = rmesa->sarea->boxes; 956 b[0] = box[0]; 957 rmesa->sarea->nbox = 1; 958 } 959 960 /* Throttle the frame rate -- only allow a few pending swap buffers 961 * request at a time. 962 */ 963 radeonWaitForFrameCompletion( rmesa ); 964 UNLOCK_HARDWARE( rmesa ); 965 driWaitForVBlank( dPriv, & rmesa->vbl_seq, rmesa->vblank_flags, & missed_target ); 966 if ( missed_target ) { 967 rmesa->swap_missed_count++; 968 (void) (*rmesa->get_ust)( & rmesa->swap_missed_ust ); 969 } 970 LOCK_HARDWARE( rmesa ); 971 972 ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP ); 973 974 UNLOCK_HARDWARE( rmesa ); 975 976 if ( ret ) { 977 fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret ); 978 exit( 1 ); 979 } 980 981 rmesa->swap_count++; 982 (void) (*rmesa->get_ust)( & rmesa->swap_ust ); 983 984 if ( rmesa->sarea->pfCurrentPage == 1 ) { 985 rmesa->state.color.drawOffset = rmesa->radeonScreen->frontOffset; 986 rmesa->state.color.drawPitch = rmesa->radeonScreen->frontPitch; 987 } else { 988 rmesa->state.color.drawOffset = rmesa->radeonScreen->backOffset; 989 rmesa->state.color.drawPitch = rmesa->radeonScreen->backPitch; 990 } 991 992 RADEON_STATECHANGE( rmesa, ctx ); 993 rmesa->hw.ctx.cmd[CTX_RB3D_COLOROFFSET] = rmesa->state.color.drawOffset 994 + rmesa->radeonScreen->fbLocation; 995 rmesa->hw.ctx.cmd[CTX_RB3D_COLORPITCH] = rmesa->state.color.drawPitch; 996} 997 998 999/* ================================================================ 1000 * Buffer clear 1001 */ 1002#define RADEON_MAX_CLEARS 256 1003 1004static void radeonClear( GLcontext *ctx, GLbitfield mask, GLboolean all, 1005 GLint cx, GLint cy, GLint cw, GLint ch ) 1006{ 1007 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 1008 __DRIdrawablePrivate *dPriv = rmesa->dri.drawable; 1009 drm_radeon_sarea_t *sarea = rmesa->sarea; 1010 unsigned char *RADEONMMIO = rmesa->radeonScreen->mmio.map; 1011 uint32_t clear; 1012 GLuint flags = 0; 1013 GLuint color_mask = 0; 1014 GLint ret, i; 1015 1016 if ( RADEON_DEBUG & DEBUG_IOCTL ) { 1017 fprintf( stderr, "%s: all=%d cx=%d cy=%d cw=%d ch=%d\n", 1018 __FUNCTION__, all, cx, cy, cw, ch ); 1019 } 1020 1021 RADEON_FIREVERTICES( rmesa ); 1022 1023 if ( mask & DD_FRONT_LEFT_BIT ) { 1024 flags |= RADEON_FRONT; 1025 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK]; 1026 mask &= ~DD_FRONT_LEFT_BIT; 1027 } 1028 1029 if ( mask & DD_BACK_LEFT_BIT ) { 1030 flags |= RADEON_BACK; 1031 color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK]; 1032 mask &= ~DD_BACK_LEFT_BIT; 1033 } 1034 1035 if ( mask & DD_DEPTH_BIT ) { 1036 if ( ctx->Depth.Mask ) flags |= RADEON_DEPTH; /* FIXME: ??? */ 1037 mask &= ~DD_DEPTH_BIT; 1038 } 1039 1040 if ( (mask & DD_STENCIL_BIT) && rmesa->state.stencil.hwBuffer ) { 1041 flags |= RADEON_STENCIL; 1042 mask &= ~DD_STENCIL_BIT; 1043 } 1044 1045 if ( mask ) { 1046 if (RADEON_DEBUG & DEBUG_FALLBACKS) 1047 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask); 1048 _swrast_Clear( ctx, mask, all, cx, cy, cw, ch ); 1049 } 1050 1051 if ( !flags ) 1052 return; 1053 1054 1055 /* Flip top to bottom */ 1056 cx += dPriv->x; 1057 cy = dPriv->y + dPriv->h - cy - ch; 1058 1059 LOCK_HARDWARE( rmesa ); 1060 1061 /* Throttle the number of clear ioctls we do. 1062 */ 1063 while ( 1 ) { 1064 int ret; 1065 1066 if (rmesa->dri.screen->drmMinor >= 4) { 1067 drm_radeon_getparam_t gp; 1068 1069 gp.param = RADEON_PARAM_LAST_CLEAR; 1070 gp.value = (int *)&clear; 1071 ret = drmCommandWriteRead( rmesa->dri.fd, 1072 DRM_RADEON_GETPARAM, &gp, sizeof(gp) ); 1073 } else 1074 ret = -EINVAL; 1075 1076 if ( ret == -EINVAL ) { 1077 clear = INREG( RADEON_LAST_CLEAR_REG ); 1078 ret = 0; 1079 } 1080 if ( ret ) { 1081 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret ); 1082 exit(1); 1083 } 1084 if ( RADEON_DEBUG & DEBUG_IOCTL ) { 1085 fprintf( stderr, "%s( %d )\n", __FUNCTION__, (int)clear ); 1086 if ( ret ) fprintf( stderr, " ( RADEON_LAST_CLEAR register read directly )\n" ); 1087 } 1088 1089 if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) { 1090 break; 1091 } 1092 1093 if ( rmesa->do_usleeps ) { 1094 UNLOCK_HARDWARE( rmesa ); 1095 DO_USLEEP( 1 ); 1096 LOCK_HARDWARE( rmesa ); 1097 } 1098 } 1099 1100 /* Send current state to the hardware */ 1101 radeonFlushCmdBufLocked( rmesa, __FUNCTION__ ); 1102 1103 for ( i = 0 ; i < dPriv->numClipRects ; ) { 1104 GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects ); 1105 drm_clip_rect_t *box = dPriv->pClipRects; 1106 drm_clip_rect_t *b = rmesa->sarea->boxes; 1107 drm_radeon_clear_t clear; 1108 drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS]; 1109 GLint n = 0; 1110 1111 if ( !all ) { 1112 for ( ; i < nr ; i++ ) { 1113 GLint x = box[i].x1; 1114 GLint y = box[i].y1; 1115 GLint w = box[i].x2 - x; 1116 GLint h = box[i].y2 - y; 1117 1118 if ( x < cx ) w -= cx - x, x = cx; 1119 if ( y < cy ) h -= cy - y, y = cy; 1120 if ( x + w > cx + cw ) w = cx + cw - x; 1121 if ( y + h > cy + ch ) h = cy + ch - y; 1122 if ( w <= 0 ) continue; 1123 if ( h <= 0 ) continue; 1124 1125 b->x1 = x; 1126 b->y1 = y; 1127 b->x2 = x + w; 1128 b->y2 = y + h; 1129 b++; 1130 n++; 1131 } 1132 } else { 1133 for ( ; i < nr ; i++ ) { 1134 *b++ = box[i]; 1135 n++; 1136 } 1137 } 1138 1139 rmesa->sarea->nbox = n; 1140 1141 clear.flags = flags; 1142 clear.clear_color = rmesa->state.color.clear; 1143 clear.clear_depth = rmesa->state.depth.clear; 1144 clear.color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK]; 1145 clear.depth_mask = rmesa->state.stencil.clear; 1146 clear.depth_boxes = depth_boxes; 1147 1148 n--; 1149 b = rmesa->sarea->boxes; 1150 for ( ; n >= 0 ; n-- ) { 1151 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1; 1152 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1; 1153 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2; 1154 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2; 1155 depth_boxes[n].f[CLEAR_DEPTH] = 1156 (float)rmesa->state.depth.clear; 1157 } 1158 1159 ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR, 1160 &clear, sizeof(drm_radeon_clear_t)); 1161 1162 if ( ret ) { 1163 UNLOCK_HARDWARE( rmesa ); 1164 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret ); 1165 exit( 1 ); 1166 } 1167 } 1168 1169 UNLOCK_HARDWARE( rmesa ); 1170 rmesa->hw.all_dirty = GL_TRUE; 1171} 1172 1173 1174void radeonWaitForIdleLocked( radeonContextPtr rmesa ) 1175{ 1176 int fd = rmesa->dri.fd; 1177 int to = 0; 1178 int ret, i = 0; 1179 1180 rmesa->c_drawWaits++; 1181 1182 do { 1183 do { 1184 ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE); 1185 } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY ); 1186 } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) ); 1187 1188 if ( ret < 0 ) { 1189 UNLOCK_HARDWARE( rmesa ); 1190 fprintf( stderr, "Error: Radeon timed out... exiting\n" ); 1191 exit( -1 ); 1192 } 1193} 1194 1195 1196static void radeonWaitForIdle( radeonContextPtr rmesa ) 1197{ 1198 LOCK_HARDWARE(rmesa); 1199 radeonWaitForIdleLocked( rmesa ); 1200 UNLOCK_HARDWARE(rmesa); 1201} 1202 1203 1204void radeonFlush( GLcontext *ctx ) 1205{ 1206 radeonContextPtr rmesa = RADEON_CONTEXT( ctx ); 1207 1208 if (RADEON_DEBUG & DEBUG_IOCTL) 1209 fprintf(stderr, "%s\n", __FUNCTION__); 1210 1211 if (rmesa->dma.flush) 1212 rmesa->dma.flush( rmesa ); 1213 1214 radeonEmitState( rmesa ); 1215 1216 if (rmesa->store.cmd_used) 1217 radeonFlushCmdBuf( rmesa, __FUNCTION__ ); 1218} 1219 1220/* Make sure all commands have been sent to the hardware and have 1221 * completed processing. 1222 */ 1223void radeonFinish( GLcontext *ctx ) 1224{ 1225 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 1226 radeonFlush( ctx ); 1227 1228 if (rmesa->do_irqs) { 1229 LOCK_HARDWARE( rmesa ); 1230 radeonEmitIrqLocked( rmesa ); 1231 UNLOCK_HARDWARE( rmesa ); 1232 radeonWaitIrq( rmesa ); 1233 } 1234 else 1235 radeonWaitForIdle( rmesa ); 1236} 1237 1238 1239void radeonInitIoctlFuncs( GLcontext *ctx ) 1240{ 1241 ctx->Driver.Clear = radeonClear; 1242 ctx->Driver.Finish = radeonFinish; 1243 ctx->Driver.Flush = radeonFlush; 1244} 1245 1246