radeon_ioctl.c revision 3177b4e2cf7d2fff7428cb6057bebbe60ff5cc6c
/* $XFree86: xc/lib/GL/mesa/src/drv/radeon/radeon_ioctl.c,v 1.11 2003/01/29 22:04:59 dawes Exp $ */
/**************************************************************************

Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include <sched.h>
#include <errno.h>

#include "glheader.h"
#include "imports.h"
#include "simple_list.h"
#include "swrast/swrast.h"

#include "radeon_context.h"
#include "radeon_state.h"
#include "radeon_ioctl.h"
#include "radeon_tcl.h"
#include "radeon_sanity.h"

#define STANDALONE_MMIO
#include "radeon_macros.h"  /* for INREG() */

#include "drirenderbuffer.h"
#include "vblank.h"

#define RADEON_TIMEOUT             512
#define RADEON_IDLE_RETRY           16


static void radeonWaitForIdle( radeonContextPtr rmesa );
static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
				    const char * caller );

static void print_state_atom( struct radeon_state_atom *state )
{
   int i;

   fprintf(stderr, "emit %s/%d\n", state->name, state->cmd_size);

   if (RADEON_DEBUG & DEBUG_VERBOSE)
      for (i = 0 ; i < state->cmd_size ; i++)
	 fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
}

static void radeonSaveHwState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char * dest = rmesa->backup_store.cmd_buf;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "%s\n", __FUNCTION__);

   rmesa->backup_store.cmd_used = 0;

   foreach( atom, &rmesa->hw.atomlist ) {
      if ( atom->check( rmesa->glCtx ) ) {
	 int size = atom->cmd_size * 4;
	 memcpy( dest, atom->cmd, size);
	 dest += size;
	 rmesa->backup_store.cmd_used += size;
	 if (RADEON_DEBUG & DEBUG_STATE)
	    print_state_atom( atom );
      }
   }

   assert( rmesa->backup_store.cmd_used <= RADEON_CMD_BUF_SZ );
   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Returning to radeonEmitState\n");
}

/* At this point we were in FlushCmdBufLocked but we had lost our context, so
 * we need to unwire our current cmdbuf, hook the one with the saved state in
 * it, flush it, and then put the current one back.  This is so commands at the
 * start of a cmdbuf can rely on the state being kept from the previous one.
 */
static void radeonBackUpAndEmitLostStateLocked( radeonContextPtr rmesa )
{
   GLuint nr_released_bufs;
   struct radeon_store saved_store;

   if (rmesa->backup_store.cmd_used == 0)
      return;

   if (RADEON_DEBUG & DEBUG_STATE)
      fprintf(stderr, "Emitting backup state on lost context\n");

   rmesa->lost_context = GL_FALSE;

   nr_released_bufs = rmesa->dma.nr_released_bufs;
   saved_store = rmesa->store;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->store = rmesa->backup_store;
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
   rmesa->dma.nr_released_bufs = nr_released_bufs;
   rmesa->store = saved_store;
}

/* =============================================================
 * Kernel command buffer handling
 */

/* The state atoms will be emitted in the order they appear in the atom list,
 * so this step is important.
 */
void radeonSetUpAtomList( radeonContextPtr rmesa )
{
   int i, mtu = rmesa->glCtx->Const.MaxTextureUnits;

   make_empty_list(&rmesa->hw.atomlist);
   rmesa->hw.atomlist.name = "atom-list";

   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ctx);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.set);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lin);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msk);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.vpt);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tcl);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.msc);
   for (i = 0; i < mtu; ++i) {
       insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.tex[i]);
       insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.txr[i]);
       insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.cube[i]);
   }
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.zbs);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mtl);
   for (i = 0; i < 3 + mtu; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.mat[i]);
   for (i = 0; i < 8; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.lit[i]);
   for (i = 0; i < 6; ++i)
      insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.ucp[i]);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.eye);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.grd);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.fog);
   insert_at_tail(&rmesa->hw.atomlist, &rmesa->hw.glt);
}

void radeonEmitState( radeonContextPtr rmesa )
{
   struct radeon_state_atom *atom;
   char *dest;

   if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->save_on_next_emit) {
      radeonSaveHwState(rmesa);
      rmesa->save_on_next_emit = GL_FALSE;
   }

   /* This function used to return early here when no state was dirty;
    * it now always runs so that the zbs atom below gets emitted.
    */

   /* To avoid going across the entire set of states multiple times, just check
    * for enough space for the case of emitting all state, and inline the
    * radeonAllocCmdBuf code here without all the checks.
    */
   radeonEnsureCmdBufSpace(rmesa, rmesa->hw.max_state_size);
   dest = rmesa->store.cmd_buf + rmesa->store.cmd_used;

   /* We always emit zbs.  This works around a hardware bug found by keithw
    * and rediscovered by airlied after Eric's changes.  If you ever touch
    * this code, make sure you still emit zbs, otherwise you get TCL lockups
    * on at least the M7/7500 class of chips.
    */
   rmesa->hw.zbs.dirty = 1;

   if (RADEON_DEBUG & DEBUG_STATE) {
      foreach(atom, &rmesa->hw.atomlist) {
	 if (atom->dirty || rmesa->hw.all_dirty) {
	    if (atom->check(rmesa->glCtx))
	       print_state_atom(atom);
	    else
	       fprintf(stderr, "skip state %s\n", atom->name);
	 }
      }
   }

   foreach(atom, &rmesa->hw.atomlist) {
      if (rmesa->hw.all_dirty)
	 atom->dirty = GL_TRUE;
      if (!(rmesa->radeonScreen->chip_flags & RADEON_CHIPSET_TCL) &&
	   atom->is_tcl)
	 atom->dirty = GL_FALSE;
      if (atom->dirty) {
	 if (atom->check(rmesa->glCtx)) {
	    int size = atom->cmd_size * 4;
	    memcpy(dest, atom->cmd, size);
	    dest += size;
	    rmesa->store.cmd_used += size;
	    atom->dirty = GL_FALSE;
	 }
      }
   }

   assert(rmesa->store.cmd_used <= RADEON_CMD_BUF_SZ);

   rmesa->hw.is_dirty = GL_FALSE;
   rmesa->hw.all_dirty = GL_FALSE;
}

/* Fire a section of the retained (indexed_verts) buffer as a regular
 * primitive.
 */
void radeonEmitVbufPrim( radeonContextPtr rmesa,
			 GLuint vertex_format,
			 GLuint primitive,
			 GLuint vertex_nr )
{
   drm_radeon_cmd_header_t *cmd;

   assert(!(primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s cmd_used/4: %d\n", __FUNCTION__,
	      rmesa->store.cmd_used/4);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VBUF_BUFSZ,
						       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM | (3 << 16);
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = vertex_nr;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
	       RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	       RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	       RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	       (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x offt 0x%x vfmt 0x%x vfcntl %x \n",
	      __FUNCTION__,
	      cmd[1].i, cmd[2].i, cmd[4].i, cmd[5].i);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_VBUF | (1 << 16);
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
	       RADEON_CP_VC_CNTL_PRIM_WALK_LIST |
	       RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	       RADEON_CP_VC_CNTL_MAOS_ENABLE |
	       RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE |
	       (vertex_nr << RADEON_CP_VC_CNTL_NUM_SHIFT));

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x vfcntl %x \n",
	      __FUNCTION__,
	      cmd[1].i, cmd[2].i, cmd[3].i);
#endif
}


void radeonFlushElts( radeonContextPtr rmesa )
{
   int *cmd = (int *)(rmesa->store.cmd_buf + rmesa->store.elts_start);
   int dwords;
#if RADEON_OLD_PACKETS
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 24)) / 2;
#else
   int nr = (rmesa->store.cmd_used - (rmesa->store.elts_start + 16)) / 2;
#endif

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert( rmesa->dma.flush == radeonFlushElts );
   rmesa->dma.flush = NULL;

309    */
310   rmesa->store.cmd_used = (rmesa->store.cmd_used + 2) & ~2;
311   dwords = (rmesa->store.cmd_used - rmesa->store.elts_start) / 4;
312

#if RADEON_OLD_PACKETS
   cmd[1] |= (dwords - 3) << 16;
   cmd[5] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#else
   cmd[1] |= (dwords - 3) << 16;
   cmd[3] |= nr << RADEON_CP_VC_CNTL_NUM_SHIFT;
#endif

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->glCtx );
   }
}

GLushort *radeonAllocEltsOpenEnded( radeonContextPtr rmesa,
				    GLuint vertex_format,
				    GLuint primitive,
				    GLuint min_nr )
{
   drm_radeon_cmd_header_t *cmd;
   GLushort *retval;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, min_nr);

   assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));

   radeonEmitState( rmesa );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa,
						       ELTS_BUFSZ(min_nr),
						       __FUNCTION__ );
#if RADEON_OLD_PACKETS
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM;
   cmd[2].i = rmesa->ioctl.vertex_offset;
   cmd[3].i = 0xffff;
   cmd[4].i = vertex_format;
   cmd[5].i = (primitive |
	       RADEON_CP_VC_CNTL_PRIM_WALK_IND |
	       RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	       RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+6);
#else
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3_CLIP;
   cmd[1].i = RADEON_CP_PACKET3_3D_DRAW_INDX;
   cmd[2].i = vertex_format;
   cmd[3].i = (primitive |
	       RADEON_CP_VC_CNTL_PRIM_WALK_IND |
	       RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
	       RADEON_CP_VC_CNTL_MAOS_ENABLE |
	       RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE);

   retval = (GLushort *)(cmd+4);
#endif

   if (RADEON_DEBUG & DEBUG_PRIMS)
      fprintf(stderr, "%s: header 0x%x vfmt 0x%x prim %x \n",
	      __FUNCTION__,
	      cmd[1].i, vertex_format, primitive);

   assert(!rmesa->dma.flush);
   rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->dma.flush = radeonFlushElts;

   rmesa->store.elts_start = ((char *)cmd) - rmesa->store.cmd_buf;

   return retval;
}
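
/* A minimal usage sketch (hypothetical caller; the real ones live in
 * radeon_tcl.c).  radeonAllocCmdBuf() has already advanced store.cmd_used
 * to cover min_nr indices, and radeonFlushElts() recomputes the true
 * index count from store.cmd_used at flush time, so appending further
 * indices just means growing store.cmd_used before the flush fires:
 *
 *    GLushort *elt = radeonAllocEltsOpenEnded( rmesa, vfmt,
 *                       prim | RADEON_CP_VC_CNTL_PRIM_WALK_IND, 3 );
 *    elt[0] = 0; elt[1] = 1; elt[2] = 2;
 *    ...
 *    if (rmesa->dma.flush)
 *       rmesa->dma.flush( rmesa );   -- fires radeonFlushElts
 *
 * (vfmt and prim are placeholders for the caller's vertex format and
 * hardware primitive.)
 */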


void radeonEmitVertexAOS( radeonContextPtr rmesa,
			  GLuint vertex_size,
			  GLuint offset )
{
#if RADEON_OLD_PACKETS
   rmesa->ioctl.vertex_size = vertex_size;
   rmesa->ioctl.vertex_offset = offset;
#else
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
      fprintf(stderr, "%s:  vertex_size 0x%x offset 0x%x \n",
	      __FUNCTION__, vertex_size, offset);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, VERT_AOS_BUFSZ,
						  __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (2 << 16);
   cmd[2].i = 1;
   cmd[3].i = vertex_size | (vertex_size << 8);
   cmd[4].i = offset;
#endif
}


void radeonEmitAOS( radeonContextPtr rmesa,
		    struct radeon_dma_region **component,
		    GLuint nr,
		    GLuint offset )
{
#if RADEON_OLD_PACKETS
   assert( nr == 1 );
   assert( component[0]->aos_size == component[0]->aos_stride );
   rmesa->ioctl.vertex_size = component[0]->aos_size;
   rmesa->ioctl.vertex_offset =
      (component[0]->aos_start + offset * component[0]->aos_stride * 4);
#else
   drm_radeon_cmd_header_t *cmd;
   int sz = AOS_BUFSZ(nr);
   int i;
   int *tmp;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sz,
						  __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_3D_LOAD_VBPNTR | (((sz / sizeof(int))-3) << 16);
   cmd[2].i = nr;
   tmp = &cmd[0].i;
   cmd += 3;

   for (i = 0 ; i < nr ; i++) {
      if (i & 1) {
	 cmd[0].i |= ((component[i]->aos_stride << 24) |
		      (component[i]->aos_size << 16));
	 cmd[2].i = (component[i]->aos_start +
		     offset * component[i]->aos_stride * 4);
	 cmd += 3;
      }
      else {
	 cmd[0].i = ((component[i]->aos_stride << 8) |
		     (component[i]->aos_size << 0));
	 cmd[1].i = (component[i]->aos_start +
		     offset * component[i]->aos_stride * 4);
      }
   }

   if (RADEON_DEBUG & DEBUG_VERTS) {
      fprintf(stderr, "%s:\n", __FUNCTION__);
      for (i = 0 ; i < sz / sizeof(int) ; i++)	/* sz counts bytes, tmp[] ints */
	 fprintf(stderr, "   %d: %x\n", i, tmp[i]);
   }
#endif
}

/* Note: color_fmt must already be shifted into its GMC dst-datatype bit
 * position by the caller.
 */
void radeonEmitBlit( radeonContextPtr rmesa, /* FIXME: which drmMinor is required? */
		   GLuint color_fmt,
		   GLuint src_pitch,
		   GLuint src_offset,
		   GLuint dst_pitch,
		   GLuint dst_offset,
		   GLint srcx, GLint srcy,
		   GLint dstx, GLint dsty,
		   GLuint w, GLuint h )
{
   drm_radeon_cmd_header_t *cmd;

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
	      __FUNCTION__,
	      src_pitch, src_offset, srcx, srcy,
	      dst_pitch, dst_offset, dstx, dsty,
	      w, h);

   assert( (src_pitch & 63) == 0 );
   assert( (dst_pitch & 63) == 0 );
   assert( (src_offset & 1023) == 0 );
   assert( (dst_offset & 1023) == 0 );
   assert( w < (1<<16) );
   assert( h < (1<<16) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 8 * sizeof(int),
						  __FUNCTION__ );

   cmd[0].i = 0;
   cmd[0].header.cmd_type = RADEON_CMD_PACKET3;
   cmd[1].i = RADEON_CP_PACKET3_CNTL_BITBLT_MULTI | (5 << 16);
   cmd[2].i = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
	       RADEON_GMC_DST_PITCH_OFFSET_CNTL |
	       RADEON_GMC_BRUSH_NONE |
	       color_fmt |
	       RADEON_GMC_SRC_DATATYPE_COLOR |
	       RADEON_ROP3_S |
	       RADEON_DP_SRC_SOURCE_MEMORY |
	       RADEON_GMC_CLR_CMP_CNTL_DIS |
	       RADEON_GMC_WR_MSK_DIS );

   cmd[3].i = ((src_pitch/64)<<22) | (src_offset >> 10);
   cmd[4].i = ((dst_pitch/64)<<22) | (dst_offset >> 10);
   cmd[5].i = (srcx << 16) | srcy;
   cmd[6].i = (dstx << 16) | dsty; /* dst */
   cmd[7].i = (w << 16) | h;
}
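
/* Example (a sketch, not from the driver): queue a copy between two
 * surfaces that satisfy the asserts above, i.e. pitches that are
 * multiples of 64, offsets that are multiples of 1024, and w/h below
 * 1<<16.  color_fmt is whatever pre-shifted GMC dst datatype the
 * surfaces use:
 *
 *    radeonEmitBlit( rmesa, color_fmt,
 *                    pitch, back_offset,
 *                    pitch, front_offset,
 *                    x, y, x, y, w, h );
 */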


void radeonEmitWait( radeonContextPtr rmesa, GLuint flags )
{
   drm_radeon_cmd_header_t *cmd;

   assert( !(flags & ~(RADEON_WAIT_2D|RADEON_WAIT_3D)) );

   cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, 1 * sizeof(int),
					   __FUNCTION__ );
   cmd[0].i = 0;
   cmd[0].wait.cmd_type = RADEON_CMD_WAIT;
   cmd[0].wait.flags = flags;
}


static int radeonFlushCmdBufLocked( radeonContextPtr rmesa,
				    const char * caller )
{
   int ret, i;
   drm_radeon_cmd_buffer_t cmd;

   if (rmesa->lost_context)
      radeonBackUpAndEmitLostStateLocked(rmesa);

   if (RADEON_DEBUG & DEBUG_IOCTL) {
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

      if (RADEON_DEBUG & DEBUG_VERBOSE)
	 for (i = 0 ; i < rmesa->store.cmd_used ; i += 4 )
	    fprintf(stderr, "%d: %x\n", i/4,
		    *(int *)(&rmesa->store.cmd_buf[i]));
   }

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "%s: Releasing %d buffers\n", __FUNCTION__,
	      rmesa->dma.nr_released_bufs);

   if (RADEON_DEBUG & DEBUG_SANITY) {
      if (rmesa->state.scissor.enabled)
	 ret = radeonSanityCmdBuffer( rmesa,
				      rmesa->state.scissor.numClipRects,
				      rmesa->state.scissor.pClipRects);
      else
	 ret = radeonSanityCmdBuffer( rmesa,
				      rmesa->numClipRects,
				      rmesa->pClipRects);
      if (ret) {
	 fprintf(stderr, "radeonSanityCmdBuffer: %d\n", ret);
	 goto out;
      }
   }

   cmd.bufsz = rmesa->store.cmd_used;
   cmd.buf = rmesa->store.cmd_buf;

   if (rmesa->state.scissor.enabled) {
      cmd.nbox = rmesa->state.scissor.numClipRects;
      cmd.boxes = rmesa->state.scissor.pClipRects;
   } else {
      cmd.nbox = rmesa->numClipRects;
      cmd.boxes = rmesa->pClipRects;
   }

   ret = drmCommandWrite( rmesa->dri.fd,
			  DRM_RADEON_CMDBUF,
			  &cmd, sizeof(cmd) );

   if (ret)
      fprintf(stderr, "drmCommandWrite: %d\n", ret);

   if (RADEON_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "\nSyncing in %s\n\n", __FUNCTION__);
      radeonWaitForIdleLocked( rmesa );
   }

 out:
   rmesa->store.primnr = 0;
   rmesa->store.statenr = 0;
   rmesa->store.cmd_used = 0;
   rmesa->dma.nr_released_bufs = 0;
   rmesa->save_on_next_emit = 1;

   return ret;
}


/* Note: does not emit any commands to avoid recursion on
 * radeonAllocCmdBuf.
 */
void radeonFlushCmdBuf( radeonContextPtr rmesa, const char *caller )
{
   int ret;

   LOCK_HARDWARE( rmesa );

   ret = radeonFlushCmdBufLocked( rmesa, caller );

   UNLOCK_HARDWARE( rmesa );

   if (ret) {
      fprintf(stderr, "drm_radeon_cmd_buffer_t: %d (exiting)\n", ret);
      exit(ret);
   }
}

/* =============================================================
 * Hardware vertex buffer handling
 */

void radeonRefillCurrentDmaRegion( radeonContextPtr rmesa )
{
   struct radeon_dma_buffer *dmabuf;
   int fd = rmesa->dri.fd;
   int index = 0;
   int size = 0;
   drmDMAReq dma;
   int ret;

   if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush) {
      rmesa->dma.flush( rmesa );
   }

   if (rmesa->dma.current.buf)
      radeonReleaseDmaRegion( rmesa, &rmesa->dma.current, __FUNCTION__ );

   if (rmesa->dma.nr_released_bufs > 4)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );

   dma.context = rmesa->dri.hwContext;
   dma.send_count = 0;
   dma.send_list = NULL;
   dma.send_sizes = NULL;
   dma.flags = 0;
   dma.request_count = 1;
   dma.request_size = RADEON_BUFFER_SIZE;
   dma.request_list = &index;
   dma.request_sizes = &size;
   dma.granted_count = 0;

   LOCK_HARDWARE(rmesa);	/* no need to validate */

   ret = drmDMA( fd, &dma );

   if (ret != 0) {
      /* Free some up this way?
       */
      if (rmesa->dma.nr_released_bufs) {
	 radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );
      }

      if (RADEON_DEBUG & DEBUG_DMA)
	 fprintf(stderr, "Waiting for buffers\n");

      radeonWaitForIdleLocked( rmesa );
      ret = drmDMA( fd, &dma );

      if ( ret != 0 ) {
	 UNLOCK_HARDWARE( rmesa );
	 fprintf( stderr, "Error: Could not get dma buffer... exiting\n" );
	 exit( -1 );
      }
   }

   UNLOCK_HARDWARE(rmesa);

   if (RADEON_DEBUG & DEBUG_DMA)
      fprintf(stderr, "Allocated buffer %d\n", index);

   dmabuf = CALLOC_STRUCT( radeon_dma_buffer );
   dmabuf->buf = &rmesa->radeonScreen->buffers->list[index];
   dmabuf->refcount = 1;

   rmesa->dma.current.buf = dmabuf;
   rmesa->dma.current.address = dmabuf->buf->address;
   rmesa->dma.current.end = dmabuf->buf->total;
   rmesa->dma.current.start = 0;
   rmesa->dma.current.ptr = 0;

   rmesa->c_vertexBuffers++;
}

void radeonReleaseDmaRegion( radeonContextPtr rmesa,
			     struct radeon_dma_region *region,
			     const char *caller )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);

   if (!region->buf)
      return;

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (--region->buf->refcount == 0) {
      drm_radeon_cmd_header_t *cmd;

      if (RADEON_DEBUG & (DEBUG_IOCTL|DEBUG_DMA))
	 fprintf(stderr, "%s -- DISCARD BUF %d\n", __FUNCTION__,
		 region->buf->buf->idx);

      cmd = (drm_radeon_cmd_header_t *)radeonAllocCmdBuf( rmesa, sizeof(*cmd),
						     __FUNCTION__ );
      cmd->dma.cmd_type = RADEON_CMD_DMA_DISCARD;
      cmd->dma.buf_idx = region->buf->buf->idx;
      FREE(region->buf);
      rmesa->dma.nr_released_bufs++;
   }

   region->buf = NULL;
   region->start = 0;
}

/* Allocates a region from rmesa->dma.current.  If there isn't enough
 * space in current, grab a new buffer (and discard what was left of current).
 */
void radeonAllocDmaRegion( radeonContextPtr rmesa,
			   struct radeon_dma_region *region,
			   int bytes,
			   int alignment )
{
   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   if (region->buf)
      radeonReleaseDmaRegion( rmesa, region, __FUNCTION__ );

   alignment--;
   rmesa->dma.current.start = rmesa->dma.current.ptr =
      (rmesa->dma.current.ptr + alignment) & ~alignment;

   if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
      radeonRefillCurrentDmaRegion( rmesa );

   region->start = rmesa->dma.current.start;
   region->ptr = rmesa->dma.current.start;
   region->end = rmesa->dma.current.start + bytes;
   region->address = rmesa->dma.current.address;
   region->buf = rmesa->dma.current.buf;
   region->buf->refcount++;

   rmesa->dma.current.ptr += bytes; /* bug - if alignment > 7 */
   rmesa->dma.current.start =
      rmesa->dma.current.ptr = (rmesa->dma.current.ptr + 0x7) & ~0x7;
}
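
/* A minimal usage sketch (hypothetical caller; the real callers keep
 * their regions in the context rather than on the stack).  The region
 * must start out zeroed so the release check above sees no stale buf:
 *
 *    struct radeon_dma_region r = { 0 };
 *    radeonAllocDmaRegion( rmesa, &r, nbytes, 4 );
 *    memcpy( r.address + r.start, data, nbytes );
 *    ...
 *    radeonReleaseDmaRegion( rmesa, &r, __FUNCTION__ );
 */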

/* ================================================================
 * SwapBuffers with client-side throttling
 */

static u_int32_t radeonGetLastFrame (radeonContextPtr rmesa)
{
   drm_radeon_getparam_t gp;
   int ret;
   u_int32_t frame;

   gp.param = RADEON_PARAM_LAST_FRAME;
   gp.value = (int *)&frame;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_GETPARAM,
			      &gp, sizeof(gp) );

   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }

   return frame;
}

static void radeonEmitIrqLocked( radeonContextPtr rmesa )
{
   drm_radeon_irq_emit_t ie;
   int ret;

   ie.irq_seq = &rmesa->iw.irq_seq;
   ret = drmCommandWriteRead( rmesa->dri.fd, DRM_RADEON_IRQ_EMIT,
			      &ie, sizeof(ie) );
   if ( ret ) {
      fprintf( stderr, "%s: drm_radeon_irq_emit_t: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitIrq( radeonContextPtr rmesa )
{
   int ret;

   do {
      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_IRQ_WAIT,
			     &rmesa->iw, sizeof(rmesa->iw) );
   } while (ret && (errno == EINTR || errno == EBUSY));

   if ( ret ) {
      fprintf( stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__, ret );
      exit(1);
   }
}


static void radeonWaitForFrameCompletion( radeonContextPtr rmesa )
{
   drm_radeon_sarea_t *sarea = rmesa->sarea;

   if (rmesa->do_irqs) {
      if (radeonGetLastFrame(rmesa) < sarea->last_frame) {
	 if (!rmesa->irqsEmitted) {
	    while (radeonGetLastFrame (rmesa) < sarea->last_frame)
	       ;
	 }
	 else {
	    UNLOCK_HARDWARE( rmesa );
	    radeonWaitIrq( rmesa );
	    LOCK_HARDWARE( rmesa );
	 }
	 rmesa->irqsEmitted = 10;
      }

      if (rmesa->irqsEmitted) {
	 radeonEmitIrqLocked( rmesa );
	 rmesa->irqsEmitted--;
      }
   }
   else {
      while (radeonGetLastFrame (rmesa) < sarea->last_frame) {
	 UNLOCK_HARDWARE( rmesa );
	 if (rmesa->do_usleeps)
	    DO_USLEEP( 1 );
	 LOCK_HARDWARE( rmesa );
      }
   }
}

/* Copy the back color buffer to the front color buffer.
 */
void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
		       const drm_clip_rect_t *rect)
{
   radeonContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;
   int64_t ust;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   if (!rect)
   {
       UNLOCK_HARDWARE( rmesa );
       driWaitForVBlank( dPriv, & missed_target );
       LOCK_HARDWARE( rmesa );
   }

   nbox = dPriv->numClipRects; /* must be in locked region */

   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      for ( ; i < nr ; i++ ) {

	  *b = box[i];

	  if (rect)
	  {
	      if (rect->x1 > b->x1)
		  b->x1 = rect->x1;
	      if (rect->y1 > b->y1)
		  b->y1 = rect->y1;
	      if (rect->x2 < b->x2)
		  b->x2 = rect->x2;
	      if (rect->y2 < b->y2)
		  b->y2 = rect->y2;

	      if (b->x1 < b->x2 && b->y1 < b->y2)
		  b++;
	  }
	  else
	      b++;

	  n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );

      if ( ret ) {
	 fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
	 UNLOCK_HARDWARE( rmesa );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   if (!rect)
   {
       rmesa->swap_count++;
       (*dri_interface->getUST)( & ust );
       if ( missed_target ) {
	   rmesa->swap_missed_count++;
	   rmesa->swap_missed_ust = ust - rmesa->swap_ust;
       }

       rmesa->swap_ust = ust;
       rmesa->hw.all_dirty = GL_TRUE;
   }
}

void radeonPageFlip( __DRIdrawablePrivate *dPriv )
{
   radeonContextPtr rmesa;
   GLint ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
	      rmesa->sarea->pfCurrentPage);
   }

   RADEON_FIREVERTICES( rmesa );
   LOCK_HARDWARE( rmesa );

   /* Need to do this for the perf box placement:
    */
   if (dPriv->numClipRects)
   {
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      b[0] = box[0];
      rmesa->sarea->nbox = 1;
   }

   /* Throttle the frame rate -- only allow a few pending swap-buffer
    * requests at a time.
    */
   radeonWaitForFrameCompletion( rmesa );
   UNLOCK_HARDWARE( rmesa );
   driWaitForVBlank( dPriv, & missed_target );
   if ( missed_target ) {
      rmesa->swap_missed_count++;
      (void) (*dri_interface->getUST)( & rmesa->swap_missed_ust );
   }
   LOCK_HARDWARE( rmesa );

   ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );

   UNLOCK_HARDWARE( rmesa );

   if ( ret ) {
      fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
      exit( 1 );
   }

   rmesa->swap_count++;
   (void) (*dri_interface->getUST)( & rmesa->swap_ust );

   /* Get ready for drawing next frame.  Update the renderbuffers'
    * flippedOffset/Pitch fields so we draw into the right place.
    */
   driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
                        rmesa->sarea->pfCurrentPage);

   radeonUpdateDrawBuffer(rmesa->glCtx);
}


/* ================================================================
 * Buffer clear
 */
#define RADEON_MAX_CLEARS	256

static void radeonClear( GLcontext *ctx, GLbitfield mask )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
   drm_radeon_sarea_t *sarea = rmesa->sarea;
   u_int32_t clear;
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLint ret, i;
   GLint cx, cy, cw, ch;

   if ( RADEON_DEBUG & DEBUG_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

   {
      /* Taking the hardware lock gives the drawable cliprect information
       * a chance to be refreshed (via radeonGetLock) before we look at it.
       */
      LOCK_HARDWARE( rmesa );
      UNLOCK_HARDWARE( rmesa );
      if ( dPriv->numClipRects == 0 )
	 return;
   }

   radeonFlush( ctx );

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) && rmesa->state.stencil.hwBuffer ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & DEBUG_FALLBACKS)
	 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (!(rmesa->state.stencil.hwBuffer) ||
	 ((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
	    ((rmesa->state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
	  flags |= RADEON_CLEAR_FASTZ;
      }
   }

   LOCK_HARDWARE( rmesa );

   /* compute region after locking: */
   cx = ctx->DrawBuffer->_Xmin;
   cy = ctx->DrawBuffer->_Ymin;
   cw = ctx->DrawBuffer->_Xmax - cx;
   ch = ctx->DrawBuffer->_Ymax - cy;

   /* Flip top to bottom */
   cx += dPriv->x;
   cy  = dPriv->y + dPriv->h - cy - ch;

   /* Throttle the number of clear ioctls we do.
    */
   while ( 1 ) {
      int ret;
      drm_radeon_getparam_t gp;

      gp.param = RADEON_PARAM_LAST_CLEAR;
      gp.value = (int *)&clear;
      ret = drmCommandWriteRead( rmesa->dri.fd,
				 DRM_RADEON_GETPARAM, &gp, sizeof(gp) );

      if ( ret ) {
	 fprintf( stderr, "%s: drm_radeon_getparam_t: %d\n", __FUNCTION__, ret );
	 exit(1);
      }

      if ( sarea->last_clear - clear <= RADEON_MAX_CLEARS ) {
	 break;
      }

      if ( rmesa->do_usleeps ) {
	 UNLOCK_HARDWARE( rmesa );
	 DO_USLEEP( 1 );
	 LOCK_HARDWARE( rmesa );
      }
   }

   /* Send current state to the hardware */
   radeonFlushCmdBufLocked( rmesa, __FUNCTION__ );

   for ( i = 0 ; i < dPriv->numClipRects ; ) {
      GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS, dPriv->numClipRects );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      drm_radeon_clear_t clear;
      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
      GLint n = 0;

      if (cw != dPriv->w || ch != dPriv->h) {
         /* clear subregion */
	 for ( ; i < nr ; i++ ) {
	    GLint x = box[i].x1;
	    GLint y = box[i].y1;
	    GLint w = box[i].x2 - x;
	    GLint h = box[i].y2 - y;

	    if ( x < cx ) w -= cx - x, x = cx;
	    if ( y < cy ) h -= cy - y, y = cy;
	    if ( x + w > cx + cw ) w = cx + cw - x;
	    if ( y + h > cy + ch ) h = cy + ch - y;
	    if ( w <= 0 ) continue;
	    if ( h <= 0 ) continue;

	    b->x1 = x;
	    b->y1 = y;
	    b->x2 = x + w;
	    b->y2 = y + h;
	    b++;
	    n++;
	 }
      } else {
         /* clear whole buffer */
	 for ( ; i < nr ; i++ ) {
	    *b++ = box[i];
	    n++;
	 }
      }

      rmesa->sarea->nbox = n;

      clear.flags       = flags;
      clear.clear_color = rmesa->state.color.clear;
      clear.clear_depth = rmesa->state.depth.clear;
      clear.color_mask  = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      clear.depth_mask  = rmesa->state.stencil.clear;
      clear.depth_boxes = depth_boxes;

      n--;
      b = rmesa->sarea->boxes;
      for ( ; n >= 0 ; n-- ) {
	 depth_boxes[n].f[CLEAR_X1] = (float)b[n].x1;
	 depth_boxes[n].f[CLEAR_Y1] = (float)b[n].y1;
	 depth_boxes[n].f[CLEAR_X2] = (float)b[n].x2;
	 depth_boxes[n].f[CLEAR_Y2] = (float)b[n].y2;
	 depth_boxes[n].f[CLEAR_DEPTH] =
	    (float)rmesa->state.depth.clear;
      }

      ret = drmCommandWrite( rmesa->dri.fd, DRM_RADEON_CLEAR,
			     &clear, sizeof(drm_radeon_clear_t));

      if ( ret ) {
	 UNLOCK_HARDWARE( rmesa );
	 fprintf( stderr, "DRM_RADEON_CLEAR: return = %d\n", ret );
	 exit( 1 );
      }
   }

   UNLOCK_HARDWARE( rmesa );
   rmesa->hw.all_dirty = GL_TRUE;
}


void radeonWaitForIdleLocked( radeonContextPtr rmesa )
{
    int fd = rmesa->dri.fd;
    int to = 0;
    int ret, i = 0;

    rmesa->c_drawWaits++;

    do {
        do {
            ret = drmCommandNone( fd, DRM_RADEON_CP_IDLE);
        } while ( ret && errno == EBUSY && i++ < RADEON_IDLE_RETRY );
    } while ( ( ret == -EBUSY ) && ( to++ < RADEON_TIMEOUT ) );

    if ( ret < 0 ) {
	UNLOCK_HARDWARE( rmesa );
	fprintf( stderr, "Error: Radeon timed out... exiting\n" );
	exit( -1 );
    }
}


static void radeonWaitForIdle( radeonContextPtr rmesa )
{
   LOCK_HARDWARE(rmesa);
   radeonWaitForIdleLocked( rmesa );
   UNLOCK_HARDWARE(rmesa);
}


void radeonFlush( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT( ctx );

   if (RADEON_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   if (rmesa->dma.flush)
      rmesa->dma.flush( rmesa );

   radeonEmitState( rmesa );

   if (rmesa->store.cmd_used)
      radeonFlushCmdBuf( rmesa, __FUNCTION__ );
}
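
/* Informal usage note: radeonFlush() only submits everything queued to
 * the kernel; radeonFinish() below additionally waits for the hardware
 * to drain, via an emitted IRQ when available, else by polling CP idle.
 */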

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish( GLcontext *ctx )
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeonFlush( ctx );

   if (rmesa->do_irqs) {
      LOCK_HARDWARE( rmesa );
      radeonEmitIrqLocked( rmesa );
      UNLOCK_HARDWARE( rmesa );
      radeonWaitIrq( rmesa );
   }
   else
      radeonWaitForIdle( rmesa );
}


void radeonInitIoctlFuncs( GLcontext *ctx )
{
    ctx->Driver.Clear = radeonClear;
    ctx->Driver.Finish = radeonFinish;
    ctx->Driver.Flush = radeonFlush;
}