radeon_span.c revision 94d2a809f9cabf3821be9b0d2b11b26151ed3084
/**************************************************************************

Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
                     VA Linux Systems Inc., Fremont, California.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Kevin E. Martin <martin@valinux.com>
 *   Gareth Hughes <gareth@valinux.com>
 *   Keith Whitwell <keith@tungstengraphics.com>
 *
 */

#include "main/glheader.h"
#include "swrast/swrast.h"

#include "radeon_common.h"
#include "radeon_lock.h"
#include "radeon_span.h"

#define DBG 0

static void radeonSetSpanFunctions(struct radeon_renderbuffer *rrb);

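/* Return a CPU pointer to the pixel at (x, y) in the given renderbuffer.
 * For untiled buffers, or buffers with a surface register set up (where the
 * hardware translates addresses for us), this is a simple y * pitch + x * cpp
 * calculation.  For macro-/micro-tiled buffers the offset is assembled from
 * the tile coordinates instead, mirroring the hardware's address swizzle.
 * radeon_ptr32 and radeon_ptr16 are specialized for 4- and 2-byte pixels;
 * radeon_ptr is the generic, cpp-parameterized fallback.
 */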
static GLubyte *radeon_ptr32(const struct radeon_renderbuffer * rrb,
			     GLint x, GLint y)
{
    GLubyte *ptr = rrb->bo->ptr;
    uint32_t mask = RADEON_BO_FLAGS_MACRO_TILE | RADEON_BO_FLAGS_MICRO_TILE;
    GLint offset;
    GLint nmacroblkpl;
    GLint nmicroblkpl;

    if (rrb->has_surface || !(rrb->bo->flags & mask)) {
        offset = x * rrb->cpp + y * rrb->pitch;
    } else {
        offset = 0;
        if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE) {
            if (rrb->bo->flags & RADEON_BO_FLAGS_MICRO_TILE) {
                nmacroblkpl = rrb->pitch >> 5;
                offset += ((y >> 4) * nmacroblkpl) << 11;
                offset += ((y & 15) >> 1) << 8;
                offset += (y & 1) << 4;
                offset += (x >> 5) << 11;
                offset += ((x & 31) >> 2) << 5;
                offset += (x & 3) << 2;
            } else {
		offset = ((y >> 3) * (rrb->pitch >> 8) + (x >> 6)) << 11;
		offset += (((y >> 2) ^ (x >> 6)) & 0x1) << 10;
		offset += (((y >> 3) ^ (x >> 5)) & 0x1) << 9;
		offset += (((y >> 1) ^ (x >> 5)) & 0x1) << 8;
		offset += (((y >> 2) ^ (x >> 4)) & 0x1) << 7;
		offset += (y & 1) << 6;
		offset += (x & 15) << 2;
            }
        } else {
            nmicroblkpl = ((rrb->pitch + 31) & ~31) >> 5;
            offset += (y * nmicroblkpl) << 5;
            offset += (x >> 3) << 5;
            offset += (x & 7) << 2;
        }
    }
    return &ptr[offset];
}

static GLubyte *radeon_ptr16(const struct radeon_renderbuffer * rrb,
			     GLint x, GLint y)
{
    GLubyte *ptr = rrb->bo->ptr;
    uint32_t mask = RADEON_BO_FLAGS_MACRO_TILE | RADEON_BO_FLAGS_MICRO_TILE;
    GLint offset;
    GLint nmacroblkpl;
    GLint nmicroblkpl;

    if (rrb->has_surface || !(rrb->bo->flags & mask)) {
        offset = x * rrb->cpp + y * rrb->pitch;
    } else {
        offset = 0;
        if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE) {
            if (rrb->bo->flags & RADEON_BO_FLAGS_MICRO_TILE) {
                nmacroblkpl = rrb->pitch >> 6;
                offset += ((y >> 4) * nmacroblkpl) << 11;
                offset += ((y & 15) >> 1) << 8;
                offset += (y & 1) << 4;
                offset += (x >> 6) << 11;
                offset += ((x & 63) >> 3) << 5;
                offset += (x & 7) << 1;
            } else {
                nmacroblkpl = rrb->pitch >> 7;
                offset += ((y >> 3) * nmacroblkpl) << 11;
                offset += (y & 7) << 8;
                offset += (x >> 7) << 11;
                offset += ((x & 127) >> 4) << 5;
                offset += (x & 15) << 2;
            }
        } else {
            nmicroblkpl = ((rrb->pitch + 31) & ~31) >> 5;
            offset += (y * nmicroblkpl) << 5;
            offset += (x >> 4) << 5;
            offset += (x & 15) << 2;
        }
    }
    return &ptr[offset];
}

static GLubyte *radeon_ptr(const struct radeon_renderbuffer * rrb,
			   GLint x, GLint y)
{
    GLubyte *ptr = rrb->bo->ptr;
    uint32_t mask = RADEON_BO_FLAGS_MACRO_TILE | RADEON_BO_FLAGS_MICRO_TILE;
    GLint offset;
    GLint microblkxs;
    GLint macroblkxs;
    GLint nmacroblkpl;
    GLint nmicroblkpl;

    if (rrb->has_surface || !(rrb->bo->flags & mask)) {
        offset = x * rrb->cpp + y * rrb->pitch;
    } else {
        offset = 0;
        if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE) {
            if (rrb->bo->flags & RADEON_BO_FLAGS_MICRO_TILE) {
                microblkxs = 16 / rrb->cpp;
                macroblkxs = 128 / rrb->cpp;
                nmacroblkpl = rrb->pitch / macroblkxs;
                offset += ((y >> 4) * nmacroblkpl) << 11;
                offset += ((y & 15) >> 1) << 8;
                offset += (y & 1) << 4;
                offset += (x / macroblkxs) << 11;
                offset += ((x & (macroblkxs - 1)) / microblkxs) << 5;
                offset += (x & (microblkxs - 1)) * rrb->cpp;
            } else {
                microblkxs = 32 / rrb->cpp;
                macroblkxs = 256 / rrb->cpp;
                nmacroblkpl = rrb->pitch / macroblkxs;
                offset += ((y >> 3) * nmacroblkpl) << 11;
                offset += (y & 7) << 8;
                offset += (x / macroblkxs) << 11;
                offset += ((x & (macroblkxs - 1)) / microblkxs) << 5;
                offset += (x & (microblkxs - 1)) * rrb->cpp;
            }
        } else {
            microblkxs = 32 / rrb->cpp;
            nmicroblkpl = ((rrb->pitch + 31) & ~31) >> 5;
            offset += (y * nmicroblkpl) << 5;
            offset += (x / microblkxs) << 5;
            offset += (x & (microblkxs - 1)) * rrb->cpp;
        }
    }
    return &ptr[offset];
}

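/* Swizzle helpers used by the non-R300 depth/stencil paths below to convert
 * between z24s8 (depth in the upper 24 bits) and s8z24 (stencil in the upper
 * 8 bits).  Worked example: z24s8_to_s8z24(0x12345678) == 0x78123456 and
 * s8z24_to_z24s8(0x78123456) == 0x12345678, so the two are exact inverses.
 */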
#ifndef COMPILE_R300
static uint32_t
z24s8_to_s8z24(uint32_t val)
{
   return (val << 24) | (val >> 8);
}

static uint32_t
s8z24_to_z24s8(uint32_t val)
{
   return (val >> 24) | (val << 8);
}
#endif

/*
 * Note that all information needed to access pixels in a renderbuffer
 * should be obtained through the gl_renderbuffer parameter, not per-context
 * information.
 */
#define LOCAL_VARS						\
   struct radeon_context *radeon = RADEON_CONTEXT(ctx);			\
   struct radeon_renderbuffer *rrb = (void *) rb;		\
   const GLint yScale = ctx->DrawBuffer->Name ? 1 : -1;			\
   const GLint yBias = ctx->DrawBuffer->Name ? 0 : rrb->base.Height - 1;\
   unsigned int num_cliprects;						\
   struct drm_clip_rect *cliprects;					\
   int x_off, y_off;							\
   GLuint p;						\
   (void)p;						\
   radeon_get_cliprects(radeon, &cliprects, &num_cliprects, &x_off, &y_off);

#define LOCAL_DEPTH_VARS				\
   struct radeon_context *radeon = RADEON_CONTEXT(ctx);			\
   struct radeon_renderbuffer *rrb = (void *) rb;	\
   const GLint yScale = ctx->DrawBuffer->Name ? 1 : -1;			\
   const GLint yBias = ctx->DrawBuffer->Name ? 0 : rrb->base.Height - 1;\
   unsigned int num_cliprects;						\
   struct drm_clip_rect *cliprects;					\
   int x_off, y_off;							\
   radeon_get_cliprects(radeon, &cliprects, &num_cliprects, &x_off, &y_off);

#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS

#define Y_FLIP(_y) ((_y) * yScale + yBias)
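/* With the yScale/yBias values set up by LOCAL_VARS above, Y_FLIP() is the
 * identity for user FBOs (DrawBuffer->Name != 0) and flips the y axis for
 * the window-system framebuffer, e.g. Y_FLIP(0) == Height - 1 there.
 */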

#define HW_LOCK()

#define HW_UNLOCK()

/* XXX FBO: this is identical to the macro in spantmp2.h except we get
 * the cliprect info from the context, not the driDrawable.
 * Move this into spantmp2.h someday.
 */
#define HW_CLIPLOOP()							\
   do {									\
      int _nc = num_cliprects;						\
      while ( _nc-- ) {							\
	 int minx = cliprects[_nc].x1 - x_off;				\
	 int miny = cliprects[_nc].y1 - y_off;				\
	 int maxx = cliprects[_nc].x2 - x_off;				\
	 int maxy = cliprects[_nc].y2 - y_off;
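
/* HW_ENDCLIPLOOP() is not overridden here, so the default definition in
 * spantmp2.h is expected to close the while loop and the do/while opened
 * above.
 */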

/* ================================================================
 * Color buffer
 */

/* 16 bit, RGB565 color spanline and pixel functions
 */
#define SPANTMP_PIXEL_FMT GL_RGB
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5

#define TAG(x)    radeon##x##_RGB565
#define TAG2(x,y) radeon##x##_RGB565##y
#define GET_PTR(X,Y) radeon_ptr16(rrb, (X) + x_off, (Y) + y_off)
#include "spantmp2.h"
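
/* Each SPANTMP_PIXEL_FMT / TAG / GET_PTR block above parameterizes the span
 * template in spantmp2.h, which generates the per-format read/write span and
 * pixel functions plus a TAG2(InitPointers,) entry point (for example
 * radeonInitPointers_RGB565, used in radeonSetSpanFunctions below) and then
 * undefines the parameters so the next format block can redefine them.
 */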

/* 16 bit, ARGB1555 color spanline and pixel functions
 */
#define SPANTMP_PIXEL_FMT GL_BGRA
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_1_5_5_5_REV

#define TAG(x)    radeon##x##_ARGB1555
#define TAG2(x,y) radeon##x##_ARGB1555##y
#define GET_PTR(X,Y) radeon_ptr16(rrb, (X) + x_off, (Y) + y_off)
#include "spantmp2.h"

/* 16 bit, ARGB4444 color spanline and pixel functions
 */
#define SPANTMP_PIXEL_FMT GL_BGRA
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_4_4_4_4_REV

#define TAG(x)    radeon##x##_ARGB4444
#define TAG2(x,y) radeon##x##_ARGB4444##y
#define GET_PTR(X,Y) radeon_ptr16(rrb, (X) + x_off, (Y) + y_off)
#include "spantmp2.h"

/* 32 bit, xRGB8888 color spanline and pixel functions
 */
#define SPANTMP_PIXEL_FMT GL_BGRA
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV

#define TAG(x)    radeon##x##_xRGB8888
#define TAG2(x,y) radeon##x##_xRGB8888##y
#define GET_VALUE(_x, _y) ((*(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off)) | 0xff000000))
#define PUT_VALUE(_x, _y, d) do { \
   GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off );		\
   *_ptr = d;								\
} while (0)
#include "spantmp2.h"
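
/* For the xRGB8888 layout GET_VALUE above ORs in 0xff000000, so reads of the
 * unused alpha byte always return 1.0, while PUT_VALUE stores all 32 bits.
 */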

/* 32 bit, ARGB8888 color spanline and pixel functions
 */
#define SPANTMP_PIXEL_FMT GL_BGRA
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV

#define TAG(x)    radeon##x##_ARGB8888
#define TAG2(x,y) radeon##x##_ARGB8888##y
#define GET_VALUE(_x, _y) (*(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off)))
#define PUT_VALUE(_x, _y, d) do { \
   GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off );		\
   *_ptr = d;								\
} while (0)
#include "spantmp2.h"

/* ================================================================
 * Depth buffer
 */

/* The Radeon family has depth tiling on all the time, so we have to convert
 * the x,y coordinates into the memory bus address (mba) in the same
 * manner as the engine.  In each case, the linear block address (ba)
 * is calculated, and then wired with x and y to produce the final
 * memory address.
 * The chip will do address translation on its own if the surface registers
 * are set up correctly. It is not quite enough to get it working with hyperz
 * too...
 */
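
/* When a surface register is set up for the buffer (rrb->has_surface), the
 * radeon_ptr* helpers above use plain linear addressing and the hardware
 * performs the translation; otherwise the tiled address is computed manually
 * there as described.
 */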

/* 16-bit depth buffer functions
 */
#define VALUE_TYPE GLushort

#define WRITE_DEPTH( _x, _y, d )					\
   *(GLushort *)radeon_ptr(rrb, _x + x_off, _y + y_off) = d

#define READ_DEPTH( d, _x, _y )						\
   d = *(GLushort *)radeon_ptr(rrb, _x + x_off, _y + y_off)

#define TAG(x) radeon##x##_z16
#include "depthtmp.h"

/* 24 bit depth
 *
 * Careful: It looks like the R300 uses ZZZS byte order while the R200
 * uses SZZZ for 24 bit depth, 8 bit stencil mode.
 */
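
/* In terms of the 32-bit word: on R300 depth occupies bits 31:8 with stencil
 * in bits 7:0 (so WRITE_DEPTH shifts d left by 8), while on the other chips
 * depth sits in bits 23:0 with stencil in bits 31:24 (so d is masked into
 * the low 24 bits instead).
 */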
#define VALUE_TYPE GLuint

#ifdef COMPILE_R300
#define WRITE_DEPTH( _x, _y, d )					\
do {									\
   GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off );		\
   GLuint tmp = *_ptr;				\
   tmp &= 0x000000ff;							\
   tmp |= ((d << 8) & 0xffffff00);					\
   *_ptr = tmp;					\
} while (0)
#else
#define WRITE_DEPTH( _x, _y, d )					\
do {									\
   GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off );	\
   GLuint tmp = *_ptr;							\
   tmp &= 0xff000000;							\
   tmp |= ((d) & 0x00ffffff);						\
   *_ptr = tmp;					\
} while (0)
#endif

#ifdef COMPILE_R300
#define READ_DEPTH( d, _x, _y )						\
  do {									\
    d = (*(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off)) & 0xffffff00) >> 8; \
  }while(0)
#else
#define READ_DEPTH( d, _x, _y )	\
  d = *(GLuint*)(radeon_ptr32(rrb, _x + x_off,	_y + y_off)) & 0x00ffffff;
#endif
/*
    fprintf(stderr, "dval(%d, %d, %d, %d)=0x%08X\n", _x, xo, _y, yo, d);\
   d = *(GLuint*)(radeon_ptr(rrb, _x,	_y )) & 0x00ffffff;
*/
#define TAG(x) radeon##x##_z24
#include "depthtmp.h"

/* 24 bit depth, 8 bit stencil depthbuffer functions
 * EXT_depth_stencil
 *
 * Careful: It looks like the R300 uses ZZZS byte order while the R200
 * uses SZZZ for 24 bit depth, 8 bit stencil mode.
 */
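
/* The values passed in and out through depthtmp.h here are in z24s8 form
 * (depth in the upper 24 bits), which matches the R300 layout directly; the
 * non-R300 paths convert to and from the chip's s8z24 layout with the
 * helpers defined near the top of this file.
 */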
#define VALUE_TYPE GLuint

#ifdef COMPILE_R300
#define WRITE_DEPTH( _x, _y, d )					\
do {									\
   GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off );		\
   *_ptr = d;								\
} while (0)
#else
#define WRITE_DEPTH( _x, _y, d )					\
do {									\
   GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off );	\
   GLuint tmp = z24s8_to_s8z24(d);					\
   *_ptr = tmp;					\
} while (0)
#endif

#ifdef COMPILE_R300
#define READ_DEPTH( d, _x, _y )						\
  do { \
    d = (*(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off)));	\
  }while(0)
#else
#define READ_DEPTH( d, _x, _y )	do {					\
    d = s8z24_to_z24s8(*(GLuint*)(radeon_ptr32(rrb, _x + x_off,	_y + y_off ))); \
  } while (0)
#endif
/*
    fprintf(stderr, "dval(%d, %d, %d, %d)=0x%08X\n", _x, xo, _y, yo, d);\
   d = *(GLuint*)(radeon_ptr(rrb, _x,	_y )) & 0x00ffffff;
*/
#define TAG(x) radeon##x##_z24_s8
#include "depthtmp.h"

/* ================================================================
 * Stencil buffer
 */

/* 24 bit depth, 8 bit stencil depthbuffer functions
 */
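
/* These operate on the same combined buffer as the z24_s8 depth functions:
 * only the stencil byte is read or written (bits 7:0 on R300, bits 31:24 on
 * the other chips), leaving the depth bits untouched via the masks.
 */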
#ifdef COMPILE_R300
#define WRITE_STENCIL( _x, _y, d )					\
do {									\
   GLuint *_ptr = (GLuint*)radeon_ptr32(rrb, _x + x_off, _y + y_off);		\
   GLuint tmp = *_ptr;				\
   tmp &= 0xffffff00;							\
   tmp |= (d) & 0xff;							\
   *_ptr = tmp;					\
} while (0)
#else
#define WRITE_STENCIL( _x, _y, d )					\
do {									\
   GLuint *_ptr = (GLuint*)radeon_ptr32(rrb, _x + x_off, _y + y_off);		\
   GLuint tmp = *_ptr;				\
   tmp &= 0x00ffffff;							\
   tmp |= (((d) & 0xff) << 24);						\
   *_ptr = tmp;					\
} while (0)
#endif

#ifdef COMPILE_R300
#define READ_STENCIL( d, _x, _y )					\
do {									\
   GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off );		\
   GLuint tmp = *_ptr;				\
   d = tmp & 0x000000ff;						\
} while (0)
#else
#define READ_STENCIL( d, _x, _y )					\
do {									\
   GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off );		\
   GLuint tmp = *_ptr;				\
   d = (tmp & 0xff000000) >> 24;					\
} while (0)
#endif

#define TAG(x) radeon##x##_z24_s8
#include "stenciltmp.h"


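/* Map (flag != 0) or unmap (flag == 0) a renderbuffer's backing BO for CPU
 * access.  Mapping (re)installs the span functions for the buffer's format;
 * unmapping clears the GetRow/PutRow hooks so stale pointers are not used.
 */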
static void map_unmap_rb(struct gl_renderbuffer *rb, int flag)
{
	struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
	int r;

	if (rrb == NULL || !rrb->bo)
		return;

	if (flag) {
		if (rrb->bo->bom->funcs->bo_wait)
			radeon_bo_wait(rrb->bo);
		r = radeon_bo_map(rrb->bo, 1);
		if (r) {
			fprintf(stderr, "(%s) error(%d) mapping buffer.\n",
				__FUNCTION__, r);
		}

		radeonSetSpanFunctions(rrb);
	} else {
		radeon_bo_unmap(rrb->bo);
		rb->GetRow = NULL;
		rb->PutRow = NULL;
	}
}

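/* Map or unmap everything a software fallback may touch: all color draw
 * buffers, any texture images bound for render-to-texture, the color read
 * buffer, and the wrapped depth and stencil buffers.
 */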
static void
radeon_map_unmap_buffers(GLcontext *ctx, GLboolean map)
{
	GLuint i, j;

	/* color draw buffers */
	for (j = 0; j < ctx->DrawBuffer->_NumColorDrawBuffers; j++)
		map_unmap_rb(ctx->DrawBuffer->_ColorDrawBuffers[j], map);

	/* check for render to textures */
	for (i = 0; i < BUFFER_COUNT; i++) {
		struct gl_renderbuffer_attachment *att =
			ctx->DrawBuffer->Attachment + i;
		struct gl_texture_object *tex = att->Texture;
		if (tex) {
			/* Render to texture. Note that a mipmapped texture need not
			 * be complete for render to texture, so we must restrict to
			 * mapping only the attached image.
			 */
			radeon_texture_image *image = get_radeon_texture_image(tex->Image[att->CubeMapFace][att->TextureLevel]);
			ASSERT(att->Renderbuffer);

			if (map)
				radeon_teximage_map(image, GL_TRUE);
			else
				radeon_teximage_unmap(image);
		}
	}

	map_unmap_rb(ctx->ReadBuffer->_ColorReadBuffer, map);

	/* depth buffer (Note wrapper!) */
	if (ctx->DrawBuffer->_DepthBuffer)
		map_unmap_rb(ctx->DrawBuffer->_DepthBuffer->Wrapped, map);

	if (ctx->DrawBuffer->_StencilBuffer)
		map_unmap_rb(ctx->DrawBuffer->_StencilBuffer->Wrapped, map);
}

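/* swrast calls SpanRenderStart/SpanRenderFinish around software fallbacks.
 * Start flushes our pending hardware rendering, locks and idles the chip in
 * classic (non-DRI2) mode, and maps the enabled textures and all framebuffer
 * renderbuffers so the CPU span functions can touch them directly; Finish
 * flushes swrast output and unmaps everything again.
 */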
static void radeonSpanRenderStart(GLcontext * ctx)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	int i;

	radeon_firevertices(rmesa);

	/* The locking and wait for idle should really only be needed in classic mode.
	 * In a future memory manager based implementation, this should become
	 * unnecessary due to the fact that mapping our buffers, textures, etc.
	 * should implicitly wait for any previous rendering commands that must
	 * be waited on. */
	if (!rmesa->radeonScreen->driScreen->dri2.enabled) {
		LOCK_HARDWARE(rmesa);
		radeonWaitForIdleLocked(rmesa);
	}

	for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
		if (ctx->Texture.Unit[i]._ReallyEnabled)
			ctx->Driver.MapTexture(ctx, ctx->Texture.Unit[i]._Current);
	}

	radeon_map_unmap_buffers(ctx, 1);
}

static void radeonSpanRenderFinish(GLcontext * ctx)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	int i;
	_swrast_flush(ctx);
	if (!rmesa->radeonScreen->driScreen->dri2.enabled) {
		UNLOCK_HARDWARE(rmesa);
	}
	for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
		if (ctx->Texture.Unit[i]._ReallyEnabled)
			ctx->Driver.UnmapTexture(ctx, ctx->Texture.Unit[i]._Current);
	}

	radeon_map_unmap_buffers(ctx, 0);
}

void radeonInitSpanFuncs(GLcontext * ctx)
{
	struct swrast_device_driver *swdd =
	    _swrast_GetDeviceDriverReference(ctx);
	swdd->SpanRenderStart = radeonSpanRenderStart;
	swdd->SpanRenderFinish = radeonSpanRenderFinish;
}

/**
 * Plug in the Get/Put span routines that match the renderbuffer's format.
 */
static void radeonSetSpanFunctions(struct radeon_renderbuffer *rrb)
{
	if (rrb->base._ActualFormat == GL_RGB5) {
		radeonInitPointers_RGB565(&rrb->base);
	} else if (rrb->base._ActualFormat == GL_RGB8) {
		radeonInitPointers_xRGB8888(&rrb->base);
	} else if (rrb->base._ActualFormat == GL_RGBA8) {
		radeonInitPointers_ARGB8888(&rrb->base);
	} else if (rrb->base._ActualFormat == GL_RGBA4) {
		radeonInitPointers_ARGB4444(&rrb->base);
	} else if (rrb->base._ActualFormat == GL_RGB5_A1) {
		radeonInitPointers_ARGB1555(&rrb->base);
	} else if (rrb->base._ActualFormat == GL_DEPTH_COMPONENT16) {
		radeonInitDepthPointers_z16(&rrb->base);
	} else if (rrb->base._ActualFormat == GL_DEPTH_COMPONENT24) {
		radeonInitDepthPointers_z24(&rrb->base);
	} else if (rrb->base._ActualFormat == GL_DEPTH24_STENCIL8_EXT) {
		radeonInitDepthPointers_z24_s8(&rrb->base);
	} else if (rrb->base._ActualFormat == GL_STENCIL_INDEX8_EXT) {
		radeonInitStencilPointers_z24_s8(&rrb->base);
	} else {
		fprintf(stderr, "radeonSetSpanFunctions: bad actual format: 0x%04X\n", rrb->base._ActualFormat);
	}
}