radeon_span.c revision b484c71036e0d0b30ac7685ba50a9008d09f5047
1/************************************************************************** 2 3Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 4Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and 5 VA Linux Systems Inc., Fremont, California. 6 7The Weather Channel (TM) funded Tungsten Graphics to develop the 8initial release of the Radeon 8500 driver under the XFree86 license. 9This notice must be preserved. 10 11All Rights Reserved. 12 13Permission is hereby granted, free of charge, to any person obtaining 14a copy of this software and associated documentation files (the 15"Software"), to deal in the Software without restriction, including 16without limitation the rights to use, copy, modify, merge, publish, 17distribute, sublicense, and/or sell copies of the Software, and to 18permit persons to whom the Software is furnished to do so, subject to 19the following conditions: 20 21The above copyright notice and this permission notice (including the 22next paragraph) shall be included in all copies or substantial 23portions of the Software. 24 25THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 28IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 29LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 30OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 31WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 32 33**************************************************************************/ 34 35/* 36 * Authors: 37 * Kevin E. 
Martin <martin@valinux.com> 38 * Gareth Hughes <gareth@valinux.com> 39 * Keith Whitwell <keith@tungstengraphics.com> 40 * 41 */ 42 43#include "main/glheader.h" 44#include "swrast/swrast.h" 45 46#include "radeon_common.h" 47#include "radeon_lock.h" 48#include "radeon_span.h" 49 50#define DBG 0 51 52static void radeonSetSpanFunctions(struct radeon_renderbuffer *rrb); 53 54static GLubyte *radeon_ptr32(const struct radeon_renderbuffer * rrb, 55 GLint x, GLint y) 56{ 57 GLubyte *ptr = rrb->bo->ptr; 58 uint32_t mask = RADEON_BO_FLAGS_MACRO_TILE | RADEON_BO_FLAGS_MICRO_TILE; 59 GLint offset; 60 GLint nmacroblkpl; 61 GLint nmicroblkpl; 62 63 if (rrb->has_surface || !(rrb->bo->flags & mask)) { 64 offset = x * rrb->cpp + y * rrb->pitch; 65 } else { 66 offset = 0; 67 if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE) { 68 if (rrb->bo->flags & RADEON_BO_FLAGS_MICRO_TILE) { 69 nmacroblkpl = rrb->pitch >> 5; 70 offset += ((y >> 4) * nmacroblkpl) << 11; 71 offset += ((y & 15) >> 1) << 8; 72 offset += (y & 1) << 4; 73 offset += (x >> 5) << 11; 74 offset += ((x & 31) >> 2) << 5; 75 offset += (x & 3) << 2; 76 } else { 77 nmacroblkpl = rrb->pitch >> 6; 78 offset += ((y >> 3) * nmacroblkpl) << 11; 79 offset += (y & 7) << 8; 80 offset += (x >> 6) << 11; 81 offset += ((x & 63) >> 3) << 5; 82 offset += (x & 7) << 2; 83 } 84 } else { 85 nmicroblkpl = ((rrb->pitch + 31) & ~31) >> 5; 86 offset += (y * nmicroblkpl) << 5; 87 offset += (x >> 3) << 5; 88 offset += (x & 7) << 2; 89 } 90 } 91 return &ptr[offset]; 92} 93 94static GLubyte *radeon_ptr16(const struct radeon_renderbuffer * rrb, 95 GLint x, GLint y) 96{ 97 GLubyte *ptr = rrb->bo->ptr; 98 uint32_t mask = RADEON_BO_FLAGS_MACRO_TILE | RADEON_BO_FLAGS_MICRO_TILE; 99 GLint offset; 100 GLint nmacroblkpl; 101 GLint nmicroblkpl; 102 103 if (rrb->has_surface || !(rrb->bo->flags & mask)) { 104 offset = x * rrb->cpp + y * rrb->pitch; 105 } else { 106 offset = 0; 107 if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE) { 108 if (rrb->bo->flags & 
RADEON_BO_FLAGS_MICRO_TILE) { 109 nmacroblkpl = rrb->pitch >> 6; 110 offset += ((y >> 4) * nmacroblkpl) << 11; 111 offset += ((y & 15) >> 1) << 8; 112 offset += (y & 1) << 4; 113 offset += (x >> 6) << 11; 114 offset += ((x & 63) >> 3) << 5; 115 offset += (x & 7) << 1; 116 } else { 117 nmacroblkpl = rrb->pitch >> 7; 118 offset += ((y >> 3) * nmacroblkpl) << 11; 119 offset += (y & 7) << 8; 120 offset += (x >> 7) << 11; 121 offset += ((x & 127) >> 4) << 5; 122 offset += (x & 15) << 2; 123 } 124 } else { 125 nmicroblkpl = ((rrb->pitch + 31) & ~31) >> 5; 126 offset += (y * nmicroblkpl) << 5; 127 offset += (x >> 4) << 5; 128 offset += (x & 15) << 2; 129 } 130 } 131 return &ptr[offset]; 132} 133 134static GLubyte *radeon_ptr(const struct radeon_renderbuffer * rrb, 135 GLint x, GLint y) 136{ 137 GLubyte *ptr = rrb->bo->ptr; 138 uint32_t mask = RADEON_BO_FLAGS_MACRO_TILE | RADEON_BO_FLAGS_MICRO_TILE; 139 GLint offset; 140 GLint microblkxs; 141 GLint macroblkxs; 142 GLint nmacroblkpl; 143 GLint nmicroblkpl; 144 145 if (rrb->has_surface || !(rrb->bo->flags & mask)) { 146 offset = x * rrb->cpp + y * rrb->pitch; 147 } else { 148 offset = 0; 149 if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE) { 150 if (rrb->bo->flags & RADEON_BO_FLAGS_MICRO_TILE) { 151 microblkxs = 16 / rrb->cpp; 152 macroblkxs = 128 / rrb->cpp; 153 nmacroblkpl = rrb->pitch / macroblkxs; 154 offset += ((y >> 4) * nmacroblkpl) << 11; 155 offset += ((y & 15) >> 1) << 8; 156 offset += (y & 1) << 4; 157 offset += (x / macroblkxs) << 11; 158 offset += ((x & (macroblkxs - 1)) / microblkxs) << 5; 159 offset += (x & (microblkxs - 1)) * rrb->cpp; 160 } else { 161 microblkxs = 32 / rrb->cpp; 162 macroblkxs = 256 / rrb->cpp; 163 nmacroblkpl = rrb->pitch / macroblkxs; 164 offset += ((y >> 3) * nmacroblkpl) << 11; 165 offset += (y & 7) << 8; 166 offset += (x / macroblkxs) << 11; 167 offset += ((x & (macroblkxs - 1)) / microblkxs) << 5; 168 offset += (x & (microblkxs - 1)) * rrb->cpp; 169 } 170 } else { 171 microblkxs = 32 / 
rrb->cpp; 172 nmicroblkpl = ((rrb->pitch + 31) & ~31) >> 5; 173 offset += (y * nmicroblkpl) << 5; 174 offset += (x / microblkxs) << 5; 175 offset += (x & (microblkxs - 1)) * rrb->cpp; 176 } 177 } 178 return &ptr[offset]; 179} 180 181#ifndef COMPILE_R300 182static uint32_t 183z24s8_to_s8z24(uint32_t val) 184{ 185 return (val << 24) | (val >> 8); 186} 187 188static uint32_t 189s8z24_to_z24s8(uint32_t val) 190{ 191 return (val >> 24) | (val << 8); 192} 193#endif 194 195/* 196 * Note that all information needed to access pixels in a renderbuffer 197 * should be obtained through the gl_renderbuffer parameter, not per-context 198 * information. 199 */ 200#define LOCAL_VARS \ 201 struct radeon_context *radeon = RADEON_CONTEXT(ctx); \ 202 struct radeon_renderbuffer *rrb = (void *) rb; \ 203 const GLint yScale = ctx->DrawBuffer->Name ? 1 : -1; \ 204 const GLint yBias = ctx->DrawBuffer->Name ? 0 : rrb->base.Height - 1;\ 205 unsigned int num_cliprects; \ 206 struct drm_clip_rect *cliprects; \ 207 int x_off, y_off; \ 208 GLuint p; \ 209 (void)p; \ 210 radeon_get_cliprects(radeon, &cliprects, &num_cliprects, &x_off, &y_off); 211 212#define LOCAL_DEPTH_VARS \ 213 struct radeon_context *radeon = RADEON_CONTEXT(ctx); \ 214 struct radeon_renderbuffer *rrb = (void *) rb; \ 215 const GLint yScale = ctx->DrawBuffer->Name ? 1 : -1; \ 216 const GLint yBias = ctx->DrawBuffer->Name ? 0 : rrb->base.Height - 1;\ 217 unsigned int num_cliprects; \ 218 struct drm_clip_rect *cliprects; \ 219 int x_off, y_off; \ 220 radeon_get_cliprects(radeon, &cliprects, &num_cliprects, &x_off, &y_off); 221 222#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS 223 224#define Y_FLIP(_y) ((_y) * yScale + yBias) 225 226#define HW_LOCK() 227 228#define HW_UNLOCK() 229 230/* XXX FBO: this is identical to the macro in spantmp2.h except we get 231 * the cliprect info from the context, not the driDrawable. 232 * Move this into spantmp2.h someday. 
233 */ 234#define HW_CLIPLOOP() \ 235 do { \ 236 int _nc = num_cliprects; \ 237 while ( _nc-- ) { \ 238 int minx = cliprects[_nc].x1 - x_off; \ 239 int miny = cliprects[_nc].y1 - y_off; \ 240 int maxx = cliprects[_nc].x2 - x_off; \ 241 int maxy = cliprects[_nc].y2 - y_off; 242 243/* ================================================================ 244 * Color buffer 245 */ 246 247/* 16 bit, RGB565 color spanline and pixel functions 248 */ 249#define SPANTMP_PIXEL_FMT GL_RGB 250#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5 251 252#define TAG(x) radeon##x##_RGB565 253#define TAG2(x,y) radeon##x##_RGB565##y 254#define GET_PTR(X,Y) radeon_ptr16(rrb, (X) + x_off, (Y) + y_off) 255#include "spantmp2.h" 256 257/* 16 bit, ARGB1555 color spanline and pixel functions 258 */ 259#define SPANTMP_PIXEL_FMT GL_BGRA 260#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_1_5_5_5_REV 261 262#define TAG(x) radeon##x##_ARGB1555 263#define TAG2(x,y) radeon##x##_ARGB1555##y 264#define GET_PTR(X,Y) radeon_ptr16(rrb, (X) + x_off, (Y) + y_off) 265#include "spantmp2.h" 266 267/* 16 bit, RGBA4 color spanline and pixel functions 268 */ 269#define SPANTMP_PIXEL_FMT GL_BGRA 270#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_4_4_4_4_REV 271 272#define TAG(x) radeon##x##_ARGB4444 273#define TAG2(x,y) radeon##x##_ARGB4444##y 274#define GET_PTR(X,Y) radeon_ptr16(rrb, (X) + x_off, (Y) + y_off) 275#include "spantmp2.h" 276 277/* 32 bit, xRGB8888 color spanline and pixel functions 278 */ 279#define SPANTMP_PIXEL_FMT GL_BGRA 280#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV 281 282#define TAG(x) radeon##x##_xRGB8888 283#define TAG2(x,y) radeon##x##_xRGB8888##y 284#define GET_VALUE(_x, _y) ((*(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off)) | 0xff000000)) 285#define PUT_VALUE(_x, _y, d) { \ 286 GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \ 287 *_ptr = d; \ 288} while (0) 289#include "spantmp2.h" 290 291/* 32 bit, ARGB8888 color spanline and pixel functions 292 */ 293#define 
SPANTMP_PIXEL_FMT GL_BGRA 294#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV 295 296#define TAG(x) radeon##x##_ARGB8888 297#define TAG2(x,y) radeon##x##_ARGB8888##y 298#define GET_PTR(X,Y) radeon_ptr32(rrb, (X) + x_off, (Y) + y_off) 299#include "spantmp2.h" 300 301/* ================================================================ 302 * Depth buffer 303 */ 304 305/* The Radeon family has depth tiling on all the time, so we have to convert 306 * the x,y coordinates into the memory bus address (mba) in the same 307 * manner as the engine. In each case, the linear block address (ba) 308 * is calculated, and then wired with x and y to produce the final 309 * memory address. 310 * The chip will do address translation on its own if the surface registers 311 * are set up correctly. It is not quite enough to get it working with hyperz 312 * too... 313 */ 314 315/* 16-bit depth buffer functions 316 */ 317#define VALUE_TYPE GLushort 318 319#define WRITE_DEPTH( _x, _y, d ) \ 320 *(GLushort *)radeon_ptr(rrb, _x + x_off, _y + y_off) = d 321 322#define READ_DEPTH( d, _x, _y ) \ 323 d = *(GLushort *)radeon_ptr(rrb, _x + x_off, _y + y_off) 324 325#define TAG(x) radeon##x##_z16 326#include "depthtmp.h" 327 328/* 24 bit depth 329 * 330 * Careful: It looks like the R300 uses ZZZS byte order while the R200 331 * uses SZZZ for 24 bit depth, 8 bit stencil mode. 
 */
#define VALUE_TYPE GLuint

/* 24-bit depth writes only touch the 24 depth bits and preserve the
 * remaining byte of the 32-bit word.  R300 keeps depth in the high 24
 * bits; earlier parts keep it in the low 24 bits (see the note above).
 * NOTE(review): `d` is expanded unparenthesized in `d << 8` in the R300
 * branch -- fine for the expressions depthtmp.h passes, but fragile.
 */
#ifdef COMPILE_R300
#define WRITE_DEPTH( _x, _y, d )					\
do {									\
	GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \
	GLuint tmp = *_ptr;						\
	tmp &= 0x000000ff;						\
	tmp |= ((d << 8) & 0xffffff00);					\
	*_ptr = tmp;							\
} while (0)
#else
#define WRITE_DEPTH( _x, _y, d )					\
do {									\
	GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \
	GLuint tmp = *_ptr;						\
	tmp &= 0xff000000;						\
	tmp |= ((d) & 0x00ffffff);					\
	*_ptr = tmp;							\
} while (0)
#endif

#ifdef COMPILE_R300
#define READ_DEPTH( d, _x, _y )						\
  do {									\
    d = (*(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off)) & 0xffffff00) >> 8; \
  }while(0)
#else
#define READ_DEPTH( d, _x, _y )						\
  d = *(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off)) & 0x00ffffff;
#endif
/*
    fprintf(stderr, "dval(%d, %d, %d, %d)=0x%08X\n", _x, xo, _y, yo, d);\
   d = *(GLuint*)(radeon_ptr(rrb, _x, _y )) & 0x00ffffff;
*/
#define TAG(x) radeon##x##_z24
#include "depthtmp.h"

/* 24 bit depth, 8 bit stencil depthbuffer functions
 * EXT_depth_stencil
 *
 * Careful: It looks like the R300 uses ZZZS byte order while the R200
 * uses SZZZ for 24 bit depth, 8 bit stencil mode.
 */
#define VALUE_TYPE GLuint

/* Combined depth+stencil: the whole 32-bit word is written.  Pre-R300
 * parts store the packed value byte-rotated (stencil in the top byte),
 * so writes/reads go through the z24s8 <-> s8z24 converters.
 */
#ifdef COMPILE_R300
#define WRITE_DEPTH( _x, _y, d )					\
do {									\
	GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \
	*_ptr = d;							\
} while (0)
#else
#define WRITE_DEPTH( _x, _y, d )					\
do {									\
	GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \
	GLuint tmp = z24s8_to_s8z24(d);					\
	*_ptr = tmp;							\
} while (0)
#endif

#ifdef COMPILE_R300
#define READ_DEPTH( d, _x, _y )						\
  do {									\
    d = (*(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off)));	\
  }while(0)
#else
#define READ_DEPTH( d, _x, _y ) do {					\
    d = s8z24_to_z24s8(*(GLuint*)(radeon_ptr32(rrb, _x + x_off, _y + y_off ))); \
  } while (0)
#endif
/*
    fprintf(stderr, "dval(%d, %d, %d, %d)=0x%08X\n", _x, xo, _y, yo, d);\
   d = *(GLuint*)(radeon_ptr(rrb, _x, _y )) & 0x00ffffff;
*/
#define TAG(x) radeon##x##_z24_s8
#include "depthtmp.h"

/* ================================================================
 * Stencil buffer
 */

/* 24 bit depth, 8 bit stencil depthbuffer functions.
 * Only the stencil byte is modified on writes (low byte on R300, top
 * byte on earlier parts); the 24 depth bits are preserved.
 */
#ifdef COMPILE_R300
#define WRITE_STENCIL( _x, _y, d )					\
do {									\
	GLuint *_ptr = (GLuint*)radeon_ptr32(rrb, _x + x_off, _y + y_off); \
	GLuint tmp = *_ptr;						\
	tmp &= 0xffffff00;						\
	tmp |= (d) & 0xff;						\
	*_ptr = tmp;							\
} while (0)
#else
#define WRITE_STENCIL( _x, _y, d )					\
do {									\
	GLuint *_ptr = (GLuint*)radeon_ptr32(rrb, _x + x_off, _y + y_off); \
	GLuint tmp = *_ptr;						\
	tmp &= 0x00ffffff;						\
	tmp |= (((d) & 0xff) << 24);					\
	*_ptr = tmp;							\
} while (0)
#endif

#ifdef COMPILE_R300
#define READ_STENCIL( d, _x, _y )					\
do {									\
	GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \
	GLuint tmp = *_ptr;						\
	d = tmp & 0x000000ff;						\
} while (0)
#else
#define READ_STENCIL( d, _x, _y )					\
do {									\
	GLuint *_ptr = (GLuint*)radeon_ptr32( rrb, _x + x_off, _y + y_off ); \
	GLuint tmp = *_ptr;						\
	d = (tmp & 0xff000000) >> 24;					\
} while (0)
#endif

#define TAG(x) radeon##x##_z24_s8
#include "stenciltmp.h"


/* Map (flag != 0) or unmap (flag == 0) the BO behind a renderbuffer.
 * On map: optionally waits for the BO to be idle, maps it for CPU
 * access, and installs the format-specific span functions.  On unmap:
 * releases the mapping and clears GetRow/PutRow so swrast cannot touch
 * an unmapped buffer.
 * NOTE(review): a radeon_bo_map() failure is only logged here;
 * radeonSetSpanFunctions() is still called afterwards -- confirm that
 * is intended.
 */
static void map_unmap_rb(struct gl_renderbuffer *rb, int flag)
{
	struct radeon_renderbuffer *rrb = radeon_renderbuffer(rb);
	int r;

	/* Nothing to do for wrappers without a backing BO. */
	if (rrb == NULL || !rrb->bo)
		return;

	if (flag) {
		if (rrb->bo->bom->funcs->bo_wait)
			radeon_bo_wait(rrb->bo);
		r = radeon_bo_map(rrb->bo, 1);
		if (r) {
			fprintf(stderr, "(%s) error(%d) mapping buffer.\n",
				__FUNCTION__, r);
		}

		radeonSetSpanFunctions(rrb);
	} else {
		radeon_bo_unmap(rrb->bo);
		rb->GetRow = NULL;
		rb->PutRow = NULL;
	}
}

/* Map or unmap (per `map`) every buffer software rendering may touch:
 * all color draw buffers, any render-to-texture images attached to the
 * draw framebuffer, the color read buffer, and the depth/stencil
 * wrapper renderbuffers.
 */
static void
radeon_map_unmap_buffers(GLcontext *ctx, GLboolean map)
{
	GLuint i, j;

	/* color draw buffers */
	for (j = 0; j < ctx->DrawBuffer->_NumColorDrawBuffers; j++)
		map_unmap_rb(ctx->DrawBuffer->_ColorDrawBuffers[j], map);

	/* check for render to textures */
	for (i = 0; i < BUFFER_COUNT; i++) {
		struct gl_renderbuffer_attachment *att =
			ctx->DrawBuffer->Attachment + i;
		struct gl_texture_object *tex = att->Texture;
		if (tex) {
			/* Render to texture. Note that a mipmapped texture need not
			 * be complete for render to texture, so we must restrict to
			 * mapping only the attached image.
			 */
			radeon_texture_image *image = get_radeon_texture_image(tex->Image[att->CubeMapFace][att->TextureLevel]);
			ASSERT(att->Renderbuffer);

			if (map)
				radeon_teximage_map(image, GL_TRUE);
			else
				radeon_teximage_unmap(image);
		}
	}

	map_unmap_rb(ctx->ReadBuffer->_ColorReadBuffer, map);

	/* depth buffer (Note wrapper!)
	 */
	if (ctx->DrawBuffer->_DepthBuffer)
		map_unmap_rb(ctx->DrawBuffer->_DepthBuffer->Wrapped, map);

	if (ctx->DrawBuffer->_StencilBuffer)
		map_unmap_rb(ctx->DrawBuffer->_StencilBuffer->Wrapped, map);
}

/* swrast SpanRenderStart hook: flush queued vertices, take the hardware
 * lock and idle the GPU (classic/DRI1 mode only), then map all enabled
 * textures and every buffer that spans may read or write.
 */
static void radeonSpanRenderStart(GLcontext * ctx)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	int i;

	radeon_firevertices(rmesa);

	/* The locking and wait for idle should really only be needed in classic mode.
	 * In a future memory manager based implementation, this should become
	 * unnecessary due to the fact that mapping our buffers, textures, etc.
	 * should implicitly wait for any previous rendering commands that must
	 * be waited on. */
	if (!rmesa->radeonScreen->driScreen->dri2.enabled) {
		LOCK_HARDWARE(rmesa);
		radeonWaitForIdleLocked(rmesa);
	}

	for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
		if (ctx->Texture.Unit[i]._ReallyEnabled)
			ctx->Driver.MapTexture(ctx, ctx->Texture.Unit[i]._Current);
	}

	radeon_map_unmap_buffers(ctx, 1);
}

/* swrast SpanRenderFinish hook: flush swrast, release the hardware lock
 * (classic mode), and undo the mappings made in radeonSpanRenderStart.
 */
static void radeonSpanRenderFinish(GLcontext * ctx)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	int i;
	_swrast_flush(ctx);
	if (!rmesa->radeonScreen->driScreen->dri2.enabled) {
		UNLOCK_HARDWARE(rmesa);
	}
	for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
		if (ctx->Texture.Unit[i]._ReallyEnabled)
			ctx->Driver.UnmapTexture(ctx, ctx->Texture.Unit[i]._Current);
	}

	radeon_map_unmap_buffers(ctx, 0);
}

/* Install the span-batch begin/end callbacks into swrast. */
void radeonInitSpanFuncs(GLcontext * ctx)
{
	struct swrast_device_driver *swdd =
		_swrast_GetDeviceDriverReference(ctx);
	swdd->SpanRenderStart = radeonSpanRenderStart;
	swdd->SpanRenderFinish = radeonSpanRenderFinish;
}

/**
 * Plug in the Get/Put routines for the given driRenderbuffer.
572 */ 573static void radeonSetSpanFunctions(struct radeon_renderbuffer *rrb) 574{ 575 if (rrb->base._ActualFormat == GL_RGB5) { 576 radeonInitPointers_RGB565(&rrb->base); 577 } else if (rrb->base._ActualFormat == GL_RGB8) { 578 radeonInitPointers_xRGB8888(&rrb->base); 579 } else if (rrb->base._ActualFormat == GL_RGBA8) { 580 radeonInitPointers_ARGB8888(&rrb->base); 581 } else if (rrb->base._ActualFormat == GL_RGBA4) { 582 radeonInitPointers_ARGB4444(&rrb->base); 583 } else if (rrb->base._ActualFormat == GL_RGB5_A1) { 584 radeonInitPointers_ARGB1555(&rrb->base); 585 } else if (rrb->base._ActualFormat == GL_DEPTH_COMPONENT16) { 586 radeonInitDepthPointers_z16(&rrb->base); 587 } else if (rrb->base._ActualFormat == GL_DEPTH_COMPONENT24) { 588 radeonInitDepthPointers_z24(&rrb->base); 589 } else if (rrb->base._ActualFormat == GL_DEPTH24_STENCIL8_EXT) { 590 radeonInitDepthPointers_z24_s8(&rrb->base); 591 } else if (rrb->base._ActualFormat == GL_STENCIL_INDEX8_EXT) { 592 radeonInitStencilPointers_z24_s8(&rrb->base); 593 } else { 594 fprintf(stderr, "radeonSetSpanFunctions: bad actual format: 0x%04X\n", rrb->base._ActualFormat); 595 } 596} 597