/* radeon_texture.c — revision 7628b06ba32e42f57a4fdb322bc32e3b411c1f18 */
1/* 2 * Copyright (C) 2008 Nicolai Haehnle. 3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 4 * 5 * The Weather Channel (TM) funded Tungsten Graphics to develop the 6 * initial release of the Radeon 8500 driver under the XFree86 license. 7 * This notice must be preserved. 8 * 9 * Permission is hereby granted, free of charge, to any person obtaining 10 * a copy of this software and associated documentation files (the 11 * "Software"), to deal in the Software without restriction, including 12 * without limitation the rights to use, copy, modify, merge, publish, 13 * distribute, sublicense, and/or sell copies of the Software, and to 14 * permit persons to whom the Software is furnished to do so, subject to 15 * the following conditions: 16 * 17 * The above copyright notice and this permission notice (including the 18 * next paragraph) shall be included in all copies or substantial 19 * portions of the Software. 20 * 21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 23 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 24 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 25 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 26 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 27 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
28 * 29 */ 30 31#include "main/glheader.h" 32#include "main/imports.h" 33#include "main/context.h" 34#include "main/convolve.h" 35#include "main/mipmap.h" 36#include "main/texcompress.h" 37#include "main/texstore.h" 38#include "main/teximage.h" 39#include "main/texobj.h" 40#include "main/texgetimage.h" 41 42#include "xmlpool.h" /* for symbolic values of enum-type options */ 43 44#include "radeon_common.h" 45 46#include "radeon_mipmap_tree.h" 47 48 49static void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride, 50 GLuint numrows, GLuint rowsize) 51{ 52 assert(rowsize <= dststride); 53 assert(rowsize <= srcstride); 54 55 if (rowsize == srcstride && rowsize == dststride) { 56 memcpy(dst, src, numrows*rowsize); 57 } else { 58 GLuint i; 59 for(i = 0; i < numrows; ++i) { 60 memcpy(dst, src, rowsize); 61 dst += dststride; 62 src += srcstride; 63 } 64 } 65} 66 67/* textures */ 68/** 69 * Allocate an empty texture image object. 70 */ 71struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx) 72{ 73 return CALLOC(sizeof(radeon_texture_image)); 74} 75 76/** 77 * Free memory associated with this texture image. 
 */
void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		/* Data lives inside the miptree BO: drop our reference instead
		 * of freeing, and the image must not also own malloc'd Data. */
		radeon_miptree_unreference(&image->mt);
		assert(!image->base.Data);
	} else {
		/* No miptree: let Mesa core free the malloc'd image data. */
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}

/* Set Data pointer and additional data for mapped texture image */
static void teximage_set_map_data(radeon_texture_image *image)
{
	radeon_mipmap_level *lvl;

	/* Only meaningful for images backed by a (mapped) miptree. */
	if (!image->mt)
		return;

	/* NOTE: mt->levels[] is indexed by image->mtlevel, i.e. relative to
	 * the miptree's first level. */
	lvl = &image->mt->levels[image->mtlevel];

	/* Point Data at this face/level inside the mapped BO.
	 * RowStride is in texels; lvl->rowstride is in bytes. */
	image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
	image->base.RowStride = lvl->rowstride / image->mt->bpp;
}


/**
 * Map a single texture image for glTexImage and friends.
 *
 * No-op for images without a miptree (their Data is plain malloc'd memory
 * that is always accessible).
 */
void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
{
	if (image->mt) {
		/* Must not already be mapped. */
		assert(!image->base.Data);

		radeon_bo_map(image->mt->bo, write_enable);
		teximage_set_map_data(image);
	}
}


/* Inverse of radeon_teximage_map(): clear Data and unmap the BO. */
void radeon_teximage_unmap(radeon_texture_image *image)
{
	if (image->mt) {
		/* Must currently be mapped. */
		assert(image->base.Data);

		image->base.Data = 0;
		radeon_bo_unmap(image->mt->bo);
	}
}

/* Map an image-override texture (external BO, no miptree) for sw access. */
static void map_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_map(t->bo, GL_FALSE);

	img->base.Data = t->bo->ptr;
}

/* Inverse of map_override(). */
static void unmap_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_unmap(t->bo);

	img->base.Data = NULL;
}

/**
 * Map a validated texture for reading during software rendering.
 */
void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	/* Consolidate all images into one miptree first; if that fails there
	 * is nothing coherent to map. */
	if (!radeon_validate_texture_miptree(ctx, texObj))
		return;

	/* for r100 3D sw fallbacks don't have mt */
	if (t->image_override && t->bo)
		map_override(ctx, t);

	if (!t->mt)
		return;

	/* Map the whole tree read-only, then point every image's Data at its
	 * face/level within the mapping. */
	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}

/* Undo radeonMapTexture(): clear every image's Data and unmap the BO(s). */
void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	if (t->image_override && t->bo)
		unmap_override(ctx, t);
	/* for r100 3D sw fallbacks don't have mt */
	if (!t->mt)
		return;

	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
			texObj->Image[face][level]->Data = 0;
	}
	radeon_bo_unmap(t->mt->bo);
}

/**
 * Map a cube map face target to its face index; 0 for non-cube targets.
 * Relies on the GL cube face enums being consecutive.
 */
GLuint radeon_face_for_target(GLenum target)
{
	switch (target) {
	case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
	case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
	case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
	case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
	case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
	case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
		return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
	default:
		return 0;
	}
}

/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
219 */ 220static void radeon_generate_mipmap(GLcontext *ctx, GLenum target, 221 struct gl_texture_object *texObj) 222{ 223 radeonTexObj* t = radeon_tex_obj(texObj); 224 GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1; 225 int i, face; 226 227 228 _mesa_generate_mipmap(ctx, target, texObj); 229 230 for (face = 0; face < nr_faces; face++) { 231 for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) { 232 radeon_texture_image *image; 233 234 image = get_radeon_texture_image(texObj->Image[face][i]); 235 236 if (image == NULL) 237 break; 238 239 image->mtlevel = i; 240 image->mtface = face; 241 242 radeon_miptree_unreference(&image->mt); 243 } 244 } 245 246} 247 248void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj) 249{ 250 GLuint face = radeon_face_for_target(target); 251 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]); 252 253 radeon_teximage_map(baseimage, GL_FALSE); 254 radeon_generate_mipmap(ctx, target, texObj); 255 radeon_teximage_unmap(baseimage); 256} 257 258 259/* try to find a format which will only need a memcopy */ 260static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa, 261 GLenum srcFormat, 262 GLenum srcType, GLboolean fbo) 263{ 264 const GLuint ui = 1; 265 const GLubyte littleEndian = *((const GLubyte *)&ui); 266 267 /* r100 can only do this */ 268 if (IS_R100_CLASS(rmesa->radeonScreen) || fbo) 269 return _dri_texformat_argb8888; 270 271 if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) || 272 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) || 273 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) || 274 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) { 275 return MESA_FORMAT_RGBA8888; 276 } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) || 277 (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) || 278 
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) || 279 (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) { 280 return MESA_FORMAT_RGBA8888_REV; 281 } else if (IS_R200_CLASS(rmesa->radeonScreen)) { 282 return _dri_texformat_argb8888; 283 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) || 284 srcType == GL_UNSIGNED_INT_8_8_8_8)) { 285 return MESA_FORMAT_ARGB8888_REV; 286 } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) || 287 srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) { 288 return MESA_FORMAT_ARGB8888; 289 } else 290 return _dri_texformat_argb8888; 291} 292 293gl_format radeonChooseTextureFormat_mesa(GLcontext * ctx, 294 GLint internalFormat, 295 GLenum format, 296 GLenum type) 297{ 298 return radeonChooseTextureFormat(ctx, internalFormat, format, 299 type, 0); 300} 301 302gl_format radeonChooseTextureFormat(GLcontext * ctx, 303 GLint internalFormat, 304 GLenum format, 305 GLenum type, GLboolean fbo) 306{ 307 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 308 const GLboolean do32bpt = 309 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32); 310 const GLboolean force16bpt = 311 (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16); 312 (void)format; 313 314#if 0 315 fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n", 316 _mesa_lookup_enum_by_nr(internalFormat), internalFormat, 317 _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format)); 318 fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt); 319#endif 320 321 switch (internalFormat) { 322 case 4: 323 case GL_RGBA: 324 case GL_COMPRESSED_RGBA: 325 switch (type) { 326 case GL_UNSIGNED_INT_10_10_10_2: 327 case GL_UNSIGNED_INT_2_10_10_10_REV: 328 return do32bpt ? 
_dri_texformat_argb8888 : 329 _dri_texformat_argb1555; 330 case GL_UNSIGNED_SHORT_4_4_4_4: 331 case GL_UNSIGNED_SHORT_4_4_4_4_REV: 332 return _dri_texformat_argb4444; 333 case GL_UNSIGNED_SHORT_5_5_5_1: 334 case GL_UNSIGNED_SHORT_1_5_5_5_REV: 335 return _dri_texformat_argb1555; 336 default: 337 return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) : 338 _dri_texformat_argb4444; 339 } 340 341 case 3: 342 case GL_RGB: 343 case GL_COMPRESSED_RGB: 344 switch (type) { 345 case GL_UNSIGNED_SHORT_4_4_4_4: 346 case GL_UNSIGNED_SHORT_4_4_4_4_REV: 347 return _dri_texformat_argb4444; 348 case GL_UNSIGNED_SHORT_5_5_5_1: 349 case GL_UNSIGNED_SHORT_1_5_5_5_REV: 350 return _dri_texformat_argb1555; 351 case GL_UNSIGNED_SHORT_5_6_5: 352 case GL_UNSIGNED_SHORT_5_6_5_REV: 353 return _dri_texformat_rgb565; 354 default: 355 return do32bpt ? _dri_texformat_argb8888 : 356 _dri_texformat_rgb565; 357 } 358 359 case GL_RGBA8: 360 case GL_RGB10_A2: 361 case GL_RGBA12: 362 case GL_RGBA16: 363 return !force16bpt ? 364 radeonChoose8888TexFormat(rmesa, format, type, fbo) : 365 _dri_texformat_argb4444; 366 367 case GL_RGBA4: 368 case GL_RGBA2: 369 return _dri_texformat_argb4444; 370 371 case GL_RGB5_A1: 372 return _dri_texformat_argb1555; 373 374 case GL_RGB8: 375 case GL_RGB10: 376 case GL_RGB12: 377 case GL_RGB16: 378 return !force16bpt ? _dri_texformat_argb8888 : 379 _dri_texformat_rgb565; 380 381 case GL_RGB5: 382 case GL_RGB4: 383 case GL_R3_G3_B2: 384 return _dri_texformat_rgb565; 385 386 case GL_ALPHA: 387 case GL_ALPHA4: 388 case GL_ALPHA8: 389 case GL_ALPHA12: 390 case GL_ALPHA16: 391 case GL_COMPRESSED_ALPHA: 392 /* r200: can't use a8 format since interpreting hw I8 as a8 would result 393 in wrong rgb values (same as alpha value instead of 0). 
*/ 394 if (IS_R200_CLASS(rmesa->radeonScreen)) 395 return _dri_texformat_al88; 396 else 397 return _dri_texformat_a8; 398 case 1: 399 case GL_LUMINANCE: 400 case GL_LUMINANCE4: 401 case GL_LUMINANCE8: 402 case GL_LUMINANCE12: 403 case GL_LUMINANCE16: 404 case GL_COMPRESSED_LUMINANCE: 405 return _dri_texformat_l8; 406 407 case 2: 408 case GL_LUMINANCE_ALPHA: 409 case GL_LUMINANCE4_ALPHA4: 410 case GL_LUMINANCE6_ALPHA2: 411 case GL_LUMINANCE8_ALPHA8: 412 case GL_LUMINANCE12_ALPHA4: 413 case GL_LUMINANCE12_ALPHA12: 414 case GL_LUMINANCE16_ALPHA16: 415 case GL_COMPRESSED_LUMINANCE_ALPHA: 416 return _dri_texformat_al88; 417 418 case GL_INTENSITY: 419 case GL_INTENSITY4: 420 case GL_INTENSITY8: 421 case GL_INTENSITY12: 422 case GL_INTENSITY16: 423 case GL_COMPRESSED_INTENSITY: 424 return _dri_texformat_i8; 425 426 case GL_YCBCR_MESA: 427 if (type == GL_UNSIGNED_SHORT_8_8_APPLE || 428 type == GL_UNSIGNED_BYTE) 429 return MESA_FORMAT_YCBCR; 430 else 431 return MESA_FORMAT_YCBCR_REV; 432 433 case GL_RGB_S3TC: 434 case GL_RGB4_S3TC: 435 case GL_COMPRESSED_RGB_S3TC_DXT1_EXT: 436 return MESA_FORMAT_RGB_DXT1; 437 438 case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT: 439 return MESA_FORMAT_RGBA_DXT1; 440 441 case GL_RGBA_S3TC: 442 case GL_RGBA4_S3TC: 443 case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT: 444 return MESA_FORMAT_RGBA_DXT3; 445 446 case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT: 447 return MESA_FORMAT_RGBA_DXT5; 448 449 case GL_ALPHA16F_ARB: 450 return MESA_FORMAT_ALPHA_FLOAT16; 451 case GL_ALPHA32F_ARB: 452 return MESA_FORMAT_ALPHA_FLOAT32; 453 case GL_LUMINANCE16F_ARB: 454 return MESA_FORMAT_LUMINANCE_FLOAT16; 455 case GL_LUMINANCE32F_ARB: 456 return MESA_FORMAT_LUMINANCE_FLOAT32; 457 case GL_LUMINANCE_ALPHA16F_ARB: 458 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16; 459 case GL_LUMINANCE_ALPHA32F_ARB: 460 return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32; 461 case GL_INTENSITY16F_ARB: 462 return MESA_FORMAT_INTENSITY_FLOAT16; 463 case GL_INTENSITY32F_ARB: 464 return MESA_FORMAT_INTENSITY_FLOAT32; 465 
case GL_RGB16F_ARB: 466 return MESA_FORMAT_RGBA_FLOAT16; 467 case GL_RGB32F_ARB: 468 return MESA_FORMAT_RGBA_FLOAT32; 469 case GL_RGBA16F_ARB: 470 return MESA_FORMAT_RGBA_FLOAT16; 471 case GL_RGBA32F_ARB: 472 return MESA_FORMAT_RGBA_FLOAT32; 473 474 case GL_DEPTH_COMPONENT: 475 case GL_DEPTH_COMPONENT16: 476 case GL_DEPTH_COMPONENT24: 477 case GL_DEPTH_COMPONENT32: 478 case GL_DEPTH_STENCIL_EXT: 479 case GL_DEPTH24_STENCIL8_EXT: 480 return MESA_FORMAT_S8_Z24; 481 482 /* EXT_texture_sRGB */ 483 case GL_SRGB: 484 case GL_SRGB8: 485 case GL_SRGB_ALPHA: 486 case GL_SRGB8_ALPHA8: 487 case GL_COMPRESSED_SRGB: 488 case GL_COMPRESSED_SRGB_ALPHA: 489 return MESA_FORMAT_SRGBA8; 490 491 case GL_SLUMINANCE: 492 case GL_SLUMINANCE8: 493 case GL_COMPRESSED_SLUMINANCE: 494 return MESA_FORMAT_SL8; 495 496 case GL_SLUMINANCE_ALPHA: 497 case GL_SLUMINANCE8_ALPHA8: 498 case GL_COMPRESSED_SLUMINANCE_ALPHA: 499 return MESA_FORMAT_SLA8; 500 501 default: 502 _mesa_problem(ctx, 503 "unexpected internalFormat 0x%x in %s", 504 (int)internalFormat, __func__); 505 return MESA_FORMAT_NONE; 506 } 507 508 return MESA_FORMAT_NONE; /* never get here */ 509} 510 511/** 512 * All glTexImage calls go through this function. 513 */ 514static void radeon_teximage( 515 GLcontext *ctx, int dims, 516 GLenum target, GLint level, 517 GLint internalFormat, 518 GLint width, GLint height, GLint depth, 519 GLsizei imageSize, 520 GLenum format, GLenum type, const GLvoid * pixels, 521 const struct gl_pixelstore_attrib *packing, 522 struct gl_texture_object *texObj, 523 struct gl_texture_image *texImage, 524 int compressed) 525{ 526 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 527 radeonTexObj* t = radeon_tex_obj(texObj); 528 radeon_texture_image* image = get_radeon_texture_image(texImage); 529 GLuint dstRowStride; 530 GLint postConvWidth = width; 531 GLint postConvHeight = height; 532 GLuint texelBytes; 533 GLuint face = radeon_face_for_target(target); 534 535 { 536 struct radeon_bo *bo; 537 bo = !image->mt ? 
image->bo : image->mt->bo; 538 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) { 539 radeon_firevertices(rmesa); 540 } 541 } 542 543 t->validated = GL_FALSE; 544 545 if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) { 546 _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth, 547 &postConvHeight); 548 } 549 550 if (_mesa_is_format_compressed(texImage->TexFormat)) { 551 texelBytes = 0; 552 } else { 553 texelBytes = _mesa_get_format_bytes(texImage->TexFormat); 554 /* Minimum pitch of 32 bytes */ 555 if (postConvWidth * texelBytes < 32) { 556 postConvWidth = 32 / texelBytes; 557 texImage->RowStride = postConvWidth; 558 } 559 if (!image->mt) { 560 assert(texImage->RowStride == postConvWidth); 561 } 562 } 563 564 /* Allocate memory for image */ 565 radeonFreeTexImageData(ctx, texImage); /* Mesa core only clears texImage->Data but not image->mt */ 566 567 if (t->mt && 568 t->mt->firstLevel == level && 569 t->mt->lastLevel == level && 570 t->mt->target != GL_TEXTURE_CUBE_MAP_ARB && 571 !radeon_miptree_matches_image(t->mt, texImage, face, level)) { 572 radeon_miptree_unreference(&t->mt); 573 } 574 575 if (!t->mt) 576 radeon_try_alloc_miptree(rmesa, t, image, face, level); 577 if (t->mt && radeon_miptree_matches_image(t->mt, texImage, face, level)) { 578 radeon_mipmap_level *lvl; 579 image->mtlevel = level - t->mt->firstLevel; 580 image->mtface = face; 581 radeon_miptree_reference(t->mt, &image->mt); 582 lvl = &image->mt->levels[image->mtlevel]; 583 dstRowStride = lvl->rowstride; 584 } else { 585 int size; 586 if (_mesa_is_format_compressed(texImage->TexFormat)) { 587 size = _mesa_format_image_size(texImage->TexFormat, 588 texImage->Width, 589 texImage->Height, 590 texImage->Depth); 591 } else { 592 size = texImage->Width * texImage->Height * texImage->Depth * _mesa_get_format_bytes(texImage->TexFormat); 593 } 594 texImage->Data = _mesa_alloc_texmemory(size); 595 } 596 597 /* Upload texture image; note that the spec allows pixels to be NULL */ 598 
if (compressed) { 599 pixels = _mesa_validate_pbo_compressed_teximage( 600 ctx, imageSize, pixels, packing, "glCompressedTexImage"); 601 } else { 602 pixels = _mesa_validate_pbo_teximage( 603 ctx, dims, width, height, depth, 604 format, type, pixels, packing, "glTexImage"); 605 } 606 607 if (pixels) { 608 radeon_teximage_map(image, GL_TRUE); 609 if (compressed) { 610 if (image->mt) { 611 uint32_t srcRowStride, bytesPerRow, rows; 612 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width); 613 bytesPerRow = srcRowStride; 614 rows = (height + 3) / 4; 615 copy_rows(texImage->Data, image->mt->levels[level].rowstride, 616 pixels, srcRowStride, rows, bytesPerRow); 617 } else { 618 memcpy(texImage->Data, pixels, imageSize); 619 } 620 } else { 621 GLuint dstRowStride; 622 GLuint *dstImageOffsets; 623 624 if (image->mt) { 625 radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel]; 626 dstRowStride = lvl->rowstride; 627 } else { 628 dstRowStride = texImage->Width * _mesa_get_format_bytes(texImage->TexFormat); 629 } 630 631 if (dims == 3) { 632 int i; 633 634 dstImageOffsets = _mesa_malloc(depth * sizeof(GLuint)) ; 635 if (!dstImageOffsets) 636 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage"); 637 638 for (i = 0; i < depth; ++i) { 639 dstImageOffsets[i] = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat) * height * i; 640 } 641 } else { 642 dstImageOffsets = texImage->ImageOffsets; 643 } 644 645 if (!_mesa_texstore(ctx, dims, 646 texImage->_BaseFormat, 647 texImage->TexFormat, 648 texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */ 649 dstRowStride, 650 dstImageOffsets, 651 width, height, depth, 652 format, type, pixels, packing)) { 653 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage"); 654 } 655 656 if (dims == 3) 657 _mesa_free(dstImageOffsets); 658 } 659 } 660 661 _mesa_unmap_teximage_pbo(ctx, packing); 662 663 if (pixels) 664 radeon_teximage_unmap(image); 665 666 667} 668 669void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level, 670 GLint 
internalFormat, 671 GLint width, GLint border, 672 GLenum format, GLenum type, const GLvoid * pixels, 673 const struct gl_pixelstore_attrib *packing, 674 struct gl_texture_object *texObj, 675 struct gl_texture_image *texImage) 676{ 677 radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1, 678 0, format, type, pixels, packing, texObj, texImage, 0); 679} 680 681void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level, 682 GLint internalFormat, 683 GLint width, GLint height, GLint border, 684 GLenum format, GLenum type, const GLvoid * pixels, 685 const struct gl_pixelstore_attrib *packing, 686 struct gl_texture_object *texObj, 687 struct gl_texture_image *texImage) 688 689{ 690 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1, 691 0, format, type, pixels, packing, texObj, texImage, 0); 692} 693 694void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target, 695 GLint level, GLint internalFormat, 696 GLint width, GLint height, GLint border, 697 GLsizei imageSize, const GLvoid * data, 698 struct gl_texture_object *texObj, 699 struct gl_texture_image *texImage) 700{ 701 radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1, 702 imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1); 703} 704 705void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level, 706 GLint internalFormat, 707 GLint width, GLint height, GLint depth, 708 GLint border, 709 GLenum format, GLenum type, const GLvoid * pixels, 710 const struct gl_pixelstore_attrib *packing, 711 struct gl_texture_object *texObj, 712 struct gl_texture_image *texImage) 713{ 714 radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth, 715 0, format, type, pixels, packing, texObj, texImage, 0); 716} 717 718/** 719 * Update a subregion of the given texture image. 
720 */ 721static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level, 722 GLint xoffset, GLint yoffset, GLint zoffset, 723 GLsizei width, GLsizei height, GLsizei depth, 724 GLsizei imageSize, 725 GLenum format, GLenum type, 726 const GLvoid * pixels, 727 const struct gl_pixelstore_attrib *packing, 728 struct gl_texture_object *texObj, 729 struct gl_texture_image *texImage, 730 int compressed) 731{ 732 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 733 radeonTexObj* t = radeon_tex_obj(texObj); 734 radeon_texture_image* image = get_radeon_texture_image(texImage); 735 736 { 737 struct radeon_bo *bo; 738 bo = !image->mt ? image->bo : image->mt->bo; 739 if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) { 740 radeon_firevertices(rmesa); 741 } 742 } 743 744 t->validated = GL_FALSE; 745 if (compressed) { 746 pixels = _mesa_validate_pbo_compressed_teximage( 747 ctx, imageSize, pixels, packing, "glCompressedTexImage"); 748 } else { 749 pixels = _mesa_validate_pbo_teximage(ctx, dims, 750 width, height, depth, format, type, pixels, packing, "glTexSubImage1D"); 751 } 752 753 if (pixels) { 754 GLint dstRowStride; 755 radeon_teximage_map(image, GL_TRUE); 756 757 if (image->mt) { 758 radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel]; 759 dstRowStride = lvl->rowstride; 760 } else { 761 dstRowStride = texImage->RowStride * _mesa_get_format_bytes(texImage->TexFormat); 762 } 763 764 if (compressed) { 765 uint32_t srcRowStride, bytesPerRow, rows; 766 GLubyte *img_start; 767 if (!image->mt) { 768 dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width); 769 img_start = _mesa_compressed_image_address(xoffset, yoffset, 0, 770 texImage->TexFormat, 771 texImage->Width, texImage->Data); 772 } 773 else { 774 uint32_t blocks_x = dstRowStride / (image->mt->bpp * 4); 775 img_start = texImage->Data + image->mt->bpp * 4 * (blocks_x * (yoffset / 4) + xoffset / 4); 776 } 777 srcRowStride = _mesa_format_row_stride(texImage->TexFormat, 
width); 778 bytesPerRow = srcRowStride; 779 rows = (height + 3) / 4; 780 781 copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow); 782 783 } 784 else { 785 if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat, 786 texImage->TexFormat, texImage->Data, 787 xoffset, yoffset, zoffset, 788 dstRowStride, 789 texImage->ImageOffsets, 790 width, height, depth, 791 format, type, pixels, packing)) { 792 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage"); 793 } 794 } 795 } 796 797 radeon_teximage_unmap(image); 798 799 _mesa_unmap_teximage_pbo(ctx, packing); 800 801 802} 803 804void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level, 805 GLint xoffset, 806 GLsizei width, 807 GLenum format, GLenum type, 808 const GLvoid * pixels, 809 const struct gl_pixelstore_attrib *packing, 810 struct gl_texture_object *texObj, 811 struct gl_texture_image *texImage) 812{ 813 radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0, 814 format, type, pixels, packing, texObj, texImage, 0); 815} 816 817void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level, 818 GLint xoffset, GLint yoffset, 819 GLsizei width, GLsizei height, 820 GLenum format, GLenum type, 821 const GLvoid * pixels, 822 const struct gl_pixelstore_attrib *packing, 823 struct gl_texture_object *texObj, 824 struct gl_texture_image *texImage) 825{ 826 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1, 827 0, format, type, pixels, packing, texObj, texImage, 828 0); 829} 830 831void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target, 832 GLint level, GLint xoffset, 833 GLint yoffset, GLsizei width, 834 GLsizei height, GLenum format, 835 GLsizei imageSize, const GLvoid * data, 836 struct gl_texture_object *texObj, 837 struct gl_texture_image *texImage) 838{ 839 radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1, 840 imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1); 841} 842 843 844void 
radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level, 845 GLint xoffset, GLint yoffset, GLint zoffset, 846 GLsizei width, GLsizei height, GLsizei depth, 847 GLenum format, GLenum type, 848 const GLvoid * pixels, 849 const struct gl_pixelstore_attrib *packing, 850 struct gl_texture_object *texObj, 851 struct gl_texture_image *texImage) 852{ 853 radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0, 854 format, type, pixels, packing, texObj, texImage, 0); 855} 856 857 858 859/** 860 * Ensure that the given image is stored in the given miptree from now on. 861 */ 862static void migrate_image_to_miptree(radeon_mipmap_tree *mt, radeon_texture_image *image, int face, int level) 863{ 864 radeon_mipmap_level *dstlvl = &mt->levels[level - mt->firstLevel]; 865 unsigned char *dest; 866 867 assert(image->mt != mt); 868 assert(dstlvl->width == image->base.Width); 869 assert(dstlvl->height == image->base.Height); 870 assert(dstlvl->depth == image->base.Depth); 871 872 873 radeon_bo_map(mt->bo, GL_TRUE); 874 dest = mt->bo->ptr + dstlvl->faces[face].offset; 875 876 if (image->mt) { 877 /* Format etc. should match, so we really just need a memcpy(). 878 * In fact, that memcpy() could be done by the hardware in many 879 * cases, provided that we have a proper memory manager. 
880 */ 881 radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel-image->mt->firstLevel]; 882 883 assert(srclvl->size == dstlvl->size); 884 assert(srclvl->rowstride == dstlvl->rowstride); 885 886 radeon_bo_map(image->mt->bo, GL_FALSE); 887 888 memcpy(dest, 889 image->mt->bo->ptr + srclvl->faces[face].offset, 890 dstlvl->size); 891 radeon_bo_unmap(image->mt->bo); 892 893 radeon_miptree_unreference(&image->mt); 894 } else { 895 uint32_t srcrowstride; 896 uint32_t height; 897 /* need to confirm this value is correct */ 898 if (mt->compressed) { 899 height = (image->base.Height + 3) / 4; 900 srcrowstride = _mesa_format_row_stride(image->base.TexFormat, image->base.Width); 901 } else { 902 height = image->base.Height * image->base.Depth; 903 srcrowstride = image->base.Width * _mesa_get_format_bytes(image->base.TexFormat); 904 } 905 906// if (mt->tilebits) 907// WARN_ONCE("%s: tiling not supported yet", __FUNCTION__); 908 909 copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride, 910 height, srcrowstride); 911 912 _mesa_free_texmemory(image->base.Data); 913 image->base.Data = 0; 914 } 915 916 radeon_bo_unmap(mt->bo); 917 918 image->mtface = face; 919 image->mtlevel = level; 920 radeon_miptree_reference(mt, &image->mt); 921} 922 923int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj) 924{ 925 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 926 radeonTexObj *t = radeon_tex_obj(texObj); 927 radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[0][texObj->BaseLevel]); 928 int face, level; 929 930 if (t->validated || t->image_override) 931 return GL_TRUE; 932 933 if (RADEON_DEBUG & RADEON_TEXTURE) 934 fprintf(stderr, "%s: Validating texture %p now\n", __FUNCTION__, texObj); 935 936 if (baseimage->base.Border > 0) 937 return GL_FALSE; 938 939 /* Ensure a matching miptree exists. 940 * 941 * Differing mipmap trees can result when the app uses TexImage to 942 * change texture dimensions. 
943 * 944 * Prefer to use base image's miptree if it 945 * exists, since that most likely contains more valid data (remember 946 * that the base level is usually significantly larger than the rest 947 * of the miptree, so cubemaps are the only possible exception). 948 */ 949 if (baseimage->mt && 950 baseimage->mt != t->mt && 951 radeon_miptree_matches_texture(baseimage->mt, &t->base)) { 952 radeon_miptree_unreference(&t->mt); 953 radeon_miptree_reference(baseimage->mt, &t->mt); 954 } else if (t->mt && !radeon_miptree_matches_texture(t->mt, &t->base)) { 955 radeon_miptree_unreference(&t->mt); 956 } 957 958 if (!t->mt) { 959 if (RADEON_DEBUG & RADEON_TEXTURE) 960 fprintf(stderr, " Allocate new miptree\n"); 961 radeon_try_alloc_miptree(rmesa, t, baseimage, 0, texObj->BaseLevel); 962 if (!t->mt) { 963 _mesa_problem(ctx, "radeon_validate_texture failed to alloc miptree"); 964 return GL_FALSE; 965 } 966 } 967 968 /* Ensure all images are stored in the single main miptree */ 969 for(face = 0; face < t->mt->faces; ++face) { 970 for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level) { 971 radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][level]); 972 if (RADEON_DEBUG & RADEON_TEXTURE) 973 fprintf(stderr, " face %i, level %i... %p vs %p ", face, level, t->mt, image->mt); 974 if (t->mt == image->mt || (!image->mt && !image->base.Data)) { 975 if (RADEON_DEBUG & RADEON_TEXTURE) 976 fprintf(stderr, "OK\n"); 977 978 continue; 979 } 980 981 if (RADEON_DEBUG & RADEON_TEXTURE) 982 fprintf(stderr, "migrating\n"); 983 migrate_image_to_miptree(t->mt, image, face, level); 984 } 985 } 986 987 return GL_TRUE; 988} 989 990 991/** 992 * Need to map texture image into memory before copying image data, 993 * then unmap it. 
994 */ 995static void 996radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level, 997 GLenum format, GLenum type, GLvoid * pixels, 998 struct gl_texture_object *texObj, 999 struct gl_texture_image *texImage, int compressed) 1000{ 1001 radeon_texture_image *image = get_radeon_texture_image(texImage); 1002 1003 if (image->mt) { 1004 /* Map the texture image read-only */ 1005 radeon_teximage_map(image, GL_FALSE); 1006 } else { 1007 /* Image hasn't been uploaded to a miptree yet */ 1008 assert(image->base.Data); 1009 } 1010 1011 if (compressed) { 1012 /* FIXME: this can't work for small textures (mips) which 1013 use different hw stride */ 1014 _mesa_get_compressed_teximage(ctx, target, level, pixels, 1015 texObj, texImage); 1016 } else { 1017 _mesa_get_teximage(ctx, target, level, format, type, pixels, 1018 texObj, texImage); 1019 } 1020 1021 if (image->mt) { 1022 radeon_teximage_unmap(image); 1023 } 1024} 1025 1026void 1027radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level, 1028 GLenum format, GLenum type, GLvoid * pixels, 1029 struct gl_texture_object *texObj, 1030 struct gl_texture_image *texImage) 1031{ 1032 radeon_get_tex_image(ctx, target, level, format, type, pixels, 1033 texObj, texImage, 0); 1034} 1035 1036void 1037radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level, 1038 GLvoid *pixels, 1039 struct gl_texture_object *texObj, 1040 struct gl_texture_image *texImage) 1041{ 1042 radeon_get_tex_image(ctx, target, level, 0, 0, pixels, 1043 texObj, texImage, 1); 1044} 1045