radeon_mipmap_tree.c revision f4553d99c63e4bcb4d023c9e33b72fedd0dfbdc1
/*
 * Copyright (C) 2009 Maciej Cencora.
 * Copyright (C) 2008 Nicolai Haehnle.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "radeon_mipmap_tree.h"

#include <errno.h>
#include <unistd.h>

#include "main/simple_list.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "main/enums.h"
#include "radeon_texture.h"

/**
 * Compute the row stride in bytes for one row of compressed blocks of the
 * given format and pixel width.  The result is at least \p minStride,
 * rounded up to a whole number of compressed blocks.
 */
static unsigned get_aligned_compressed_row_stride(
		gl_format format,
		unsigned width,
		unsigned minStride)
{
	const unsigned blockBytes = _mesa_get_format_bytes(format);
	unsigned blockWidth, blockHeight;
	unsigned stride;

	_mesa_get_format_block_size(format, &blockWidth, &blockHeight);

	/* Count number of blocks required to store the given width.
	 * And then multiply it by the bytes required to store a block.
	 */
	stride = (width + blockWidth - 1) / blockWidth * blockBytes;

	/* Round the given minimum stride to the next full blocksize.
	 * (minStride + blockBytes - 1) / blockBytes * blockBytes
	 */
	if ( stride < minStride )
		stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s width %u, minStride %u, block(bytes %u, width %u):"
			"stride %u\n",
			__func__, width, minStride,
			blockBytes, blockWidth,
			stride);

	return stride;
}

/**
 * Compute the size in bytes of a compressed image given its row stride
 * (in bytes) and height (in pixels), counting whole block rows.
 */
static unsigned get_compressed_image_size(
		gl_format format,
		unsigned rowStride,
		unsigned height)
{
	unsigned blockWidth, blockHeight;

	_mesa_get_format_block_size(format, &blockWidth, &blockHeight);

	return rowStride * ((height + blockHeight - 1) / blockHeight);
}

/**
 * Compute sizes and fill in offset and blit information for the given
 * image (determined by \p face and \p level).
 *
 * \param curOffset points to the offset at which the image is to be stored
 * and is updated by this function according to the size of the image.
 */
static void compute_tex_image_offset(radeonContextPtr rmesa, radeon_mipmap_tree *mt,
	GLuint face, GLuint level, GLuint* curOffset)
{
	radeon_mipmap_level *lvl = &mt->levels[level];
	uint32_t row_align;
	GLuint height;

	/* NOTE(review): sizes are computed with the height padded to the next
	 * power of two — presumably a hardware addressing requirement; confirm
	 * against the register documentation. */
	height = _mesa_next_pow_two_32(lvl->height);

	/* Find image size in bytes */
	if (_mesa_is_format_compressed(mt->mesaFormat)) {
		lvl->rowstride = get_aligned_compressed_row_stride(mt->mesaFormat, lvl->width, rmesa->texture_compressed_row_align);
		lvl->size = get_compressed_image_size(mt->mesaFormat, lvl->rowstride, height);
	} else if (mt->target == GL_TEXTURE_RECTANGLE_NV) {
		row_align = rmesa->texture_rect_row_align - 1;
		lvl->rowstride = (_mesa_format_row_stride(mt->mesaFormat, lvl->width) + row_align) & ~row_align;
		lvl->size = lvl->rowstride * height;
	} else if (mt->tilebits & RADEON_TXO_MICRO_TILE) {
		/* tile pattern is 16 bytes x2. mipmaps stay 32 byte aligned,
		 * though the actual offset may be different (if texture is less than
		 * 32 bytes width) to the untiled case */
		lvl->rowstride = (_mesa_format_row_stride(mt->mesaFormat, lvl->width) * 2 + 31) & ~31;
		lvl->size = lvl->rowstride * ((height + 1) / 2) * lvl->depth;
	} else {
		row_align = rmesa->texture_row_align - 1;
		lvl->rowstride = (_mesa_format_row_stride(mt->mesaFormat, lvl->width) + row_align) & ~row_align;
		lvl->size = lvl->rowstride * height * lvl->depth;
	}
	assert(lvl->size > 0);

	/* All images are aligned to a 32-byte offset */
	*curOffset = (*curOffset + 0x1f) & ~0x1f;
	lvl->faces[face].offset = *curOffset;
	*curOffset += lvl->size;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p) level %d, face %d: rs:%d %dx%d at %d\n",
			__func__, rmesa,
			level, face,
			lvl->rowstride, lvl->width, height, lvl->faces[face].offset);
}

/**
 * Return \p size right-shifted by \p levels, clamped to a minimum of 1
 * (standard mipmap dimension minification).
 */
static GLuint minify(GLuint size, GLuint levels)
{
	size = size >> levels;
	if (size < 1)
		size = 1;
	return size;
}


/**
 * Lay out all faces/levels of the miptree for r100-class hardware:
 * each cube face stores its complete mip chain contiguously.
 * Sets per-level width/height/depth/offsets and mt->totalsize.
 */
static void calculate_miptree_layout_r100(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
{
	GLuint curOffset, i, face, level;

	assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels);

	curOffset = 0;
	for(face = 0; face < mt->faces; face++) {

		for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
			mt->levels[level].valid = 1;
			mt->levels[level].width = minify(mt->width0, i);
			mt->levels[level].height = minify(mt->height0, i);
			mt->levels[level].depth = minify(mt->depth0, i);
			compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
		}
	}

	/* Note the required size in memory */
	mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p, %p) total size %d\n",
			__func__, rmesa, mt, mt->totalsize);
}

/**
 * Lay out all faces/levels of the miptree for r300-class and newer
 * hardware: all faces of one mip level are stored together before the
 * next level.  Sets per-level width/height/depth/offsets and mt->totalsize.
 */
static void calculate_miptree_layout_r300(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
{
	GLuint curOffset, i, level;

	assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels);

	curOffset = 0;
	for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
		GLuint face;

		mt->levels[level].valid = 1;
		mt->levels[level].width = minify(mt->width0, i);
		mt->levels[level].height = minify(mt->height0, i);
		mt->levels[level].depth = minify(mt->depth0, i);

		for(face = 0; face < mt->faces; face++)
			compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
		/* r600 cube levels seems to be aligned to 8 faces but
		 * we have separate register for 1'st level offset so add
		 * 2 image alignment after 1'st mip level */
		if(rmesa->radeonScreen->chip_family >= CHIP_FAMILY_R600 &&
		   mt->target == GL_TEXTURE_CUBE_MAP && level >= 1)
			curOffset += 2 * mt->levels[level].size;
	}

	/* Note the required size in memory */
	mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p, %p) total size %d\n",
			__func__, rmesa, mt, mt->totalsize);
}

/**
 * Create a new mipmap tree, calculate its layout and allocate memory.
 *
 * The returned tree has refcount 1; release it with
 * radeon_miptree_unreference().
 * NOTE(review): CALLOC_STRUCT and radeon_bo_open results are not
 * NULL-checked here — allocation failure would crash; confirm whether
 * callers can tolerate a NULL return before adding checks.
 */
static radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa,
		GLenum target, gl_format mesaFormat, GLuint baseLevel, GLuint numLevels,
		GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits)
{
	radeon_mipmap_tree *mt = CALLOC_STRUCT(_radeon_mipmap_tree);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		"%s(%p) new tree is %p.\n",
		__func__, rmesa, mt);

	mt->mesaFormat = mesaFormat;
	mt->refcount = 1;
	mt->target = target;
	mt->faces = (target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	mt->baseLevel = baseLevel;
	mt->numLevels = numLevels;
	mt->width0 = width0;
	mt->height0 = height0;
	mt->depth0 = depth0;
	mt->tilebits = tilebits;

	if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_R300)
		calculate_miptree_layout_r300(rmesa, mt);
	else
		calculate_miptree_layout_r100(rmesa, mt);

	mt->bo = radeon_bo_open(rmesa->radeonScreen->bom,
							0, mt->totalsize, 1024,
							RADEON_GEM_DOMAIN_VRAM,
							0);

	return mt;
}

/**
 * Take a reference on \p mt and store it in *\p ptr,
 * which must be NULL on entry.
 */
void radeon_miptree_reference(radeon_mipmap_tree *mt, radeon_mipmap_tree **ptr)
{
	assert(!*ptr);

	mt->refcount++;
	assert(mt->refcount > 0);

	*ptr = mt;
}

/**
 * Drop the reference held in *\p ptr (may be NULL) and clear it.
 * Frees the tree and its buffer object when the last reference goes away.
 */
void radeon_miptree_unreference(radeon_mipmap_tree **ptr)
{
	radeon_mipmap_tree *mt = *ptr;
	if (!mt)
		return;

	assert(mt->refcount > 0);

	mt->refcount--;
	if (!mt->refcount) {
		radeon_bo_unref(mt->bo);
		free(mt);
	}

	*ptr = 0;
}

/**
 * Calculate min and max LOD for the given texture object.
 * @param[in] tObj texture object whose LOD values to calculate
 * @param[out] pminLod minimal LOD
 * @param[out] pmaxLod maximal LOD
 *
 * NOTE(review): for targets not listed in the switch below the function
 * returns without writing *pminLod / *pmaxLod — callers must pre-initialize
 * them or only pass the handled targets; confirm at call sites.
 */
static void calculate_min_max_lod(struct gl_texture_object *tObj,
	unsigned *pminLod, unsigned *pmaxLod)
{
	int minLod, maxLod;
	/* Yes, this looks overly complicated, but it's all needed.
	*/
	switch (tObj->Target) {
	case GL_TEXTURE_1D:
	case GL_TEXTURE_2D:
	case GL_TEXTURE_3D:
	case GL_TEXTURE_CUBE_MAP:
		if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
			/* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
			*/
			minLod = maxLod = tObj->BaseLevel;
		} else {
			/* Clamp [MinLod, MaxLod] into the valid level range of the
			 * texture object; MaxLod is rounded to the nearest level. */
			minLod = tObj->BaseLevel + (GLint)(tObj->MinLod);
			minLod = MAX2(minLod, tObj->BaseLevel);
			minLod = MIN2(minLod, tObj->MaxLevel);
			maxLod = tObj->BaseLevel + (GLint)(tObj->MaxLod + 0.5);
			maxLod = MIN2(maxLod, tObj->MaxLevel);
			maxLod = MIN2(maxLod, tObj->Image[0][minLod]->MaxLog2 + minLod);
			maxLod = MAX2(maxLod, minLod); /* need at least one level */
		}
		break;
	case GL_TEXTURE_RECTANGLE_NV:
	case GL_TEXTURE_4D_SGIS:
		/* These targets have no mipmaps: only level 0 exists. */
		minLod = maxLod = 0;
		break;
	default:
		return;
	}

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p) target %s, min %d, max %d.\n",
			__func__, tObj,
			_mesa_lookup_enum_by_nr(tObj->Target),
			minLod, maxLod);

	/* save these values */
	*pminLod = minLod;
	*pmaxLod = maxLod;
}

/**
 * Checks whether the given miptree can hold the given texture image at the
 * given face and level.
318 */ 319GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt, 320 struct gl_texture_image *texImage, GLuint face, GLuint level) 321{ 322 radeon_mipmap_level *lvl; 323 324 if (face >= mt->faces) 325 return GL_FALSE; 326 327 if (texImage->TexFormat != mt->mesaFormat) 328 return GL_FALSE; 329 330 lvl = &mt->levels[level]; 331 if (!lvl->valid || 332 lvl->width != texImage->Width || 333 lvl->height != texImage->Height || 334 lvl->depth != texImage->Depth) 335 return GL_FALSE; 336 337 return GL_TRUE; 338} 339 340/** 341 * Checks whether the given miptree has the right format to store the given texture object. 342 */ 343static GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj) 344{ 345 struct gl_texture_image *firstImage; 346 unsigned numLevels; 347 radeon_mipmap_level *mtBaseLevel; 348 349 if (texObj->BaseLevel < mt->baseLevel) 350 return GL_FALSE; 351 352 mtBaseLevel = &mt->levels[texObj->BaseLevel - mt->baseLevel]; 353 firstImage = texObj->Image[0][texObj->BaseLevel]; 354 numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, firstImage->MaxLog2 + 1); 355 356 if (radeon_is_debug_enabled(RADEON_TEXTURE,RADEON_TRACE)) { 357 fprintf(stderr, "Checking if miptree %p matches texObj %p\n", mt, texObj); 358 fprintf(stderr, "target %d vs %d\n", mt->target, texObj->Target); 359 fprintf(stderr, "format %d vs %d\n", mt->mesaFormat, firstImage->TexFormat); 360 fprintf(stderr, "numLevels %d vs %d\n", mt->numLevels, numLevels); 361 fprintf(stderr, "width0 %d vs %d\n", mtBaseLevel->width, firstImage->Width); 362 fprintf(stderr, "height0 %d vs %d\n", mtBaseLevel->height, firstImage->Height); 363 fprintf(stderr, "depth0 %d vs %d\n", mtBaseLevel->depth, firstImage->Depth); 364 if (mt->target == texObj->Target && 365 mt->mesaFormat == firstImage->TexFormat && 366 mt->numLevels >= numLevels && 367 mtBaseLevel->width == firstImage->Width && 368 mtBaseLevel->height == firstImage->Height && 369 mtBaseLevel->depth == 
firstImage->Depth) { 370 fprintf(stderr, "MATCHED\n"); 371 } else { 372 fprintf(stderr, "NOT MATCHED\n"); 373 } 374 } 375 376 return (mt->target == texObj->Target && 377 mt->mesaFormat == firstImage->TexFormat && 378 mt->numLevels >= numLevels && 379 mtBaseLevel->width == firstImage->Width && 380 mtBaseLevel->height == firstImage->Height && 381 mtBaseLevel->depth == firstImage->Depth); 382} 383 384/** 385 * Try to allocate a mipmap tree for the given texture object. 386 * @param[in] rmesa radeon context 387 * @param[in] t radeon texture object 388 */ 389void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t) 390{ 391 struct gl_texture_object *texObj = &t->base; 392 struct gl_texture_image *texImg = texObj->Image[0][texObj->BaseLevel]; 393 GLuint numLevels; 394 395 assert(!t->mt); 396 397 if (!texImg) { 398 radeon_warning("%s(%p) No image in given texture object(%p).\n", 399 __func__, rmesa, t); 400 return; 401 } 402 403 404 numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, texImg->MaxLog2 + 1); 405 406 t->mt = radeon_miptree_create(rmesa, t->base.Target, 407 texImg->TexFormat, texObj->BaseLevel, 408 numLevels, texImg->Width, texImg->Height, 409 texImg->Depth, t->tile_bits); 410} 411 412GLuint 413radeon_miptree_image_offset(radeon_mipmap_tree *mt, 414 GLuint face, GLuint level) 415{ 416 if (mt->target == GL_TEXTURE_CUBE_MAP_ARB) 417 return (mt->levels[level].faces[face].offset); 418 else 419 return mt->levels[level].faces[0].offset; 420} 421 422/** 423 * Ensure that the given image is stored in the given miptree from now on. 
424 */ 425static void migrate_image_to_miptree(radeon_mipmap_tree *mt, 426 radeon_texture_image *image, 427 int face, int level) 428{ 429 radeon_mipmap_level *dstlvl = &mt->levels[level]; 430 unsigned char *dest; 431 432 assert(image->mt != mt); 433 assert(dstlvl->valid); 434 assert(dstlvl->width == image->base.Width); 435 assert(dstlvl->height == image->base.Height); 436 assert(dstlvl->depth == image->base.Depth); 437 438 radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, 439 "%s miptree %p, image %p, face %d, level %d.\n", 440 __func__, mt, image, face, level); 441 442 radeon_bo_map(mt->bo, GL_TRUE); 443 dest = mt->bo->ptr + dstlvl->faces[face].offset; 444 445 if (image->mt) { 446 /* Format etc. should match, so we really just need a memcpy(). 447 * In fact, that memcpy() could be done by the hardware in many 448 * cases, provided that we have a proper memory manager. 449 */ 450 assert(mt->mesaFormat == image->base.TexFormat); 451 452 radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel]; 453 454 /* TODO: bring back these assertions once the FBOs are fixed */ 455#if 0 456 assert(image->mtlevel == level); 457 assert(srclvl->size == dstlvl->size); 458 assert(srclvl->rowstride == dstlvl->rowstride); 459#endif 460 461 radeon_bo_map(image->mt->bo, GL_FALSE); 462 463 memcpy(dest, 464 image->mt->bo->ptr + srclvl->faces[face].offset, 465 dstlvl->size); 466 radeon_bo_unmap(image->mt->bo); 467 468 radeon_miptree_unreference(&image->mt); 469 } else if (image->base.Data) { 470 /* This condition should be removed, it's here to workaround 471 * a segfault when mapping textures during software fallbacks. 
472 */ 473 radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT, 474 "%s Trying to map texture in sowftware fallback.\n", 475 __func__); 476 const uint32_t srcrowstride = _mesa_format_row_stride(image->base.TexFormat, image->base.Width); 477 uint32_t rows = image->base.Height * image->base.Depth; 478 479 if (_mesa_is_format_compressed(image->base.TexFormat)) { 480 uint32_t blockWidth, blockHeight; 481 _mesa_get_format_block_size(image->base.TexFormat, &blockWidth, &blockHeight); 482 rows = (rows + blockHeight - 1) / blockHeight; 483 } 484 485 copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride, 486 rows, srcrowstride); 487 488 _mesa_free_texmemory(image->base.Data); 489 image->base.Data = 0; 490 } 491 492 radeon_bo_unmap(mt->bo); 493 494 radeon_miptree_reference(mt, &image->mt); 495 image->mtface = face; 496 image->mtlevel = level; 497} 498 499/** 500 * Filter matching miptrees, and select one with the most of data. 501 * @param[in] texObj radeon texture object 502 * @param[in] firstLevel first texture level to check 503 * @param[in] lastLevel last texture level to check 504 */ 505static radeon_mipmap_tree * get_biggest_matching_miptree(radeonTexObj *texObj, 506 unsigned firstLevel, 507 unsigned lastLevel) 508{ 509 const unsigned numLevels = lastLevel - firstLevel + 1; 510 unsigned *mtSizes = calloc(numLevels, sizeof(unsigned)); 511 radeon_mipmap_tree **mts = calloc(numLevels, sizeof(radeon_mipmap_tree *)); 512 unsigned mtCount = 0; 513 unsigned maxMtIndex = 0; 514 radeon_mipmap_tree *tmp; 515 unsigned int level; 516 int i; 517 518 for (level = firstLevel; level <= lastLevel; ++level) { 519 radeon_texture_image *img = get_radeon_texture_image(texObj->base.Image[0][level]); 520 unsigned found = 0; 521 // TODO: why this hack?? 
522 if (!img) 523 break; 524 525 if (!img->mt) 526 continue; 527 528 for (i = 0; i < mtCount; ++i) { 529 if (mts[i] == img->mt) { 530 found = 1; 531 mtSizes[i] += img->mt->levels[img->mtlevel].size; 532 break; 533 } 534 } 535 536 if (!found && radeon_miptree_matches_texture(img->mt, &texObj->base)) { 537 mtSizes[mtCount] = img->mt->levels[img->mtlevel].size; 538 mts[mtCount] = img->mt; 539 mtCount++; 540 } 541 } 542 543 if (mtCount == 0) { 544 free(mtSizes); 545 free(mts); 546 return NULL; 547 } 548 549 for (i = 1; i < mtCount; ++i) { 550 if (mtSizes[i] > mtSizes[maxMtIndex]) { 551 maxMtIndex = i; 552 } 553 } 554 555 tmp = mts[maxMtIndex]; 556 free(mtSizes); 557 free(mts); 558 559 return tmp; 560} 561 562/** 563 * Validate texture mipmap tree. 564 * If individual images are stored in different mipmap trees 565 * use the mipmap tree that has the most of the correct data. 566 */ 567int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj) 568{ 569 radeonContextPtr rmesa = RADEON_CONTEXT(ctx); 570 radeonTexObj *t = radeon_tex_obj(texObj); 571 572 if (t->validated || t->image_override) { 573 return GL_TRUE; 574 } 575 576 if (texObj->Image[0][texObj->BaseLevel]->Border > 0) 577 return GL_FALSE; 578 579 _mesa_test_texobj_completeness(rmesa->glCtx, texObj); 580 if (!texObj->_Complete) { 581 return GL_FALSE; 582 } 583 584 calculate_min_max_lod(&t->base, &t->minLod, &t->maxLod); 585 586 radeon_print(RADEON_TEXTURE, RADEON_NORMAL, 587 "%s: Validating texture %p now, minLod = %d, maxLod = %d\n", 588 __FUNCTION__, texObj ,t->minLod, t->maxLod); 589 590 radeon_mipmap_tree *dst_miptree; 591 dst_miptree = get_biggest_matching_miptree(t, t->minLod, t->maxLod); 592 593 if (!dst_miptree) { 594 radeon_miptree_unreference(&t->mt); 595 radeon_try_alloc_miptree(rmesa, t); 596 dst_miptree = t->mt; 597 radeon_print(RADEON_TEXTURE, RADEON_NORMAL, 598 "%s: No matching miptree found, allocated new one %p\n", 599 __FUNCTION__, t->mt); 600 601 } else { 602 
radeon_print(RADEON_TEXTURE, RADEON_NORMAL, 603 "%s: Using miptree %p\n", __FUNCTION__, t->mt); 604 } 605 606 const unsigned faces = texObj->Target == GL_TEXTURE_CUBE_MAP ? 6 : 1; 607 unsigned face, level; 608 radeon_texture_image *img; 609 /* Validate only the levels that will actually be used during rendering */ 610 for (face = 0; face < faces; ++face) { 611 for (level = t->minLod; level <= t->maxLod; ++level) { 612 img = get_radeon_texture_image(texObj->Image[face][level]); 613 614 radeon_print(RADEON_TEXTURE, RADEON_TRACE, 615 "Checking image level %d, face %d, mt %p ... ", 616 level, face, img->mt); 617 618 if (img->mt != dst_miptree) { 619 radeon_print(RADEON_TEXTURE, RADEON_TRACE, 620 "MIGRATING\n"); 621 622 struct radeon_bo *src_bo = (img->mt) ? img->mt->bo : img->bo; 623 if (src_bo && radeon_bo_is_referenced_by_cs(src_bo, rmesa->cmdbuf.cs)) { 624 radeon_firevertices(rmesa); 625 } 626 migrate_image_to_miptree(dst_miptree, img, face, level); 627 } else 628 radeon_print(RADEON_TEXTURE, RADEON_TRACE, "OK\n"); 629 } 630 } 631 632 t->validated = GL_TRUE; 633 634 return GL_TRUE; 635} 636 637uint32_t get_base_teximage_offset(radeonTexObj *texObj) 638{ 639 if (!texObj->mt) { 640 return 0; 641 } else { 642 return radeon_miptree_image_offset(texObj->mt, 0, texObj->minLod); 643 } 644} 645