/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Surface utility functions.
 *
 * @author Brian Paul
 */


#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_rect.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"


/**
 * Initialize a pipe_surface object.  'surf' is considered to have
 * uninitialized contents.
 */
void
u_surface_default_template(struct pipe_surface *surf,
                           const struct pipe_resource *texture)
{
   memset(surf, 0, sizeof(*surf));

   surf->format = texture->format;
}


/**
 * Copy a 2D rect from one place to another.
 * Position and sizes are in pixels.
 * src_stride may be negative to do a vertical flip of the source pixels.
 */
void
util_copy_rect(ubyte * dst,
               enum pipe_format format,
               unsigned dst_stride,
               unsigned dst_x,
               unsigned dst_y,
               unsigned width,
               unsigned height,
               const ubyte * src,
               int src_stride,
               unsigned src_x,
               unsigned src_y)
{
   unsigned i;
   int src_stride_pos = src_stride < 0 ? -src_stride : src_stride;
   int blocksize = util_format_get_blocksize(format);
   int blockwidth = util_format_get_blockwidth(format);
   int blockheight = util_format_get_blockheight(format);

   assert(blocksize > 0);
   assert(blockwidth > 0);
   assert(blockheight > 0);

   /* convert pixel coordinates/sizes to block units */
   dst_x /= blockwidth;
   dst_y /= blockheight;
   width = (width + blockwidth - 1) / blockwidth;
   height = (height + blockheight - 1) / blockheight;
   src_x /= blockwidth;
   src_y /= blockheight;

   dst += dst_x * blocksize;
   src += src_x * blocksize;
   dst += dst_y * dst_stride;
   src += src_y * src_stride_pos;
   width *= blocksize;

   if (width == dst_stride && width == (unsigned)src_stride)
      memcpy(dst, src, height * width);
   else {
      for (i = 0; i < height; i++) {
         memcpy(dst, src, width);
         dst += dst_stride;
         src += src_stride;
      }
   }
}
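
/*
 * Usage sketch (illustrative only, not part of this file's API): copy a
 * 16x16 RGBA8 region while flipping it vertically.  Pointing 'src' at
 * the last row and passing a negative stride makes the copy walk the
 * source upwards.  The buffers and strides here are assumptions for the
 * example.
 *
 *    ubyte dst_buf[16 * 16 * 4], src_buf[16 * 16 * 4];
 *    int stride = 16 * 4;                      // bytes per row
 *
 *    util_copy_rect(dst_buf, PIPE_FORMAT_R8G8B8A8_UNORM,
 *                   stride,                    // dst_stride
 *                   0, 0,                      // dst_x, dst_y
 *                   16, 16,                    // width, height (pixels)
 *                   src_buf + 15 * stride,     // start at the last row
 *                   -stride,                   // negative = vertical flip
 *                   0, 0);                     // src_x, src_y
 */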

/**
 * Copy a 3D box from one place to another.
 * Position and sizes are in pixels.
 */
void
util_copy_box(ubyte * dst,
              enum pipe_format format,
              unsigned dst_stride, unsigned dst_slice_stride,
              unsigned dst_x, unsigned dst_y, unsigned dst_z,
              unsigned width, unsigned height, unsigned depth,
              const ubyte * src,
              int src_stride, unsigned src_slice_stride,
              unsigned src_x, unsigned src_y, unsigned src_z)
{
   unsigned z;
   dst += dst_z * dst_slice_stride;
   src += src_z * src_slice_stride;
   for (z = 0; z < depth; ++z) {
      util_copy_rect(dst,
                     format,
                     dst_stride,
                     dst_x, dst_y,
                     width, height,
                     src,
                     src_stride,
                     src_x, src_y);

      dst += dst_slice_stride;
      src += src_slice_stride;
   }
}


/**
 * Fill a 2D rect with a packed color value.
 * Position and sizes are in pixels; 'uc' must already be packed in the
 * given format (e.g. with util_pack_color()).
 */
void
util_fill_rect(ubyte * dst,
               enum pipe_format format,
               unsigned dst_stride,
               unsigned dst_x,
               unsigned dst_y,
               unsigned width,
               unsigned height,
               union util_color *uc)
{
   const struct util_format_description *desc = util_format_description(format);
   unsigned i, j;
   unsigned width_size;
   int blocksize = desc->block.bits / 8;
   int blockwidth = desc->block.width;
   int blockheight = desc->block.height;

   assert(blocksize > 0);
   assert(blockwidth > 0);
   assert(blockheight > 0);

   dst_x /= blockwidth;
   dst_y /= blockheight;
   width = (width + blockwidth - 1) / blockwidth;
   height = (height + blockheight - 1) / blockheight;

   dst += dst_x * blocksize;
   dst += dst_y * dst_stride;
   width_size = width * blocksize;

   switch (blocksize) {
   case 1:
      if (dst_stride == width_size)
         memset(dst, uc->ub, height * width_size);
      else {
         for (i = 0; i < height; i++) {
            memset(dst, uc->ub, width_size);
            dst += dst_stride;
         }
      }
      break;
   case 2:
      for (i = 0; i < height; i++) {
         uint16_t *row = (uint16_t *)dst;
         for (j = 0; j < width; j++)
            *row++ = uc->us;
         dst += dst_stride;
      }
      break;
   case 4:
      for (i = 0; i < height; i++) {
         uint32_t *row = (uint32_t *)dst;
         for (j = 0; j < width; j++)
            *row++ = uc->ui[0];
         dst += dst_stride;
      }
      break;
   default:
      for (i = 0; i < height; i++) {
         ubyte *row = dst;
         for (j = 0; j < width; j++) {
            memcpy(row, uc, blocksize);
            row += blocksize;
         }
         dst += dst_stride;
      }
      break;
   }
}


/**
 * Fill a 3D box with a packed color value.
 * Layers [z, depth) are filled; each layer is 'layer_stride' bytes apart.
 */
void
util_fill_box(ubyte * dst,
              enum pipe_format format,
              unsigned stride,
              unsigned layer_stride,
              unsigned x,
              unsigned y,
              unsigned z,
              unsigned width,
              unsigned height,
              unsigned depth,
              union util_color *uc)
{
   unsigned layer;
   dst += z * layer_stride;
   for (layer = z; layer < depth; layer++) {
      util_fill_rect(dst, format,
                     stride,
                     x, y, width, height, uc);
      dst += layer_stride;
   }
}
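
/*
 * Usage sketch (illustrative only): fill a 32x32 region of a mapped
 * BGRA8 surface with opaque red.  util_pack_color() converts the float
 * RGBA value into the format's memory layout first.  'dst_map' and
 * 'dst_stride' are assumed to come from an earlier transfer_map call.
 *
 *    static const float red[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
 *    union util_color uc;
 *
 *    util_pack_color(red, PIPE_FORMAT_B8G8R8A8_UNORM, &uc);
 *    util_fill_rect(dst_map, PIPE_FORMAT_B8G8R8A8_UNORM,
 *                   dst_stride, 0, 0, 32, 32, &uc);
 */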

/**
 * Fallback function for pipe->resource_copy_region().
 * We support copying between different formats (including compressed/
 * uncompressed) if the bytes per block or pixel match.  If copying
 * compressed -> uncompressed, the dst region is reduced by the block
 * width and height.  If copying uncompressed -> compressed, the dst
 * region is expanded by the block width and height.  See
 * GL_ARB_copy_image.
 * Note: (X,Y)=(0,0) is always the upper-left corner.
 */
void
util_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst,
                          unsigned dst_level,
                          unsigned dst_x, unsigned dst_y, unsigned dst_z,
                          struct pipe_resource *src,
                          unsigned src_level,
                          const struct pipe_box *src_box_in)
{
   struct pipe_transfer *src_trans, *dst_trans;
   uint8_t *dst_map;
   const uint8_t *src_map;
   MAYBE_UNUSED enum pipe_format src_format;
   enum pipe_format dst_format;
   struct pipe_box src_box, dst_box;
   unsigned src_bs, dst_bs, src_bw, dst_bw, src_bh, dst_bh;

   assert(src && dst);
   if (!src || !dst)
      return;

   assert((src->target == PIPE_BUFFER && dst->target == PIPE_BUFFER) ||
          (src->target != PIPE_BUFFER && dst->target != PIPE_BUFFER));

   src_format = src->format;
   dst_format = dst->format;

   /* init src box */
   src_box = *src_box_in;

   /* init dst box */
   dst_box.x = dst_x;
   dst_box.y = dst_y;
   dst_box.z = dst_z;
   dst_box.width = src_box.width;
   dst_box.height = src_box.height;
   dst_box.depth = src_box.depth;

   src_bs = util_format_get_blocksize(src_format);
   src_bw = util_format_get_blockwidth(src_format);
   src_bh = util_format_get_blockheight(src_format);
   dst_bs = util_format_get_blocksize(dst_format);
   dst_bw = util_format_get_blockwidth(dst_format);
   dst_bh = util_format_get_blockheight(dst_format);

   /* Note: all box positions and sizes are in pixels */
   if (src_bw > 1 && dst_bw == 1) {
      /* Copy from compressed to uncompressed.
       * Shrink the dst box by the src block size.
       */
      dst_box.width /= src_bw;
      dst_box.height /= src_bh;
   }
   else if (src_bw == 1 && dst_bw > 1) {
      /* Copy from uncompressed to compressed.
       * Expand the dst box by the dst block size.
       */
      dst_box.width *= dst_bw;
      dst_box.height *= dst_bh;
   }
   else {
      /* compressed -> compressed or uncompressed -> uncompressed copy */
      assert(src_bw == dst_bw);
      assert(src_bh == dst_bh);
   }

   assert(src_bs == dst_bs);
   if (src_bs != dst_bs) {
      /* This can happen if we fail to do format checking beforehand.
       * Don't crash below.
       */
      return;
   }

   /* check that region boxes are block aligned */
   assert(src_box.x % src_bw == 0);
   assert(src_box.y % src_bh == 0);
   assert(src_box.width % src_bw == 0 ||
          src_box.x + src_box.width == u_minify(src->width0, src_level));
   assert(src_box.height % src_bh == 0 ||
          src_box.y + src_box.height == u_minify(src->height0, src_level));
   assert(dst_box.x % dst_bw == 0);
   assert(dst_box.y % dst_bh == 0);
   assert(dst_box.width % dst_bw == 0 ||
          dst_box.x + dst_box.width == u_minify(dst->width0, dst_level));
   assert(dst_box.height % dst_bh == 0 ||
          dst_box.y + dst_box.height == u_minify(dst->height0, dst_level));

   /* check that region boxes are not out of bounds */
   assert(src_box.x + src_box.width <= u_minify(src->width0, src_level));
   assert(src_box.y + src_box.height <= u_minify(src->height0, src_level));
   assert(dst_box.x + dst_box.width <= u_minify(dst->width0, dst_level));
   assert(dst_box.y + dst_box.height <= u_minify(dst->height0, dst_level));

   /* check that the total numbers of src and dst bytes match */
   assert((src_box.width / src_bw) * (src_box.height / src_bh) * src_bs ==
          (dst_box.width / dst_bw) * (dst_box.height / dst_bh) * dst_bs);

   src_map = pipe->transfer_map(pipe,
                                src,
                                src_level,
                                PIPE_TRANSFER_READ,
                                &src_box, &src_trans);
   assert(src_map);
   if (!src_map) {
      goto no_src_map;
   }

   dst_map = pipe->transfer_map(pipe,
                                dst,
                                dst_level,
                                PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_DISCARD_RANGE, &dst_box,
                                &dst_trans);
   assert(dst_map);
   if (!dst_map) {
      goto no_dst_map;
   }

   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      assert(src_box.height == 1);
      assert(src_box.depth == 1);
      memcpy(dst_map, src_map, src_box.width);
   } else {
      util_copy_box(dst_map,
                    src_format,
                    dst_trans->stride, dst_trans->layer_stride,
                    0, 0, 0,
                    src_box.width, src_box.height, src_box.depth,
                    src_map,
                    src_trans->stride, src_trans->layer_stride,
                    0, 0, 0);
   }

   pipe->transfer_unmap(pipe, dst_trans);
no_dst_map:
   pipe->transfer_unmap(pipe, src_trans);
no_src_map:
   ;
}
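
/*
 * Usage sketch (illustrative only): a driver without a native copy
 * engine can point its resource_copy_region hook at this fallback when
 * creating the context, then use it like any other context hook.
 * 'my_ctx', 'dst_tex' and 'src_tex' are hypothetical names for this
 * example; u_box_2d() is from util/u_box.h.
 *
 *    struct pipe_context *ctx = &my_ctx->base;
 *    ctx->resource_copy_region = util_resource_copy_region;
 *
 *    // later: copy a 64x64 region between two 2D textures at level 0
 *    struct pipe_box box;
 *    u_box_2d(0, 0, 64, 64, &box);
 *    ctx->resource_copy_region(ctx, dst_tex, 0, 0, 0, 0,
 *                              src_tex, 0, &box);
 */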

#define UBYTE_TO_USHORT(B) ((B) | ((B) << 8))


/**
 * Fallback for the pipe->clear_render_target() function.
 * XXX this looks too hackish to be really useful.
 * cpp > 4 looks like a gross hack at best...
 * Plus, these transfer fallbacks can't be used when clearing
 * multisampled surfaces, for instance.
 * Clears all bound layers.
 */
void
util_clear_render_target(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;
   union util_color uc;
   unsigned max_layer;

   assert(dst->texture);
   if (!dst->texture)
      return;

   if (dst->texture->target == PIPE_BUFFER) {
      /*
       * The fill naturally works on the surface format; however,
       * the transfer uses the resource format, which is just bytes
       * for buffers.
       */
      unsigned dx, w;
      unsigned pixstride = util_format_get_blocksize(dst->format);
      dx = (dst->u.buf.first_element + dstx) * pixstride;
      w = width * pixstride;
      max_layer = 0;
      dst_map = pipe_transfer_map(pipe,
                                  dst->texture,
                                  0, 0,
                                  PIPE_TRANSFER_WRITE,
                                  dx, 0, w, 1,
                                  &dst_trans);
   }
   else {
      max_layer = dst->u.tex.last_layer - dst->u.tex.first_layer;
      dst_map = pipe_transfer_map_3d(pipe,
                                     dst->texture,
                                     dst->u.tex.level,
                                     PIPE_TRANSFER_WRITE,
                                     dstx, dsty, dst->u.tex.first_layer,
                                     width, height, max_layer + 1,
                                     &dst_trans);
   }

   assert(dst_map);

   if (dst_map) {
      enum pipe_format format = dst->format;
      assert(dst_trans->stride > 0);

      if (util_format_is_pure_integer(format)) {
         /*
          * We expect int/uint clear values here, though some APIs
          * might disagree (but in any case util_pack_color()
          * couldn't handle it)...
          */
         if (util_format_is_pure_sint(format)) {
            util_format_write_4i(format, color->i, 0, &uc, 0, 0, 0, 1, 1);
         }
         else {
            assert(util_format_is_pure_uint(format));
            util_format_write_4ui(format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
         }
      }
      else {
         util_pack_color(color->f, format, &uc);
      }

      util_fill_box(dst_map, dst->format,
                    dst_trans->stride, dst_trans->layer_stride,
                    0, 0, 0, width, height, max_layer + 1, &uc);

      pipe->transfer_unmap(pipe, dst_trans);
   }
}
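
/*
 * Usage sketch (illustrative only): clear an entire color surface to
 * transparent black via this transfer-based fallback.  'surf' is a
 * hypothetical, previously-created pipe_surface.
 *
 *    union pipe_color_union clear_color;
 *    memset(&clear_color, 0, sizeof(clear_color));
 *
 *    util_clear_render_target(pipe, surf, &clear_color,
 *                             0, 0, surf->width, surf->height);
 */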

/**
 * Fallback for the pipe->clear_depth_stencil() function.
 * The sw fallback doesn't look terribly useful here.
 * Plus, these transfer fallbacks can't be used when clearing
 * multisampled surfaces, for instance.
 * Clears all bound layers.
 */
void
util_clear_depth_stencil(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         unsigned clear_flags,
                         double depth,
                         unsigned stencil,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   enum pipe_format format = dst->format;
   struct pipe_transfer *dst_trans;
   ubyte *dst_map;
   boolean need_rmw = FALSE;
   unsigned max_layer, layer;

   /* If only one of depth or stencil is cleared on a packed
    * depth/stencil format, the other component must be preserved with a
    * read-modify-write of each pixel.
    */
   if ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) &&
       ((clear_flags & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) &&
       util_format_is_depth_and_stencil(format))
      need_rmw = TRUE;

   assert(dst->texture);
   if (!dst->texture)
      return;

   max_layer = dst->u.tex.last_layer - dst->u.tex.first_layer;
   dst_map = pipe_transfer_map_3d(pipe,
                                  dst->texture,
                                  dst->u.tex.level,
                                  (need_rmw ? PIPE_TRANSFER_READ_WRITE :
                                              PIPE_TRANSFER_WRITE),
                                  dstx, dsty, dst->u.tex.first_layer,
                                  width, height, max_layer + 1, &dst_trans);
   assert(dst_map);

   if (dst_map) {
      unsigned dst_stride = dst_trans->stride;
      uint64_t zstencil = util_pack64_z_stencil(format, depth, stencil);
      ubyte *dst_layer = dst_map;
      unsigned i, j;
      assert(dst_trans->stride > 0);

      for (layer = 0; layer <= max_layer; layer++) {
         dst_map = dst_layer;

         switch (util_format_get_blocksize(format)) {
         case 1:
            assert(format == PIPE_FORMAT_S8_UINT);
            if (dst_stride == width)
               memset(dst_map, (uint8_t) zstencil, height * width);
            else {
               for (i = 0; i < height; i++) {
                  memset(dst_map, (uint8_t) zstencil, width);
                  dst_map += dst_stride;
               }
            }
            break;
         case 2:
            assert(format == PIPE_FORMAT_Z16_UNORM);
            for (i = 0; i < height; i++) {
               uint16_t *row = (uint16_t *)dst_map;
               for (j = 0; j < width; j++)
                  *row++ = (uint16_t) zstencil;
               dst_map += dst_stride;
            }
            break;
         case 4:
            if (!need_rmw) {
               for (i = 0; i < height; i++) {
                  uint32_t *row = (uint32_t *)dst_map;
                  for (j = 0; j < width; j++)
                     *row++ = (uint32_t) zstencil;
                  dst_map += dst_stride;
               }
            }
            else {
               /* dst_mask selects the bits to preserve */
               uint32_t dst_mask;
               if (format == PIPE_FORMAT_Z24_UNORM_S8_UINT)
                  dst_mask = 0x00ffffff;
               else {
                  assert(format == PIPE_FORMAT_S8_UINT_Z24_UNORM);
                  dst_mask = 0xffffff00;
               }
               if (clear_flags & PIPE_CLEAR_DEPTH)
                  dst_mask = ~dst_mask;
               for (i = 0; i < height; i++) {
                  uint32_t *row = (uint32_t *)dst_map;
                  for (j = 0; j < width; j++) {
                     uint32_t tmp = *row & dst_mask;
                     *row++ = tmp | ((uint32_t) zstencil & ~dst_mask);
                  }
                  dst_map += dst_stride;
               }
            }
            break;
         case 8:
            if (!need_rmw) {
               for (i = 0; i < height; i++) {
                  uint64_t *row = (uint64_t *)dst_map;
                  for (j = 0; j < width; j++)
                     *row++ = zstencil;
                  dst_map += dst_stride;
               }
            }
            else {
               /* src_mask selects the bits to write */
               uint64_t src_mask;

               if (clear_flags & PIPE_CLEAR_DEPTH)
                  src_mask = 0x00000000ffffffffull;
               else
                  src_mask = 0x000000ff00000000ull;

               for (i = 0; i < height; i++) {
                  uint64_t *row = (uint64_t *)dst_map;
                  for (j = 0; j < width; j++) {
                     uint64_t tmp = *row & ~src_mask;
                     *row++ = tmp | (zstencil & src_mask);
                  }
                  dst_map += dst_stride;
               }
            }
            break;
         default:
            assert(0);
            break;
         }
         dst_layer += dst_trans->layer_stride;
      }

      pipe->transfer_unmap(pipe, dst_trans);
   }
}
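
/*
 * Usage sketch (illustrative only): clear only the stencil component of
 * a combined Z24S8 surface.  Because just one component of a packed
 * depth/stencil format is written, the function takes the
 * read-modify-write path above.  'zs_surf' is a hypothetical surface.
 *
 *    util_clear_depth_stencil(pipe, zs_surf,
 *                             PIPE_CLEAR_STENCIL,  // no PIPE_CLEAR_DEPTH
 *                             0.0,                 // depth (ignored here)
 *                             0,                   // stencil clear value
 *                             0, 0, zs_surf->width, zs_surf->height);
 */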

/**
 * Return TRUE if the box is totally inside the resource.
 */
static boolean
is_box_inside_resource(const struct pipe_resource *res,
                       const struct pipe_box *box,
                       unsigned level)
{
   unsigned width = 1, height = 1, depth = 1;

   switch (res->target) {
   case PIPE_BUFFER:
      width = res->width0;
      height = 1;
      depth = 1;
      break;
   case PIPE_TEXTURE_1D:
      width = u_minify(res->width0, level);
      height = 1;
      depth = 1;
      break;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_RECT:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = 1;
      break;
   case PIPE_TEXTURE_3D:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = u_minify(res->depth0, level);
      break;
   case PIPE_TEXTURE_CUBE:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = 6;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      width = u_minify(res->width0, level);
      height = 1;
      depth = res->array_size;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = res->array_size;
      break;
   case PIPE_TEXTURE_CUBE_ARRAY:
      width = u_minify(res->width0, level);
      height = u_minify(res->height0, level);
      depth = res->array_size;
      assert(res->array_size % 6 == 0);
      break;
   case PIPE_MAX_TEXTURE_TYPES:
      break;
   }

   return box->x >= 0 &&
          box->x + box->width <= (int) width &&
          box->y >= 0 &&
          box->y + box->height <= (int) height &&
          box->z >= 0 &&
          box->z + box->depth <= (int) depth;
}


static unsigned
get_sample_count(const struct pipe_resource *res)
{
   return res->nr_samples ? res->nr_samples : 1;
}
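
/*
 * Worked example (illustrative only): for a PIPE_TEXTURE_2D_ARRAY
 * resource with width0 = 256, height0 = 256 and array_size = 8, the
 * effective extent at level 1 is 128x128x8.  A box of x=0, y=0, z=4
 * with width=128, height=128, depth=4 is inside the resource, while
 * the same box with depth=5 fails the z + depth <= 8 check.
 */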

/**
 * Check if a blit() command can be implemented with a
 * resource_copy_region().
 * If tight_format_check is true, only allow the resource_copy_region()
 * if the blit src/dst formats are identical, ignoring the resource
 * formats.  Otherwise, check for format casting and compatibility.
 */
boolean
util_can_blit_via_copy_region(const struct pipe_blit_info *blit,
                              boolean tight_format_check)
{
   const struct util_format_description *src_desc, *dst_desc;
   unsigned mask;

   src_desc = util_format_description(blit->src.resource->format);
   dst_desc = util_format_description(blit->dst.resource->format);

   if (tight_format_check) {
      /* no format conversions allowed */
      if (blit->src.format != blit->dst.format) {
         return FALSE;
      }
   }
   else {
      /* do loose format compatibility checking */
      if (blit->src.resource->format != blit->src.format ||
          blit->dst.resource->format != blit->dst.format ||
          !util_is_format_compatible(src_desc, dst_desc)) {
         return FALSE;
      }
   }

   mask = util_format_get_mask(blit->dst.format);

   /* No masks, no filtering, no scissor, no blending */
   if ((blit->mask & mask) != mask ||
       blit->filter != PIPE_TEX_FILTER_NEAREST ||
       blit->scissor_enable ||
       blit->num_window_rectangles > 0 ||
       blit->alpha_blend) {
      return FALSE;
   }

   /* Only the src box can have negative dims for flipping */
   assert(blit->dst.box.width >= 1);
   assert(blit->dst.box.height >= 1);
   assert(blit->dst.box.depth >= 1);

   /* No scaling or flipping */
   if (blit->src.box.width != blit->dst.box.width ||
       blit->src.box.height != blit->dst.box.height ||
       blit->src.box.depth != blit->dst.box.depth) {
      return FALSE;
   }

   /* No out-of-bounds access. */
   if (!is_box_inside_resource(blit->src.resource, &blit->src.box,
                               blit->src.level) ||
       !is_box_inside_resource(blit->dst.resource, &blit->dst.box,
                               blit->dst.level)) {
      return FALSE;
   }

   /* Sample counts must match. */
   if (get_sample_count(blit->src.resource) !=
       get_sample_count(blit->dst.resource)) {
      return FALSE;
   }

   return TRUE;
}
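
/*
 * Usage sketch (illustrative only): a driver whose copy engine cannot
 * cast formats would pass tight_format_check = TRUE here and take a
 * generic path otherwise.  'my_hw_copy' and 'my_ctx' are hypothetical
 * driver names; util_blitter_blit() is the u_blitter entry point.
 *
 *    if (util_can_blit_via_copy_region(info, TRUE)) {
 *       // identical formats, 1:1 copy: the copy engine can handle it
 *       my_hw_copy(ctx, info);
 *    } else {
 *       util_blitter_blit(my_ctx->blitter, info);
 *    }
 */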

/**
 * Try to do a blit using resource_copy_region.  The function calls
 * resource_copy_region if the blit description is compatible with it.
 *
 * It returns TRUE if the blit was done using resource_copy_region.
 *
 * It returns FALSE otherwise, and the caller must fall back to a more
 * generic codepath for the blit operation (e.g. by using u_blitter).
 */
boolean
util_try_blit_via_copy_region(struct pipe_context *ctx,
                              const struct pipe_blit_info *blit)
{
   if (util_can_blit_via_copy_region(blit, FALSE)) {
      ctx->resource_copy_region(ctx, blit->dst.resource, blit->dst.level,
                                blit->dst.box.x, blit->dst.box.y,
                                blit->dst.box.z,
                                blit->src.resource, blit->src.level,
                                &blit->src.box);
      return TRUE;
   }
   else {
      return FALSE;
   }
}
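
/*
 * Usage sketch (illustrative only): a typical driver blit hook tries
 * the copy-region path first and falls back to u_blitter.  The
 * 'my_context' type, its 'blitter' member, and the my_context() cast
 * helper are hypothetical driver conventions.
 *
 *    static void
 *    my_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
 *    {
 *       struct my_context *mctx = my_context(ctx);
 *
 *       if (util_try_blit_via_copy_region(ctx, info))
 *          return;   // done via resource_copy_region
 *
 *       util_blitter_blit(mctx->blitter, info);
 *    }
 */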