u_inlines.h revision b496136af84e396e7890082817b563dc53ac36fc
/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static INLINE void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

static INLINE boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}
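
/*
 * Example (illustrative sketch): a driver's resource_create()
 * implementation typically starts the new object with a refcount of 1,
 * owned by the caller:
 *
 *    pipe_reference_init(&res->reference, 1);
 *    assert(pipe_is_referenced(&res->reference));
 */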

/**
 * Update reference counting.
 * The old object pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the old object's refcount hits zero and it should be
 *         destroyed.
 */
static INLINE boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

static INLINE boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}

static INLINE void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

static INLINE void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)
                                debug_describe_resource))
      old_tex->screen->resource_destroy(old_tex->screen, old_tex);
   *ptr = tex;
}

static INLINE void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr,
                            struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}
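
/*
 * Example (illustrative sketch; 'screen' and 'templ' are assumed to be a
 * valid screen and resource template supplied by the caller).  The refcount
 * starts at 1 after creation, goes to 2 on the first call below, and the
 * object is destroyed when the last reference is dropped:
 *
 *    struct pipe_resource *res = screen->resource_create(screen, templ);
 *    struct pipe_resource *held = NULL;
 *
 *    pipe_resource_reference(&held, res);
 *    pipe_resource_reference(&res, NULL);
 *    ...
 *    pipe_resource_reference(&held, NULL);
 */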

/**
 * Similar to pipe_sampler_view_reference() but always sets the pointer to
 * NULL and takes an explicit context.  Passing an explicit context works
 * around a dangling context pointer problem when textures are shared by
 * multiple contexts.  XXX fix this someday.
 */
static INLINE void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (*ptr && (*ptr)->context != ctx) {
      debug_printf_once(("context mis-match in pipe_sampler_view_release()\n"));
   }
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}


static INLINE void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                                (debug_reference_descriptor)
                                debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

static INLINE void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface *ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer,
                   unsigned flags)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->usage = flags;
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static INLINE void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface *ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer,
                  unsigned flags)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer, flags);
}

/* Return true if the surfaces are equal. */
static INLINE boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}
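
/*
 * Example (illustrative sketch; 'ctx' and 'tex' are assumed to be a valid
 * context and a texture the caller owns; the surface is pointed at mip
 * level 0, layer 0).  Note that pipe_surface_init() takes a reference on
 * the texture, which must eventually be released:
 *
 *    struct pipe_surface surf;
 *    pipe_surface_init(ctx, &surf, tex, 0, 0, PIPE_BIND_RENDER_TARGET);
 *    ...
 *    pipe_resource_reference(&surf.texture, NULL);
 */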

/*
 * Convenience wrappers for screen buffer functions.
 */

static INLINE struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   unsigned usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}

static INLINE void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned usage,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   *transfer = pipe->get_transfer(pipe,
                                  buffer,
                                  0,
                                  usage,
                                  &box);

   if (*transfer == NULL)
      return NULL;

   map = pipe->transfer_map(pipe, *transfer);
   if (map == NULL) {
      pipe->transfer_destroy(pipe, *transfer);
      *transfer = NULL;
      return NULL;
   }

   return map;
}


static INLINE void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned usage,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                usage, transfer);
}


static INLINE void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   if (transfer) {
      pipe->transfer_unmap(pipe, transfer);
      pipe->transfer_destroy(pipe, transfer);
   }
}

static INLINE void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= offset);
   assert(offset + length <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

static INLINE void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   struct pipe_box box;
   unsigned usage = PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == buf->width0) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   } else {
      usage |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               0,
                               usage,
                               &box,
                               data,
                               size,
                               0);
}
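
/*
 * Example (illustrative sketch; 'pipe' and 'buf' are assumed to be a valid
 * context and buffer owned by the caller).  A typical CPU write through a
 * mapped range looks like:
 *
 *    struct pipe_transfer *transfer;
 *    float *map = (float *) pipe_buffer_map_range(pipe, buf,
 *                                                 0, 4 * sizeof(float),
 *                                                 PIPE_TRANSFER_WRITE,
 *                                                 &transfer);
 *    if (map) {
 *       map[0] = 1.0f;
 *       pipe_buffer_unmap(pipe, transfer);
 *    }
 */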

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static INLINE void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   struct pipe_box box;

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               0,
                               (PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_UNSYNCHRONIZED),
                               &box,
                               data,
                               0, 0);
}

static INLINE struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             unsigned usage,
                             unsigned size,
                             void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static INLINE void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);

   if (map)
      memcpy(data, map, size);

   pipe_buffer_unmap(pipe, src_transfer);
}

static INLINE struct pipe_transfer *
pipe_get_transfer(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  enum pipe_transfer_usage usage,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->get_transfer(context,
                                resource,
                                level,
                                usage,
                                &box);
}

static INLINE void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_transfer *transfer)
{
   return context->transfer_map(context, transfer);
}

static INLINE void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}


static INLINE void
pipe_transfer_destroy(struct pipe_context *context,
                      struct pipe_transfer *transfer)
{
   context->transfer_destroy(context, transfer);
}

static INLINE void
pipe_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}


static INLINE boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}
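
/*
 * Example (illustrative sketch; 'rast' is assumed to be the currently
 * bound rasterizer state and 'fill_front' its front-face fill mode):
 *
 *    if (util_get_offset(rast, rast->fill_front)) {
 *       ... enable polygon offset for front-facing polygons ...
 *    }
 */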

/**
 * Copy an array of pipe_vertex_buffer structures, properly referencing
 * the pipe_vertex_buffer::buffer member.
 *
 * \sa util_copy_framebuffer_state
 */
static INLINE void
util_copy_vertex_buffers(struct pipe_vertex_buffer *dst,
                         unsigned *dst_count,
                         const struct pipe_vertex_buffer *src,
                         unsigned src_count)
{
   unsigned i;

   /* Reference the buffers of 'src' in 'dst'. */
   for (i = 0; i < src_count; i++) {
      pipe_resource_reference(&dst[i].buffer, src[i].buffer);
   }
   /* Unreference the rest of the buffers in 'dst'. */
   for (; i < *dst_count; i++) {
      pipe_resource_reference(&dst[i].buffer, NULL);
   }

   /* Update the size of 'dst' and copy over the other members
    * of pipe_vertex_buffer.
    */
   *dst_count = src_count;
   memcpy(dst, src, src_count * sizeof(struct pipe_vertex_buffer));
}

static INLINE float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer
    * stage.
    */
   return state->gl_rasterization_rules &&
          !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static INLINE void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0,
             sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0,
             sizeof(result->pipeline_statistics));
      break;
   default:
      assert(0);
   }
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */