u_inlines.h revision 825b45366d5308fd3e8e71c0c1943cb6ca8f69ea
/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


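/** Initialize a reference count to 'count' (typically 1). */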
static INLINE void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

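/** Return TRUE if the object's reference count is non-zero. */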
static INLINE boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static INLINE boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

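/**
 * As pipe_reference_described(), using the default describe callback.
 * \return TRUE if the old object's refcount hit zero and it should be
 * destroyed.
 */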
static INLINE boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)debug_describe_reference);
}

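/**
 * Set '*ptr' to point to 'surf' with reference counting: 'surf' (which
 * may be NULL) gains a reference, and the surface previously pointed to
 * by '*ptr' (if any) loses one and is destroyed through its context when
 * its refcount reaches zero.
 *
 * Typical usage (a hypothetical sketch; 'surf' comes from the caller):
 *
 *    struct pipe_surface *cur = NULL;
 *    pipe_surface_reference(&cur, surf);   // take a reference
 *    pipe_surface_reference(&cur, NULL);   // drop it again
 */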
static INLINE void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

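/**
 * As pipe_surface_reference(), but for resources; the old resource, if
 * any, is destroyed through its screen once its refcount reaches zero.
 */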
static INLINE void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)debug_describe_resource))
      old_tex->screen->resource_destroy(old_tex->screen, old_tex);
   *ptr = tex;
}

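/**
 * As pipe_surface_reference(), but for sampler views; the old view, if
 * any, is destroyed through its context once its refcount reaches zero.
 */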
static INLINE void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr, struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

/**
 * Similar to pipe_sampler_view_reference(), but always sets the pointer
 * to NULL and takes an explicit context.  The explicit context is a
 * workaround for a dangling context pointer problem that arises when
 * textures are shared by multiple contexts.  XXX fix this someday.
 */
static INLINE void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;
   if (*ptr && (*ptr)->context != ctx) {
      debug_printf_once(("context mismatch in pipe_sampler_view_release()\n"));
   }
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                    (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}


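/**
 * As pipe_surface_reference(), but for stream output targets; the old
 * target, if any, is destroyed through its context once its refcount
 * reaches zero.
 */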
static INLINE void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                     (debug_reference_descriptor)debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

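/**
 * (Re)point an existing pipe_surface at the given level/layer of 'pt',
 * releasing any previously referenced resource.  'flags' (a bitmask of
 * PIPE_BIND_x values) is stored in ps->usage.
 */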
static INLINE void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer,
                   unsigned flags)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->usage = flags;
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

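/**
 * Fully initialize a freshly allocated pipe_surface: the refcount is set
 * to 1 and the surface is pointed at the given resource level/layer.
 */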
static INLINE void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer,
                  unsigned flags)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer, flags);
}

/* Return true if the surfaces are equal. */
static INLINE boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */

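/**
 * Create a buffer resource of the given size.  'bind' is a bitmask of
 * PIPE_BIND_x flags and 'usage' is one of the PIPE_USAGE_x values.
 *
 * Example (a hypothetical sketch; 'screen' comes from the caller):
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_STATIC, 4 * 1024);
 */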
static INLINE struct pipe_resource *
pipe_buffer_create( struct pipe_screen *screen,
                    unsigned bind,
                    unsigned usage,
                    unsigned size )
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}

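/**
 * Map a sub-range of a buffer for CPU access.  'usage' is a bitmask of
 * PIPE_TRANSFER_x flags.  On success, the transfer object is returned in
 * '*transfer' and must later be released with pipe_buffer_unmap();
 * returns NULL on failure.
 */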
static INLINE void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned usage,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   *transfer = pipe->get_transfer( pipe,
                                   buffer,
                                   0,
                                   usage,
                                   &box);

   if (*transfer == NULL)
      return NULL;

   map = pipe->transfer_map( pipe, *transfer );
   if (map == NULL) {
      pipe->transfer_destroy( pipe, *transfer );
      *transfer = NULL;
      return NULL;
   }

   return map;
}


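/**
 * Map an entire buffer for CPU access; equivalent to
 * pipe_buffer_map_range() over the buffer's full width.
 */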
static INLINE void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned usage,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, usage, transfer);
}


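/**
 * Unmap and destroy a transfer returned by pipe_buffer_map() or
 * pipe_buffer_map_range().  A NULL 'transfer' is silently ignored.
 */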
static INLINE void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   if (transfer) {
      pipe->transfer_unmap(pipe, transfer);
      pipe->transfer_destroy(pipe, transfer);
   }
}

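/**
 * Flush a sub-range of a mapped buffer.  Note that 'offset' is relative
 * to the start of the buffer, not to the start of the mapped range.
 */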
static INLINE void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= offset);
   assert(offset + length <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

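/**
 * Write 'size' bytes from 'data' to a buffer at 'offset'.  The discard
 * flag is chosen automatically: the whole resource is discarded when the
 * write covers the entire buffer, otherwise only the written range.
 *
 * Example (a hypothetical sketch; 'pipe' and 'vbuf' come from the caller):
 *
 *    static const float verts[] = { 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f };
 *    pipe_buffer_write(pipe, vbuf, 0, sizeof(verts), verts);
 */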
static INLINE void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   struct pipe_box box;
   unsigned usage = PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == buf->width0) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   } else {
      usage |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write( pipe,
                                buf,
                                0,
                                usage,
                                &box,
                                data,
                                size,
                                0);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has
 * never been written before.
 */
static INLINE void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   struct pipe_box box;

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               0,
                               (PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_UNSYNCHRONIZED),
                               &box,
                               data,
                               0, 0);
}

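/**
 * Create a buffer and fill it with 'size' bytes from 'ptr'.  The initial
 * upload uses an unsynchronized write, which is safe because the buffer
 * is brand new.
 */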
static INLINE struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             unsigned usage,
                             unsigned size,
                             void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

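/**
 * Read 'size' bytes at 'offset' from a buffer into 'data' through a
 * temporary CPU mapping.  If the mapping fails, 'data' is left untouched.
 */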
static INLINE void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);

   if (map)
      memcpy(data, map, size);

   pipe_buffer_unmap(pipe, src_transfer);
}

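/**
 * Convenience wrapper around pipe_context::get_transfer for a 2D region
 * of the given mipmap level and layer (z slice) of a resource.
 */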
static INLINE struct pipe_transfer *
pipe_get_transfer( struct pipe_context *context,
                   struct pipe_resource *resource,
                   unsigned level, unsigned layer,
                   enum pipe_transfer_usage usage,
                   unsigned x, unsigned y,
                   unsigned w, unsigned h)
{
   struct pipe_box box;
   u_box_2d_zslice( x, y, layer, w, h, &box );
   return context->get_transfer( context,
                                 resource,
                                 level,
                                 usage,
                                 &box );
}

static INLINE void *
pipe_transfer_map( struct pipe_context *context,
                   struct pipe_transfer *transfer )
{
   return context->transfer_map( context, transfer );
}

static INLINE void
pipe_transfer_unmap( struct pipe_context *context,
                     struct pipe_transfer *transfer )
{
   context->transfer_unmap( context, transfer );
}


static INLINE void
pipe_transfer_destroy( struct pipe_context *context,
                       struct pipe_transfer *transfer )
{
   context->transfer_destroy(context, transfer);
}

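/**
 * Bind (or unbind, when 'buf' is NULL) an entire resource as constant
 * buffer 'index' for the given shader stage.
 *
 * Example (a hypothetical sketch; 'pipe' and 'const_buf' come from the
 * caller):
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, const_buf);
 */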
static INLINE void
pipe_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}


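/**
 * Return whether polygon offset is enabled in the rasterizer state for
 * the given fill mode (PIPE_POLYGON_MODE_x).
 */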
static INLINE boolean util_get_offset(
   const struct pipe_rasterizer_state *templ,
   unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

/**
 * Copy an array of pipe_vertex_buffer structures, properly referencing
 * the pipe_vertex_buffer::buffer member of each element and unreferencing
 * the buffers of any leftover elements in 'dst'.
 *
 * \sa util_copy_framebuffer_state
 */
static INLINE void util_copy_vertex_buffers(struct pipe_vertex_buffer *dst,
                                            unsigned *dst_count,
                                            const struct pipe_vertex_buffer *src,
                                            unsigned src_count)
{
   unsigned i;

   /* Reference the buffers of 'src' in 'dst'. */
   for (i = 0; i < src_count; i++) {
      pipe_resource_reference(&dst[i].buffer, src[i].buffer);
   }
   /* Unreference the rest of the buffers in 'dst'. */
   for (; i < *dst_count; i++) {
      pipe_resource_reference(&dst[i].buffer, NULL);
   }

   /* Update the size of 'dst' and copy over the other members
    * of pipe_vertex_buffer. */
   *dst_count = src_count;
   memcpy(dst, src, src_count * sizeof(struct pipe_vertex_buffer));
}

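/**
 * Return the minimum point size the rasterizer stage should allow, as
 * implied by the given rasterizer state.
 */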
static INLINE float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return state->gl_rasterization_rules &&
          !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

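/**
 * Zero out a query result union according to the query type, clearing
 * the member that the type actually uses.
 */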
static INLINE void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      assert(0);
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static INLINE unsigned
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */