pb_bufmgr_debug.c revision 037e7a68f504f019b409ec8cb92f0075019a90f5
/**************************************************************************
 *
 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * \file
 * Debug buffer manager to detect buffer under- and overflows.
 *
 * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */
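
/*
 * Illustrative usage (a sketch, not part of the original code): a driver
 * would typically interpose this manager between its real buffer manager
 * and the rest of the pipe driver, e.g.
 *
 *    struct pb_manager *provider;   -- the underlying winsys manager
 *    struct pb_manager *debug_mgr;
 *
 *    debug_mgr = pb_debug_manager_create(provider, 4096, 4096);
 *
 * (the guard band sizes above are arbitrary). Buffers created through
 * debug_mgr get guard bands of the requested sizes on both sides of the
 * usable range; the bands are filled with a known pattern and re-checked
 * on map, unmap, validate and destroy.
 */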


#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"
#include "util/u_debug_stack.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


#ifdef DEBUG


#define PB_DEBUG_CREATE_BACKTRACE 8
#define PB_DEBUG_MAP_BACKTRACE 8


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)
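/* For example, SUPER(buf) on a struct pb_debug_buffer pointer yields &buf->base. */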


struct pb_debug_manager;


/**
 * Wrapper around a pipe buffer which adds guard bands to detect
 * under- and overflows.
 */
struct pb_debug_buffer
{
   struct pb_buffer base;

   /* Wrapped buffer, big enough to hold the guard bands plus the usable data. */
   struct pb_buffer *buffer;
   struct pb_debug_manager *mgr;

   /* Guard band sizes actually used by this buffer. */
   pb_size underflow_size;
   pb_size overflow_size;

   struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE];

   /* Protects map_count and map_backtrace. */
   pipe_mutex mutex;
   unsigned map_count;
   struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE];

   /* Entry in pb_debug_manager::list. */
   struct list_head head;
};
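
/*
 * Layout of the underlying allocation (illustrative):
 *
 *    +-----------------+--------------------------+----------------+
 *    | underflow guard | usable data (base.size)  | overflow guard |
 *    +-----------------+--------------------------+----------------+
 *
 * pb_debug_buffer_map() returns a pointer just past the underflow guard,
 * and pb_debug_buffer_check() verifies that both guards still hold the
 * expected pattern.
 */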


/**
 * Buffer manager wrapping another manager (the provider) and keeping track
 * of all outstanding debug buffers.
 */
struct pb_debug_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   /* Guard band sizes requested at creation time. */
   pb_size underflow_size;
   pb_size overflow_size;

   /* Protects the list of outstanding buffers below. */
   pipe_mutex mutex;
   struct list_head list;
};


static INLINE struct pb_debug_buffer *
pb_debug_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_debug_buffer *)buf;
}


static INLINE struct pb_debug_manager *
pb_debug_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_debug_manager *)mgr;
}


static const uint8_t random_pattern[32] = {
   0xaf, 0xcf, 0xa5, 0xa2, 0xc2, 0x63, 0x15, 0x1a,
   0x7e, 0xe2, 0x7e, 0x84, 0x15, 0x49, 0xa2, 0x1e,
   0x49, 0x63, 0xf5, 0x52, 0x74, 0x66, 0x9e, 0xc4,
   0x6d, 0xcf, 0x2c, 0x4a, 0x74, 0xe6, 0xfd, 0x94
};


static INLINE void
fill_random_pattern(uint8_t *dst, pb_size size)
{
   pb_size i = 0;
   while(size--) {
      *dst++ = random_pattern[i++];
      /* Wrap around; works because sizeof(random_pattern) is a power of two. */
      i &= sizeof(random_pattern) - 1;
   }
}


static INLINE boolean
check_random_pattern(const uint8_t *dst, pb_size size,
                     pb_size *min_ofs, pb_size *max_ofs)
{
   boolean result = TRUE;
   pb_size i;
   *min_ofs = size;
   *max_ofs = 0;
   for(i = 0; i < size; ++i) {
      if(*dst++ != random_pattern[i % sizeof(random_pattern)]) {
         *min_ofs = MIN2(*min_ofs, i);
         *max_ofs = MAX2(*max_ofs, i);
         result = FALSE;
      }
   }
   return result;
}


static void
pb_debug_buffer_fill(struct pb_debug_buffer *buf)
{
   uint8_t *map;

   map = pb_map(buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   assert(map);
   if(map) {
      fill_random_pattern(map, buf->underflow_size);
      fill_random_pattern(map + buf->underflow_size + buf->base.size,
                          buf->overflow_size);
      pb_unmap(buf->buffer);
   }
}


/**
 * Check for under- and overflows.
 *
 * Should be called with the buffer unmapped.
 */
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
   uint8_t *map;

   map = pb_map(buf->buffer,
                PB_USAGE_CPU_READ |
                PB_USAGE_UNSYNCHRONIZED, NULL);
   assert(map);
   if(map) {
      boolean underflow, overflow;
      pb_size min_ofs, max_ofs;

      underflow = !check_random_pattern(map, buf->underflow_size,
                                        &min_ofs, &max_ofs);
      if(underflow) {
         debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
                      buf->underflow_size - min_ofs,
                      min_ofs == 0 ? "+" : "",
                      buf->underflow_size - max_ofs);
      }

      overflow = !check_random_pattern(map + buf->underflow_size + buf->base.size,
                                       buf->overflow_size,
                                       &min_ofs, &max_ofs);
      if(overflow) {
         debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
                      buf->base.size,
                      min_ofs,
                      max_ofs,
                      max_ofs == buf->overflow_size - 1 ? "+" : "");
      }

      if(underflow || overflow)
         debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      debug_assert(!underflow && !overflow);

      /* Re-fill the guard bands if the assert above did not abort. */
      if(underflow)
         fill_random_pattern(map, buf->underflow_size);
      if(overflow)
         fill_random_pattern(map + buf->underflow_size + buf->base.size,
                             buf->overflow_size);

      pb_unmap(buf->buffer);
   }
}
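
/*
 * Interpreting the diagnostics above (illustrative): if only the byte
 * immediately following the usable range were clobbered, the overflow check
 * would yield min_ofs == max_ofs == 0 and report
 * "buffer overflow (size <size> plus offset 0 to 0 bytes) detected".
 */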


static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   struct pb_debug_manager *mgr = buf->mgr;

   assert(!pipe_is_referenced(&buf->base.reference));

   pb_debug_buffer_check(buf);

   pipe_mutex_lock(mgr->mutex);
   LIST_DEL(&buf->head);
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(buf->mutex);

   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}


static void *
pb_debug_buffer_map(struct pb_buffer *_buf,
                    unsigned flags, void *flush_ctx)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   void *map;

   pb_debug_buffer_check(buf);

   map = pb_map(buf->buffer, flags, flush_ctx);
   if(!map)
      return NULL;

   pipe_mutex_lock(buf->mutex);
   ++buf->map_count;
   debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
   pipe_mutex_unlock(buf->mutex);

   /* Hide the underflow guard band from the caller. */
   return (uint8_t *)map + buf->underflow_size;
}


static void
pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);

   pipe_mutex_lock(buf->mutex);
   assert(buf->map_count);
   if(buf->map_count)
      --buf->map_count;
   pipe_mutex_unlock(buf->mutex);

   pb_unmap(buf->buffer);

   pb_debug_buffer_check(buf);
}


static void
pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
   *offset += buf->underflow_size;
}


static enum pipe_error
pb_debug_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);

   pipe_mutex_lock(buf->mutex);
   if(buf->map_count) {
      debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
      debug_printf("last map backtrace is\n");
      debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
   }
   pipe_mutex_unlock(buf->mutex);

   pb_debug_buffer_check(buf);

   return pb_validate(buf->buffer, vl, flags);
}


static void
pb_debug_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


const struct pb_vtbl
pb_debug_buffer_vtbl = {
      pb_debug_buffer_destroy,
      pb_debug_buffer_map,
      pb_debug_buffer_unmap,
      pb_debug_buffer_validate,
      pb_debug_buffer_fence,
      pb_debug_buffer_get_base_buffer
};


/**
 * Dump the list of outstanding buffers. Caller must hold mgr->mutex.
 */
static void
pb_debug_manager_dump_locked(struct pb_debug_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_debug_buffer *buf;

   curr = mgr->list.next;
   next = curr->next;
   while(curr != &mgr->list) {
      buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);

      debug_printf("buffer = %p\n", (void *) buf);
      debug_printf("    .size = 0x%x\n", buf->base.size);
      debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      curr = next;
      next = curr->next;
   }
}


static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   struct pb_debug_buffer *buf;
   struct pb_desc real_desc;
   pb_size real_size;

   assert(size);
   assert(desc->alignment);

   buf = CALLOC_STRUCT(pb_debug_buffer);
   if(!buf)
      return NULL;

   /* Allocate room for both guard bands, and make sure the CPU can read and
    * write them regardless of what the caller asked for. */
   real_size = mgr->underflow_size + size + mgr->overflow_size;
   real_desc = *desc;
   real_desc.usage |= PB_USAGE_CPU_WRITE;
   real_desc.usage |= PB_USAGE_CPU_READ;

   buf->buffer = mgr->provider->create_buffer(mgr->provider,
                                              real_size,
                                              &real_desc);
   if(!buf->buffer) {
      FREE(buf);
#if 0
      pipe_mutex_lock(mgr->mutex);
      debug_printf("%s: failed to create buffer\n", __FUNCTION__);
      if(!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump_locked(mgr);
      pipe_mutex_unlock(mgr->mutex);
#endif
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(real_desc.alignment, buf->buffer->alignment));
   assert(pb_check_usage(real_desc.usage, buf->buffer->usage));
   assert(buf->buffer->size >= real_size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;
   buf->base.size = size;

   buf->base.vtbl = &pb_debug_buffer_vtbl;
   buf->mgr = mgr;

   /* The provider may have allocated more than requested; fold any slack
    * into the overflow guard band so it gets checked too. */
   buf->underflow_size = mgr->underflow_size;
   buf->overflow_size = buf->buffer->size - buf->underflow_size - size;

   debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);

   pb_debug_buffer_fill(buf);

   pipe_mutex_init(buf->mutex);

   pipe_mutex_lock(mgr->mutex);
   LIST_ADDTAIL(&buf->head, &mgr->list);
   pipe_mutex_unlock(mgr->mutex);

   return &buf->base;
}


static void
pb_debug_manager_flush(struct pb_manager *_mgr)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_debug_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);

   pipe_mutex_lock(mgr->mutex);
   if(!LIST_IS_EMPTY(&mgr->list)) {
      debug_printf("%s: unfreed buffers\n", __FUNCTION__);
      pb_debug_manager_dump_locked(mgr);
   }
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(mgr->mutex);
   mgr->provider->destroy(mgr->provider);
   FREE(mgr);
}


struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size)
{
   struct pb_debug_manager *mgr;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_debug_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_debug_manager_destroy;
   mgr->base.create_buffer = pb_debug_manager_create_buffer;
   mgr->base.flush = pb_debug_manager_flush;
   mgr->provider = provider;
   mgr->underflow_size = underflow_size;
   mgr->overflow_size = overflow_size;

   pipe_mutex_init(mgr->mutex);
   LIST_INITHEAD(&mgr->list);

   return &mgr->base;
}


#else /* !DEBUG */


struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size)
{
   return provider;
}


#endif /* !DEBUG */