pb_bufmgr_debug.c revision 4682e706012fe26627a2f827db01b5068cc62814
/**************************************************************************
 *
 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * \file
 * Debug buffer manager to detect buffer under- and overflows.
 *
 * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */

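/*
 * How it works (summary of the code below): every allocation handed to the
 * client is padded with a guard band of mgr->underflow_size bytes in front
 * and mgr->overflow_size bytes behind, and both bands are filled with a
 * known byte pattern.  The pattern is re-checked whenever the buffer is
 * mapped, unmapped, validated or destroyed; any mismatch is reported along
 * with the backtrace captured when the buffer was created.
 */
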

#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"
#include "util/u_debug_stack.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


#ifdef DEBUG


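/* Number of stack frames captured when a buffer is created and when it is
 * mapped; the saved backtraces are dumped when corruption or a suspicious
 * operation is detected.
 */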
#define PB_DEBUG_CREATE_BACKTRACE 8
#define PB_DEBUG_MAP_BACKTRACE 8


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct pb_debug_manager;


/**
 * Wrapper around a pipe buffer which adds under- and overflow detection.
 */
struct pb_debug_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;
   struct pb_debug_manager *mgr;

   pb_size underflow_size;
   pb_size overflow_size;

   struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE];

   pipe_mutex mutex;
   unsigned map_count;
   struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE];

   struct list_head head;
};


struct pb_debug_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   pb_size underflow_size;
   pb_size overflow_size;

   pipe_mutex mutex;
   struct list_head list;
};


static INLINE struct pb_debug_buffer *
pb_debug_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_debug_buffer *)buf;
}


static INLINE struct pb_debug_manager *
pb_debug_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_debug_manager *)mgr;
}


static const uint8_t random_pattern[32] = {
   0xaf, 0xcf, 0xa5, 0xa2, 0xc2, 0x63, 0x15, 0x1a,
   0x7e, 0xe2, 0x7e, 0x84, 0x15, 0x49, 0xa2, 0x1e,
   0x49, 0x63, 0xf5, 0x52, 0x74, 0x66, 0x9e, 0xc4,
   0x6d, 0xcf, 0x2c, 0x4a, 0x74, 0xe6, 0xfd, 0x94
};


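/* Helpers that fill and verify the guard pattern.  check_random_pattern()
 * records the lowest and highest mismatching offsets so the corrupted range
 * can be reported.  The index wrap in fill_random_pattern()
 * (i &= sizeof(random_pattern) - 1) relies on the pattern size being a
 * power of two.
 */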
static INLINE void
fill_random_pattern(uint8_t *dst, pb_size size)
{
   pb_size i = 0;
   while(size--) {
      *dst++ = random_pattern[i++];
      i &= sizeof(random_pattern) - 1;
   }
}


static INLINE boolean
check_random_pattern(const uint8_t *dst, pb_size size,
                     pb_size *min_ofs, pb_size *max_ofs)
{
   boolean result = TRUE;
   pb_size i;
   *min_ofs = size;
   *max_ofs = 0;
   for(i = 0; i < size; ++i) {
      if(*dst++ != random_pattern[i % sizeof(random_pattern)]) {
         *min_ofs = MIN2(*min_ofs, i);
         *max_ofs = MAX2(*max_ofs, i);
         result = FALSE;
      }
   }
   return result;
}


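/* Layout of the underlying buffer allocated from the provider:
 *
 *   | underflow_size bytes | base.size bytes (client data) | overflow_size bytes |
 *   |     guard pattern    |                               |    guard pattern    |
 *
 * pb_debug_buffer_fill() writes the pattern into both guard bands;
 * pb_debug_buffer_check() verifies that it is still intact.
 */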
static void
pb_debug_buffer_fill(struct pb_debug_buffer *buf)
{
   uint8_t *map;

   map = pb_map(buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   assert(map);
   if(map) {
      fill_random_pattern(map, buf->underflow_size);
      fill_random_pattern(map + buf->underflow_size + buf->base.size,
                          buf->overflow_size);
      pb_unmap(buf->buffer);
   }
}


/**
 * Check for under/over flows.
 *
 * Should be called with the buffer unmapped.
 */
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
   uint8_t *map;

   map = pb_map(buf->buffer,
                PB_USAGE_CPU_READ |
                PB_USAGE_UNSYNCHRONIZED, NULL);
   assert(map);
   if(map) {
      boolean underflow, overflow;
      pb_size min_ofs, max_ofs;

      underflow = !check_random_pattern(map, buf->underflow_size,
                                        &min_ofs, &max_ofs);
      if(underflow) {
         debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
                      buf->underflow_size - min_ofs,
                      min_ofs == 0 ? "+" : "",
                      buf->underflow_size - max_ofs);
      }

      overflow = !check_random_pattern(map + buf->underflow_size + buf->base.size,
                                       buf->overflow_size,
                                       &min_ofs, &max_ofs);
      if(overflow) {
         debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
                      buf->base.size,
                      min_ofs,
                      max_ofs,
                      max_ofs == buf->overflow_size - 1 ? "+" : "");
      }

      if(underflow || overflow)
         debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      debug_assert(!underflow && !overflow);

      /* re-fill if not aborted */
      if(underflow)
         fill_random_pattern(map, buf->underflow_size);
      if(overflow)
         fill_random_pattern(map + buf->underflow_size + buf->base.size,
                             buf->overflow_size);

      pb_unmap(buf->buffer);
   }
}


static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   struct pb_debug_manager *mgr = buf->mgr;

   assert(!pipe_is_referenced(&buf->base.reference));

   pb_debug_buffer_check(buf);

   pipe_mutex_lock(mgr->mutex);
   LIST_DEL(&buf->head);
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(buf->mutex);

   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}


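/**
 * Map entry point seen by the client: check the guard bands, map the
 * underlying buffer, record a backtrace of the caller, and return a pointer
 * advanced past the underflow guard band so the client only sees its own
 * base.size bytes.
 */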
static void *
pb_debug_buffer_map(struct pb_buffer *_buf,
                    unsigned flags, void *flush_ctx)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   void *map;

   pb_debug_buffer_check(buf);

   map = pb_map(buf->buffer, flags, flush_ctx);
   if(!map)
      return NULL;

   pipe_mutex_lock(buf->mutex);
   ++buf->map_count;
   debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
   pipe_mutex_unlock(buf->mutex);

   return (uint8_t *)map + buf->underflow_size;
}


static void
pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);

   pipe_mutex_lock(buf->mutex);
   assert(buf->map_count);
   if(buf->map_count)
      --buf->map_count;
   pipe_mutex_unlock(buf->mutex);

   pb_unmap(buf->buffer);

   pb_debug_buffer_check(buf);
}


static void
pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
   *offset += buf->underflow_size;
}


static enum pipe_error
pb_debug_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);

   pipe_mutex_lock(buf->mutex);
   if(buf->map_count) {
      debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
      debug_printf("last map backtrace is\n");
      debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
   }
   pipe_mutex_unlock(buf->mutex);

   pb_debug_buffer_check(buf);

   return pb_validate(buf->buffer, vl, flags);
}


static void
pb_debug_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


const struct pb_vtbl
pb_debug_buffer_vtbl = {
      pb_debug_buffer_destroy,
      pb_debug_buffer_map,
      pb_debug_buffer_unmap,
      pb_debug_buffer_validate,
      pb_debug_buffer_fence,
      pb_debug_buffer_get_base_buffer
};


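/**
 * Dump every buffer still alive in this manager, together with the
 * backtrace captured when it was created.  Used to report leaked buffers
 * when the manager is destroyed.  The caller must already hold mgr->mutex;
 * both call sites in this file take it before calling in.
 */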
static void
pb_debug_manager_dump(struct pb_debug_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_debug_buffer *buf;

   curr = mgr->list.next;
   next = curr->next;
   while(curr != &mgr->list) {
      buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);

      debug_printf("buffer = %p\n", (void *) buf);
      debug_printf("    .size = 0x%x\n", buf->base.size);
      debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      curr = next;
      next = curr->next;
   }
}


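/**
 * Allocate a buffer from the provider that is large enough for the
 * requested size plus both guard bands.  CPU read/write usage is forced so
 * the guard pattern can be written and verified.  The wrapper exposes the
 * originally requested size and alignment to the client and remembers where
 * the guard bands sit.
 */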
static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   struct pb_debug_buffer *buf;
   struct pb_desc real_desc;
   pb_size real_size;

   assert(size);
   assert(desc->alignment);

   buf = CALLOC_STRUCT(pb_debug_buffer);
   if(!buf)
      return NULL;

   real_size = mgr->underflow_size + size + mgr->overflow_size;
   real_desc = *desc;
   real_desc.usage |= PB_USAGE_CPU_WRITE;
   real_desc.usage |= PB_USAGE_CPU_READ;

   buf->buffer = mgr->provider->create_buffer(mgr->provider,
                                              real_size,
                                              &real_desc);
   if(!buf->buffer) {
      FREE(buf);
#if 0
      pipe_mutex_lock(mgr->mutex);
      debug_printf("%s: failed to create buffer\n", __FUNCTION__);
      if(!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump(mgr);
      pipe_mutex_unlock(mgr->mutex);
#endif
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(real_desc.alignment, buf->buffer->alignment));
   assert(pb_check_usage(real_desc.usage, buf->buffer->usage));
   assert(buf->buffer->size >= real_size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;
   buf->base.size = size;

   buf->base.vtbl = &pb_debug_buffer_vtbl;
   buf->mgr = mgr;

   buf->underflow_size = mgr->underflow_size;
   buf->overflow_size = buf->buffer->size - buf->underflow_size - size;

   debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);

   pb_debug_buffer_fill(buf);

   pipe_mutex_init(buf->mutex);

   pipe_mutex_lock(mgr->mutex);
   LIST_ADDTAIL(&buf->head, &mgr->list);
   pipe_mutex_unlock(mgr->mutex);

   return &buf->base;
}


static void
pb_debug_manager_flush(struct pb_manager *_mgr)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_debug_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);

   pipe_mutex_lock(mgr->mutex);
   if(!LIST_IS_EMPTY(&mgr->list)) {
      debug_printf("%s: unfreed buffers\n", __FUNCTION__);
      pb_debug_manager_dump(mgr);
   }
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(mgr->mutex);
   mgr->provider->destroy(mgr->provider);
   FREE(mgr);
}


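/**
 * Create the debug manager.  A minimal, illustrative sketch of wrapping an
 * existing manager (the provider variable and the 4096-byte guard band
 * sizes are example choices, not values taken from this file):
 *
 *    struct pb_manager *provider = ...;   (any existing pb_manager)
 *    struct pb_manager *debug_mgr;
 *
 *    debug_mgr = pb_debug_manager_create(provider, 4096, 4096);
 *
 * Buffers allocated through debug_mgr are then surrounded by guard bands.
 * In non-DEBUG builds pb_debug_manager_create() simply returns the provider
 * unchanged (see the #else branch below).
 */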
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size)
{
   struct pb_debug_manager *mgr;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_debug_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_debug_manager_destroy;
   mgr->base.create_buffer = pb_debug_manager_create_buffer;
   mgr->base.flush = pb_debug_manager_flush;
   mgr->provider = provider;
   mgr->underflow_size = underflow_size;
   mgr->overflow_size = overflow_size;

   pipe_mutex_init(mgr->mutex);
   LIST_INITHEAD(&mgr->list);

   return &mgr->base;
}


#else /* !DEBUG */


struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size)
{
   return provider;
}


#endif /* !DEBUG */