/**************************************************************************
 *
 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer cache.
 *
 * A buffer manager that implements delayed destruction: released buffers
 * are kept around for a fixed time interval, so that subsequent allocation
 * requests of compatible size, alignment and usage can recycle them instead
 * of hitting the underlying provider.
 *
 * \author Jose Fonseca <jrfonseca-at-tungstengraphics-dot-com>
 * \author Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
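
/*
 * Example usage, as a minimal sketch.  Assumptions not taken from this
 * file: pb_malloc_bufmgr_create() merely stands in for whatever pb_manager
 * the driver already uses as a provider, and the desc values are arbitrary.
 *
 *    struct pb_manager *provider = pb_malloc_bufmgr_create();
 *    struct pb_manager *cached = pb_cache_manager_create(provider, 1000000);
 *    struct pb_desc desc;
 *    struct pb_buffer *buf;
 *
 *    memset(&desc, 0, sizeof desc);
 *    desc.alignment = 4096;
 *    desc.usage = PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE;
 *    buf = cached->create_buffer(cached, 64 * 1024, &desc);
 *
 *    pb_reference(&buf, NULL);    (released into the cache, not freed)
 *    cached->destroy(cached);     (drains the cache, frees the manager)
 */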


#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type-safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct pb_cache_manager;


/**
 * Wrapper around a pipe buffer which adds delayed destruction.
 */
struct pb_cache_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;
   struct pb_cache_manager *mgr;

   /** Caching time interval, in microseconds (absolute os_time_get() values) */
   int64_t start, end;

   struct list_head head;
};


struct pb_cache_manager
{
   struct pb_manager base;

   struct pb_manager *provider;
   unsigned usecs;   /**< how long a released buffer stays cached */

   pipe_mutex mutex;

   /** Zero-reference-count buffers, ordered by expiration time (oldest first) */
   struct list_head delayed;
   pb_size numDelayed;
};


static INLINE struct pb_cache_buffer *
pb_cache_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_cache_buffer *)buf;
}


static INLINE struct pb_cache_manager *
pb_cache_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_cache_manager *)mgr;
}


/**
 * Actually destroy the buffer.
 */
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   LIST_DEL(&buf->head);
   assert(mgr->numDelayed);
   --mgr->numDelayed;
   assert(!pipe_is_referenced(&buf->base.reference));
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}


/**
 * Free all expired cache buffers at the head of the list.  The list is
 * ordered by expiration time, so we can stop at the first buffer that has
 * not expired yet.
 */
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;
   int64_t now;

   now = os_time_get();

   curr = mgr->delayed.next;
   next = curr->next;
   while (curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);

      if (!os_time_timeout(buf->start, buf->end, now))
         break;

      _pb_cache_buffer_destroy(buf);

      curr = next;
      next = curr->next;
   }
}


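/**
 * vtbl destroy hook: instead of freeing the buffer when its reference
 * count drops to zero, stamp it with an expiration time and append it to
 * the delayed-destruction list for possible reuse.
 */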
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->base.reference));

   _pb_cache_buffer_list_check_free(mgr);

   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;
   pipe_mutex_unlock(mgr->mutex);
}


static void *
pb_cache_buffer_map(struct pb_buffer *_buf,
                    unsigned flags, void *flush_ctx)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_map(buf->buffer, flags, flush_ctx);
}


static void
pb_cache_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_unmap(buf->buffer);
}


static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   return pb_validate(buf->buffer, vl, flags);
}


static void
pb_cache_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
}


const struct pb_vtbl
pb_cache_buffer_vtbl = {
   pb_cache_buffer_destroy,
   pb_cache_buffer_map,
   pb_cache_buffer_unmap,
   pb_cache_buffer_validate,
   pb_cache_buffer_fence,
   pb_cache_buffer_get_base_buffer
};


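/**
 * Check whether a cached buffer can back an allocation request.
 *
 * Returns 1 if the buffer is compatible and idle, 0 if it is not
 * compatible, and -1 if it is compatible but still busy; in the latter
 * case the callers stop searching, on the assumption that buffers
 * released later are at least as likely to still be busy.
 */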
static INLINE int
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
                          pb_size size,
                          const struct pb_desc *desc)
{
   if (buf->base.size < size)
      return 0;

   /* be lenient with size, but don't waste more than half of the buffer */
   if (buf->base.size >= 2*size)
      return 0;

   if (!pb_check_alignment(desc->alignment, buf->base.alignment))
      return 0;

   if (!pb_check_usage(desc->usage, buf->base.usage))
      return 0;

   if (buf->mgr->provider->is_buffer_busy) {
      if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
         return -1;
   } else {
      /* No busy query available: probe with a non-blocking map instead.
       * A failed map means the buffer is still in use. */
      void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);

      if (!ptr)
         return -1;

      pb_unmap(buf->buffer);
   }

   return 1;
}


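/**
 * Allocation entry point: try to recycle a cached buffer first (destroying
 * expired buffers along the way), and fall back to the provider only when
 * nothing in the cache fits.
 */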
static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   int64_t now;
   int ret = 0;

   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while (curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      if (!buf && (ret = pb_cache_is_buffer_compat(curr_buf, size, desc)) > 0)
         buf = curr_buf;
      else if (os_time_timeout(curr_buf->start, curr_buf->end, now))
         _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and all that follow it) is still hot in cache */
         break;
      if (ret == -1)
         break;
      curr = next;
      next = curr->next;
   }

   /* keep searching in the hot buffers */
   if (!buf && ret != -1) {
      while (curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         ret = pb_cache_is_buffer_compat(curr_buf, size, desc);
         if (ret > 0) {
            buf = curr_buf;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }

   if (buf) {
      LIST_DEL(&buf->head);
      --mgr->numDelayed;
      pipe_mutex_unlock(mgr->mutex);
      /* Resurrect the buffer: its reference count dropped to zero when it
       * entered the cache. */
      pipe_reference_init(&buf->base.reference, 1);
      return &buf->base;
   }

   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if (!buf)
      return NULL;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);

   /* Empty the cache and try again. */
   if (!buf->buffer) {
      mgr->base.flush(&mgr->base);
      buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   }

   if (!buf->buffer) {
      FREE(buf);
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
   assert(pb_check_usage(desc->usage, buf->buffer->usage));
   assert(buf->buffer->size >= size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = buf->buffer->alignment;
   buf->base.usage = buf->buffer->usage;
   buf->base.size = buf->buffer->size;

   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;

   return &buf->base;
}


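/**
 * Destroy all delayed buffers immediately, then flush the provider too.
 */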
static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
   curr = mgr->delayed.next;
   next = curr->next;
   while (curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      _pb_cache_buffer_destroy(buf);
      curr = next;
      next = curr->next;
   }
   pipe_mutex_unlock(mgr->mutex);

   assert(mgr->provider->flush);
   if (mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


static void
pb_cache_manager_destroy(struct pb_manager *mgr)
{
   pb_cache_manager_flush(mgr);
   FREE(mgr);
}


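/**
 * Create a caching buffer manager.
 *
 * \param provider  the buffer manager that actually allocates the buffers
 * \param usecs     how long, in microseconds, a released buffer is cached
 *                  before it is really destroyed
 */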
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
                        unsigned usecs)
{
   struct pb_cache_manager *mgr;

   if (!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   pipe_mutex_init(mgr->mutex);

   return &mgr->base;
}