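/* Simple slab suballocator: small allocations are carved out of larger
 * nouveau_bo objects so that not every request needs its own buffer object.
 */
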
#include <inttypes.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "nouveau_winsys.h"
#include "nouveau_screen.h"
#include "nouveau_mm.h"

#define MM_MIN_ORDER 7
#define MM_MAX_ORDER 20

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)

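/* Allocations are grouped into buckets by chunk size (one power of two per
 * bucket, from MM_MIN_ORDER to MM_MAX_ORDER); each bucket keeps its slabs on
 * free, used and full lists according to how many chunks remain available.
 */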
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
   int num_free;
};

struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t domain;
   union nouveau_bo_config config;
   uint64_t allocated;
};

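/* A slab is a single bo split into equally sized chunks of (1 << order)
 * bytes; bits[] holds one bit per chunk (set = chunk is free).
 */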
struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nouveau_mman *cache;
   int order;
   int count;
   int free;
   uint32_t bits[0];
};

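/* Grab the first free chunk in the slab: clear its bit and return its index,
 * or -1 if no chunk is left.
 */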
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}

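/* Return chunk i to the slab by setting its bit again. */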
static INLINE void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}

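/* Smallest order with (1 << order) >= size, i.e. ceil(log2(size)). */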
static INLINE int
mm_get_order(uint32_t size)
{
   int s = __builtin_clz(size) ^ 31;

   if (size > (1 << s))
      s += 1;
   return s;
}

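/* Map an order to its bucket: requests below MM_MIN_SIZE share the smallest
 * bucket, requests above MM_MAX_SIZE are not cached at all (NULL).
 */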
static struct mm_bucket *
mm_bucket_by_order(struct nouveau_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}

static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}

/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static INLINE uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}

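/* Allocate a new bo, wrap it in a slab of (1 << chunk_order)-byte chunks and
 * put it on the bucket's free list.
 */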
static int
mm_slab_new(struct nouveau_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;

   ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
                        &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   if (nouveau_mesa_debug)
      debug_printf("MM: new slab, total memory = %"PRIu64" KiB\n",
                   cache->allocated / 1024);

   return PIPE_OK;
}

/* @return token identifying the slab, to be passed to nouveau_mm_free(), or
 * NULL if the request was too large to cache and a dedicated bo was
 * allocated instead
 */
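/* Typical call sequence, as a sketch only ("screen->mm_GART" and the 4 KiB
 * size are illustrative assumptions, not part of this interface):
 *
 *    struct nouveau_bo *bo = NULL;
 *    uint32_t offset;
 *    struct nouveau_mm_allocation *mm =
 *       nouveau_mm_allocate(screen->mm_GART, 0x1000, &bo, &offset);
 *
 *    ... use bo at byte offset "offset" ...
 *
 *    if (mm)
 *       nouveau_mm_free(mm);
 *    nouveau_bo_ref(NULL, &bo);
 *
 * nouveau_mm_free can also be deferred through nouveau_mm_free_work once the
 * GPU is done with the range.
 */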
struct nouveau_mm_allocation *
nouveau_mm_allocate(struct nouveau_mman *cache,
                    uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nouveau_mm_allocation *alloc;
   int ret;

   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
                           bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n",
                      size, cache->config.nv50.memtype, ret);

      *offset = 0;
      return NULL;
   }

   if (!LIST_IS_EMPTY(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (LIST_IS_EMPTY(&bucket->free)) {
         ret = mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
         if (ret)
            return NULL;
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->used);
   }

   *offset = mm_slab_alloc(slab) << slab->order;

   alloc = MALLOC_STRUCT(nouveau_mm_allocation);
   if (!alloc)
      return NULL;

   nouveau_bo_ref(slab->bo, bo);

   if (slab->free == 0) {
      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->full);
   }

   alloc->next = NULL;
   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}

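/* Give a suballocation back to its slab and move the slab to the used or
 * free list if its occupancy changed category.
 */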
void
nouveau_mm_free(struct nouveau_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

   if (slab->free == slab->count) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->free);
   } else
   if (slab->free == 1) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->used);
   }

   FREE(alloc);
}

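/* void * wrapper so nouveau_mm_free can be used directly as a deferred-work
 * callback.
 */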
void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free(data);
}

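/* Create a suballocator for bos allocated from the given memory domain with
 * the given config.
 */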
struct nouveau_mman *
nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
                  union nouveau_bo_config *config)
{
   struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->config = *config;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      LIST_INITHEAD(&cache->bucket[i].free);
      LIST_INITHEAD(&cache->bucket[i].used);
      LIST_INITHEAD(&cache->bucket[i].full);
   }

   return cache;
}

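/* Release every slab on the list together with its backing bo. */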
static INLINE void
nouveau_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      LIST_DEL(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}

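/* Tear down the cache; warns (but still frees everything) if some
 * suballocations are still in use.
 */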
void
nouveau_mm_destroy(struct nouveau_mman *cache)
{
   int i;

   if (!cache)
      return;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
          !LIST_IS_EMPTY(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nouveau_mm_free_slabs(&cache->bucket[i].free);
      nouveau_mm_free_slabs(&cache->bucket[i].used);
      nouveau_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}