/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA.
 * All Rights Reserved.
 * Copyright 2009 VMware, Inc., Palo Alto, CA., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a
 * base class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be
 * substantial performance gains if a smarter free list were implemented.
 * Currently it is just an unordered stack of free regions; this could
 * easily be improved by using an RB-tree instead, at least if heavy
 * fragmentation is expected.
 *
 * Note that this implementation is more or less identical to the drm core
 * memory manager in the Linux kernel.
 *
 * See the usage sketch at the end of this file for how the entry points
 * fit together.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "wsbm_mm.h"
#include <errno.h>
#include <stdlib.h>

/*
 * Return the size of the free block at the tail of the managed range,
 * or 0 if the tail block is allocated.
 */
unsigned long
wsbmMMTailSpace(struct _WsbmMM *mm)
{
    struct _WsbmListHead *tail_node;
    struct _WsbmMMNode *entry;

    tail_node = mm->ml_entry.prev;
    entry = WSBMLISTENTRY(tail_node, struct _WsbmMMNode, ml_entry);

    if (!entry->free)
        return 0;

    return entry->size;
}

/*
 * Shrink the free block at the tail of the managed range by size bytes.
 * Fails if the tail block is allocated or not strictly larger than size.
 */
int
wsbmMMRemoveSpaceFromTail(struct _WsbmMM *mm, unsigned long size)
{
    struct _WsbmListHead *tail_node;
    struct _WsbmMMNode *entry;

    tail_node = mm->ml_entry.prev;
    entry = WSBMLISTENTRY(tail_node, struct _WsbmMMNode, ml_entry);

    if (!entry->free)
        return -ENOMEM;

    if (entry->size <= size)
        return -ENOMEM;

    entry->size -= size;
    return 0;
}

/*
 * Append a new free node covering [start, start + size) to the end of the
 * managed range.
 */
static int
wsbmMMCreateTailNode(struct _WsbmMM *mm,
                     unsigned long start, unsigned long size)
{
    struct _WsbmMMNode *child;

    child = (struct _WsbmMMNode *)malloc(sizeof(*child));
    if (!child)
        return -ENOMEM;

    child->free = 1;
    child->size = size;
    child->start = start;
    child->mm = mm;

    WSBMLISTADDTAIL(&child->ml_entry, &mm->ml_entry);
    WSBMLISTADDTAIL(&child->fl_entry, &mm->fl_entry);

    return 0;
}

/*
 * Split an allocated node of the given size off the start of the free
 * parent node. The parent keeps the remainder.
 */
static struct _WsbmMMNode *
wsbmMMSplitAtStart(struct _WsbmMMNode *parent, unsigned long size)
{
    struct _WsbmMMNode *child;

    child = (struct _WsbmMMNode *)malloc(sizeof(*child));
    if (!child)
        return NULL;

    WSBMINITLISTHEAD(&child->fl_entry);

    child->free = 0;
    child->size = size;
    child->start = parent->start;
    child->mm = parent->mm;

    WSBMLISTADDTAIL(&child->ml_entry, &parent->ml_entry);

    parent->size -= size;
    parent->start += size;
    return child;
}

/*
 * Allocate size bytes from the free node parent, honoring the requested
 * alignment by first splitting off any misaligned head of the node.
 */
struct _WsbmMMNode *
wsbmMMGetBlock(struct _WsbmMMNode *parent,
               unsigned long size, unsigned alignment)
{
    struct _WsbmMMNode *align_splitoff = NULL;
    struct _WsbmMMNode *child;
    unsigned tmp = 0;

    if (alignment)
        tmp = parent->start % alignment;

    if (tmp) {
        align_splitoff = wsbmMMSplitAtStart(parent, alignment - tmp);
        if (!align_splitoff)
            return NULL;
    }

    if (parent->size == size) {
        /* Exact fit: take over the whole node. */
        WSBMLISTDELINIT(&parent->fl_entry);
        parent->free = 0;
        child = parent;
    } else {
        child = wsbmMMSplitAtStart(parent, size);
    }

    /* Hand any alignment split-off back so it can rejoin the free list. */
    if (align_splitoff)
        wsbmMMPutBlock(align_splitoff);

    return child;
}
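
/*
 * Worked example of the alignment handling above (made-up numbers): if
 * parent->start is 100 and alignment is 64, then tmp is 100 % 64 = 36, so
 * a 64 - 36 = 28 byte node is split off first. The block actually returned
 * then starts at offset 128, which is 64-byte aligned, and the 28-byte
 * split-off is handed straight back to wsbmMMPutBlock() so it can rejoin
 * the free list.
 */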

/*
 * Put back a block. Merge it with the previous and/or next block if they
 * are free; otherwise add it to the free stack.
 */
void
wsbmMMPutBlock(struct _WsbmMMNode *cur)
{
    struct _WsbmMM *mm = cur->mm;
    struct _WsbmListHead *cur_head = &cur->ml_entry;
    struct _WsbmListHead *root_head = &mm->ml_entry;
    struct _WsbmMMNode *prev_node = NULL;
    struct _WsbmMMNode *next_node;

    int merged = 0;

    if (cur_head->prev != root_head) {
        prev_node =
            WSBMLISTENTRY(cur_head->prev, struct _WsbmMMNode, ml_entry);
        if (prev_node->free) {
            prev_node->size += cur->size;
            merged = 1;
        }
    }
    if (cur_head->next != root_head) {
        next_node =
            WSBMLISTENTRY(cur_head->next, struct _WsbmMMNode, ml_entry);
        if (next_node->free) {
            if (merged) {
                prev_node->size += next_node->size;
                WSBMLISTDEL(&next_node->ml_entry);
                WSBMLISTDEL(&next_node->fl_entry);
                free(next_node);
            } else {
                next_node->size += cur->size;
                next_node->start = cur->start;
                merged = 1;
            }
        }
    }
    if (!merged) {
        cur->free = 1;
        WSBMLISTADD(&cur->fl_entry, &mm->fl_entry);
    } else {
        WSBMLISTDEL(&cur->ml_entry);
        free(cur);
    }
}

/*
 * Search the free stack for a block that can hold size bytes at the
 * requested alignment. With best_match set, return the smallest such
 * block; otherwise return the first one found.
 */
struct _WsbmMMNode *
wsbmMMSearchFree(const struct _WsbmMM *mm,
                 unsigned long size, unsigned alignment, int best_match)
{
    struct _WsbmListHead *list;
    const struct _WsbmListHead *free_stack = &mm->fl_entry;
    struct _WsbmMMNode *entry;
    struct _WsbmMMNode *best;
    unsigned long best_size;
    unsigned wasted;

    best = NULL;
    best_size = ~0UL;

    WSBMLISTFOREACH(list, free_stack) {
        entry = WSBMLISTENTRY(list, struct _WsbmMMNode, fl_entry);

        wasted = 0;

        if (entry->size < size)
            continue;

        if (alignment) {
            register unsigned tmp = entry->start % alignment;

            if (tmp)
                wasted += alignment - tmp;
        }

        if (entry->size >= size + wasted) {
            if (!best_match)
                return entry;
            if (entry->size < best_size) {
                best = entry;
                best_size = entry->size;
            }
        }
    }

    return best;
}

/*
 * Return nonzero if the manager holds only its single initial free block,
 * i.e. no allocations are outstanding.
 */
int
wsbmMMclean(struct _WsbmMM *mm)
{
    struct _WsbmListHead *head = &mm->ml_entry;

    return (head->next->next == head);
}

/*
 * Initialize the manager to cover [start, start + size) as one free block.
 */
int
wsbmMMinit(struct _WsbmMM *mm, unsigned long start, unsigned long size)
{
    WSBMINITLISTHEAD(&mm->ml_entry);
    WSBMINITLISTHEAD(&mm->fl_entry);

    return wsbmMMCreateTailNode(mm, start, size);
}

/*
 * Tear down the manager. This is a no-op unless the manager is clean,
 * i.e. all blocks have been put back.
 */
void
wsbmMMtakedown(struct _WsbmMM *mm)
{
    struct _WsbmListHead *bnode = mm->fl_entry.next;
    struct _WsbmMMNode *entry;

    entry = WSBMLISTENTRY(bnode, struct _WsbmMMNode, fl_entry);

    if (entry->ml_entry.next != &mm->ml_entry ||
        entry->fl_entry.next != &mm->fl_entry) {
        return;
    }

    WSBMLISTDEL(&entry->fl_entry);
    WSBMLISTDEL(&entry->ml_entry);
    free(entry);
}
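
/*
 * Minimal usage sketch (illustrative only, not compiled into the library):
 * shows the typical init / search / get / put / takedown sequence. The
 * function name and the pool offset, size, and alignment values below are
 * made up for illustration.
 */
#if 0
static int
wsbmMMUsageExample(void)
{
    struct _WsbmMM mm;
    struct _WsbmMMNode *node;
    int ret;

    /* Manage a 1 MiB range starting at offset 0. */
    ret = wsbmMMinit(&mm, 0, 1024 * 1024);
    if (ret)
        return ret;

    /* Look for 4096 bytes at 256-byte alignment, preferring the best fit. */
    node = wsbmMMSearchFree(&mm, 4096, 256, 1);
    if (node)
        node = wsbmMMGetBlock(node, 4096, 256);

    /* node->start is the allocated offset; use it, then release the block. */
    if (node)
        wsbmMMPutBlock(node);

    wsbmMMtakedown(&mm);
    return node ? 0 : -ENOMEM;
}
#endif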