ttm_memory.c revision 336f5899d287f06d8329e208fc14ce50f7ec9698
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};
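
/*
 * The attributes above are exposed per zone under the global
 * "memory_accounting" kobject created in ttm_mem_global_init(), i.e. as
 * .../memory_accounting/<zone name>/<attribute> in sysfs. All values are
 * reported and accepted in KiB.
 */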

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	printk(KERN_INFO TTM_PFX
	       "Zone %7s: Used memory at exit: %llu kiB.\n",
	       zone->name, (unsigned long long) zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return -EINVAL;	/* reject input we cannot parse */

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
};

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/*
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;

	spin_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL);
}
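
/*
 * Example: registering a shrink callback. A minimal sketch, assuming the
 * ttm_mem_init_shrink()/ttm_mem_register_shrink() helpers declared in
 * ttm/ttm_memory.h; my_shrink() and my_shrink_obj are hypothetical
 * driver code:
 *
 *	static int my_shrink(struct ttm_mem_shrink *shrink)
 *	{
 *		return my_driver_swap_out_one_buffer();
 *	}
 *
 *	ttm_mem_init_shrink(&my_shrink_obj, my_shrink);
 *	ret = ttm_mem_register_shrink(glob, &my_shrink_obj);
 *
 * ttm_shrink() keeps invoking do_shrink() until every zone is back under
 * its target or the callback returns nonzero.
 */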

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	/*
	 * Default policy: allow 50% of the zone for ordinary use and 75%
	 * for CAP_SYS_ADMIN (emergency) allocations; start swapping when
	 * usage exceeds 37.5%.
	 */
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/*
	 * No special dma32 zone needed.
	 */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4 GiB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif
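
/*
 * Worked example of the default zone limits for a hypothetical machine
 * with 8 GiB of low memory: zone_mem = 8 GiB, max_mem = 4 GiB,
 * emer_mem = 6 GiB and swap_limit = 4 GiB - 1 GiB = 3 GiB.
 */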

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	if (unlikely(glob->swap_queue == NULL))
		return -ENOMEM;
	INIT_WORK(&glob->work, ttm_shrink_work);
	init_waitqueue_head(&glob->queue);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		destroy_workqueue(glob->swap_queue);
		glob->swap_queue = NULL;
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		printk(KERN_INFO TTM_PFX
		       "Zone %7s: Available graphics memory: %llu kiB.\n",
		       zone->name, (unsigned long long) zone->max_mem >> 10);
	}
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob, single_zone,
					       memory, true) != 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */
	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
					 interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
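
/*
 * Example: accounting a driver-side allocation against all zones. A
 * minimal sketch; glob, size and obj are hypothetical driver state, and
 * error handling is reduced to the essentials. Whatever is accounted
 * here must eventually be returned with ttm_mem_global_free():
 *
 *	ret = ttm_mem_global_alloc(glob, size, false, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	obj = kmalloc(size, GFP_KERNEL);
 *	if (unlikely(obj == NULL)) {
 *		ttm_mem_global_free(glob, size);
 *		return -ENOMEM;
 *	}
 */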

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page,
			      bool no_wait, bool interruptible)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone only:
	 * the highmem zone for highmem pages, or, when a dma32 zone
	 * exists, the kernel zone alone for pages above the 4 GiB
	 * boundary (pfn 0x00100000 with 4 KiB pages).
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

/*
 * Round up to a power of two for sizes up to PAGE_SIZE, and to a
 * multiple of PAGE_SIZE for larger sizes; sizes that are already a
 * power of two are returned unchanged.
 */
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
}
EXPORT_SYMBOL(ttm_round_pot);
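
/*
 * Worked examples for ttm_round_pot(), assuming 4 KiB pages:
 * ttm_round_pot(100) == 128, ttm_round_pot(4096) == 4096, and
 * ttm_round_pot(5000) == 8192 (page-aligned, since 5000 > PAGE_SIZE).
 */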