1/************************************************************************** 2 * 3 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27/* 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 29 */ 30/** @file ttm_ref_object.c 31 * 32 * Base- and reference object implementation for the various 33 * ttm objects. Implements reference counting, minimal security checks 34 * and release on file close. 35 */ 36 37/** 38 * struct ttm_object_file 39 * 40 * @tdev: Pointer to the ttm_object_device. 41 * 42 * @lock: Lock that protects the ref_list list and the 43 * ref_hash hash tables. 44 * 45 * @ref_list: List of ttm_ref_objects to be destroyed at 46 * file release. 
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/atomic.h>

struct ttm_object_file {
	struct ttm_object_device *tdev;
	rwlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	rwlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @ref_type: Type of ref object.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @tfile: The ttm_object_file that holds this reference.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	enum ttm_ref_type ref_type;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

/*
 * Take a reference on @tfile and return it, for convenient assignment.
 */
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

/*
 * Kref release callback for a ttm_object_file: frees the structure.
 * Only invoked (via kref_put) once the last reference is gone, so no
 * locking is needed here.
 */
static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}


/*
 * Drop a reference on *@p_tfile and clear the caller's pointer so it
 * cannot be used after a potential free.
 */
static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}


/*
 * Initialize @base and publish it in the device-global name hash.
 *
 * The base object's refcount starts at 1; a TTM_REF_USAGE ref object is
 * then added for @tfile (which takes its own reference on @base), after
 * which the initial reference is dropped. The object's lifetime is thus
 * governed entirely by its ref objects from here on.
 *
 * @refcount_release is called when the last reference to @base goes away;
 * @ref_obj_release is called when a non-USAGE ref object for it is
 * destroyed.
 *
 * Returns 0 on success, or a negative error code from the hash insertion
 * or from ttm_ref_object_add().
 *
 * NOTE(review): on the error paths the reference taken on @tfile above is
 * not dropped here — presumably the caller's teardown covers it; confirm.
 */
int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	write_lock(&tdev->object_lock);
	kref_init(&base->refcount);
	/* Pick a free 31-bit key ("name") and insert under the lock so
	 * lookups never see a half-initialized object. */
	ret = drm_ht_just_insert_please(&tdev->object_hash,
					&base->hash,
					(unsigned long)base, 31, 0, 0);
	write_unlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	/* Drop the initial reference; the USAGE ref now keeps @base alive. */
	ttm_base_object_unref(&base);

	return 0;
out_err1:
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);

/*
 * Kref release callback for a base object.
 *
 * Called from kref_put() in ttm_base_object_unref() with
 * tdev->object_lock write-held. The object is unhashed first (still under
 * the lock, so concurrent lookups cannot revive it), then the lock is
 * dropped around the potentially sleeping @refcount_release callback and
 * reacquired before returning, since the caller unlocks afterwards.
 *
 * NOTE(review): the tfile reference is only dropped when
 * @refcount_release is non-NULL — looks intentional (objects without a
 * release callback are presumably embedded), but confirm.
 */
static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
	    container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	write_unlock(&tdev->object_lock);
	if (base->refcount_release) {
		ttm_object_file_unref(&base->tfile);
		base->refcount_release(&base);
	}
	write_lock(&tdev->object_lock);
}

/*
 * Drop a reference on *@p_base and clear the caller's pointer.
 * The final put runs ttm_release_base() with the object lock held.
 */
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	*p_base = NULL;

	/*
	 * Need to take the lock here to avoid racing with
	 * users trying to look up the object.
	 */

	write_lock(&tdev->object_lock);
	kref_put(&base->refcount, ttm_release_base);
	write_unlock(&tdev->object_lock);
}
EXPORT_SYMBOL(ttm_base_object_unref);

/*
 * Look up a base object by its global name @key and return it with a
 * reference held, or NULL if not found.
 *
 * Minimal security check: an object that is not marked shareable may only
 * be looked up by the ttm_object_file that created it.
 */
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct drm_hash_item *hash;
	int ret;

	read_lock(&tdev->object_lock);
	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		/* Grab the reference under the lock so the object cannot be
		 * released between lookup and get. */
		kref_get(&base->refcount);
	}
	read_unlock(&tdev->object_lock);

	if (unlikely(ret != 0))
		return NULL;

	if (tfile != base->tfile && !base->shareable) {
		printk(KERN_ERR TTM_PFX
		       "Attempted access of non-shareable object.\n");
		ttm_base_object_unref(&base);
		return NULL;
	}

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);

/*
 * Add a reference of type @ref_type from @tfile to @base, creating the
 * ref object if needed. If a ref object already exists its refcount is
 * simply bumped; otherwise a new one is allocated (accounted against the
 * global memory accounting), hashed, and put on the file's ref_list, and
 * a reference is taken on @base. If @existed is non-NULL it is set to
 * whether a ref object was already present.
 *
 * Returns 0 on success, or a negative error code from memory accounting
 * or allocation.
 */
int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	int ret = -EINVAL;

	if (existed != NULL)
		*existed = true;

	/*
	 * Optimistic concurrency: look up under the read lock first; if we
	 * lose a race and our insert below collides with another thread's
	 * (drm_ht_insert_item returns -EINVAL on a duplicate key), free our
	 * candidate and loop back to find the winner's entry.
	 */
	while (ret == -EINVAL) {
		read_lock(&tfile->lock);
		ret = drm_ht_find_item(ht, base->hash.key, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			kref_get(&ref->kref);
			read_unlock(&tfile->lock);
			break;
		}

		read_unlock(&tfile->lock);
		/* Account before allocating; GFP_KERNEL may sleep, so this
		 * must happen outside the lock. */
		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   false, false);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		write_lock(&tfile->lock);
		ret = drm_ht_insert_item(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			kref_get(&base->refcount);
			write_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		write_unlock(&tfile->lock);
		/* Only a duplicate-key collision is expected here. */
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);

/*
 * Kref release callback for a ref object.
 *
 * Called (via kref_put, or directly from ttm_object_file_release) with
 * tfile->lock write-held. The entry is unhashed and unlisted under the
 * lock, then the lock is dropped around the @ref_obj_release callback,
 * the base-object unref and the (possibly sleeping) free/unaccounting,
 * and reacquired before returning for the caller's unlock.
 */
static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
	    container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item(ht, &ref->hash);
	list_del(&ref->head);
	write_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree(ref);
	write_lock(&tfile->lock);
}

/*
 * Drop one @ref_type reference from @tfile on the base object named by
 * @key. Returns 0 on success or -EINVAL if no such ref object exists.
 */
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	write_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		write_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	write_unlock(&tfile->lock);
	return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);

/*
 * Release all ref objects held by *@p_tfile (typically on file close),
 * tear down its hash tables and drop the caller's tfile reference.
 *
 * NOTE(review): ttm_ref_object_release() is called directly here rather
 * than through kref_put(), i.e. each ref object is destroyed regardless
 * of its kref count — looks intentional for forced cleanup at file
 * release; confirm no other holders of these ref objects can exist here.
 */
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	write_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	write_unlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);

/*
 * Allocate and initialize a ttm_object_file for @tdev, with one ref hash
 * table of order @hash_order per ttm_ref_type.
 *
 * Returns the new file object (refcount 1), or NULL on allocation or
 * hash-creation failure; on failure any already-created hash tables are
 * torn down again.
 */
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
					     unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	rwlock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			/* Remember how many tables were created so the
			 * error path only removes those. */
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);

/*
 * Allocate and initialize the per-device object management structure,
 * with a global-name hash table of order @hash_order.
 *
 * Returns the new device object, or NULL on failure.
 */
struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
						 *mem_glob,
						 unsigned int hash_order)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	rwlock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);

	if (likely(ret == 0))
		return tdev;

	kfree(tdev);
	return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);

/*
 * Tear down and free *@p_tdev, clearing the caller's pointer. All base
 * objects are expected to be gone by now; only the (empty) hash table is
 * removed, under the lock to be safe against stragglers.
 */
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	write_lock(&tdev->object_lock);
	drm_ht_remove(&tdev->object_hash);
	write_unlock(&tdev->object_lock);

	kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);