/* ttm_object.c — revision ae8df2ae8aa27bfeb8d1b99e8adaac5810a97fa8 */
1/**************************************************************************
2 *
3 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
/** @file ttm_object.c
31 *
32 * Base- and reference object implementation for the various
33 * ttm objects. Implements reference counting, minimal security checks
34 * and release on file close.
35 */
36
37/**
38 * struct ttm_object_file
39 *
40 * @tdev: Pointer to the ttm_object_device.
41 *
42 * @lock: Lock that protects the ref_list list and the
43 * ref_hash hash tables.
44 *
45 * @ref_list: List of ttm_ref_objects to be destroyed at
46 * file release.
47 *
48 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
49 * for fast lookup of ref objects given a base object.
50 */
51
52#define pr_fmt(fmt) "[TTM] " fmt
53
54#include <drm/ttm/ttm_object.h>
55#include <drm/ttm/ttm_module.h>
56#include <linux/list.h>
57#include <linux/spinlock.h>
58#include <linux/slab.h>
59#include <linux/module.h>
60#include <linux/atomic.h>
61
struct ttm_object_file {
	struct ttm_object_device *tdev;	/* Owning object device. */
	rwlock_t lock;			/* Protects ref_list and ref_hash. */
	struct list_head ref_list;	/* All ref objects of this file. */
	struct drm_open_hash ref_hash[TTM_REF_NUM];	/* One table per ref type. */
	struct kref refcount;		/* Lifetime of this struct itself. */
};
69
70/**
71 * struct ttm_object_device
72 *
73 * @object_lock: lock that protects the object_hash hash table.
74 *
75 * @object_hash: hash table for fast lookup of object global names.
76 *
77 * @object_count: Per device object count.
78 *
79 * This is the per-device data structure needed for ttm object management.
80 */
81
struct ttm_object_device {
	spinlock_t object_lock;			/* Protects object_hash. */
	struct drm_open_hash object_hash;	/* Global name -> base object. */
	atomic_t object_count;			/* Only initialized in this file — TODO confirm users. */
	struct ttm_mem_global *mem_glob;	/* Accounting pool for ref-object allocations. */
};
88
89/**
90 * struct ttm_ref_object
91 *
92 * @hash: Hash entry for the per-file object reference hash.
93 *
94 * @head: List entry for the per-file list of ref-objects.
95 *
96 * @kref: Ref count.
97 *
98 * @obj: Base object this ref object is referencing.
99 *
100 * @ref_type: Type of ref object.
101 *
102 * This is similar to an idr object, but it also has a hash table entry
103 * that allows lookup with a pointer to the referenced object as a key. In
104 * that way, one can easily detect whether a base object is referenced by
105 * a particular ttm_object_file. It also carries a ref count to avoid creating
106 * multiple ref objects if a ttm_object_file references the same base
107 * object more than once.
108 */
109
struct ttm_ref_object {
	struct drm_hash_item hash;	/* Keyed by the base object's global name. */
	struct list_head head;		/* Entry in tfile->ref_list. */
	struct kref kref;		/* Collapses repeated refs from one file. */
	enum ttm_ref_type ref_type;	/* Selects the per-type hash table. */
	struct ttm_base_object *obj;	/* Referenced base object (we hold a ref on it). */
	struct ttm_object_file *tfile;	/* File this ref object belongs to. */
};
118
119static inline struct ttm_object_file *
120ttm_object_file_ref(struct ttm_object_file *tfile)
121{
122	kref_get(&tfile->refcount);
123	return tfile;
124}
125
126static void ttm_object_file_destroy(struct kref *kref)
127{
128	struct ttm_object_file *tfile =
129		container_of(kref, struct ttm_object_file, refcount);
130
131	kfree(tfile);
132}
133
134
135static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
136{
137	struct ttm_object_file *tfile = *p_tfile;
138
139	*p_tfile = NULL;
140	kref_put(&tfile->refcount, ttm_object_file_destroy);
141}
142
143
/**
 * ttm_base_object_init - Initialize and register a caller-allocated base object.
 *
 * @tfile: File the object is associated with; a reference on it is taken.
 * @base: Caller-allocated base object to initialize.
 * @shareable: Whether files other than @tfile may look the object up.
 * @object_type: Object type tag stored on the object.
 * @refcount_release: Called from ttm_release_base() when the last base
 * reference goes away.
 * @ref_obj_release: Called when a non-USAGE ref object on this base is
 * released.
 *
 * Inserts the object into the device-global name hash and adds an initial
 * TTM_REF_USAGE ref object for @tfile; that ref object then keeps the base
 * alive, and the local init reference is dropped before returning.
 *
 * Returns 0 on success, negative error code on failure.
 */
int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	/* The base object holds a reference on its owning file. */
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	spin_lock(&tdev->object_lock);
	kref_init(&base->refcount);
	/* Assign a unique 31-bit global name derived from the pointer value. */
	ret = drm_ht_just_insert_please(&tdev->object_hash,
					&base->hash,
					(unsigned long)base, 31, 0, 0);
	spin_unlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	/* The USAGE ref takes its own reference on the base object. */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	/* Drop the init reference; the ref object now keeps the base alive. */
	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	spin_unlock(&tdev->object_lock);
out_err0:
	/*
	 * NOTE(review): the tfile reference taken above is not dropped on
	 * these error paths — presumably the caller's destroy path covers
	 * it; confirm there is no refcount leak here.
	 */
	return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
184
/*
 * ttm_release_base - kref release callback for the last base-object reference.
 *
 * Removes the object from the device-global name hash so no new lookups can
 * find it, then hands the object back to its creator via refcount_release.
 */
static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
	    container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	spin_unlock(&tdev->object_lock);
	if (base->refcount_release) {
		/* Drop the file reference taken in ttm_base_object_init(). */
		ttm_object_file_unref(&base->tfile);
		base->refcount_release(&base);
	}
	/*
	 * NOTE(review): with a NULL refcount_release, neither the tfile
	 * reference nor the object itself is released here — presumably
	 * every user supplies a release function; confirm.
	 */
}
199
200void ttm_base_object_unref(struct ttm_base_object **p_base)
201{
202	struct ttm_base_object *base = *p_base;
203
204	*p_base = NULL;
205
206	kref_put(&base->refcount, ttm_release_base);
207}
208EXPORT_SYMBOL(ttm_base_object_unref);
209
/**
 * ttm_base_object_lookup - look up a base object by its global name.
 *
 * @tfile: File performing the lookup, used for the shareable check.
 * @key: Global object name assigned at init time.
 *
 * Returns a referenced base object, or NULL if the name is unknown, the
 * object is concurrently going away, or it is non-shareable and owned by
 * a different file.
 */
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct drm_hash_item *hash;
	int ret;

	/*
	 * NOTE(review): the lookup runs under RCU, but removal in
	 * ttm_release_base() is only guarded by object_lock and the plain
	 * (non-_rcu) hash API is used here — verify the hash entries are
	 * actually safe to traverse within this RCU section.
	 */
	rcu_read_lock();
	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		/* Refuse objects whose final unref already reached zero. */
		ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
	}
	rcu_read_unlock();

	if (unlikely(ret != 0))
		return NULL;

	/* Non-shareable objects may only be looked up by their owning file. */
	if (tfile != base->tfile && !base->shareable) {
		pr_err("Attempted access of non-shareable object\n");
		ttm_base_object_unref(&base);
		return NULL;
	}

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
239
/**
 * ttm_ref_object_add - add (or reuse) a ref object for @base in @tfile.
 *
 * @tfile: File that should hold the reference.
 * @base: Base object being referenced.
 * @ref_type: Kind of reference; selects the per-type hash table.
 * @existed: Optional out-parameter; set to false only when a brand-new
 * ref object was created, true otherwise.
 *
 * If @tfile already holds a ref of this type on @base, its kref is simply
 * bumped. Otherwise a new ref object is accounted, allocated and inserted,
 * taking its own reference on @base.
 *
 * Returns 0 on success, negative error code on failure.
 */
int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	int ret = -EINVAL;

	if (existed != NULL)
		*existed = true;

	/*
	 * Optimistic retry loop: a lost insertion race yields -EINVAL from
	 * drm_ht_insert_item() and restarts the lookup.
	 */
	while (ret == -EINVAL) {
		read_lock(&tfile->lock);
		ret = drm_ht_find_item(ht, base->hash.key, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			/*
			 * Safe under the read lock: the final kref_put runs
			 * with the write lock held (ttm_ref_object_base_unref),
			 * so the ref cannot reach zero concurrently.
			 */
			kref_get(&ref->kref);
			read_unlock(&tfile->lock);
			break;
		}

		read_unlock(&tfile->lock);
		/* Account and allocate outside any lock; both may sleep. */
		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   false, false);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		write_lock(&tfile->lock);
		ret = drm_ht_insert_item(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			/* The new ref object pins the base object. */
			kref_get(&base->refcount);
			write_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		/* Someone inserted the same key meanwhile; free ours, retry. */
		write_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
303
/*
 * ttm_ref_object_release - kref release callback for a ref object.
 *
 * Called with tfile->lock write-held (all kref_put() callers hold it).
 * Unhashes and unlinks the ref while still locked, then temporarily drops
 * the lock around the release callback and the frees (which may sleep),
 * re-acquiring it before returning so the caller's write_unlock() stays
 * balanced.
 */
static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
	    container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item(ht, &ref->hash);
	list_del(&ref->head);
	write_unlock(&tfile->lock);

	/* USAGE refs have no per-type release hook; others may. */
	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	/* Drop the base-object reference this ref object was holding. */
	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree(ref);
	write_lock(&tfile->lock);
}
326
327int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
328			      unsigned long key, enum ttm_ref_type ref_type)
329{
330	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
331	struct ttm_ref_object *ref;
332	struct drm_hash_item *hash;
333	int ret;
334
335	write_lock(&tfile->lock);
336	ret = drm_ht_find_item(ht, key, &hash);
337	if (unlikely(ret != 0)) {
338		write_unlock(&tfile->lock);
339		return -EINVAL;
340	}
341	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
342	kref_put(&ref->kref, ttm_ref_object_release);
343	write_unlock(&tfile->lock);
344	return 0;
345}
346EXPORT_SYMBOL(ttm_ref_object_base_unref);
347
/**
 * ttm_object_file_release - tear down a file's ref objects on file close.
 *
 * @p_tfile: Pointer to the caller's file pointer; cleared on entry.
 *
 * Releases every remaining ref object of the file (which in turn drops
 * their base-object references), removes the per-type hash tables, and
 * drops the caller's file reference.
 */
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	write_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		/* Drops and re-takes tfile->lock internally. */
		ttm_ref_object_release(&ref->kref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	write_unlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
376
377struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
378					     unsigned int hash_order)
379{
380	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
381	unsigned int i;
382	unsigned int j = 0;
383	int ret;
384
385	if (unlikely(tfile == NULL))
386		return NULL;
387
388	rwlock_init(&tfile->lock);
389	tfile->tdev = tdev;
390	kref_init(&tfile->refcount);
391	INIT_LIST_HEAD(&tfile->ref_list);
392
393	for (i = 0; i < TTM_REF_NUM; ++i) {
394		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
395		if (ret) {
396			j = i;
397			goto out_err;
398		}
399	}
400
401	return tfile;
402out_err:
403	for (i = 0; i < j; ++i)
404		drm_ht_remove(&tfile->ref_hash[i]);
405
406	kfree(tfile);
407
408	return NULL;
409}
410EXPORT_SYMBOL(ttm_object_file_init);
411
412struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
413						 *mem_glob,
414						 unsigned int hash_order)
415{
416	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
417	int ret;
418
419	if (unlikely(tdev == NULL))
420		return NULL;
421
422	tdev->mem_glob = mem_glob;
423	spin_lock_init(&tdev->object_lock);
424	atomic_set(&tdev->object_count, 0);
425	ret = drm_ht_create(&tdev->object_hash, hash_order);
426
427	if (likely(ret == 0))
428		return tdev;
429
430	kfree(tdev);
431	return NULL;
432}
433EXPORT_SYMBOL(ttm_object_device_init);
434
435void ttm_object_device_release(struct ttm_object_device **p_tdev)
436{
437	struct ttm_object_device *tdev = *p_tdev;
438
439	*p_tdev = NULL;
440
441	spin_lock(&tdev->object_lock);
442	drm_ht_remove(&tdev->object_hash);
443	spin_unlock(&tdev->object_lock);
444
445	kfree(tdev);
446}
447EXPORT_SYMBOL(ttm_object_device_release);
448