/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

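/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Unused by this helper; kept for move-function interface symmetry.
 * @no_wait_gpu: Unused by this helper; kept for interface symmetry.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Move a buffer between two placements that are both backed by the TTM
 * page array: unbind from the old placement (releasing its node), adjust
 * the caching state of the pages, and rebind at the new placement. On
 * success, @bo->mem is updated and @new_mem->mm_node is set to NULL; on
 * failure the buffer is left in system placement.
 * Returns:
 * !0: Failure.
 */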
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

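/*
 * io_reserve locking: drivers that set man->io_reserve_fastpath promise
 * that io_mem_reserve() is cheap and never needs eviction, so both the
 * mutex and the reservation refcount are bypassed. For all other
 * drivers, io_mem_reserve()/io_mem_free() calls and the io_reserve LRU
 * are serialized through man->io_reserve_mutex.
 */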
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

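/*
 * Evict the bo at the head of the io_reserve LRU: tear down its CPU
 * mappings so its I/O space reservation can be released. Called with
 * the io_reserve mutex held. Returns -EAGAIN if there is nothing left
 * to evict.
 */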
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

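/**
 * ttm_mem_io_reserve
 *
 * @bdev: Pointer to the buffer object device.
 * @mem: The memory region to reserve I/O space for.
 *
 * Ask the driver to reserve bus address space for @mem. On the slow
 * path the reservation is refcounted, and -EAGAIN from the driver
 * triggers eviction of idle I/O reservations until the request
 * succeeds or nothing is left to evict.
 * Returns:
 * !0: Failure.
 */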
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

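/**
 * ttm_mem_io_free
 *
 * @bdev: Pointer to the buffer object device.
 * @mem: The memory region to release I/O space for.
 *
 * Drop one reference to the I/O space reservation of @mem, and call the
 * driver's io_mem_free() when the last reference goes away. A no-op on
 * the fastpath.
 */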
void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

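/*
 * Reserve I/O space for CPU faulting into @bo, and put the bo on the
 * io_reserve LRU (when the manager uses one) so the reservation can be
 * reclaimed by ttm_mem_io_evict(). The io_reserved_vm flag keeps the
 * _vm variants balanced: the reservation and LRU insertion happen at
 * most once until ttm_mem_io_free_vm() runs.
 */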
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

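/*
 * Map an entire memory region into kernel address space, either by
 * using a driver-provided linear mapping (mem->bus.addr) or by
 * ioremapping the bus range with the caching mode implied by the
 * placement flags. *virtual is left NULL for non-iomem regions.
 */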
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

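/*
 * Copy one page between two I/O mappings, 32 bits at a time, going
 * through the io accessors so the copy is safe for __iomem space.
 * @page is the page index into both mappings.
 */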
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

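/*
 * The next two helpers copy a single page between an I/O mapping and a
 * TTM page. The TTM page must be mapped with a protection that matches
 * its eventual user-space mapping: on x86, kmap_atomic_prot() provides
 * that directly; elsewhere, a non-default protection forces a
 * single-page vmap(), and plain kmap() is used otherwise.
 */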
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

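/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: Unused by this helper; kept for move-function interface symmetry.
 * @no_wait_gpu: Unused by this helper; kept for interface symmetry.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for when the driver cannot move the buffer
 * with a hardware blit: map both regions and copy page by page, back
 * to front if the regions overlap. A move from unpopulated TTM pages
 * degenerates to a clear of the destination. On error the old mm node
 * is kept, so the move can simply be retried.
 * Returns:
 * !0: Failure.
 */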
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages * PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

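/**
 * ttm_io_prot
 *
 * @caching_flags: The caching flags of the map.
 * @tmp: Page protection flags for a normal, cached mapping.
 *
 * Derive the page protection for a user-space or vmap mapping from the
 * placement caching flags: cached mappings pass through unchanged,
 * while write-combined and uncached placements get the architecture's
 * corresponding pgprot modifier where one exists.
 */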
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

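/*
 * kmap helper for iomem placements: use the driver's premapped kernel
 * address when available, otherwise ioremap the requested sub-range
 * with write-combined or uncached semantics as dictated by the
 * placement.
 */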
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

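/*
 * kmap helper for system-memory placements: a single cached page can
 * use plain kmap(); anything needing a special page protection or a
 * virtually contiguous view of several pages goes through vmap().
 */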
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

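/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: Mapping object to fill in.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap as
 * appropriate to the placement, and records the mapping type in @map
 * so that ttm_bo_kunmap() can undo it. The buffer must not be swapped
 * out and its placement must stay stable (normally guaranteed by
 * keeping the bo reserved) for the lifetime of the mapping.
 *
 * A minimal caller sketch, assuming a reserved @bo:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virt;
 *
 *	if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *		virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access virt, using the _io accessors if is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */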
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

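/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to take down.
 *
 * Tears down a mapping set up by ttm_bo_kmap(), dispatching on the
 * recorded mapping type, and drops the I/O space reservation taken
 * when the map was created.
 */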
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

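/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when the accelerated move
 * has completed.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Unused by this helper; kept for interface symmetry.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Cleanup function to be called once an accelerated move has been
 * scheduled. For evictions, the buffer is waited on and the old node
 * freed. Otherwise a ghost buffer object is created to hold the old
 * placement and the exclusive fence, and is unreferenced to be
 * destroyed once the move completes, which helps pipeline buffer
 * moves.
 * Returns:
 * !0: Failure.
 */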
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct fence *fence,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/*
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/*
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);