/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

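/*
 * Static TTM placement descriptors. Each struct ttm_place below describes a
 * single memory type (system, VRAM, GMR or MOB) the device can place a
 * buffer object in; the struct ttm_placement tables combine them into the
 * placement and busy_placement lists handed to TTM.
 */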
static struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};

static struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset, in pages, into the page table at which to start
 * iterating.
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
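
/*
 * Typical iteration pattern, mirroring the region-counting loop in
 * vmw_ttm_map_dma() below: start the iterator, then advance it once
 * before reading each DMA address.
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t cur = vmw_piter_dma_addr(&iter);
 *	}
 */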

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage for
 * the device mappings. If a mapping has already been performed, indicated
 * by the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
					   true);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}


/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Tear down any device mappings for buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}


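/**
 * vmw_ttm_bind - TTM backend bind callback
 *
 * @ttm: Pointer to the struct ttm_tt to bind.
 * @bo_mem: The memory region to bind to; its start is used as the
 * GMR or MOB id.
 *
 * Makes sure the pages are DMA-mapped and then binds them to a GMR or a
 * MOB depending on the memory type, creating the MOB on first use.
 */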
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

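/**
 * vmw_ttm_unbind - TTM backend unbind callback
 *
 * @ttm: Pointer to the struct ttm_tt to unbind.
 *
 * Unbinds the pages from the GMR or MOB they were bound to, and tears down
 * the DMA mappings if the driver runs in the vmw_dma_map_bind mode.
 */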
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}


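/**
 * vmw_ttm_destroy - TTM backend destroy callback
 *
 * @ttm: Pointer to the struct ttm_tt to destroy.
 *
 * Tears down any remaining DMA mappings, finalizes the (dma) ttm and
 * destroys the MOB, if any, before freeing the backend itself.
 */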
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


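/**
 * vmw_ttm_populate - TTM populate callback
 *
 * @ttm: Pointer to the struct ttm_tt to populate.
 *
 * Allocates backing pages for the ttm. In the vmw_dma_alloc_coherent mode
 * the DMA page pool is used and the DMA address array is accounted against
 * the TTM memory global; otherwise the ordinary page pool is used.
 */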
static int vmw_ttm_populate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, false, true);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm);

	return ret;
}

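/**
 * vmw_ttm_unpopulate - TTM unpopulate callback
 *
 * @ttm: Pointer to the struct ttm_tt to unpopulate.
 *
 * Destroys the MOB, if any, tears down the DMA mappings and releases the
 * backing pages to the pool they were allocated from.
 */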
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);


	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

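/**
 * vmw_ttm_tt_create - TTM device ttm_tt_create callback
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @size: Size of the backing store in bytes.
 * @page_flags: TTM page flags.
 * @dummy_read_page: Dummy page passed through to TTM initialization.
 *
 * Allocates and initializes a struct vmw_ttm_tt, using a DMA-aware ttm
 * when the driver runs in the vmw_dma_alloc_coherent mode. Returns NULL
 * on failure.
 */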
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
				 unsigned long size, uint32_t page_flags,
				 struct page *dummy_read_page)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
				      dummy_read_page);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
				  dummy_read_page);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

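/**
 * vmw_init_mem_type - TTM memory type initialization callback
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @type: The memory type to initialize.
 * @man: The memory type manager to set up.
 *
 * Sets up the flags, caching modes and manager function for the system,
 * VRAM and the driver-private GMR and MOB memory types.
 */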
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 *  one slot per bo. There is an upper limit on both the
		 *  number of slots and the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

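/**
 * vmw_evict_flags - TTM evict_flags callback
 *
 * @bo: The buffer object being evicted.
 * @placement: Placement to fill in.
 *
 * Evicted buffers are always placed in system memory.
 */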
static void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

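/**
 * vmw_verify_access - TTM verify_access callback
 *
 * @bo: The buffer object to check access for.
 * @filp: The file requesting access.
 *
 * Checks that the file in question is allowed to access the user-space
 * dma buffer object backing @bo.
 */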
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_dmabuf_verify_access(bo, tfile);
}

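/**
 * vmw_ttm_io_mem_reserve - TTM io_mem_reserve callback
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @mem: The memory region to set up bus placement information for.
 *
 * Fills in the bus placement. Only VRAM is iomem, based at the device VRAM
 * start; system, GMR and MOB memory need no io space.
 */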
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *mem)
{
	vmw_resource_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo:             The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	ttm_bo_wait(bo, false, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};