/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

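/* GEM object destructor: tear down any PRIME import attachment, release
 * the GEM core state, and drop the underlying TTM buffer reference,
 * clearing gem->filp first so nouveau_bo_del_ttm() can tell a GEM-backed
 * buffer from an internal one.
 */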
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);
}

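/* Called when a client's file handle gains a reference to this object.
 * On chips with per-client address spaces (cli->vm set), look up the
 * client's VMA for this buffer and take a reference, creating the
 * mapping on first use.
 */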
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

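/* Fence-work callback: invoked once the buffer is idle, when it is
 * finally safe to unmap and free the VMA.
 */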
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

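/* Release a VMA whose refcount has dropped to zero.  If the buffer is
 * still resident in GPU-accessible memory, the unmap must wait for
 * pending fences: with several shared fences we block here, while with
 * a single fence (shared or exclusive) the teardown is deferred to
 * fence completion via nouveau_gem_object_delete().
 */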
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
}

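/* Called when a client's file handle drops its reference to this
 * object: release the client's VMA once its refcount reaches zero.
 */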
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

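/* Allocate a buffer object with the requested placement and initialize
 * its embedded GEM object.  On success the caller holds a single GEM
 * reference, which in turn owns the TTM reference.
 */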
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}

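/* Fill out the GEM_INFO reply for a buffer: domain, size, mmap offset,
 * tiling state, and either the per-client virtual address (when a VM is
 * in use) or the buffer's physical offset.
 */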
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

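/* GEM_NEW ioctl: validate the requested tiling against the memory
 * controller, allocate the buffer, and return a handle plus its info
 * block to userspace.
 */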
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}

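/* Work out a TTM placement for a pushbuf buffer from the domains
 * userspace declared valid, preferring the domain the buffer already
 * lives in so validation avoids a needless migration.
 */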
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

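/* Per-submission validation state: the list of reserved buffers and the
 * ww-mutex acquire context used to lock them without deadlocking
 * against concurrent submissions.
 */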
struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

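/* Back out of a validated list: attach the completion fence (if any) to
 * each buffer, drop reloc kmaps, then unreserve and unreference
 * everything.
 */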
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

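/* Look up and reserve every buffer in the submission, sorting each onto
 * a VRAM-only, GART-only, or either-domain list.  On -EDEADLK all
 * reservations are backed out, the contended buffer is re-acquired via
 * the ww-mutex slowpath, and the whole list is retried.
 */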
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(error, cli, "fail reserve\n");
				break;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, NULL, NULL);
	return ret;
}

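/* Validate each reserved buffer: apply the requested placement, have
 * TTM (re)validate its backing storage, and synchronize with the
 * channel.  On pre-Tesla chips a buffer that moved gets its new
 * presumed offset/domain copied back to userspace.  Returns the number
 * of buffers needing relocation, or a negative error code.
 */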
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(error, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(error, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

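/* Reserve and validate the submission's buffer list.  On success
 * *apply_relocs is the count of buffers whose presumed addresses were
 * stale, i.e. whether relocations must be applied before submission.
 */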
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validating bo list\n");
		validate_fini(op, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

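/* Helpers for copying variable-length ioctl arrays in from userspace,
 * falling back from kmalloc to vmalloc for large allocations.  All
 * callers in this file bound nmemb before calling, so the size
 * multiplication cannot overflow.
 */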
static inline void
u_free(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

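/* Patch relocations into buffers whose presumed GPU addresses were
 * stale.  Each reloc names the buffer being patched (reloc_bo_index),
 * the buffer whose address gets written (bo_index), and flags selecting
 * the low or high 32 bits plus optional domain-dependent OR masks.  The
 * patched buffer is kmapped on demand and idled before the CPU write.
 */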
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(error, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(error, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(error, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
		if (ret) {
			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

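/* PUSHBUF ioctl: the command submission path.  Find the target channel,
 * copy in the push and buffer arrays, validate (and if needed relocate)
 * every buffer, submit the pushes with whatever method the hardware
 * supports, then fence the submission.
 */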
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(error, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

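	/* Submit using one of three methods, newest hardware first:
	 * indirect-branch entries when the channel has an IB ring
	 * (chan->dma.ib_max), the "call" method on chipset 0x25 and up,
	 * or jump commands on older chips, which also require patching a
	 * jump back to the master ring into the tail of each user
	 * pushbuf.
	 */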
	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(error, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence, bo);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

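/* Translate NOUVEAU_GEM domain flags into TTM placement flags.  Appears
 * unused within this file.
 */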
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

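/* CPU_PREP ioctl: wait for GPU access to a buffer to finish before the
 * CPU touches it, with a 30 second timeout, or merely poll when NOWAIT
 * is set.  Write access waits on all fences; reads wait only on the
 * exclusive fence.
 */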
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (no_wait)
		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
	else {
		long lret;

		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;
	}
	drm_gem_object_unreference_unlocked(gem);

	return ret;
}

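/* CPU_FINI ioctl: nothing to do in this implementation. */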
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

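/* GEM_INFO ioctl: look up a handle and report the buffer's placement,
 * size, offsets, and tiling state.
 */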
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}