/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>

#include "nouveau_private.h"

#define PB_BUFMGR_DWORDS   (4096 / 2)
#define PB_MIN_USER_DWORDS  2048

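/* Calculate the value to patch into the push buffer for relocation 'r',
 * based on the presumed offset/domain of the buffer object it references.
 */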
static uint32_t
nouveau_pushbuf_calc_reloc(struct drm_nouveau_gem_pushbuf_bo *pbbo,
			   struct drm_nouveau_gem_pushbuf_reloc *r)
{
	uint32_t push = 0;

	if (r->flags & NOUVEAU_GEM_RELOC_LOW)
		push = (pbbo->presumed_offset + r->data);
	else
	if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
		push = (pbbo->presumed_offset + r->data) >> 32;
	else
		push = r->data;

	if (r->flags & NOUVEAU_GEM_RELOC_OR) {
		if (pbbo->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM)
			push |= r->vor;
		else
			push |= r->tor;
	}

	return push;
}

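/* Record a relocation against 'bo' at location 'ptr' in the current push
 * buffer, and write a provisional value there based on the buffer's
 * presumed placement.
 */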
int
nouveau_pushbuf_emit_reloc(struct nouveau_channel *chan, void *ptr,
			   struct nouveau_bo *bo, uint32_t data, uint32_t data2,
			   uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
	struct drm_nouveau_gem_pushbuf_reloc *r;
	struct drm_nouveau_gem_pushbuf_bo *pbbo;
	uint32_t domains = 0;

	if (nvpb->nr_relocs >= NOUVEAU_GEM_MAX_RELOCS) {
		fprintf(stderr, "too many relocs!!\n");
		assert(0);
		return -ENOMEM;
	}

	if (nouveau_bo(bo)->user && (flags & NOUVEAU_BO_WR)) {
		fprintf(stderr, "write to user buffer!!\n");
		return -EINVAL;
	}

	pbbo = nouveau_bo_emit_buffer(chan, bo);
	if (!pbbo) {
		fprintf(stderr, "buffer emit fail :(\n");
		assert(0);
		return -ENOMEM;
	}

	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	pbbo->valid_domains &= domains;
	assert(pbbo->valid_domains);

	assert(flags & NOUVEAU_BO_RDWR);
	if (flags & NOUVEAU_BO_RD) {
		pbbo->read_domains |= domains;
	}
	if (flags & NOUVEAU_BO_WR) {
		pbbo->write_domains |= domains;
		nouveau_bo(bo)->write_marker = 1;
	}

	r = nvpb->relocs + nvpb->nr_relocs++;
	r->bo_index = pbbo - nvpb->buffers;
	r->reloc_index = (uint32_t *)ptr - nvpb->pushbuf;
	r->flags = 0;
	if (flags & NOUVEAU_BO_LOW)
		r->flags |= NOUVEAU_GEM_RELOC_LOW;
	if (flags & NOUVEAU_BO_HIGH)
		r->flags |= NOUVEAU_GEM_RELOC_HIGH;
	if (flags & NOUVEAU_BO_OR)
		r->flags |= NOUVEAU_GEM_RELOC_OR;
	r->data = data;
	r->vor = vor;
	r->tor = tor;

	*(uint32_t *)ptr = (flags & NOUVEAU_BO_DUMMY) ? 0 :
		nouveau_pushbuf_calc_reloc(pbbo, r);
	return 0;
}

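/* Ensure at least 'min' dwords are available in the current call push
 * buffer, advancing to the next buffer in the CALPB ring when the current
 * one cannot hold them plus the two-dword call suffix.
 */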
static int
nouveau_pushbuf_space_call(struct nouveau_channel *chan, unsigned min)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	struct nouveau_bo *bo;
	int ret;

	if (min < PB_MIN_USER_DWORDS)
		min = PB_MIN_USER_DWORDS;

	nvpb->current_offset = nvpb->base.cur - nvpb->pushbuf;
	if (nvpb->current_offset + min + 2 <= nvpb->size)
		return 0;

	nvpb->current++;
	if (nvpb->current == CALPB_BUFFERS)
		nvpb->current = 0;
	bo = nvpb->buffer[nvpb->current];

	ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
	if (ret)
		return ret;

	nvpb->size = (bo->size - 8) / 4;
	nvpb->pushbuf = bo->map;
	nvpb->current_offset = 0;

	nvpb->base.channel = chan;
	nvpb->base.remaining = nvpb->size;
	nvpb->base.cur = nvpb->pushbuf;

	nouveau_bo_unmap(bo);
	return 0;
}

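/* Ensure at least 'min' dwords of push buffer space, using either the
 * call-based ring buffers or a freshly allocated user buffer.
 */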
static int
nouveau_pushbuf_space(struct nouveau_channel *chan, unsigned min)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;

	if (nvpb->use_cal)
		return nouveau_pushbuf_space_call(chan, min);

	if (nvpb->pushbuf) {
		free(nvpb->pushbuf);
		nvpb->pushbuf = NULL;
	}

	nvpb->size = min < PB_MIN_USER_DWORDS ? PB_MIN_USER_DWORDS : min;
	nvpb->pushbuf = malloc(sizeof(uint32_t) * nvpb->size);
	if (!nvpb->pushbuf)
		return -ENOMEM;

	nvpb->base.channel = chan;
	nvpb->base.remaining = nvpb->size;
	nvpb->base.cur = nvpb->pushbuf;

	return 0;
}

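/* Release the CALPB ring buffers and fall back to user push buffers. */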
static void
nouveau_pushbuf_fini_call(struct nouveau_channel *chan)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	int i;

	for (i = 0; i < CALPB_BUFFERS; i++)
		nouveau_bo_ref(NULL, &nvpb->buffer[i]);
	nvpb->use_cal = 0;
	nvpb->pushbuf = NULL;
}

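/* Query the kernel's PUSHBUF_CALL interface for the call suffix values and,
 * if available, allocate the ring of GART buffers used for it.
 */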
static void
nouveau_pushbuf_init_call(struct nouveau_channel *chan)
{
	struct drm_nouveau_gem_pushbuf_call req;
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	struct nouveau_device *dev = chan->device;
	int i, ret;

	req.channel = chan->id;
	req.handle = 0;
	ret = drmCommandWriteRead(nouveau_device(dev)->fd,
				  DRM_NOUVEAU_GEM_PUSHBUF_CALL,
				  &req, sizeof(req));
	if (ret)
		return;

	for (i = 0; i < CALPB_BUFFERS; i++) {
		ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
				     0, CALPB_BUFSZ, &nvpb->buffer[i]);
		if (ret) {
			nouveau_pushbuf_fini_call(chan);
			return;
		}
	}

	nvpb->use_cal = 1;
	nvpb->cal_suffix0 = req.suffix0;
	nvpb->cal_suffix1 = req.suffix1;
}

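/* Set up the channel's push buffer: try the call path first, fall back to
 * user buffers, then allocate the buffer/reloc tracking arrays.
 */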
int
nouveau_pushbuf_init(struct nouveau_channel *chan)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	int ret;

	nouveau_pushbuf_init_call(chan);

	ret = nouveau_pushbuf_space(chan, 0);
	if (ret) {
		if (nvpb->use_cal) {
			nouveau_pushbuf_fini_call(chan);
			ret = nouveau_pushbuf_space(chan, 0);
		}

		if (ret)
			return ret;
	}

	nvpb->buffers = calloc(NOUVEAU_GEM_MAX_BUFFERS,
			       sizeof(struct drm_nouveau_gem_pushbuf_bo));
	nvpb->relocs = calloc(NOUVEAU_GEM_MAX_RELOCS,
			      sizeof(struct drm_nouveau_gem_pushbuf_reloc));
	if (!nvpb->buffers || !nvpb->relocs) {
		free(nvpb->buffers);
		free(nvpb->relocs);
		return -ENOMEM;
	}

	chan->pushbuf = &nvpb->base;
	return 0;
}

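/* Submit the accumulated push buffer to the kernel, update buffer objects
 * with the placements chosen during validation, and reserve space for
 * further commands.
 */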
int
nouveau_pushbuf_flush(struct nouveau_channel *chan, unsigned min)
{
	struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	unsigned i;
	int ret;

	if (nvpb->base.remaining == nvpb->size)
		return 0;

	if (nvpb->use_cal) {
		struct drm_nouveau_gem_pushbuf_call req;

		*(nvpb->base.cur++) = nvpb->cal_suffix0;
		*(nvpb->base.cur++) = nvpb->cal_suffix1;
		if (nvpb->base.remaining > 2) /* space() will fixup if not */
			nvpb->base.remaining -= 2;

		req.channel = chan->id;
		req.handle = nvpb->buffer[nvpb->current]->handle;
		req.offset = nvpb->current_offset * 4;
		req.nr_buffers = nvpb->nr_buffers;
		req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
		req.nr_relocs = nvpb->nr_relocs;
		req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
		req.nr_dwords = (nvpb->base.cur - nvpb->pushbuf) -
				nvpb->current_offset;
		req.suffix0 = nvpb->cal_suffix0;
		req.suffix1 = nvpb->cal_suffix1;
		ret = drmCommandWriteRead(nvdev->fd,
					  DRM_NOUVEAU_GEM_PUSHBUF_CALL,
					  &req, sizeof(req));
		nvpb->cal_suffix0 = req.suffix0;
		nvpb->cal_suffix1 = req.suffix1;
		assert(ret == 0);
	} else {
		struct drm_nouveau_gem_pushbuf req;

		req.channel = chan->id;
		req.nr_dwords = nvpb->size - nvpb->base.remaining;
		req.dwords = (uint64_t)(unsigned long)nvpb->pushbuf;
		req.nr_buffers = nvpb->nr_buffers;
		req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
		req.nr_relocs = nvpb->nr_relocs;
		req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				      &req, sizeof(req));
		assert(ret == 0);
	}

	/* Update presumed offset/domain for any buffers that moved, and
	 * dereference all buffers on the validate list.
	 */
	for (i = 0; i < nvpb->nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *pbbo = &nvpb->buffers[i];
		struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;

		if (pbbo->presumed_ok == 0) {
			nouveau_bo(bo)->domain = pbbo->presumed_domain;
			nouveau_bo(bo)->offset = pbbo->presumed_offset;
		}

		nouveau_bo(bo)->pending = NULL;
		nouveau_bo_ref(NULL, &bo);
	}
	nvpb->nr_buffers = 0;
	nvpb->nr_relocs = 0;

	/* Allocate space for next push buffer */
	ret = nouveau_pushbuf_space(chan, min);
	assert(!ret);

	if (chan->flush_notify)
		chan->flush_notify(chan);

	return 0;
}