evergreen_compute.c revision e5a9bf55231aa14f6ae831a5c47d7176cb6c230b
/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
#include "r600.h"
#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
#include "llvm_wrapper.h"
#endif

/**
RAT0 is for global binding write
VTX1 is for global binding read

For writing images, RAT1... is used.
For reading images, TEX2... is used.
  TEX2-RAT1 is paired

TEX2... consumes the same fetch resources that VTX2... would consume.

CONST0 and VTX0 are for parameters:
  CONST0 binds the smaller input parameter buffer and is used for constant
  indexing; it is also constant-cached.
  VTX0 is for indirect/non-constant indexing, or when the input is bigger than
  the constant cache can handle.

RATs are limited to 12, so we can bind at most 11 textures for writing,
because we reserve RAT0 for global bindings. With byte addressing enabled,
we should reserve another one too, which leaves at most 10 image bindings
for writing.

From NVIDIA OpenCL:
  CL_DEVICE_MAX_READ_IMAGE_ARGS:        128
  CL_DEVICE_MAX_WRITE_IMAGE_ARGS:       8

So 10 for writing is enough. 176 is the maximum for reading according to the docs.

Writable images should be listed first (< 10), so their id corresponds to RAT(id+1).
Writable images also consume TEX slots, and VTX slots too, because of linear indexing.

*/
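/* A concrete reading of the scheme above (illustrative only, no extra
 * hardware state is implied): writable image 0 is bound as RAT1 and read
 * back through TEX2 (and VTX2 for linear indexing), writable image 1 as
 * RAT2 with TEX3/VTX3, and so on.  RAT0 and VTX1 stay reserved for writing
 * and reading the global memory pool, and CONST0/VTX0 for the kernel
 * parameters. */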

static void evergreen_cs_set_vertex_buffer(
	struct r600_context * rctx,
	unsigned vb_index,
	unsigned offset,
	struct pipe_resource * buffer)
{
	struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
	struct pipe_vertex_buffer *vb = &state->vb[vb_index];
	vb->stride = 1;
	vb->buffer_offset = offset;
	vb->buffer = buffer;
	vb->user_buffer = NULL;

	/* The vertex instructions in the compute shaders use the texture cache,
	 * so we need to invalidate it. */
	rctx->flags |= R600_CONTEXT_TEX_FLUSH;
	state->enabled_mask |= 1 << vb_index;
	state->dirty_mask |= 1 << vb_index;
	state->atom.dirty = true;
}

static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
	u_default_resource_get_handle, /* get_handle */
	r600_compute_global_buffer_destroy, /* resource_destroy */
	r600_compute_global_transfer_map, /* transfer_map */
	r600_compute_global_transfer_flush_region, /* transfer_flush_region */
	r600_compute_global_transfer_unmap, /* transfer_unmap */
	r600_compute_global_transfer_inline_write /* transfer_inline_write */
};


void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header * header;
	const unsigned char * code;
	unsigned i;

	COMPUTE_DBG("*** evergreen_create_compute_state\n");

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; /* TODO: assert it */
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
	shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);

	for (i = 0; i < shader->num_kernels; i++) {
		struct r600_kernel *kernel = &shader->kernels[i];
		kernel->llvm_module = llvm_get_kernel_module(i, code,
							header->num_bytes);
	}
#endif
	return shader;
}

void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
{
	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

	free(shader->resources);
	free(shader);
}

static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	COMPUTE_DBG("*** evergreen_bind_compute_state\n");

	ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}

/* The kernel parameters are stored in a vertex buffer (ID=0). Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well.  Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
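/* A minimal sketch of the layout described above, assuming 32-bit uints
 * (illustrative only; the code below writes these dwords by hand rather than
 * through such a struct):
 *
 *   struct compute_input_header {
 *           uint32_t num_work_groups[3]; // DWORDS 0-2, copied from grid_layout
 *           uint32_t global_size[3];     // DWORDS 3-5, grid_layout[i] * block_layout[i]
 *           uint32_t local_size[3];      // DWORDS 6-8, copied from block_layout
 *   };                                   // DWORDS 9+ : kernel parameters follow
 *
 * For example, block_layout = {64,1,1} with grid_layout = {16,1,1} yields
 * num_work_groups = {16,1,1}, global_size = {1024,1,1} and local_size =
 * {64,1,1} at the head of the buffer.
 */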
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	int i;
	unsigned kernel_parameters_offset_bytes = 36;
	uint32_t * num_work_groups_start;
	uint32_t * global_size_start;
	uint32_t * local_size_start;
	uint32_t * kernel_parameters_start;

	if (shader->input_size == 0) {
		return;
	}

	if (!shader->kernel_param) {
		unsigned buffer_size = shader->input_size;

		/* Add space for the grid dimensions */
		buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
		shader->kernel_param = r600_compute_buffer_alloc_vram(
						ctx->screen, buffer_size);
	}

	num_work_groups_start = ctx->ws->buffer_map(
		shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

	/* Copy the number of work groups */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, shader->input_size);

	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
					(shader->input_size / 4); i++) {
		COMPUTE_DBG("input %i : %i\n", i,
			((unsigned*)num_work_groups_start)[i]);
	}

	ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);

	/* ID=0 is reserved for the parameters */
	evergreen_cs_set_vertex_buffer(ctx, 0, 0,
			(struct pipe_resource*)shader->kernel_param);
	/* ID=0 is reserved for the parameters */
	evergreen_set_const_cache(shader, 0, shader->kernel_param,
						shader->input_size, 0);
}

static void evergreen_emit_direct_dispatch(
		struct r600_context *rctx,
		const uint *block_layout, const uint *grid_layout)
{
	int i;
	struct radeon_winsys_cs *cs = rctx->cs;
	unsigned num_waves;
	unsigned num_pipes = rctx->screen->info.r600_max_pipes;
	unsigned wave_divisor = (16 * num_pipes);
	int group_size = 1;
	int grid_size = 1;
	/* XXX: Enable lds and get size from cs_shader_state */
	unsigned lds_size = 0;

	/* Calculate group_size/grid_size */
	for (i = 0; i < 3; i++) {
		group_size *= block_layout[i];
	}

	for (i = 0; i < 3; i++) {
		grid_size *= grid_layout[i];
	}

	/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
			wave_divisor - 1) / wave_divisor;
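	/* Worked example of the rounding above (hypothetical numbers, purely
	 * for illustration): a 16x16x1 thread block on a part with 4 pipes has
	 * wave_divisor = 16 * 4 = 64, so num_waves = ceil(256 / 64) = 4. */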

	COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
							num_pipes, num_waves);

	/* XXX: Partition the LDS between PS/CS.  By default half (4096 dwords
	 * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
	 * We may need to allocate the entire LDS space for Compute Shaders.
	 *
	 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
	 * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
	 */

	r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);

	r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
	r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
	r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
	r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */

	r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
								group_size);

	r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
	r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
	r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
	r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */

	r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
					lds_size | (num_waves << 14));

	/* Dispatch packet */
	r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
	r600_write_value(cs, grid_layout[0]);
	r600_write_value(cs, grid_layout[1]);
	r600_write_value(cs, grid_layout[2]);
	/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
	r600_write_value(cs, 1);
}

static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
		const uint *grid_layout)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	unsigned flush_flags = 0;
	int i;

	struct r600_resource *onebo = NULL;
	struct evergreen_compute_resource *resources =
					ctx->cs_shader_state.shader->resources;

	/* Initialize all the compute-related registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_command_buffer(ctx->cs, &ctx->start_compute_cs_cmd);

	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

	/* Emit colorbuffers. */
	for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
		struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
		unsigned reloc = r600_context_bo_reloc(ctx, (struct r600_resource*)cb->base.texture,
						       RADEON_USAGE_READWRITE);

		r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
		r600_write_value(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
		r600_write_value(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
		r600_write_value(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
		r600_write_value(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
		r600_write_value(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, cb->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
		r600_write_value(cs, reloc);

		if (!ctx->keep_tiling_flags) {
			r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
			r600_write_value(cs, reloc);
		}

		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
		r600_write_value(cs, reloc);
	}

	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
					ctx->compute_cb_target_mask);

	/* Emit vertex buffer state */
	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);

	/* Emit compute shader state */
	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (resources[i].enabled) {
			int j;
			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < resources[i].cs_end; j++) {
				if (resources[i].do_reloc[j]) {
					assert(resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}

				cs->buf[cs->cdw++] = resources[i].cs[j];
			}

			if (resources[i].bo) {
				onebo = resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					resources[i].bo,
					resources[i].usage);

				/* Special case for textures */
				if (resources[i].do_reloc
					[resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						resources[i].bo,
						resources[i].usage);
				}
			}
		}
	}

	/* Emit dispatch state and dispatch packet */
	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);

	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff */
	ctx->flags |= R600_CONTEXT_CB_FLUSH;
	r600_flush_emit(ctx);

#if 0
	COMPUTE_DBG("cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
	if (ctx->keep_tiling_flags) {
		flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
	}

	ctx->ws->cs_flush(ctx->cs, flush_flags);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG("shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG("...\n");

	ctx->streamout_start = TRUE;
	ctx->streamout_append_bitmask = ~0;
}


/**
 * Emit function for r600_cs_shader_state atom
 */
void evergreen_emit_cs_shader(
		struct r600_context *rctx,
		struct r600_atom *atom)
{
	struct r600_cs_shader_state *state =
					(struct r600_cs_shader_state*)atom;
	struct r600_pipe_compute *shader = state->shader;
	struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
	struct radeon_winsys_cs *cs = rctx->cs;
	uint64_t va;

	va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);

	r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
	r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
	r600_write_value(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
			S_0288D4_NUM_GPRS(kernel->bc.ngpr)
			| S_0288D4_STACK_SIZE(kernel->bc.nstack));
	r600_write_value(cs, 0);	/* R_0288D8_SQ_PGM_RESOURCES_LS_2 */

	r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
	r600_write_value(cs, r600_context_bo_reloc(rctx, kernel->code_bo,
							RADEON_USAGE_READ));

	rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
}

static void evergreen_launch_grid(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

#ifdef HAVE_OPENCL
	COMPUTE_DBG("*** evergreen_launch_grid: pc = %u\n", pc);

	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
	if (!shader->kernels[pc].code_bo) {
		void *p;
		struct r600_kernel *kernel = &shader->kernels[pc];
		r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
		kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
							kernel->bc.ndw * 4);
		p = ctx->ws->buffer_map(kernel->code_bo->cs_buf, ctx->cs,
							PIPE_TRANSFER_WRITE);
		memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
		ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
	}
#endif

	ctx->cs_shader_state.kernel_index = pc;
	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
	compute_emit_cs(ctx, block_layout, grid_layout);
}

static void evergreen_set_compute_resources(struct pipe_context * ctx_,
		unsigned start, unsigned count,
		struct pipe_surface ** surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;

	COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
			start, count);

	for (int i = 0; i < count; i++) {
		/* The first two vertex buffers are reserved for parameters and
		 * global buffers. */
		unsigned vtx_id = 2 + i;
		if (resources[i]) {
			struct r600_resource_global *buffer =
				(struct r600_resource_global*)
				resources[i]->base.texture;
			if (resources[i]->base.writable) {
				assert(i+1 < 12);

				evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
				(struct r600_resource *)resources[i]->base.texture,
				buffer->chunk->start_in_dw*4,
				resources[i]->base.texture->width0);
			}

			evergreen_cs_set_vertex_buffer(ctx, vtx_id,
					buffer->chunk->start_in_dw * 4,
					resources[i]->base.texture);
		}
	}
}

static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
		unsigned start_slot, unsigned count,
		struct pipe_sampler_view **views)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_sampler_view **resource =
		(struct r600_pipe_sampler_view **)views;

	for (int i = 0; i < count; i++) {
		if (resource[i]) {
			assert(i+1 < 12);
			/* FETCH0 = VTX0 (param buffer),
			 * FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX */
			evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
		}
	}
}

static void evergreen_bind_compute_sampler_states(
	struct pipe_context *ctx_,
	unsigned start_slot,
	unsigned num_samplers,
	void **samplers_)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_sampler_state ** samplers =
		(struct compute_sampler_state **)samplers_;

	for (int i = 0; i < num_samplers; i++) {
		if (samplers[i]) {
			evergreen_set_sampler_resource(
				ctx->cs_shader_state.shader, samplers[i], i);
		}
	}
}

static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
			first, n);

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	compute_memory_finalize_pending(pool, ctx_);

	for (int i = 0; i < n; i++) {
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
	}

	evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_cs_set_vertex_buffer(ctx, 1, 0,
				(struct pipe_resource*)pool->bo);
}

/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream.  Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom.  However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function.  The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
 * on the GPU family.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
	int num_threads;
	int num_stack_entries;

	/* since all required registers are initialised in the
	 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
	 */
	r600_init_command_buffer(cb, 256);
	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	switch (ctx->family) {
	case CHIP_CEDAR:
	default:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_REDWOOD:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_JUNIPER:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_PALM:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO2:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_BARTS:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_TURKS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_CAICOS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	}

	/* Config Registers */
	evergreen_init_common_regs(cb, ctx->chip_class,
			ctx->family, ctx->screen->info.drm_minor);

	/* The primitive type always needs to be POINTLIST for compute. */
	r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
						V_008958_DI_PT_POINTLIST);

	if (ctx->chip_class < CAYMAN) {

		/* These registers control which simds can be used by each stage.
		 * The default for these registers is 0xffffffff, which means
		 * all simds are available for each stage.  It's possible we may
		 * want to play around with these in the future, but for now
		 * the default value is fine.
		 *
		 * R_008E20_SQ_STATIC_THREAD_MGMT1
		 * R_008E24_SQ_STATIC_THREAD_MGMT2
		 * R_008E28_SQ_STATIC_THREAD_MGMT3
		 */

		/* XXX: We may need to adjust the thread and stack resource
		 * values for 3D/compute interop */

		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

		/* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
		 * Set the number of threads used by the PS/VS/GS/ES stage to
		 * 0.
		 */
		r600_store_value(cb, 0);

		/* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
		 * Set the number of threads used by the CS (aka LS) stage to
		 * the maximum number of threads and set the number of threads
		 * for the HS stage to 0. */
		r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

		/* R_008C20_SQ_STACK_RESOURCE_MGMT_1
		 * Set the Control Flow stack entries to 0 for PS/VS stages */
		r600_store_value(cb, 0);

		/* R_008C24_SQ_STACK_RESOURCE_MGMT_2
		 * Set the Control Flow stack entries to 0 for GS/ES stages */
		r600_store_value(cb, 0);

		/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
		 * Set the Control Flow stack entries to 0 for the HS stage, and
		 * set it to the maximum value for the CS (aka LS) stage. */
		r600_store_value(cb,
			S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
	}

	/* Context Registers */

	if (ctx->chip_class < CAYMAN) {
		/* workaround for hw issues with dyn gpr - must set all limits
		 * to 240 instead of 0, 0x1e == 240 / 8
		 */
		r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				S_028838_PS_GPRS(0x1e) |
				S_028838_VS_GPRS(0x1e) |
				S_028838_GS_GPRS(0x1e) |
				S_028838_ES_GPRS(0x1e) |
				S_028838_HS_GPRS(0x1e) |
				S_028838_LS_GPRS(0x1e));
	}

	/* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
	r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
		S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2 /* CS_ON */);

	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
						S_0286E8_TID_IN_GROUP_ENA |
						S_0286E8_TGID_ENA |
						S_0286E8_DISABLE_INDEX_PACK);

	/* The LOOP_CONST registers are an optimization for loops that allows
	 * you to store the initial counter, increment value, and maximum
	 * counter value in a register so that hardware can calculate the
	 * correct number of iterations for the loop, so that you don't need
	 * to have the loop counter in your shader code.  We don't currently use
	 * this optimization, so we must keep track of the counter in the
	 * shader and use a break instruction to exit loops.  However, the
	 * hardware will still use this register to determine when to exit a
	 * loop, so we need to initialize the counter to 0, set the increment
	 * value to 1 and the maximum counter value to 4095 (0xfff), which
	 * is the maximum value allowed.  This gives us a maximum of 4096
	 * iterations for our loops, but hopefully our break instruction will
	 * execute some time before the 4096th iteration.
	 */
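	/* Reading the 0x1000FFF value below against the description above (an
	 * informal breakdown, not quoted from a register spec): the low twelve
	 * bits hold the maximum counter value 0xfff (4095), the middle field
	 * holds the initial counter value 0, and the 0x1 in the top byte is
	 * the increment of 1. */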
	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
}

void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
	ctx->context.create_compute_state = evergreen_create_compute_state;
	ctx->context.delete_compute_state = evergreen_delete_compute_state;
	ctx->context.bind_compute_state = evergreen_bind_compute_state;
//	 ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
	ctx->context.set_compute_resources = evergreen_set_compute_resources;
	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
	ctx->context.set_global_binding = evergreen_set_global_binding;
	ctx->context.launch_grid = evergreen_launch_grid;

	/* We always use at least two vertex buffers for compute, one for
	 * parameters and one for global memory */
	ctx->cs_vertex_buffer_state.enabled_mask =
	ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
}


struct pipe_resource *r600_compute_global_buffer_create(
	struct pipe_screen *screen,
	const struct pipe_resource *templ)
{
	assert(templ->target == PIPE_BUFFER);
	assert(templ->bind & PIPE_BIND_GLOBAL);
	assert(templ->array_size == 1 || templ->array_size == 0);
	assert(templ->depth0 == 1 || templ->depth0 == 0);
	assert(templ->height0 == 1 || templ->height0 == 0);

	struct r600_resource_global* result = (struct r600_resource_global*)
		CALLOC(sizeof(struct r600_resource_global), 1);
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
	COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
			templ->array_size);

	/* Copy the template first, then set the fields we own, so that the
	 * template copy does not clobber the vtbl and screen pointers. */
	result->base.b.b = *templ;
	result->base.b.vtbl = &r600_global_buffer_vtbl;
	result->base.b.b.screen = screen;
	pipe_reference_init(&result->base.b.b.reference, 1);

	int size_in_dw = (templ->width0 + 3) / 4;

	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

	if (result->chunk == NULL) {
		free(result);
		return NULL;
	}

	return &result->base.b.b;
}

void r600_compute_global_buffer_destroy(
	struct pipe_screen *screen,
	struct pipe_resource *res)
{
	assert(res->target == PIPE_BUFFER);
	assert(res->bind & PIPE_BIND_GLOBAL);

	struct r600_resource_global* buffer = (struct r600_resource_global*)res;
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	compute_memory_free(rscreen->global_pool, buffer->chunk->id);

	buffer->chunk = NULL;
	free(res);
}

void *r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	struct compute_memory_pool *pool = rctx->screen->global_pool;
	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)resource;
	uint32_t* map;

	compute_memory_finalize_pending(pool, ctx_);

	assert(resource->target == PIPE_BUFFER);

	COMPUTE_DBG("* r600_compute_global_transfer_map()\n"
			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
			"width = %u, height = %u, depth = %u)\n", level, usage,
			box->x, box->y, box->z, box->width, box->height,
			box->depth);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;

	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
	assert(transfer->box.x >= 0);
	assert(transfer->box.y == 0);
	assert(transfer->box.z == 0);

	/* TODO: do it better; mapping is not possible if the pool is too big */

	if (!(map = rctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
						rctx->cs, transfer->usage))) {
		util_slab_free(&rctx->pool_transfers, transfer);
		return NULL;
	}

	*ptransfer = transfer;

	COMPUTE_DBG("Buffer: %p + %u (buffer offset in global memory) "
		"+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
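	/* Note on the pointer arithmetic below: 'map' is a uint32_t pointer,
	 * so adding start_in_dw advances in dwords to this buffer's chunk in
	 * the pool; the cast to char* then makes box.x a byte offset within
	 * the buffer. */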
	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}

void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");

	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
	util_slab_free(&ctx->pool_transfers, transfer);
}

void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}

void r600_compute_global_transfer_inline_write(
	struct pipe_context *pipe,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	const void *data,
	unsigned stride,
	unsigned layer_stride)
{
	assert(0 && "TODO");
}