evergreen_compute.c revision 5016fe2d47a08ce2cd7c597e67862086e7d63b64
/*
 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
#include "r600.h"
#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
#include "llvm_wrapper.h"
#endif

/**
RAT0 is for global binding writes
VTX1 is for global binding reads

for writing images: RAT1...
for reading images: TEX2...
  TEX2 is paired with RAT1

TEX2... consumes the same fetch resources that VTX2... would consume

CONST0 and VTX0 are for parameters
  CONST0 binds the smaller input parameter buffer and is used for constant
  indexing; it is also constant cached
  VTX0 is for indirect/non-constant indexing, or if the input is bigger than
  the constant cache can handle

RATs are limited to 12, so we can bind at most 11 textures for writing,
because we reserve RAT0 for global bindings. With byte addressing enabled,
we should reserve another one too => at most 10 image bindings for writing.

from Nvidia OpenCL:
  CL_DEVICE_MAX_READ_IMAGE_ARGS:        128
  CL_DEVICE_MAX_WRITE_IMAGE_ARGS:       8

so 10 for writing is enough. 176 is the max for reading according to the docs

writable images should be listed first (< 10), so their id corresponds to RAT(id+1)
writable images also consume TEX slots, and VTX slots because of linear indexing

*/
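/* A concrete picture of the scheme above (illustrative only, derived from how
 * evergreen_set_compute_resources() and evergreen_set_cs_sampler_view() assign
 * slots further down in this file):
 *
 *   global buffer pool   -> RAT0 (write) / VTX1 (read)
 *   kernel parameters    -> CONST0 / VTX0
 *   writable image id 0  -> RAT1,  fetched through TEX2/VTX2
 *   writable image id 1  -> RAT2,  fetched through TEX3/VTX3
 *   ...
 *   writable image id 9  -> RAT10, fetched through TEX11/VTX11
 */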

const struct u_resource_vtbl r600_global_buffer_vtbl =
{
	u_default_resource_get_handle, /* get_handle */
	r600_compute_global_buffer_destroy, /* resource_destroy */
	r600_compute_global_get_transfer, /* get_transfer */
	r600_compute_global_transfer_destroy, /* transfer_destroy */
	r600_compute_global_transfer_map, /* transfer_map */
	r600_compute_global_transfer_flush_region, /* transfer_flush_region */
	r600_compute_global_transfer_unmap, /* transfer_unmap */
	r600_compute_global_transfer_inline_write /* transfer_inline_write */
};


void *evergreen_create_compute_state(
	struct pipe_context *ctx_,
	const struct pipe_compute_state *cso)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

#ifdef HAVE_OPENCL
	const struct pipe_llvm_program_header * header;
	const unsigned char * code;

	header = cso->prog;
	code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif

	if (!ctx->screen->screen.get_param(&ctx->screen->screen,
							PIPE_CAP_COMPUTE)) {
		fprintf(stderr, "Compute is not supported\n");
		return NULL;
	}
	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);

	shader->ctx = (struct r600_context*)ctx;
	shader->resources = (struct evergreen_compute_resource*)
			CALLOC(sizeof(struct evergreen_compute_resource),
			get_compute_resource_num());
	shader->local_size = cso->req_local_mem; ///TODO: assert it
	shader->private_size = cso->req_private_mem;
	shader->input_size = cso->req_input_mem;

#ifdef HAVE_OPENCL
	shader->mod = llvm_parse_bitcode(code, header->num_bytes);

	r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
#endif
	return shader;
}

void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
{
	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;

	free(shader->resources);
	free(shader);
}

static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	ctx->cs_shader = (struct r600_pipe_compute *)state;

	if (!ctx->cs_shader->shader_code_bo) {

		ctx->cs_shader->shader_code_bo =
			r600_compute_buffer_alloc_vram(ctx->screen,
					ctx->cs_shader->bc.ndw * 4);

		void *p = ctx->ws->buffer_map(
					ctx->cs_shader->shader_code_bo->cs_buf,
					ctx->cs, PIPE_TRANSFER_WRITE);

		memcpy(p, ctx->cs_shader->bc.bytecode, ctx->cs_shader->bc.ndw * 4);

		ctx->ws->buffer_unmap(ctx->cs_shader->shader_code_bo->cs_buf);

	}

	struct evergreen_compute_resource* res = get_empty_res(ctx->cs_shader,
						COMPUTE_RESOURCE_SHADER, 0);

	if (ctx->chip_class < CAYMAN) {
		evergreen_reg_set(res, R_008C0C_SQ_GPR_RESOURCE_MGMT_3,
			S_008C0C_NUM_LS_GPRS(ctx->cs_shader->bc.ngpr));
	}

	///maybe we can use it later
	evergreen_reg_set(res, R_0286C8_SPI_THREAD_GROUPING, 0);
	///maybe we can use it later
	evergreen_reg_set(res, R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2, 0);

	evergreen_reg_set(res, R_0288D4_SQ_PGM_RESOURCES_LS,
		S_0288D4_NUM_GPRS(ctx->cs_shader->bc.ngpr)
		| S_0288D4_STACK_SIZE(ctx->cs_shader->bc.nstack));
	evergreen_reg_set(res, R_0288D8_SQ_PGM_RESOURCES_LS_2, 0);

	evergreen_reg_set(res, R_0288D0_SQ_PGM_START_LS, 0);
	res->bo = ctx->cs_shader->shader_code_bo;
	res->usage = RADEON_USAGE_READ;
	res->coher_bo_size = ctx->cs_shader->bc.ndw*4;

	r600_inval_shader_cache(ctx);

	/* We can't always determine the number of iterations in a loop before
	 * it's executed, so we just need to set up the loop counter to give us
	 * the maximum number of iterations possible.  Currently, loops in
	 * shader code ignore the loop counter and use a break instruction to
	 * exit the loop at the correct time.
	 */
	evergreen_set_loop_const(ctx->cs_shader,
		0, /* index */
		0xFFF, /* Maximum value of the loop counter (i.e. when the loop
			* counter reaches this value, the program will break
			* out of the loop). */
		0x0,   /* Starting value of the loop counter. */
		0x1);  /* Amount to increment the loop counter each iteration. */
}

/* The kernel parameters are stored in a vtx buffer (ID=0).  Besides the
 * explicit kernel parameters, there are implicit parameters that need to be
 * stored in the vertex buffer as well.  Here is how these parameters are
 * organized in the buffer:
 *
 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
 * DWORDS 6-8: Number of work items within each work group in each dimension
 *             (x,y,z)
 * DWORDS 9+ : Kernel parameters
 */
void evergreen_compute_upload_input(
	struct pipe_context *ctx_,
	const uint *block_layout,
	const uint *grid_layout,
	const void *input)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	int i;
	unsigned kernel_parameters_offset_bytes = 36;
	uint32_t * num_work_groups_start;
	uint32_t * global_size_start;
	uint32_t * local_size_start;
	uint32_t * kernel_parameters_start;

	if (ctx->cs_shader->input_size == 0) {
		return;
	}

	if (!ctx->cs_shader->kernel_param) {
		unsigned buffer_size = ctx->cs_shader->input_size;

		/* Add space for the grid dimensions */
		buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
		ctx->cs_shader->kernel_param =
				r600_compute_buffer_alloc_vram(ctx->screen,
						buffer_size);
	}

	num_work_groups_start = ctx->ws->buffer_map(
			ctx->cs_shader->kernel_param->cs_buf,
			ctx->cs, PIPE_TRANSFER_WRITE);
	global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
	local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
	kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));

	/* Copy the grid dimensions (number of work groups) */
	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));

	/* Copy the global size */
	for (i = 0; i < 3; i++) {
		global_size_start[i] = grid_layout[i] * block_layout[i];
	}

	/* Copy the local dimensions */
	memcpy(local_size_start, block_layout, 3 * sizeof(uint));

	/* Copy the kernel inputs */
	memcpy(kernel_parameters_start, input, ctx->cs_shader->input_size);

	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
					(ctx->cs_shader->input_size / 4); i++) {
		COMPUTE_DBG("input %i : %i\n", i,
			((unsigned*)num_work_groups_start)[i]);
	}

	ctx->ws->buffer_unmap(ctx->cs_shader->kernel_param->cs_buf);

	///ID=0 is reserved for the parameters
	evergreen_set_vtx_resource(ctx->cs_shader,
		ctx->cs_shader->kernel_param, 0, 0, 0);
	///ID=0 is reserved for parameters
	evergreen_set_const_cache(ctx->cs_shader, 0,
		ctx->cs_shader->kernel_param, ctx->cs_shader->input_size, 0);
}
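
/* Illustrative sketch only: the implicit parameter block written by
 * evergreen_compute_upload_input() above, expressed as a struct.  This type is
 * hypothetical and not used by the driver; it just mirrors the dword layout
 * documented before the function.
 *
 *	struct compute_implicit_params {
 *		uint32_t num_work_groups[3]; // DWORDS 0-2
 *		uint32_t global_size[3];     // DWORDS 3-5
 *		uint32_t local_size[3];      // DWORDS 6-8
 *		// DWORDS 9+: explicit kernel parameters follow
 *	};
 */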

void evergreen_direct_dispatch(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;

	int i;

	struct evergreen_compute_resource* res = get_empty_res(ctx->cs_shader,
		COMPUTE_RESOURCE_DISPATCH, 0);

	evergreen_reg_set(res, R_008958_VGT_PRIMITIVE_TYPE, V_008958_DI_PT_POINTLIST);

	evergreen_reg_set(res, R_00899C_VGT_COMPUTE_START_X, 0);
	evergreen_reg_set(res, R_0089A0_VGT_COMPUTE_START_Y, 0);
	evergreen_reg_set(res, R_0089A4_VGT_COMPUTE_START_Z, 0);

	evergreen_reg_set(res, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, block_layout[0]);
	evergreen_reg_set(res, R_0286F0_SPI_COMPUTE_NUM_THREAD_Y, block_layout[1]);
	evergreen_reg_set(res, R_0286F4_SPI_COMPUTE_NUM_THREAD_Z, block_layout[2]);

	int group_size = 1;

	int grid_size = 1;

	for (i = 0; i < 3; i++) {
		group_size *= block_layout[i];
	}

	for (i = 0; i < 3; i++) {
		grid_size *= grid_layout[i];
	}

	evergreen_reg_set(res, R_008970_VGT_NUM_INDICES, group_size);
	evergreen_reg_set(res, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, group_size);

	evergreen_emit_raw_value(res, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
	evergreen_emit_raw_value(res, grid_layout[0]);
	evergreen_emit_raw_value(res, grid_layout[1]);
	evergreen_emit_raw_value(res, grid_layout[2]);
	///VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN
	evergreen_emit_raw_value(res, 1);
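
	/* Illustrative example of the sequence above: launching an 8x8x1
	 * thread block over a 4x2x1 grid programs SPI_COMPUTE_NUM_THREAD_X/Y/Z
	 * to 8/8/1, sets VGT_NUM_INDICES and VGT_COMPUTE_THREAD_GROUP_SIZE to
	 * 64, and emits DISPATCH_DIRECT with dim_x=4, dim_y=2, dim_z=1
	 * followed by the COMPUTE_SHADER_EN dispatch initiator. */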
}

static void compute_emit_cs(struct r600_context *ctx)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	int i;

	struct r600_resource *onebo = NULL;

	/* Initialize all the registers common to both 3D and compute.  Some
	 * 3D-only registers will be initialized by this atom as well, but
	 * this is OK for now.
	 *
	 * See evergreen_init_atom_start_cs() or cayman_init_atom_start_cs() in
	 * evergreen_state.c for the list of registers that are initialized by
	 * the start_cs_cmd atom.
	 */
	r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);

	/* Initialize all the compute specific registers.
	 *
	 * See evergreen_init_atom_start_compute_cs() in this file for the list
	 * of registers initialized by the start_compute_cs_cmd atom.
	 */
	r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);

	for (i = 0; i < get_compute_resource_num(); i++) {
		if (ctx->cs_shader->resources[i].enabled) {
			int j;
			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);

			for (j = 0; j < ctx->cs_shader->resources[i].cs_end; j++) {
				if (ctx->cs_shader->resources[i].do_reloc[j]) {
					assert(ctx->cs_shader->resources[i].bo);
					evergreen_emit_ctx_reloc(ctx,
						ctx->cs_shader->resources[i].bo,
						ctx->cs_shader->resources[i].usage);
				}

				cs->buf[cs->cdw++] = ctx->cs_shader->resources[i].cs[j];
			}

			if (ctx->cs_shader->resources[i].bo) {
				onebo = ctx->cs_shader->resources[i].bo;
				evergreen_emit_ctx_reloc(ctx,
					ctx->cs_shader->resources[i].bo,
					ctx->cs_shader->resources[i].usage);

				///special case for textures
				if (ctx->cs_shader->resources[i].do_reloc
					[ctx->cs_shader->resources[i].cs_end] == 2) {
					evergreen_emit_ctx_reloc(ctx,
						ctx->cs_shader->resources[i].bo,
						ctx->cs_shader->resources[i].usage);
				}
			}
		}
	}

	/* r600_flush_framebuffer() updates the cb_flush_flags and then
	 * calls r600_emit_atom() on the ctx->surface_sync_cmd.atom, which emits
	 * a SURFACE_SYNC packet via r600_emit_surface_sync().
	 *
	 * XXX r600_emit_surface_sync() hardcodes the CP_COHER_SIZE to
	 * 0xffffffff, so we will need to add a field to struct
	 * r600_surface_sync_cmd if we want to manually set this value.
	 */
	r600_flush_framebuffer(ctx, true /* Flush now */);

#if 0
	COMPUTE_DBG("cdw: %i\n", cs->cdw);
	for (i = 0; i < cs->cdw; i++) {
		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
	}
#endif

	ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);

	ctx->pm4_dirty_cdwords = 0;
	ctx->flags = 0;

	COMPUTE_DBG("shader started\n");

	ctx->ws->buffer_wait(onebo->buf, 0);

	COMPUTE_DBG("...\n");

	ctx->streamout_start = TRUE;
	ctx->streamout_append_bitmask = ~0;
}

static void evergreen_launch_grid(
		struct pipe_context *ctx_,
		const uint *block_layout, const uint *grid_layout,
		uint32_t pc, const void *input)
{
	COMPUTE_DBG("PC: %i\n", pc);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	unsigned num_waves;
	unsigned num_pipes = ctx->screen->info.r600_max_pipes;
	unsigned wave_divisor = (16 * num_pipes);

	/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
			wave_divisor - 1) / wave_divisor;
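
	/* Worked example (illustrative): on a 4-pipe ASIC, wave_divisor is 64,
	 * so a 16x16x1 thread block (256 threads) yields
	 * (256 + 63) / 64 = 4 wavefronts per thread block. */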

	COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
							num_pipes, num_waves);

	evergreen_set_lds(ctx->cs_shader, 0, 0, num_waves);
	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
	evergreen_direct_dispatch(ctx_, block_layout, grid_layout);
	compute_emit_cs(ctx);
}

static void evergreen_set_compute_resources(struct pipe_context * ctx_,
		unsigned start, unsigned count,
		struct pipe_surface ** surfaces)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_surface **resources = (struct r600_surface **)surfaces;
	for (int i = 0; i < count; i++) {
		if (resources[i]) {
			struct r600_resource_global *buffer =
				(struct r600_resource_global*)resources[i]->base.texture;
			if (resources[i]->base.writable) {
				assert(i+1 < 12);

				evergreen_set_rat(ctx->cs_shader, i+1,
				(struct r600_resource *)resources[i]->base.texture,
				buffer->chunk->start_in_dw*4,
				resources[i]->base.texture->width0);
			}

			evergreen_set_vtx_resource(ctx->cs_shader,
				(struct r600_resource *)resources[i]->base.texture, i+2,
				buffer->chunk->start_in_dw*4, resources[i]->base.writable);
		}
	}
}

static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
		unsigned start_slot, unsigned count,
		struct pipe_sampler_view **views)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_pipe_sampler_view **resource =
		(struct r600_pipe_sampler_view **)views;

	for (int i = 0; i < count; i++) {
		if (resource[i]) {
			assert(i+1 < 12);
			///FETCH0 = VTX0 (param buffer),
			///FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
			evergreen_set_tex_resource(ctx->cs_shader, resource[i], i+2);
		}
	}
}

static void evergreen_bind_compute_sampler_states(
	struct pipe_context *ctx_,
	unsigned start_slot,
	unsigned num_samplers,
	void **samplers_)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_sampler_state ** samplers =
		(struct compute_sampler_state **)samplers_;

	for (int i = 0; i < num_samplers; i++) {
		if (samplers[i]) {
			evergreen_set_sampler_resource(ctx->cs_shader, samplers[i], i);
		}
	}
}

static void evergreen_set_global_binding(
	struct pipe_context *ctx_, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;
	struct r600_resource_global **buffers =
		(struct r600_resource_global **)resources;

	if (!resources) {
		/* XXX: Unset */
		return;
	}

	compute_memory_finalize_pending(pool, ctx_);

	for (int i = 0; i < n; i++) {
		assert(resources[i]->target == PIPE_BUFFER);
		assert(resources[i]->bind & PIPE_BIND_GLOBAL);

		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
	}
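
	/* For example (illustrative): a global buffer whose chunk was placed
	 * at start_in_dw = 256 in the pool reports a handle of 1024, i.e. its
	 * byte offset within the pool that is bound as RAT0/VTX1 below. */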

	evergreen_set_rat(ctx->cs_shader, 0, pool->bo, 0, pool->size_in_dw * 4);
	evergreen_set_vtx_resource(ctx->cs_shader, pool->bo, 1, 0, 1);
}

/**
 * This function initializes all the compute specific registers that need to
 * be initialized for each compute command stream.  Registers that are common
 * to both compute and 3D will be initialized at the beginning of each compute
 * command stream by the start_cs_cmd atom.  However, since the SET_CONTEXT_REG
 * packet requires that the shader type bit be set, we must initialize all
 * context registers needed for compute in this function.  The registers
 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in
 * the functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs
 * depending on the GPU family.
 */
void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
{
	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
	int num_threads;
	int num_stack_entries;

	/* We aren't passing the EMIT_EARLY flag as the third argument
	 * because we will be emitting this atom manually in order to
	 * ensure it gets emitted after the start_cs_cmd atom.
	 */
	r600_init_command_buffer(cb, 256, 0);
	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;

	switch (ctx->family) {
	case CHIP_CEDAR:
	default:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_REDWOOD:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_JUNIPER:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_PALM:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_SUMO2:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_BARTS:
		num_threads = 128;
		num_stack_entries = 512;
		break;
	case CHIP_TURKS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	case CHIP_CAICOS:
		num_threads = 128;
		num_stack_entries = 256;
		break;
	}

	/* Config Registers */
	if (ctx->chip_class < CAYMAN) {

		/* These registers control which simds can be used by each stage.
		 * The default for these registers is 0xffffffff, which means
		 * all simds are available for each stage.  It's possible we may
		 * want to play around with these in the future, but for now
		 * the default value is fine.
		 *
		 * R_008E20_SQ_STATIC_THREAD_MGMT1
		 * R_008E24_SQ_STATIC_THREAD_MGMT2
		 * R_008E28_SQ_STATIC_THREAD_MGMT3
		 */

		/* XXX: We may need to adjust the thread and stack resource
		 * values for 3D/compute interop */

		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);

		/* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
		 * Set the number of threads used by the PS/VS/GS/ES stage to
		 * 0.
		 */
		r600_store_value(cb, 0);

		/* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
		 * Set the number of threads used by the CS (aka LS) stage to
		 * the maximum number of threads and set the number of threads
		 * for the HS stage to 0. */
		r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));

		/* R_008C20_SQ_STACK_RESOURCE_MGMT_1
		 * Set the Control Flow stack entries to 0 for PS/VS stages */
		r600_store_value(cb, 0);

		/* R_008C24_SQ_STACK_RESOURCE_MGMT_2
		 * Set the Control Flow stack entries to 0 for GS/ES stages */
		r600_store_value(cb, 0);

		/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
		 * Set the Control Flow stack entries to 0 for the HS stage, and
		 * set it to the maximum value for the CS (aka LS) stage. */
		r600_store_value(cb,
			S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
	}

	/* Context Registers */

	if (ctx->chip_class < CAYMAN) {
		/* workaround for hw issues with dyn gpr - must set all limits
		 * to 240 instead of 0, 0x1e == 240 / 8
		 */
		r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
				S_028838_PS_GPRS(0x1e) |
				S_028838_VS_GPRS(0x1e) |
				S_028838_GS_GPRS(0x1e) |
				S_028838_ES_GPRS(0x1e) |
				S_028838_HS_GPRS(0x1e) |
				S_028838_LS_GPRS(0x1e));
	}

	/* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
	r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
		S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));

	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2 /* CS_ON */);

	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
						S_0286E8_TID_IN_GROUP_ENA
						| S_0286E8_TGID_ENA
						| S_0286E8_DISABLE_INDEX_PACK);
}

void evergreen_init_compute_state_functions(struct r600_context *ctx)
{
	ctx->context.create_compute_state = evergreen_create_compute_state;
	ctx->context.delete_compute_state = evergreen_delete_compute_state;
	ctx->context.bind_compute_state = evergreen_bind_compute_state;
//	 ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
	ctx->context.set_compute_resources = evergreen_set_compute_resources;
	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
	ctx->context.set_global_binding = evergreen_set_global_binding;
	ctx->context.launch_grid = evergreen_launch_grid;
}


struct pipe_resource *r600_compute_global_buffer_create(
	struct pipe_screen *screen,
	const struct pipe_resource *templ)
{
	assert(templ->target == PIPE_BUFFER);
	assert(templ->bind & PIPE_BIND_GLOBAL);
	assert(templ->array_size == 1 || templ->array_size == 0);
	assert(templ->depth0 == 1 || templ->depth0 == 0);
	assert(templ->height0 == 1 || templ->height0 == 0);

	struct r600_resource_global* result = (struct r600_resource_global*)
		CALLOC(sizeof(struct r600_resource_global), 1);
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	result->base.b.vtbl = &r600_global_buffer_vtbl;
	result->base.b.b = *templ;
	/* Set the screen after copying the template, so the copy doesn't
	 * clobber it. */
	result->base.b.b.screen = screen;
	pipe_reference_init(&result->base.b.b.reference, 1);

	int size_in_dw = (templ->width0+3) / 4;

	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

	if (result->chunk == NULL) {
		free(result);
		return NULL;
	}

	return &result->base.b.b;
}
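
/* Usage sketch (illustrative, not driver code): a state tracker reaches this
 * function through the generic gallium entry point, roughly:
 *
 *	struct pipe_resource templ;
 *	memset(&templ, 0, sizeof(templ));
 *	templ.target = PIPE_BUFFER;
 *	templ.bind = PIPE_BIND_GLOBAL;
 *	templ.format = PIPE_FORMAT_R8_UNORM;
 *	templ.width0 = size_in_bytes;
 *	templ.height0 = templ.depth0 = templ.array_size = 1;
 *	buf = screen->resource_create(screen, &templ);
 */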

void r600_compute_global_buffer_destroy(
	struct pipe_screen *screen,
	struct pipe_resource *res)
{
	assert(res->target == PIPE_BUFFER);
	assert(res->bind & PIPE_BIND_GLOBAL);

	struct r600_resource_global* buffer = (struct r600_resource_global*)res;
	struct r600_screen* rscreen = (struct r600_screen*)screen;

	compute_memory_free(rscreen->global_pool, buffer->chunk->id);

	buffer->chunk = NULL;
	free(res);
}

void* r600_compute_global_transfer_map(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
	assert(transfer->box.x >= 0);
	assert(transfer->box.y == 0);
	assert(transfer->box.z == 0);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	uint32_t* map;
	///TODO: do it better, mapping is not possible if the pool is too big

	if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
						ctx->cs, transfer->usage))) {
		return NULL;
	}

	COMPUTE_DBG("buffer start: %lli\n", buffer->chunk->start_in_dw);
	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}

void r600_compute_global_transfer_unmap(
	struct pipe_context *ctx_,
	struct pipe_transfer* transfer)
{
	assert(transfer->resource->target == PIPE_BUFFER);
	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);

	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct r600_resource_global* buffer =
		(struct r600_resource_global*)transfer->resource;

	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
}

struct pipe_transfer * r600_compute_global_get_transfer(
	struct pipe_context *ctx_,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box)
{
	struct r600_context *ctx = (struct r600_context *)ctx_;
	struct compute_memory_pool *pool = ctx->screen->global_pool;

	compute_memory_finalize_pending(pool, ctx_);

	assert(resource->target == PIPE_BUFFER);
	struct pipe_transfer *transfer = util_slab_alloc(&ctx->pool_transfers);

	transfer->resource = resource;
	transfer->level = level;
	transfer->usage = usage;
	transfer->box = *box;
	transfer->stride = 0;
	transfer->layer_stride = 0;
	transfer->data = NULL;

	/* Note strides are zero, this is ok for buffers, but not for
	 * textures 2d & higher at least.
	 */
	return transfer;
}

void r600_compute_global_transfer_destroy(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx_;
	util_slab_free(&rctx->pool_transfers, transfer);
}

void r600_compute_global_transfer_flush_region(
	struct pipe_context *ctx_,
	struct pipe_transfer *transfer,
	const struct pipe_box *box)
{
	assert(0 && "TODO");
}

void r600_compute_global_transfer_inline_write(
	struct pipe_context *pipe,
	struct pipe_resource *resource,
	unsigned level,
	unsigned usage,
	const struct pipe_box *box,
	const void *data,
	unsigned stride,
	unsigned layer_stride)
{
	assert(0 && "TODO");
}