evergreen_compute_internal.c revision eb065f5d9d1159af3a88a64a7606c9b6d67dc3e3
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Adam Rak <adam.rak@streamnovation.com>
 */

#include <stdlib.h>
#include <stdio.h>

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
#include "util/u_double_list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "r600.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreend.h"
#include "evergreen_compute_internal.h"
#include "r600_hw_context_priv.h"

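/* Return the total number of compute resource slots declared in
 * compute_resource.def (the sum of all DECL_COMPUTE_RESOURCE counts). */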
int get_compute_resource_num(void)
{
	int num = 0;
#define DECL_COMPUTE_RESOURCE(name, n) num += n;
#include "compute_resource.def"
#undef DECL_COMPUTE_RESOURCE
	return num;
}

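/* Append one dword to the resource's local command buffer. */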
void evergreen_emit_raw_value(
	struct evergreen_compute_resource* res,
	unsigned value)
{
	res->cs[res->cs_end++] = value;
}

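/* Append one dword directly to the context's command stream. */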
void evergreen_emit_ctx_value(struct r600_context *ctx, unsigned value)
{
	ctx->cs->buf[ctx->cs->cdw++] = value;
}

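/* Set size/4 consecutive registers starting at 'index' from 'array'.
 * 'size' is given in bytes and must be a multiple of 4. */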
void evergreen_mult_reg_set_(
	struct evergreen_compute_resource* res,
	int index,
	u32* array,
	int size)
{
	int i = 0;

	evergreen_emit_raw_reg_set(res, index, size / 4);

	for (i = 0; i < size; i+=4) {
		res->cs[res->cs_end++] = array[i / 4];
	}
}

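/* Set the single register at 'index' to 'value'. */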
void evergreen_reg_set(
	struct evergreen_compute_resource* res,
	unsigned index,
	unsigned value)
{
	evergreen_emit_raw_reg_set(res, index, 1);
	res->cs[res->cs_end++] = value;
}

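/* Look up the resource slot for 'res_code' plus 'offset_index' (the slot
 * layout comes from compute_resource.def) and reset it for reuse. */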
struct evergreen_compute_resource* get_empty_res(
	struct r600_pipe_compute* pipe,
	enum evergreen_compute_resources res_code,
	int offset_index)
{
	int code_index = -1;
	int code_size = -1;

	{
		int i = 0;
		#define DECL_COMPUTE_RESOURCE(name, n) if (COMPUTE_RESOURCE_ ## name == res_code) {code_index = i; code_size = n;} i += n;
		#include "compute_resource.def"
		#undef DECL_COMPUTE_RESOURCE
	}

	assert(code_index != -1 && "internal error: resource index not found");
	assert(offset_index < code_size && "internal error: overindexing resource");

	int index = code_index + offset_index;

	struct evergreen_compute_resource* res = &pipe->resources[index];

	res->enabled = true;
	res->bo = NULL;
	res->cs_end = 0;
	bzero(&res->do_reloc, sizeof(res->do_reloc));

	return res;
}

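/* Emit the two-dword packet header for setting 'num' registers starting at
 * 'index'.  The packet type is chosen from the register range the index
 * falls into; unknown ranges fall back to a PKT0 write. */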
void evergreen_emit_raw_reg_set(
	struct evergreen_compute_resource* res,
	unsigned index,
	int num)
{
	res->enabled = 1;
	int cs_end = res->cs_end;

	if (index >= EVERGREEN_CONFIG_REG_OFFSET
			&& index < EVERGREEN_CONFIG_REG_END) {
		res->cs[cs_end] = PKT3C(PKT3_SET_CONFIG_REG, num, 0);
		res->cs[cs_end+1] = (index - EVERGREEN_CONFIG_REG_OFFSET) >> 2;
	} else if (index >= EVERGREEN_CONTEXT_REG_OFFSET
			&& index < EVERGREEN_CONTEXT_REG_END) {
		res->cs[cs_end] = PKT3C(PKT3_SET_CONTEXT_REG, num, 0);
		res->cs[cs_end+1] = (index - EVERGREEN_CONTEXT_REG_OFFSET) >> 2;
	} else if (index >= EVERGREEN_RESOURCE_OFFSET
			&& index < EVERGREEN_RESOURCE_END) {
		res->cs[cs_end] = PKT3C(PKT3_SET_RESOURCE, num, 0);
		res->cs[cs_end+1] = (index - EVERGREEN_RESOURCE_OFFSET) >> 2;
	} else if (index >= EVERGREEN_SAMPLER_OFFSET
			&& index < EVERGREEN_SAMPLER_END) {
		res->cs[cs_end] = PKT3C(PKT3_SET_SAMPLER, num, 0);
		res->cs[cs_end+1] = (index - EVERGREEN_SAMPLER_OFFSET) >> 2;
	} else if (index >= EVERGREEN_CTL_CONST_OFFSET
			&& index < EVERGREEN_CTL_CONST_END) {
		res->cs[cs_end] = PKT3C(PKT3_SET_CTL_CONST, num, 0);
		res->cs[cs_end+1] = (index - EVERGREEN_CTL_CONST_OFFSET) >> 2;
	} else if (index >= EVERGREEN_LOOP_CONST_OFFSET
			&& index < EVERGREEN_LOOP_CONST_END) {
		res->cs[cs_end] = PKT3C(PKT3_SET_LOOP_CONST, num, 0);
		res->cs[cs_end+1] = (index - EVERGREEN_LOOP_CONST_OFFSET) >> 2;
	} else if (index >= EVERGREEN_BOOL_CONST_OFFSET
			&& index < EVERGREEN_BOOL_CONST_END) {
		res->cs[cs_end] = PKT3C(PKT3_SET_BOOL_CONST, num, 0);
		res->cs[cs_end+1] = (index - EVERGREEN_BOOL_CONST_OFFSET) >> 2;
	} else {
		/* PKT0 has no separate register-offset dword, so its header is
		 * only one dword; compensate for the += 2 below. */
		res->cs[cs_end] = PKT0(index, num-1);
		res->cs_end--;
	}

	res->cs_end += 2;
}

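/* Mark the current position in the resource's command buffer as needing an
 * (additional) relocation. */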
void evergreen_emit_force_reloc(struct evergreen_compute_resource* res)
{
	res->do_reloc[res->cs_end] += 1;
}

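/* Like evergreen_emit_raw_reg_set(), but the packet header is written
 * directly into the context's command stream. */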
void evergreen_emit_ctx_reg_set(
	struct r600_context *ctx,
	unsigned index,
	int num)
{

	if (index >= EVERGREEN_CONFIG_REG_OFFSET
			&& index < EVERGREEN_CONFIG_REG_END) {
		ctx->cs->buf[ctx->cs->cdw++] = PKT3C(PKT3_SET_CONFIG_REG, num, 0);
		ctx->cs->buf[ctx->cs->cdw++] = (index - EVERGREEN_CONFIG_REG_OFFSET) >> 2;
	} else if (index >= EVERGREEN_CONTEXT_REG_OFFSET
			&& index < EVERGREEN_CONTEXT_REG_END) {
		ctx->cs->buf[ctx->cs->cdw++] = PKT3C(PKT3_SET_CONTEXT_REG, num, 0);
		ctx->cs->buf[ctx->cs->cdw++] = (index - EVERGREEN_CONTEXT_REG_OFFSET) >> 2;
	} else if (index >= EVERGREEN_RESOURCE_OFFSET
			&& index < EVERGREEN_RESOURCE_END) {
		ctx->cs->buf[ctx->cs->cdw++] = PKT3C(PKT3_SET_RESOURCE, num, 0);
		ctx->cs->buf[ctx->cs->cdw++] = (index - EVERGREEN_RESOURCE_OFFSET) >> 2;
	} else if (index >= EVERGREEN_SAMPLER_OFFSET
			&& index < EVERGREEN_SAMPLER_END) {
		ctx->cs->buf[ctx->cs->cdw++] = PKT3C(PKT3_SET_SAMPLER, num, 0);
		ctx->cs->buf[ctx->cs->cdw++] = (index - EVERGREEN_SAMPLER_OFFSET) >> 2;
	} else if (index >= EVERGREEN_CTL_CONST_OFFSET
			&& index < EVERGREEN_CTL_CONST_END) {
		ctx->cs->buf[ctx->cs->cdw++] = PKT3C(PKT3_SET_CTL_CONST, num, 0);
		ctx->cs->buf[ctx->cs->cdw++] = (index - EVERGREEN_CTL_CONST_OFFSET) >> 2;
	} else if (index >= EVERGREEN_LOOP_CONST_OFFSET
			&& index < EVERGREEN_LOOP_CONST_END) {
		ctx->cs->buf[ctx->cs->cdw++] = PKT3C(PKT3_SET_LOOP_CONST, num, 0);
		ctx->cs->buf[ctx->cs->cdw++] = (index - EVERGREEN_LOOP_CONST_OFFSET) >> 2;
	} else if (index >= EVERGREEN_BOOL_CONST_OFFSET
			&& index < EVERGREEN_BOOL_CONST_END) {
		ctx->cs->buf[ctx->cs->cdw++] = PKT3C(PKT3_SET_BOOL_CONST, num, 0);
		ctx->cs->buf[ctx->cs->cdw++] = (index - EVERGREEN_BOOL_CONST_OFFSET) >> 2;
	} else {
		ctx->cs->buf[ctx->cs->cdw++] = PKT0(index, num-1);
	}
}

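/* Emit a NOP packet carrying the relocation index for 'bo' into the
 * context's command stream. */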
void evergreen_emit_ctx_reloc(
	struct r600_context *ctx,
	struct r600_resource *bo,
	enum radeon_bo_usage usage)
{
	assert(bo);

	ctx->cs->buf[ctx->cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	u32 rr = r600_context_bo_reloc(ctx, bo, usage);
	ctx->cs->buf[ctx->cs->cdw++] = rr;
}

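/* Translate the pipe format of 'bo' into CB color format / number type
 * values.  Returns 1 on success, 0 if the format is unsupported. */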
int evergreen_compute_get_gpu_format(
	struct number_type_and_format* fmt,
	struct r600_resource *bo)
{
	switch (bo->b.b.format)
	{
		case PIPE_FORMAT_R8_UNORM:
		case PIPE_FORMAT_R32_UNORM:
			fmt->format = V_028C70_COLOR_32;
			fmt->number_type = V_028C70_NUMBER_UNORM;
			fmt->num_format_all = 0;
		break;
		case PIPE_FORMAT_R32_FLOAT:
			fmt->format = V_028C70_COLOR_32_FLOAT;
			fmt->number_type = V_028C70_NUMBER_FLOAT;
			fmt->num_format_all = 0;
		break;
		case PIPE_FORMAT_R32G32B32A32_FLOAT:
			fmt->format = V_028C70_COLOR_32_32_32_32_FLOAT;
			fmt->number_type = V_028C70_NUMBER_FLOAT;
			fmt->num_format_all = 0;
		break;

		///TODO: other formats...

		default:
			return 0;
	}

	return 1;
}

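/* Bind 'bo' as RAT (random access target) 'id': program the corresponding
 * CB_COLOR* registers and mark the buffer for read/write access. */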
void evergreen_set_rat(
	struct r600_pipe_compute *pipe,
	int id,
	struct r600_resource* bo,
	int start,
	int size)
{
	assert(id < 12);
	assert((size & 3) == 0);
	assert((start & 0xFF) == 0);

	int offset;
	COMPUTE_DBG("bind rat: %i \n", id);

	if (id < 8) {
		offset = id*0x3c;
	}
	else {
		offset = 8*0x3c + (id-8)*0x1c;
	}

	int linear = 0;

	if (bo->b.b.height0 <= 1 && bo->b.b.depth0 <= 1
			&& bo->b.b.target == PIPE_BUFFER) {
		linear = 1;
	}

	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_RAT, id);

	evergreen_emit_force_reloc(res);

	evergreen_reg_set(res, R_028C64_CB_COLOR0_PITCH, 0); ///TODO: for 2D?
	evergreen_reg_set(res, R_028C68_CB_COLOR0_SLICE, 0);

	struct number_type_and_format fmt;

	///default config
	if (bo->b.b.format == PIPE_FORMAT_NONE) {
		fmt.format = V_028C70_COLOR_32;
		fmt.number_type = V_028C70_NUMBER_FLOAT;
	} else {
		evergreen_compute_get_gpu_format(&fmt, bo);
	}

	evergreen_reg_set(res,
		R_028C70_CB_COLOR0_INFO, S_028C70_RAT(1)
		| S_028C70_ARRAY_MODE(V_028C70_ARRAY_LINEAR_ALIGNED)
		| S_028C70_FORMAT(fmt.format)
		| S_028C70_NUMBER_TYPE(fmt.number_type)
	);
	evergreen_emit_force_reloc(res);

	evergreen_reg_set(res, R_028C74_CB_COLOR0_ATTRIB, S_028C74_NON_DISP_TILING_ORDER(1));
	evergreen_emit_force_reloc(res);

	if (linear) {
		/* XXX: Why are we using size instead of bo->b.b.width0 ? */
		evergreen_reg_set(res, R_028C78_CB_COLOR0_DIM, size);
	} else {
		evergreen_reg_set(res, R_028C78_CB_COLOR0_DIM,
			S_028C78_WIDTH_MAX(bo->b.b.width0)
			| S_028C78_HEIGHT_MAX(bo->b.b.height0));
	}

	if (id < 8) {
		evergreen_reg_set(res, R_028C7C_CB_COLOR0_CMASK, 0);
		evergreen_emit_force_reloc(res);
		evergreen_reg_set(res, R_028C84_CB_COLOR0_FMASK, 0);
		evergreen_emit_force_reloc(res);
	}

	evergreen_reg_set(res, R_028C60_CB_COLOR0_BASE + offset, start >> 8);

	res->bo = bo;
	res->usage = RADEON_USAGE_READWRITE;
	res->coher_bo_size = size;

	/* XXX We are setting nr_cbufs to 1 so we can get the correct
	 * cb flush flags to be emitted with the SURFACE_SYNC packet.
	 * In the future we should be adding the pipe_surface for this RAT
	 * to pipe->ctx->framebuffer.cbufs.
	 */
	pipe->ctx->framebuffer.nr_cbufs = 1;
}

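/* Program the local data share (LDS) allocation for the compute (LS) stage. */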
void evergreen_set_lds(
	struct r600_pipe_compute *pipe,
	int num_lds,
	int size,
	int num_waves)
{
	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_LDS, 0);

	if (pipe->ctx->chip_class < CAYMAN) {
		evergreen_reg_set(res, R_008E2C_SQ_LDS_RESOURCE_MGMT,
			S_008E2C_NUM_LS_LDS(num_lds));
	} else {
		evergreen_reg_set(res, CM_R_0286FC_SPI_LDS_MGMT,
					S_0286FC_NUM_LS_LDS(num_lds));
	}
	evergreen_reg_set(res, CM_R_0288E8_SQ_LDS_ALLOC, size | num_waves << 14);
}

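/* Program the global data share (GDS) base address and size. */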
void evergreen_set_gds(
	struct r600_pipe_compute *pipe,
	uint32_t addr,
	uint32_t size)
{
	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_GDS, 0);

	evergreen_reg_set(res, R_028728_GDS_ORDERED_WAVE_PER_SE, 1);
	evergreen_reg_set(res, R_028720_GDS_ADDR_BASE, addr);
	evergreen_reg_set(res, R_028724_GDS_ADDR_SIZE, size);
}

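/* Program the SX memory export base and size registers, attaching 'bo' as
 * the export buffer when a non-zero size is given. */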
void evergreen_set_export(
	struct r600_pipe_compute *pipe,
	struct r600_resource* bo,
	int offset, int size)
{
	#define SX_MEMORY_EXPORT_BASE 0x9010
	#define SX_MEMORY_EXPORT_SIZE 0x9014

	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_EXPORT, 0);

	evergreen_reg_set(res, SX_MEMORY_EXPORT_SIZE, size);

	if (size) {
		evergreen_reg_set(res, SX_MEMORY_EXPORT_BASE, offset);
		res->bo = bo;
		res->usage = RADEON_USAGE_WRITE;
		res->coher_bo_size = size;
		res->flags = 0;
	}
}

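/* Program loop constant 'id' (count, initial value, increment) for the
 * compute shader stage. */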
void evergreen_set_loop_const(
	struct r600_pipe_compute *pipe,
	int id, int count, int init, int inc)
{
	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_LOOP, id);

	assert(id < 32);
	assert(count <= 0xFFF);
	assert(init <= 0xFF);
	assert(inc <= 0xFF);

	/* Compute shaders use LOOP_CONST registers SQ_LOOP_CONST_160 to
	 * SQ_LOOP_CONST_191 */
	evergreen_reg_set(res, R_03A200_SQ_LOOP_CONST_0 + (160 * 4) + (id * 4),
		count | init << 12 | inc << 24);
}

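/* Program the LS temporary ring base and size for shader engine 'se'
 * (selected via GRBM_GFX_INDEX), then restore broadcast writes. */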
void evergreen_set_tmp_ring(
	struct r600_pipe_compute *pipe,
	struct r600_resource* bo,
	int offset, int size, int se)
{
	#define SQ_LSTMP_RING_BASE 0x00008e10
	#define SQ_LSTMP_RING_SIZE 0x00008e14
	#define GRBM_GFX_INDEX                                  0x802C
	#define         INSTANCE_INDEX(x)                       ((x) << 0)
	#define         SE_INDEX(x)                             ((x) << 16)
	#define         INSTANCE_BROADCAST_WRITES               (1 << 30)
	#define         SE_BROADCAST_WRITES                     (1 << 31)

	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_TMPRING, se);

	evergreen_reg_set(res,
		GRBM_GFX_INDEX,INSTANCE_INDEX(0)
		| SE_INDEX(se)
		| INSTANCE_BROADCAST_WRITES);
	evergreen_reg_set(res, SQ_LSTMP_RING_SIZE, size);

	if (size) {
		assert(bo);

		evergreen_reg_set(res, SQ_LSTMP_RING_BASE, offset);
		res->bo = bo;
		res->usage = RADEON_USAGE_WRITE;
		res->coher_bo_size = 0;
		res->flags = 0;
	}

	if (size) {
		evergreen_emit_force_reloc(res);
	}

	evergreen_reg_set(res,
		GRBM_GFX_INDEX,INSTANCE_INDEX(0)
		| SE_INDEX(0)
		| INSTANCE_BROADCAST_WRITES
		| SE_BROADCAST_WRITES);
}

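/* Return the endian swap mode needed for the given CB color format on
 * big-endian hosts; no swapping is needed on little-endian builds. */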
static uint32_t r600_colorformat_endian_swap(uint32_t colorformat)
{
	if (R600_BIG_ENDIAN) {
		switch(colorformat) {
		case V_028C70_COLOR_4_4:
			return ENDIAN_NONE;

		/* 8-bit buffers. */
		case V_028C70_COLOR_8:
			return ENDIAN_NONE;

		/* 16-bit buffers. */
		case V_028C70_COLOR_5_6_5:
		case V_028C70_COLOR_1_5_5_5:
		case V_028C70_COLOR_4_4_4_4:
		case V_028C70_COLOR_16:
		case V_028C70_COLOR_8_8:
			return ENDIAN_8IN16;

		/* 32-bit buffers. */
		case V_028C70_COLOR_8_8_8_8:
		case V_028C70_COLOR_2_10_10_10:
		case V_028C70_COLOR_8_24:
		case V_028C70_COLOR_24_8:
		case V_028C70_COLOR_32_FLOAT:
		case V_028C70_COLOR_16_16_FLOAT:
		case V_028C70_COLOR_16_16:
			return ENDIAN_8IN32;

		/* 64-bit buffers. */
		case V_028C70_COLOR_16_16_16_16:
		case V_028C70_COLOR_16_16_16_16_FLOAT:
			return ENDIAN_8IN16;

		case V_028C70_COLOR_32_32_FLOAT:
		case V_028C70_COLOR_32_32:
		case V_028C70_COLOR_X24_8_32_FLOAT:
			return ENDIAN_8IN32;

		/* 96-bit buffers. */
		case V_028C70_COLOR_32_32_32_FLOAT:
		/* 128-bit buffers. */
		case V_028C70_COLOR_32_32_32_32_FLOAT:
		case V_028C70_COLOR_32_32_32_32:
			return ENDIAN_8IN32;
		default:
			return ENDIAN_NONE; /* Unsupported. */
		}
	} else {
		return ENDIAN_NONE;
	}
}

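/* Translate a pipe_texture_target into the SQ_TEX_DIM field value. */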
static unsigned r600_tex_dim(unsigned dim)
{
	switch (dim) {
	default:
	case PIPE_TEXTURE_1D:
		return V_030000_SQ_TEX_DIM_1D;
	case PIPE_TEXTURE_1D_ARRAY:
		return V_030000_SQ_TEX_DIM_1D_ARRAY;
	case PIPE_TEXTURE_2D:
	case PIPE_TEXTURE_RECT:
		return V_030000_SQ_TEX_DIM_2D;
	case PIPE_TEXTURE_2D_ARRAY:
		return V_030000_SQ_TEX_DIM_2D_ARRAY;
	case PIPE_TEXTURE_3D:
		return V_030000_SQ_TEX_DIM_3D;
	case PIPE_TEXTURE_CUBE:
		return V_030000_SQ_TEX_DIM_CUBEMAP;
	}
}

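/* Bind 'bo' as fetch (vertex/buffer) resource 'id': emit a SET_RESOURCE
 * packet with the buffer's GPU address, size and format, then invalidate
 * the vertex cache. */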
void evergreen_set_vtx_resource(
	struct r600_pipe_compute *pipe,
	struct r600_resource* bo,
	int id, uint64_t offset, int writable)
{
	assert(id < 16);
	uint32_t sq_vtx_constant_word2, sq_vtx_constant_word3, sq_vtx_constant_word4;
	struct number_type_and_format fmt;
	uint64_t va;

	fmt.format = 0;

	assert(bo->b.b.height0 <= 1);
	assert(bo->b.b.depth0 <= 1);

	int e = evergreen_compute_get_gpu_format(&fmt, bo);

	assert(e && "unknown format");

	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_VERT, id);

	unsigned size = bo->b.b.width0;
	unsigned stride = 1;

//	size = (size * util_format_get_blockwidth(bo->b.b.b.format) *
//		util_format_get_blocksize(bo->b.b.b.format));

	va = r600_resource_va(&pipe->ctx->screen->screen, &bo->b.b) + offset;

	COMPUTE_DBG("id: %i vtx size: %i byte,	width0: %i elem\n",
		id, size, bo->b.b.width0);

	sq_vtx_constant_word2 =
		S_030008_BASE_ADDRESS_HI(va >> 32) |
		S_030008_STRIDE(stride) |
		S_030008_DATA_FORMAT(fmt.format) |
		S_030008_NUM_FORMAT_ALL(fmt.num_format_all) |
		S_030008_ENDIAN_SWAP(0);

	COMPUTE_DBG("%08X %i %i %i %i\n", sq_vtx_constant_word2, (unsigned)offset,
			stride, fmt.format, fmt.num_format_all);

	sq_vtx_constant_word3 =
		S_03000C_DST_SEL_X(0) |
		S_03000C_DST_SEL_Y(1) |
		S_03000C_DST_SEL_Z(2) |
		S_03000C_DST_SEL_W(3);

	sq_vtx_constant_word4 = 0;

	evergreen_emit_raw_value(res, PKT3C(PKT3_SET_RESOURCE, 8, 0));
	evergreen_emit_raw_value(res, (id+816)*32 >> 2);
	evergreen_emit_raw_value(res, (unsigned)((va) & 0xffffffff));
	evergreen_emit_raw_value(res, size - 1);
	evergreen_emit_raw_value(res, sq_vtx_constant_word2);
	evergreen_emit_raw_value(res, sq_vtx_constant_word3);
	evergreen_emit_raw_value(res, sq_vtx_constant_word4);
	evergreen_emit_raw_value(res, 0);
	evergreen_emit_raw_value(res, 0);
	evergreen_emit_raw_value(res, S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER));

	res->bo = bo;

	if (writable) {
		res->usage = RADEON_USAGE_READWRITE;
	}
	else {
		res->usage = RADEON_USAGE_READ;
	}

	res->coher_bo_size = size;

	r600_inval_vertex_cache(pipe->ctx);
	/* XXX: Do we really need to invalidate the texture cache here?
	 * r600_inval_vertex_cache() will invalidate the texture cache
	 * if the chip does not have a vertex cache.
	 */
	r600_inval_texture_cache(pipe->ctx);
}

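/* Bind the given sampler view as texture resource 'id' by emitting a
 * SET_RESOURCE packet describing the texture's dimensions, layout and
 * format. */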
void evergreen_set_tex_resource(
	struct r600_pipe_compute *pipe,
	struct r600_pipe_sampler_view* view,
	int id)
{
	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_TEX, id);
	struct r600_resource_texture *tmp =
		(struct r600_resource_texture*)view->base.texture;

	unsigned format, endian;
	uint32_t word4 = 0, yuv_format = 0, pitch = 0;
	unsigned char swizzle[4], array_mode = 0, tile_type = 0;
	unsigned height, depth;

	swizzle[0] = 0;
	swizzle[1] = 1;
	swizzle[2] = 2;
	swizzle[3] = 3;

	format = r600_translate_texformat((struct pipe_screen *)pipe->ctx->screen,
		view->base.format, swizzle, &word4, &yuv_format);

	if (format == ~0) {
		format = 0;
	}

	endian = r600_colorformat_endian_swap(format);

	height = view->base.texture->height0;
	depth = view->base.texture->depth0;

	pitch = align(tmp->pitch_in_blocks[0] *
		util_format_get_blockwidth(tmp->real_format), 8);
	array_mode = tmp->array_mode[0];
	tile_type = tmp->tile_type;

	assert(view->base.texture->target != PIPE_TEXTURE_1D_ARRAY);
	assert(view->base.texture->target != PIPE_TEXTURE_2D_ARRAY);

	evergreen_emit_raw_value(res, PKT3C(PKT3_SET_RESOURCE, 8, 0));
	evergreen_emit_raw_value(res, (id+816)*32 >> 2); ///TODO: check this line
	evergreen_emit_raw_value(res,
				(S_030000_DIM(r600_tex_dim(view->base.texture->target)) |
				S_030000_PITCH((pitch / 8) - 1) |
				S_030000_NON_DISP_TILING_ORDER(tile_type) |
				S_030000_TEX_WIDTH(view->base.texture->width0 - 1)));
	evergreen_emit_raw_value(res, (S_030004_TEX_HEIGHT(height - 1) |
				S_030004_TEX_DEPTH(depth - 1) |
				S_030004_ARRAY_MODE(array_mode)));
	evergreen_emit_raw_value(res, tmp->offset[0] >> 8);
	evergreen_emit_raw_value(res, tmp->offset[0] >> 8);
	evergreen_emit_raw_value(res, (word4 |
				S_030010_SRF_MODE_ALL(V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE) |
				S_030010_ENDIAN_SWAP(endian) |
				S_030010_BASE_LEVEL(0)));
	evergreen_emit_raw_value(res, (S_030014_LAST_LEVEL(0) |
				S_030014_BASE_ARRAY(0) |
				S_030014_LAST_ARRAY(0)));
	evergreen_emit_raw_value(res, (S_030018_MAX_ANISO(4 /* max 16 samples */)));
	evergreen_emit_raw_value(res,
		S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_TEXTURE)
		| S_03001C_DATA_FORMAT(format));

	res->bo = (struct r600_resource*)view->base.texture;

	res->usage = RADEON_USAGE_READ;

	res->coher_bo_size = tmp->offset[0] + util_format_get_blockwidth(tmp->real_format)*view->base.texture->width0*height*depth;

	r600_inval_texture_cache(pipe->ctx);

	evergreen_emit_force_reloc(res);
	evergreen_emit_force_reloc(res);
}

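/* Emit a SET_SAMPLER packet for sampler 'id' built from the wrap, filter
 * and LOD settings in the given sampler state. */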
void evergreen_set_sampler_resource(
	struct r600_pipe_compute *pipe,
	struct compute_sampler_state *sampler,
	int id)
{
	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_SAMPLER, id);

	unsigned aniso_flag_offset = sampler->state.max_anisotropy > 1 ? 2 : 0;

	evergreen_emit_raw_value(res, PKT3C(PKT3_SET_SAMPLER, 3, 0));
	evergreen_emit_raw_value(res, (id + 90)*3);
	evergreen_emit_raw_value(res,
		S_03C000_CLAMP_X(r600_tex_wrap(sampler->state.wrap_s)) |
		S_03C000_CLAMP_Y(r600_tex_wrap(sampler->state.wrap_t)) |
		S_03C000_CLAMP_Z(r600_tex_wrap(sampler->state.wrap_r)) |
		S_03C000_XY_MAG_FILTER(r600_tex_filter(sampler->state.mag_img_filter) | aniso_flag_offset) |
		S_03C000_XY_MIN_FILTER(r600_tex_filter(sampler->state.min_img_filter) | aniso_flag_offset) |
		S_03C000_BORDER_COLOR_TYPE(V_03C000_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK)
	);
	evergreen_emit_raw_value(res,
		S_03C004_MIN_LOD(S_FIXED(CLAMP(sampler->state.min_lod, 0, 15), 8)) |
		S_03C004_MAX_LOD(S_FIXED(CLAMP(sampler->state.max_lod, 0, 15), 8))
	);
	evergreen_emit_raw_value(res,
		S_03C008_LOD_BIAS(S_FIXED(CLAMP(sampler->state.lod_bias, -16, 16), 8)) |
		(sampler->state.seamless_cube_map ? 0 : S_03C008_DISABLE_CUBE_WRAP(1)) |
		S_03C008_TYPE(1)
	);
}

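/* Bind 'cbo' as an ALU constant buffer for the LS/compute stage by
 * programming the constant cache base and size for 'cache_id'. */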
void evergreen_set_const_cache(
	struct r600_pipe_compute *pipe,
	int cache_id,
	struct r600_resource* cbo,
	int size, int offset)
{
	#define SQ_ALU_CONST_BUFFER_SIZE_LS_0 0x00028fc0
	#define SQ_ALU_CONST_CACHE_LS_0 0x00028f40

	struct evergreen_compute_resource* res =
		get_empty_res(pipe, COMPUTE_RESOURCE_CONST_MEM, cache_id);

	assert(size < 0x200);
	assert((offset & 0xFF) == 0);
	assert(cache_id < 16);

	evergreen_reg_set(res, SQ_ALU_CONST_BUFFER_SIZE_LS_0 + cache_id*4, size);
	evergreen_reg_set(res, SQ_ALU_CONST_CACHE_LS_0 + cache_id*4, offset >> 8);
	res->bo = cbo;
	res->usage = RADEON_USAGE_READ;
	res->coher_bo_size = size;

	r600_inval_shader_cache(pipe->ctx);
}

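/* Allocate a PIPE_BIND_CUSTOM buffer of 'size' bytes for compute use. */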
struct r600_resource* r600_compute_buffer_alloc_vram(
	struct r600_screen *screen,
	unsigned size)
{
	assert(size);

	struct pipe_resource * buffer = pipe_buffer_create(
			(struct pipe_screen*) screen,
			PIPE_BIND_CUSTOM,
			PIPE_USAGE_IMMUTABLE,
			size);

	return (struct r600_resource *)buffer;
}