#include "r600_llvm.h"

#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_gather.h"
#include "tgsi/tgsi_parse.h"
#include "util/u_double_list.h"
#include "util/u_memory.h"

#include "r600.h"
#include "r600_asm.h"
#include "r600_opcodes.h"
#include "r600_shader.h"
#include "radeon_llvm.h"
#include "radeon_llvm_emit.h"

#include <stdio.h>

#if defined R600_USE_LLVM || defined HAVE_OPENCL

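/* Fetch a value from the constant file: emit an llvm.AMDGPU.load.const
 * intrinsic for the SoA register index and bitcast the result to the
 * requested TGSI type.
 */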
static LLVMValueRef llvm_fetch_const(
	struct lp_build_tgsi_context * bld_base,
	const struct tgsi_full_src_register *reg,
	enum tgsi_opcode_type type,
	unsigned swizzle)
{
	LLVMValueRef idx = lp_build_const_int32(bld_base->base.gallivm,
			radeon_llvm_reg_index_soa(reg->Register.Index, swizzle));
	LLVMValueRef cval = build_intrinsic(bld_base->base.gallivm->builder,
		"llvm.AMDGPU.load.const", bld_base->base.elem_type,
		&idx, 1, LLVMReadNoneAttribute);

	return bitcast(bld_base, type, cval);
}

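/* Load a system value by reading the corresponding channel of input
 * register 0 with llvm.R600.load.input and caching the result in
 * ctx->system_values for later fetches.
 */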
static void llvm_load_system_value(
		struct radeon_llvm_context * ctx,
		unsigned index,
		const struct tgsi_full_declaration *decl)
{
	unsigned chan;

	switch (decl->Semantic.Name) {
	case TGSI_SEMANTIC_INSTANCEID: chan = 3; break;
	case TGSI_SEMANTIC_VERTEXID: chan = 0; break;
	default:
		assert(!"unknown system value");
		/* Avoid reading an uninitialized value when asserts are
		 * compiled out. */
		chan = 0;
	}

	LLVMValueRef reg = lp_build_const_int32(
			ctx->soa.bld_base.base.gallivm, chan);
	ctx->system_values[index] = build_intrinsic(
			ctx->soa.bld_base.base.gallivm->builder,
			"llvm.R600.load.input",
			ctx->soa.bld_base.base.elem_type, &reg, 1,
			LLVMReadNoneAttribute);
}

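/* Return the previously loaded system value, bitcast to the requested
 * TGSI type.
 */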
static LLVMValueRef llvm_fetch_system_value(
		struct lp_build_tgsi_context * bld_base,
		const struct tgsi_full_src_register *reg,
		enum tgsi_opcode_type type,
		unsigned swizzle)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	LLVMValueRef cval = ctx->system_values[reg->Register.Index];
	return bitcast(bld_base, type, cval);
}

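/* Load all four channels of a shader input register with
 * llvm.R600.load.input, offsetting the SoA index past the reserved
 * registers.
 */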
static void llvm_load_input(
	struct radeon_llvm_context * ctx,
	unsigned input_index,
	const struct tgsi_full_declaration *decl)
{
	unsigned chan;

	for (chan = 0; chan < 4; chan++) {
		unsigned soa_index = radeon_llvm_reg_index_soa(input_index,
								chan);

		/* The * 4 is assuming that we are in soa mode. */
		LLVMValueRef reg = lp_build_const_int32(
				ctx->soa.bld_base.base.gallivm,
				soa_index + (ctx->reserved_reg_count * 4));
		ctx->inputs[soa_index] = build_intrinsic(
				ctx->soa.bld_base.base.gallivm->builder,
				"llvm.R600.load.input",
				ctx->soa.bld_base.base.elem_type, &reg, 1,
				LLVMReadNoneAttribute);
	}
}

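/* Shader prologue: mark each channel of the special input registers with
 * llvm.AMDGPU.reserve.reg so the backend keeps them out of normal
 * register allocation.
 */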
static void llvm_emit_prologue(struct lp_build_tgsi_context * bld_base)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct lp_build_context * base = &bld_base->base;
	unsigned i;

	/* Reserve special input registers */
	for (i = 0; i < ctx->reserved_reg_count; i++) {
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef reg_index = lp_build_const_int32(
					base->gallivm,
					radeon_llvm_reg_index_soa(i, chan));
			lp_build_intrinsic_unary(base->gallivm->builder,
				"llvm.AMDGPU.reserve.reg",
				LLVMVoidTypeInContext(base->gallivm->context),
				reg_index);
		}
	}
}

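/* Shader epilogue: load every output register channel and emit an
 * llvm.AMDGPU.store.output for it, which the backend later turns into
 * export instructions.
 */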
static void llvm_emit_epilogue(struct lp_build_tgsi_context * bld_base)
{
	struct radeon_llvm_context * ctx = radeon_llvm_context(bld_base);
	struct lp_build_context * base = &bld_base->base;
	unsigned i;

	/* Add the necessary export instructions */
	for (i = 0; i < ctx->output_reg_count; i++) {
		unsigned chan;
		for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
			LLVMValueRef output;
			unsigned adjusted_reg_idx = i +
					ctx->reserved_reg_count;
			LLVMValueRef reg_index = lp_build_const_int32(
				base->gallivm,
				radeon_llvm_reg_index_soa(adjusted_reg_idx, chan));

			output = LLVMBuildLoad(base->gallivm->builder,
				ctx->soa.outputs[i][chan], "");

			lp_build_intrinsic_binary(
				base->gallivm->builder,
				"llvm.AMDGPU.store.output",
				LLVMVoidTypeInContext(base->gallivm->context),
				output, reg_index);
		}
	}
}

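/* Emit a texture instruction (used for TEX-style opcodes and DDX/DDY):
 * forward the fetched coordinate arguments, then append the sampler index
 * and the texture target before calling the intrinsic named by the action.
 */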
static void llvm_emit_tex(
	const struct lp_build_tgsi_action * action,
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct gallivm_state * gallivm = bld_base->base.gallivm;
	LLVMValueRef args[6];
	unsigned c, sampler_src;

	assert(emit_data->arg_count + 2 <= Elements(args));

	for (c = 0; c < emit_data->arg_count; ++c)
		args[c] = emit_data->args[c];

	sampler_src = emit_data->inst->Instruction.NumSrcRegs - 1;

	args[c++] = lp_build_const_int32(gallivm,
					emit_data->inst->Src[sampler_src].Register.Index);
	args[c++] = lp_build_const_int32(gallivm,
					emit_data->inst->Texture.Texture);

	emit_data->output[0] = build_intrinsic(gallivm->builder,
					action->intr_name,
					emit_data->dst_type, args, c, LLVMReadNoneAttribute);
}

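/* Build the two vec4 operands for the dot-product opcodes: DP2 and DP3
 * get their unused channels zeroed, and DPH forces src0.w to 1.0, so all
 * of them can be lowered to a single 4-component dot product.
 */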
static void dp_fetch_args(
	struct lp_build_tgsi_context * bld_base,
	struct lp_build_emit_data * emit_data)
{
	struct lp_build_context * base = &bld_base->base;
	unsigned chan;
	LLVMValueRef elements[2][4];
	unsigned opcode = emit_data->inst->Instruction.Opcode;
	unsigned dp_components = (opcode == TGSI_OPCODE_DP2 ? 2 :
					(opcode == TGSI_OPCODE_DP3 ? 3 : 4));

	for (chan = 0; chan < dp_components; chan++) {
		elements[0][chan] = lp_build_emit_fetch(bld_base,
						emit_data->inst, 0, chan);
		elements[1][chan] = lp_build_emit_fetch(bld_base,
						emit_data->inst, 1, chan);
	}

	for ( ; chan < 4; chan++) {
		elements[0][chan] = base->zero;
		elements[1][chan] = base->zero;
	}

	/* Fix up for DPH: the W channel of the first source is treated as 1.0 */
	if (opcode == TGSI_OPCODE_DPH) {
		elements[0][TGSI_CHAN_W] = base->one;
	}

	emit_data->args[0] = lp_build_gather_values(bld_base->base.gallivm,
							elements[0], 4);
	emit_data->args[1] = lp_build_gather_values(bld_base->base.gallivm,
							elements[1], 4);
	emit_data->arg_count = 2;

	emit_data->dst_type = base->elem_type;
}

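/* Shared action for the dot-product opcodes: operands are prepared by
 * dp_fetch_args and lowered to the llvm.AMDGPU.dp4 intrinsic.
 */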
static struct lp_build_tgsi_action dot_action = {
	.fetch_args = dp_fetch_args,
	.emit = build_tgsi_intrinsic_nomem,
	.intr_name = "llvm.AMDGPU.dp4"
};

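/* Translate a TGSI shader into an LLVM module using the common radeon_llvm
 * infrastructure, hooking in the r600-specific input, output, constant and
 * system value handling plus the texture and dot-product lowering above.
 */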
LLVMModuleRef r600_tgsi_llvm(
	struct radeon_llvm_context * ctx,
	const struct tgsi_token * tokens)
{
	struct tgsi_shader_info shader_info;
	struct lp_build_tgsi_context * bld_base = &ctx->soa.bld_base;

	radeon_llvm_context_init(ctx);
	tgsi_scan_shader(tokens, &shader_info);

	bld_base->info = &shader_info;
	bld_base->userdata = ctx;
	bld_base->emit_fetch_funcs[TGSI_FILE_CONSTANT] = llvm_fetch_const;
	bld_base->emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = llvm_fetch_system_value;
	bld_base->emit_prologue = llvm_emit_prologue;
	bld_base->emit_epilogue = llvm_emit_epilogue;
	ctx->userdata = ctx;
	ctx->load_input = llvm_load_input;
	ctx->load_system_value = llvm_load_system_value;

	bld_base->op_actions[TGSI_OPCODE_DP2] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DP3] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DP4] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DPH] = dot_action;
	bld_base->op_actions[TGSI_OPCODE_DDX].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_DDY].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TEX].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXB].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXD].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXL].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXF].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXQ].emit = llvm_emit_tex;
	bld_base->op_actions[TGSI_OPCODE_TXP].emit = llvm_emit_tex;

	lp_build_tgsi_llvm(bld_base, tokens);

	radeon_llvm_finalize_module(ctx);

	return ctx->gallivm.module;
}

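/* Map a radeon chip family to the GPU name string expected by the LLVM
 * backend; families that share an ISA map to the same string.
 */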
const char * r600_llvm_gpu_string(enum radeon_family family)
{
	const char * gpu_family;

	switch (family) {
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RS780:
	case CHIP_RS880:
	case CHIP_RV710:
		gpu_family = "rv710";
		break;
	case CHIP_RV730:
		gpu_family = "rv730";
		break;
	case CHIP_RV670:
	case CHIP_RV740:
	case CHIP_RV770:
		gpu_family = "rv770";
		break;
	case CHIP_PALM:
	case CHIP_CEDAR:
		gpu_family = "cedar";
		break;
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_REDWOOD:
		gpu_family = "redwood";
		break;
	case CHIP_JUNIPER:
		gpu_family = "juniper";
		break;
	case CHIP_HEMLOCK:
	case CHIP_CYPRESS:
		gpu_family = "cypress";
		break;
	case CHIP_BARTS:
		gpu_family = "barts";
		break;
	case CHIP_TURKS:
		gpu_family = "turks";
		break;
	case CHIP_CAICOS:
		gpu_family = "caicos";
		break;
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
		gpu_family = "cayman";
		break;
	default:
		gpu_family = "";
		fprintf(stderr, "Chip not supported by the r600 LLVM "
			"backend, please file a bug at bugs.freedesktop.org\n");
		break;
	}
	return gpu_family;
}

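/* Compile an LLVM module to r600 machine code through the shared
 * radeon_llvm_compile() helper, using the GPU string for the chip family.
 */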
unsigned r600_llvm_compile(
	LLVMModuleRef mod,
	unsigned char ** inst_bytes,
	unsigned * inst_byte_count,
	enum radeon_family family,
	unsigned dump)
{
	const char * gpu_family = r600_llvm_gpu_string(family);
	return radeon_llvm_compile(mod, inst_bytes, inst_byte_count,
							gpu_family, dump);
}

#endif /* R600_USE_LLVM || HAVE_OPENCL */