vc4_qir.h revision ace0d810e56a1e2978fc3ac237158918ebe2a23c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef VC4_QIR_H
#define VC4_QIR_H

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "util/macros.h"
#include "compiler/nir/nir.h"
#include "util/list.h"
#include "util/u_math.h"

#include "vc4_screen.h"
#include "vc4_qpu_defines.h"
#include "vc4_qpu.h"
#include "kernel/vc4_packet.h"
#include "pipe/p_state.h"

struct nir_builder;

/** Register files that a struct qreg may refer to. */
enum qfile {
        QFILE_NULL,
        QFILE_TEMP,
        QFILE_VARY,
        QFILE_UNIF,
        QFILE_VPM,
        QFILE_TLB_COLOR_WRITE,
        QFILE_TLB_COLOR_WRITE_MS,
        QFILE_TLB_Z_WRITE,
        QFILE_TLB_STENCIL_SETUP,

        /* Payload registers that aren't in the physical register file, so we
         * can just use the corresponding qpu_reg at qpu_emit time.
         */
        QFILE_FRAG_X,
        QFILE_FRAG_Y,
        QFILE_FRAG_REV_FLAG,
        QFILE_QPU_ELEMENT,

        /**
         * Stores an immediate value in the index field that will be used
         * directly by qpu_load_imm().
         */
        QFILE_LOAD_IMM,

        /**
         * Stores an immediate value in the index field that can be turned
         * into a small immediate field by qpu_encode_small_immediate().
         */
        QFILE_SMALL_IMM,
};

/** A reference to a register (or immediate) in some QIR register file. */
struct qreg {
        enum qfile file;
        /* Register number within the file, or the raw immediate value for
         * QFILE_LOAD_IMM / QFILE_SMALL_IMM.
         */
        uint32_t index;
        /* QPU_PACK_* / QPU_UNPACK_* modifier applied to this operand, or 0. */
        int pack;
};

/* Constructs a qreg with a zero pack field. */
static inline struct qreg qir_reg(enum qfile file, uint32_t index)
{
        return (struct qreg){file, index};
}

enum qop {
        QOP_UNDEF,
        QOP_MOV,
        QOP_FMOV,
        QOP_MMOV,
        QOP_FADD,
        QOP_FSUB,
        QOP_FMUL,
        QOP_V8MULD,
        QOP_V8MIN,
        QOP_V8MAX,
        QOP_V8ADDS,
        QOP_V8SUBS,
        QOP_MUL24,
        QOP_FMIN,
        QOP_FMAX,
        QOP_FMINABS,
        QOP_FMAXABS,
        QOP_ADD,
        QOP_SUB,
        QOP_SHL,
        QOP_SHR,
        QOP_ASR,
        QOP_MIN,
        QOP_MAX,
        QOP_AND,
        QOP_OR,
        QOP_XOR,
        QOP_NOT,

        QOP_FTOI,
        QOP_ITOF,
        QOP_RCP,
        QOP_RSQ,
        QOP_EXP2,
        QOP_LOG2,
        QOP_VW_SETUP,
        QOP_VR_SETUP,
        QOP_TLB_COLOR_READ,
        QOP_MS_MASK,
        QOP_VARY_ADD_C,

        QOP_FRAG_Z,
        QOP_FRAG_W,

        /** Texture x coordinate parameter write */
        QOP_TEX_S,
        /** Texture y coordinate parameter write */
        QOP_TEX_T,
        /** Texture border color parameter or cube map z coordinate write */
        QOP_TEX_R,
        /** Texture LOD bias parameter write */
        QOP_TEX_B,

        /**
         * Texture-unit 4-byte read with address provided direct in S
         * coordinate.
         *
         * The first operand is the offset from the start of the UBO, and the
         * second is the uniform that has the UBO's base pointer.
         */
        QOP_TEX_DIRECT,

        /**
         * Signal of texture read being necessary and then reading r4 into
         * the destination
         */
        QOP_TEX_RESULT,

        /**
         * Insert the signal for switching threads in a threaded fragment
         * shader.  No value can be live in an accumulator across a thrsw.
         *
         * At the QPU level, this will have several delay slots before the
         * switch happens.  Those slots are the responsibility of the
         * scheduler.
         */
        QOP_THRSW,

        /* 32-bit immediate loaded to each SIMD channel */
        QOP_LOAD_IMM,

        /* 32-bit immediate divided into 16 2-bit unsigned int values and
         * loaded to each corresponding SIMD channel.
         */
        QOP_LOAD_IMM_U2,
        /* 32-bit immediate divided into 16 2-bit signed int values and
         * loaded to each corresponding SIMD channel.
         */
        QOP_LOAD_IMM_I2,

        QOP_ROT_MUL,

        /* Jumps to block->successor[0] if the qinst->cond (as a
         * QPU_COND_BRANCH_*) passes, or block->successor[1] if not.  Note
         * that block->successor[1] may be unset if the condition is ALWAYS.
         */
        QOP_BRANCH,

        /* Emits an ADD from src[0] to src[1], where src[0] must be a
         * QOP_LOAD_IMM result and src[1] is a QUNIFORM_UNIFORMS_ADDRESS,
         * required by the kernel as part of its branch validation.
         */
        QOP_UNIFORMS_RESET,
};

/** A scheduled 64-bit QPU instruction, kept on a block's qpu_inst_list. */
struct queued_qpu_inst {
        struct list_head link;
        uint64_t inst;
};

/** A single QIR instruction. */
struct qinst {
        struct list_head link;

        enum qop op;
        struct qreg dst;
        /* Array of qir_get_op_nsrc(op) source operands. */
        struct qreg *src;
        /* Set if this instruction should update the condition flags. */
        bool sf;
        /* Set if cond comes from the per-channel execution mask rather than
         * an ordinary flag-setting instruction.
         */
        bool cond_is_exec_mask;
        /* QPU_COND_* under which the instruction executes. */
        uint8_t cond;
};

enum qstage {
        /**
         * Coordinate shader, runs during binning, before the VS, and just
         * outputs position.
         */
        QSTAGE_COORD,
        QSTAGE_VERT,
        QSTAGE_FRAG,
};

enum quniform_contents {
        /**
         * Indicates that a constant 32-bit value is copied from the program's
         * uniform contents.
         */
        QUNIFORM_CONSTANT,
        /**
         * Indicates that the program's uniform contents are used as an index
         * into the GL uniform storage.
         */
        QUNIFORM_UNIFORM,

        /** @{
         * Scaling factors from clip coordinates to relative to the viewport
         * center.
         *
         * This is used by the coordinate and vertex shaders to produce the
         * 32-bit entry consisting of 2 16-bit fields with 12.4 signed fixed
         * point offsets from the viewport center.
         */
        QUNIFORM_VIEWPORT_X_SCALE,
        QUNIFORM_VIEWPORT_Y_SCALE,
        /** @} */

        QUNIFORM_VIEWPORT_Z_OFFSET,
        QUNIFORM_VIEWPORT_Z_SCALE,

        QUNIFORM_USER_CLIP_PLANE,

        /**
         * A reference to a texture config parameter 0 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture type, miplevels, and such.  It will be found as a
         * parameter to the first QOP_TEX_[STRB] instruction in a sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P0,

        /**
         * A reference to a texture config parameter 1 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture width, height, filters, and wrap modes.  It will be
         * found as a parameter to the second QOP_TEX_[STRB] instruction in a
         * sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P1,

        /** A reference to a texture config parameter 2 cubemap stride uniform */
        QUNIFORM_TEXTURE_CONFIG_P2,

        QUNIFORM_TEXTURE_FIRST_LEVEL,

        QUNIFORM_TEXTURE_MSAA_ADDR,

        QUNIFORM_UBO_ADDR,

        QUNIFORM_TEXRECT_SCALE_X,
        QUNIFORM_TEXRECT_SCALE_Y,

        QUNIFORM_TEXTURE_BORDER_COLOR,

        QUNIFORM_BLEND_CONST_COLOR_X,
        QUNIFORM_BLEND_CONST_COLOR_Y,
        QUNIFORM_BLEND_CONST_COLOR_Z,
        QUNIFORM_BLEND_CONST_COLOR_W,
        QUNIFORM_BLEND_CONST_COLOR_RGBA,
        QUNIFORM_BLEND_CONST_COLOR_AAAA,

        QUNIFORM_STENCIL,

        QUNIFORM_ALPHA_REF,
        QUNIFORM_SAMPLE_MASK,

        /* Placeholder uniform that will be updated by the kernel when used by
         * an instruction writing to QPU_W_UNIFORMS_ADDRESS.
         */
        QUNIFORM_UNIFORMS_ADDRESS,
};

/** Maps a VS output slot to a VARYING_SLOT_* and component swizzle. */
struct vc4_varying_slot {
        uint8_t slot;
        uint8_t swizzle;
};

struct vc4_compiler_ubo_range {
        /**
         * offset in bytes from the start of the ubo where this range is
         * uploaded.
         *
         * Only set once used is set.
         */
        uint32_t dst_offset;

        /**
         * offset in bytes from the start of the gallium uniforms where the
         * data comes from.
         */
        uint32_t src_offset;

        /** size in bytes of this ubo range */
        uint32_t size;

        /**
         * Set if this range is used by the shader for indirect uniforms
         * access.
         */
        bool used;
};

/** Shader-variant key state shared by all stages. */
struct vc4_key {
        struct vc4_uncompiled_shader *shader_state;
        struct {
                enum pipe_format format;
                uint8_t swizzle[4];
                union {
                        struct {
                                unsigned compare_mode:1;
                                unsigned compare_func:3;
                                unsigned wrap_s:3;
                                unsigned wrap_t:3;
                                bool force_first_level:1;
                        };
                        struct {
                                uint16_t msaa_width, msaa_height;
                        };
                };
        } tex[VC4_MAX_TEXTURE_SAMPLERS];
        uint8_t ucp_enables;
};

/** Fragment-shader variant key. */
struct vc4_fs_key {
        struct vc4_key base;
        enum pipe_format color_format;
        bool depth_enabled;
        bool stencil_enabled;
        bool stencil_twoside;
        bool stencil_full_writemasks;
        bool is_points;
        bool is_lines;
        bool alpha_test;
        bool point_coord_upper_left;
        bool light_twoside;
        bool msaa;
        bool sample_coverage;
        bool sample_alpha_to_coverage;
        bool sample_alpha_to_one;
        uint8_t alpha_test_func;
        uint8_t logicop_func;
        uint32_t point_sprite_mask;

        struct pipe_rt_blend_state blend;
};

/** Vertex/coordinate-shader variant key. */
struct vc4_vs_key {
        struct vc4_key base;

        const struct vc4_fs_inputs *fs_inputs;
        enum pipe_format attr_formats[8];
        bool is_coord;
        bool per_vertex_point_size;
        bool clamp_color;
};

/** A basic block of QIR instructions.
 */
struct qblock {
        struct list_head link;

        struct list_head instructions;
        struct list_head qpu_inst_list;

        struct set *predecessors;
        struct qblock *successors[2];

        int index;

        /* Instruction IPs for the first and last instruction of the block.
         * Set by vc4_qpu_schedule.c.
         */
        uint32_t start_qpu_ip;
        uint32_t end_qpu_ip;

        /* Instruction IP for the branch instruction of the block.  Set by
         * vc4_qpu_schedule.c.
         */
        uint32_t branch_qpu_ip;

        /** @{ used by vc4_qir_live_variables.c */
        BITSET_WORD *def;
        BITSET_WORD *use;
        BITSET_WORD *live_in;
        BITSET_WORD *live_out;
        int start_ip, end_ip;
        /** @} */
};

/** Per-shader-compilation state for QIR generation and QPU emission. */
struct vc4_compile {
        struct vc4_context *vc4;
        nir_shader *s;
        nir_function_impl *impl;
        struct exec_list *cf_node_list;

        /**
         * Mapping from nir_register * or nir_ssa_def * to array of struct
         * qreg for the values.
         */
        struct hash_table *def_ht;

        /* For each temp, the instruction generating its value. */
        struct qinst **defs;
        uint32_t defs_array_size;

        /**
         * Inputs to the shader, arranged by TGSI declaration order.
         *
         * Not all fragment shader QFILE_VARY reads are present in this array.
         */
        struct qreg *inputs;
        struct qreg *outputs;
        bool msaa_per_sample_output;
        struct qreg color_reads[VC4_MAX_SAMPLES];
        struct qreg sample_colors[VC4_MAX_SAMPLES];
        uint32_t inputs_array_size;
        uint32_t outputs_array_size;
        uint32_t uniforms_array_size;

        struct vc4_compiler_ubo_range *ubo_ranges;
        uint32_t ubo_ranges_array_size;
        /** Number of uniform areas declared in ubo_ranges. */
        uint32_t num_uniform_ranges;
        /** Number of uniform areas used for indirect addressed loads. */
        uint32_t num_ubo_ranges;
        uint32_t next_ubo_dst_offset;

        /* State for whether we're executing on each channel currently.  0 if
         * yes, otherwise a block number + 1 that the channel jumped to.
         */
        struct qreg execute;

        struct qreg line_x, point_x, point_y;
        /** boolean (~0 -> true) if the fragment has been discarded. */
        struct qreg discard;
        struct qreg payload_FRAG_Z;
        struct qreg payload_FRAG_W;

        uint8_t vattr_sizes[8];

        /**
         * Array of the VARYING_SLOT_* of all FS QFILE_VARY reads.
         *
         * This includes those that aren't part of the VPM varyings, like
         * point/line coordinates.
         */
        struct vc4_varying_slot *input_slots;
        uint32_t num_input_slots;
        uint32_t input_slots_array_size;

        /**
         * An entry per outputs[] in the VS indicating what the VARYING_SLOT_*
         * of the output is.  Used to emit from the VS in the order that the
         * FS needs.
         */
        struct vc4_varying_slot *output_slots;

        struct pipe_shader_state *shader_state;
        struct vc4_key *key;
        struct vc4_fs_key *fs_key;
        struct vc4_vs_key *vs_key;

        /* Live ranges of temps. */
        int *temp_start, *temp_end;

        uint32_t *uniform_data;
        enum quniform_contents *uniform_contents;
        uint32_t uniform_array_size;
        uint32_t num_uniforms;
        uint32_t num_outputs;
        uint32_t num_texture_samples;
        uint32_t output_position_index;
        uint32_t output_color_index;
        uint32_t output_point_size_index;
        uint32_t output_sample_mask_index;

        struct qreg undef;
        enum qstage stage;
        uint32_t num_temps;

        struct list_head blocks;
        int next_block_index;
        struct qblock *cur_block;
        struct qblock *loop_cont_block;
        struct qblock *loop_break_block;

        struct list_head qpu_inst_list;

        /* Pre-QPU-scheduled instruction containing the last THRSW */
        uint64_t *last_thrsw;

        uint64_t *qpu_insts;
        uint32_t qpu_inst_count;
        uint32_t qpu_inst_size;
        uint32_t num_inputs;

        /**
         * Number of inputs from num_inputs remaining to be queued to the read
         * FIFO in the VS/CS.
         */
        uint32_t num_inputs_remaining;

        /* Number of inputs currently in the read FIFO for the VS/CS */
        uint32_t num_inputs_in_fifo;

        /** Next offset in the VPM to read from in the VS/CS */
        uint32_t vpm_read_offset;

        uint32_t program_id;
        uint32_t variant_id;

        /* Set to compile program in threaded FS mode, where SIG_THREAD_SWITCH
         * is used to hide texturing latency at the cost of limiting ourselves
         * to the bottom half of physical reg space.
         */
        bool fs_threaded;

        bool last_thrsw_at_top_level;

        bool failed;
};

/* Special nir_load_input intrinsic index for loading the current TLB
 * destination color.
 */
#define VC4_NIR_TLB_COLOR_READ_INPUT            2000000000

#define VC4_NIR_MS_MASK_OUTPUT                  2000000000

struct vc4_compile *qir_compile_init(void);
void qir_compile_destroy(struct vc4_compile *c);
struct qblock *qir_new_block(struct vc4_compile *c);
void qir_set_emit_block(struct vc4_compile *c, struct qblock *block);
void qir_link_blocks(struct qblock *predecessor, struct qblock *successor);
struct qblock *qir_entry_block(struct vc4_compile *c);
struct qblock *qir_exit_block(struct vc4_compile *c);
struct qinst *qir_inst(enum qop op, struct qreg dst,
                       struct qreg src0, struct qreg src1);
struct qinst *qir_inst4(enum qop op, struct qreg dst,
                        struct qreg a,
                        struct qreg b,
                        struct qreg c,
                        struct qreg d);
void qir_remove_instruction(struct vc4_compile *c, struct qinst *qinst);
struct qreg qir_uniform(struct vc4_compile *c,
                        enum quniform_contents contents,
                        uint32_t data);
void qir_schedule_instructions(struct vc4_compile *c);
void qir_reorder_uniforms(struct vc4_compile *c);
void qir_emit_uniform_stream_resets(struct vc4_compile *c);

struct qreg qir_emit_def(struct vc4_compile *c, struct qinst *inst);
struct qinst *qir_emit_nondef(struct vc4_compile *c, struct qinst *inst);

struct qreg qir_get_temp(struct vc4_compile *c);
void qir_calculate_live_intervals(struct vc4_compile *c);
int qir_get_op_nsrc(enum qop qop);
bool qir_reg_equals(struct qreg a, struct qreg b);
bool qir_has_side_effects(struct vc4_compile *c, struct qinst *inst);
bool qir_has_side_effect_reads(struct vc4_compile *c, struct qinst *inst);
bool qir_is_mul(struct qinst *inst);
bool qir_is_raw_mov(struct qinst *inst);
bool qir_is_tex(struct qinst *inst);
bool qir_is_float_input(struct qinst *inst);
bool qir_depends_on_flags(struct qinst *inst);
bool qir_writes_r4(struct qinst *inst);
struct qreg qir_follow_movs(struct vc4_compile *c, struct qreg reg);
uint8_t qir_channels_written(struct qinst *inst);

void qir_dump(struct vc4_compile *c);
void qir_dump_inst(struct vc4_compile *c, struct qinst *inst);
const char *qir_get_stage_name(enum qstage stage);

void qir_validate(struct vc4_compile *c);

void qir_optimize(struct vc4_compile *c);
bool qir_opt_algebraic(struct vc4_compile *c);
bool qir_opt_constant_folding(struct vc4_compile *c);
bool qir_opt_copy_propagation(struct vc4_compile *c);
bool qir_opt_dead_code(struct vc4_compile *c);
bool qir_opt_peephole_sf(struct vc4_compile *c);
bool qir_opt_small_immediates(struct vc4_compile *c);
bool qir_opt_vpm(struct vc4_compile *c);
void vc4_nir_lower_blend(nir_shader *s, struct vc4_compile *c);
void vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c);
nir_ssa_def *vc4_nir_get_swizzled_channel(struct nir_builder *b,
                                          nir_ssa_def **srcs, int swiz);
void vc4_nir_lower_txf_ms(nir_shader *s, struct vc4_compile *c);
void qir_lower_uniforms(struct vc4_compile *c);

uint32_t qpu_schedule_instructions(struct vc4_compile *c);

void qir_SF(struct vc4_compile *c, struct qreg src);

/* Returns a uniform reference for the 32-bit constant ui. */
static inline struct qreg
qir_uniform_ui(struct vc4_compile *c, uint32_t ui)
{
        return qir_uniform(c, QUNIFORM_CONSTANT, ui);
}

/* Returns a uniform reference for the float constant f (stored as its
 * 32-bit bit pattern).
 */
static inline struct qreg
qir_uniform_f(struct vc4_compile *c, float f)
{
        return qir_uniform(c, QUNIFORM_CONSTANT, fui(f));
}

/* The QIR_ALU*/QIR_NODST_* macros below generate per-opcode emit helpers:
 * qir_<NAME>() allocates a new temp destination via qir_emit_def(), while
 * qir_<NAME>_dest() writes to a caller-provided destination via
 * qir_emit_nondef().
 */
#define QIR_ALU0(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c)                                        \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
                                        c->undef, c->undef));            \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest)               \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest,             \
                                           c->undef, c->undef));         \
}

#define QIR_ALU1(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c, struct qreg a)                         \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
                                        a, c->undef));                   \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
                  struct qreg a)                                         \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a,          \
                                           c->undef));                   \
}

#define QIR_ALU2(name)                                                   \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef, a, b));    \
}                                                                        \
static inline struct qinst *                                             \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
                  struct qreg a, struct qreg b)                          \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a, b));     \
}

#define QIR_NODST_1(name)                                                \
static inline struct qinst *                                             \
qir_##name(struct vc4_compile *c, struct qreg a)                         \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,         \
                                           a, c->undef));                \
}

#define QIR_NODST_2(name)                                                \
static inline struct qinst *                                             \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,         \
                                           a, b));                       \
}

/* Generates the lazy accessor for a payload register: the first call
 * allocates a temp and inserts the defining instruction at the top of the
 * entry block; later calls return the cached temp.
 */
#define QIR_PAYLOAD(name)                                                \
static inline struct qreg                                                \
qir_##name(struct vc4_compile *c)                                        \
{                                                                        \
        struct qreg *payload = &c->payload_##name;                       \
        if (payload->file != QFILE_NULL)                                 \
                return *payload;                                         \
        *payload = qir_get_temp(c);                                      \
        struct qinst *inst = qir_inst(QOP_##name, *payload,              \
                                      c->undef, c->undef);               \
        struct qblock *entry = qir_entry_block(c);                       \
        list_add(&inst->link, &entry->instructions);                     \
        c->defs[payload->index] = inst;                                  \
        return *payload;                                                 \
}

QIR_ALU1(MOV)
QIR_ALU1(FMOV)
QIR_ALU1(MMOV)
QIR_ALU2(FADD)
QIR_ALU2(FSUB)
QIR_ALU2(FMUL)
QIR_ALU2(V8MULD)
QIR_ALU2(V8MIN)
QIR_ALU2(V8MAX)
QIR_ALU2(V8ADDS)
QIR_ALU2(V8SUBS)
QIR_ALU2(MUL24)
QIR_ALU2(FMIN)
QIR_ALU2(FMAX)
QIR_ALU2(FMINABS)
QIR_ALU2(FMAXABS)
QIR_ALU1(FTOI)
QIR_ALU1(ITOF)

QIR_ALU2(ADD)
QIR_ALU2(SUB)
QIR_ALU2(SHL)
QIR_ALU2(SHR)
QIR_ALU2(ASR)
QIR_ALU2(MIN)
QIR_ALU2(MAX)
QIR_ALU2(AND)
QIR_ALU2(OR)
QIR_ALU2(XOR)
QIR_ALU1(NOT)

QIR_ALU1(RCP)
QIR_ALU1(RSQ)
QIR_ALU1(EXP2)
QIR_ALU1(LOG2)
QIR_ALU1(VARY_ADD_C)
QIR_NODST_2(TEX_S)
QIR_NODST_2(TEX_T)
QIR_NODST_2(TEX_R)
QIR_NODST_2(TEX_B)
QIR_NODST_2(TEX_DIRECT)
QIR_PAYLOAD(FRAG_Z)
QIR_PAYLOAD(FRAG_W)
QIR_ALU0(TEX_RESULT)
QIR_ALU0(TLB_COLOR_READ)
QIR_NODST_1(MS_MASK)

/* Emits a conditional select: the result temp gets src0 when cond passes
 * and src1 when its complement passes.
 */
static inline struct qreg
qir_SEL(struct vc4_compile *c, uint8_t cond, struct qreg src0, struct qreg src1)
{
        struct qreg t = qir_get_temp(c);
        struct qinst *a = qir_MOV_dest(c, t, src0);
        struct qinst *b = qir_MOV_dest(c, t, src1);
        a->cond = cond;
        b->cond = qpu_cond_complement(cond);
        return t;
}

/* Unpacks byte i (0-3) of src as a normalized float. */
static inline struct qreg
qir_UNPACK_8_F(struct vc4_compile *c, struct qreg src, int i)
{
        struct qreg t = qir_FMOV(c, src);
        c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
        return t;
}

/* Unpacks byte i (0-3) of src as an integer. */
static inline struct qreg
qir_UNPACK_8_I(struct vc4_compile *c, struct qreg src, int i)
{
        struct qreg t = qir_MOV(c, src);
        c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
        return t;
}

/* Unpacks 16-bit half i (0-1) of src as a float. */
static inline struct qreg
qir_UNPACK_16_F(struct vc4_compile *c, struct qreg src, int i)
{
        struct qreg t = qir_FMOV(c, src);
        c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
        return t;
}

/* Unpacks 16-bit half i (0-1) of src as an integer. */
static inline struct qreg
qir_UNPACK_16_I(struct vc4_compile *c, struct qreg src, int i)
{
        struct qreg t = qir_MOV(c, src);
        c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
        return t;
}

/* Packs float val into byte chan (0-3) of dest using the MUL-unit pack. */
static inline void
qir_PACK_8_F(struct vc4_compile *c, struct qreg dest, struct qreg val, int chan)
{
        assert(!dest.pack);
        dest.pack = QPU_PACK_MUL_8A + chan;
        qir_emit_nondef(c, qir_inst(QOP_MMOV, dest, val, c->undef));
}

/* Replicates float val into all four bytes of the result. */
static inline struct qreg
qir_PACK_8888_F(struct vc4_compile *c, struct qreg val)
{
        struct qreg dest = qir_MMOV(c, val);
        c->defs[dest.index]->dst.pack = QPU_PACK_MUL_8888;
        return dest;
}

/* Emits x**y as exp2(y * log2(x)). */
static inline struct qreg
qir_POW(struct vc4_compile *c, struct qreg x, struct qreg y)
{
        return qir_EXP2(c, qir_FMUL(c,
                                    y,
                                    qir_LOG2(c, x)));
}

/* Queues val for the next VPM write. */
static inline void
qir_VPM_WRITE(struct vc4_compile *c, struct qreg val)
{
        qir_MOV_dest(c, qir_reg(QFILE_VPM, 0), val);
}

static inline struct qreg
qir_LOAD_IMM(struct vc4_compile *c, uint32_t val)
{
        return qir_emit_def(c, qir_inst(QOP_LOAD_IMM, c->undef,
                                        qir_reg(QFILE_LOAD_IMM, val), c->undef));
}

static inline struct qreg
qir_LOAD_IMM_U2(struct vc4_compile *c, uint32_t val)
{
        return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_U2, c->undef,
                                        qir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}

static inline struct qreg
qir_LOAD_IMM_I2(struct vc4_compile *c, uint32_t val)
{
        return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_I2, c->undef,
                                        qir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}

/** Shifts the multiply output to the right by rot channels */
static inline struct qreg
qir_ROT_MUL(struct vc4_compile *c, struct qreg val, uint32_t rot)
{
        return qir_emit_def(c, qir_inst(QOP_ROT_MUL, c->undef,
                                        val,
                                        qir_reg(QFILE_LOAD_IMM,
                                                QPU_SMALL_IMM_MUL_ROT + rot)));
}

/* Emits a MOV that only executes when cond passes. */
static inline struct qinst *
qir_MOV_cond(struct vc4_compile *c, uint8_t cond,
             struct qreg dest, struct qreg src)
{
        struct qinst *mov = qir_MOV_dest(c, dest, src);
        mov->cond = cond;
        return mov;
}

/* Emits a branch instruction with the given QPU_COND_BRANCH_* condition;
 * its targets are set up later via the block's successors.
 */
static inline struct qinst *
qir_BRANCH(struct vc4_compile *c, uint8_t cond)
{
        struct qinst *inst = qir_inst(QOP_BRANCH, c->undef, c->undef, c->undef);
        inst->cond = cond;
        qir_emit_nondef(c, inst);
        return inst;
}

#define qir_for_each_block(block, c)                                     \
        list_for_each_entry(struct qblock, block, &c->blocks, link)

#define qir_for_each_block_rev(block, c)                                 \
        list_for_each_entry_rev(struct qblock, block, &c->blocks, link)

/* Loop over the non-NULL members of the successors array. */
#define qir_for_each_successor(succ, block)                              \
        for (struct qblock *succ = block->successors[0];                 \
             succ != NULL;                                               \
             succ = (succ == block->successors[1] ? NULL :               \
                     block->successors[1]))

#define qir_for_each_inst(inst, block)                                   \
        list_for_each_entry(struct qinst, inst, &block->instructions, link)

#define qir_for_each_inst_rev(inst, block)                               \
        list_for_each_entry_rev(struct qinst, inst, &block->instructions, link)

#define qir_for_each_inst_safe(inst, block)                              \
        list_for_each_entry_safe(struct qinst, inst, &block->instructions, link)

#define qir_for_each_inst_inorder(inst, c)                               \
        qir_for_each_block(_block, c)                                    \
                qir_for_each_inst(inst, _block)

#endif /* VC4_QIR_H */