/* r600_pipe.h — revision cb922b63eba1d75706354614bc5de4d39dbe9ad3 */
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#ifndef R600_PIPE_H
#define R600_PIPE_H

#include "util/u_slab.h"
#include "r600.h"
#include "r600_llvm.h"
#include "r600_public.h"
#include "r600_shader.h"
#include "r600_resource.h"
#include "evergreen_compute.h"

#define R600_MAX_CONST_BUFFERS 2
#define R600_MAX_CONST_BUFFER_SIZE 4096

/* 1 when compiling for a big-endian host, 0 otherwise. */
#ifdef PIPE_ARCH_BIG_ENDIAN
#define R600_BIG_ENDIAN 1
#else
#define R600_BIG_ENDIAN 0
#endif

enum r600_atom_flags {
	/* When set, atoms are added at the beginning of the dirty list
	 * instead of the end. */
	EMIT_EARLY = (1 << 0)
};

/* This encapsulates a state or an operation which can emitted into the GPU
 * command stream. It's not limited to states only, it can be used for anything
 * that wants to write commands into the CS (e.g. cache flushes). */
struct r600_atom {
	/* Callback that writes this atom's commands into the CS. */
	void (*emit)(struct r600_context *ctx, struct r600_atom *state);

	unsigned		num_dw;   /* number of dwords this atom emits */
	enum r600_atom_flags	flags;
	bool			dirty;    /* true while queued on rctx->dirty_states */

	struct list_head	head;     /* link in the context's dirty list */
};

/* This is an atom containing GPU commands that never change.
 * This is supposed to be copied directly into the CS. */
struct r600_command_buffer {
	struct r600_atom atom;
	uint32_t *buf;          /* the pre-built dwords; atom.num_dw are valid */
	unsigned max_num_dw;    /* allocated capacity of buf in dwords */
	unsigned pkt_flags;     /* OR'ed into PKT3 headers (e.g. compute mode) */
};

struct r600_surface_sync_cmd {
	struct r600_atom atom;
	unsigned flush_flags; /* CP_COHER_CNTL */
};

struct r600_db_misc_state {
	struct r600_atom atom;
	bool occlusion_query_enabled;
	bool flush_depthstencil_through_cb;
	bool copy_depth, copy_stencil;
};

struct r600_cb_misc_state {
	struct r600_atom atom;
	unsigned cb_color_control; /* this comes from blend state */
	unsigned blend_colormask; /* 8*4 bits for 8 RGBA colorbuffers */
	unsigned nr_cbufs;
	unsigned nr_ps_color_outputs;
	bool multiwrite;
	bool dual_src_blend;
};

struct r600_alphatest_state {
	struct r600_atom atom;
	unsigned sx_alpha_test_control; /* this comes from dsa state */
	unsigned sx_alpha_ref; /* this comes from dsa state */
	bool bypass;
	bool cb0_export_16bpc; /* from set_framebuffer_state */
};

struct r600_cs_shader_state {
	struct r600_atom atom;
	struct r600_pipe_compute *shader;
};

/* Indices into r600_context::states[]. */
enum r600_pipe_state_id {
	R600_PIPE_STATE_BLEND = 0,
	R600_PIPE_STATE_BLEND_COLOR,
	R600_PIPE_STATE_CONFIG,
	R600_PIPE_STATE_SEAMLESS_CUBEMAP,
	R600_PIPE_STATE_CLIP,
	R600_PIPE_STATE_SCISSOR,
	R600_PIPE_STATE_VIEWPORT,
	R600_PIPE_STATE_RASTERIZER,
	R600_PIPE_STATE_VGT,
	R600_PIPE_STATE_FRAMEBUFFER,
	R600_PIPE_STATE_DSA,
	R600_PIPE_STATE_STENCIL_REF,
	R600_PIPE_STATE_PS_SHADER,
	R600_PIPE_STATE_VS_SHADER,
	R600_PIPE_STATE_CONSTANT,
	R600_PIPE_STATE_SAMPLER,
	R600_PIPE_STATE_RESOURCE,
	R600_PIPE_STATE_POLYGON_OFFSET,
	R600_PIPE_STATE_FETCH_SHADER,
	R600_PIPE_STATE_SPI,
	R600_PIPE_NSTATES
};

struct compute_memory_pool;
void compute_memory_pool_delete(struct compute_memory_pool* pool);
struct compute_memory_pool* compute_memory_pool_new(
	struct r600_screen *rscreen);

struct r600_pipe_fences {
	struct r600_resource		*bo;
	unsigned			*data;
	unsigned			next_index;
	/* linked list of preallocated blocks */
	struct list_head		blocks;
	/* linked list of freed fences */
	struct list_head		pool;
	pipe_mutex			mutex;
};

struct r600_screen {
	struct pipe_screen		screen;
	struct radeon_winsys		*ws;
	unsigned			family;
	enum chip_class			chip_class;
	struct radeon_info		info;
	bool				has_streamout;
	struct r600_tiling_info		tiling_info;
	struct r600_pipe_fences		fences;

	/*for compute global memory binding, we allocate stuff here, instead of
	 * buffers.
	 * XXX: Not sure if this is the best place for global_pool.  Also,
	 * it's not thread safe, so it won't work with multiple contexts. */
	struct compute_memory_pool *global_pool;
};

struct r600_pipe_sampler_view {
	struct pipe_sampler_view	base;
	struct r600_resource		*tex_resource;
	uint32_t			tex_resource_words[8];
};

struct r600_pipe_rasterizer {
	struct r600_pipe_state		rstate;
	boolean				flatshade;
	boolean				two_side;
	unsigned			sprite_coord_enable;
	unsigned                        clip_plane_enable;
	unsigned			pa_sc_line_stipple;
	unsigned			pa_cl_clip_cntl;
	float				offset_units;
	float				offset_scale;
	bool				scissor_enable;
};

struct r600_pipe_blend {
	struct r600_pipe_state		rstate;
	unsigned			cb_target_mask;
	unsigned			cb_color_control;
	bool				dual_src_blend;
};

struct r600_pipe_dsa {
	struct r600_pipe_state		rstate;
	unsigned			alpha_ref;
	ubyte				valuemask[2];
	ubyte				writemask[2];
	unsigned                        sx_alpha_test_control;
};

struct r600_vertex_element
{
	unsigned			count;
	struct pipe_vertex_element	elements[PIPE_MAX_ATTRIBS];
	struct r600_resource		*fetch_shader;
	unsigned			fs_size;
	struct r600_pipe_state		rstate;
};

struct r600_pipe_shader;

/* A shader selector owns a list of shader variants; "current" is the one
 * matching the present context state. */
struct r600_pipe_shader_selector {
	struct r600_pipe_shader *current;

	struct tgsi_token       *tokens;
	struct pipe_stream_output_info  so;

	unsigned	num_shaders;

	/* PIPE_SHADER_[VERTEX|FRAGMENT|...] */
	unsigned	type;

	unsigned	nr_ps_max_color_exports;
};

struct r600_pipe_shader {
	struct r600_pipe_shader_selector *selector;
	struct r600_pipe_shader	*next_variant;  /* singly-linked variant list */
	struct r600_shader		shader;
	struct r600_pipe_state		rstate;
	struct r600_resource		*bo;
	struct r600_resource		*bo_fetch;
	struct r600_vertex_element	vertex_elements;
	unsigned	sprite_coord_enable;
	unsigned	flatshade;
	unsigned	pa_cl_vs_out_cntl;
	unsigned	nr_ps_color_outputs;
	unsigned	key;            /* variant key this shader was compiled for */
	unsigned		db_shader_control;
	unsigned		ps_depth_export;
};

struct r600_pipe_sampler_state {
	struct r600_pipe_state		rstate;
	boolean seamless_cube_map;
};

/* needed for blitter save */
#define NUM_TEX_UNITS 16

struct r600_samplerview_state
{
	struct r600_atom		atom;
	struct r600_pipe_sampler_view	*views[NUM_TEX_UNITS];
	uint32_t			enabled_mask;
	uint32_t			dirty_mask;
	uint32_t			depth_texture_mask; /* which textures are depth */
};

struct r600_textures_info {
	struct r600_samplerview_state	views;

	struct r600_pipe_sampler_state	*samplers[NUM_TEX_UNITS];
	unsigned			n_samplers;
	bool				samplers_dirty;
	bool				is_array_sampler[NUM_TEX_UNITS];
};

struct r600_fence {
	struct pipe_reference		reference;
	unsigned			index; /* in the shared bo */
	struct r600_resource		*sleep_bo;
	struct list_head		head;
};

#define FENCE_BLOCK_SIZE 16

struct r600_fence_block {
	struct r600_fence		fences[FENCE_BLOCK_SIZE];
	struct list_head		head;
};

#define R600_CONSTANT_ARRAY_SIZE 256
#define R600_RESOURCE_ARRAY_SIZE 160

struct r600_stencil_ref
{
	ubyte ref_value[2];
	ubyte valuemask[2];
	ubyte writemask[2];
};

struct r600_constbuf_state
{
	struct r600_atom		atom;
	struct pipe_constant_buffer	cb[PIPE_MAX_CONSTANT_BUFFERS];
	uint32_t			enabled_mask;
	uint32_t			dirty_mask;
};

struct r600_vertexbuf_state
{
	struct r600_atom		atom;
	struct pipe_vertex_buffer	vb[PIPE_MAX_ATTRIBS];
	uint32_t			enabled_mask; /* non-NULL buffers */
	uint32_t			dirty_mask;
};

struct r600_context {
	struct pipe_context		context;
	struct blitter_context		*blitter;
	enum radeon_family		family;
	enum chip_class			chip_class;
	boolean				has_vertex_cache;
	unsigned			r6xx_num_clause_temp_gprs;
	void				*custom_dsa_flush;
	struct r600_screen		*screen;
	struct radeon_winsys		*ws;
	struct r600_pipe_state		*states[R600_PIPE_NSTATES];
	struct r600_vertex_element	*vertex_elements;
	struct pipe_framebuffer_state	framebuffer;
	unsigned			compute_cb_target_mask;
	unsigned			db_shader_control;
	unsigned			pa_sc_line_stipple;
	unsigned			pa_cl_clip_cntl;
	/* for saving when using blitter */
	struct pipe_stencil_ref		stencil_ref;
	struct pipe_viewport_state	viewport;
	struct pipe_clip_state		clip;
	struct r600_pipe_shader_selector 	*ps_shader;
	struct r600_pipe_shader_selector 	*vs_shader;
	struct r600_pipe_rasterizer	*rasterizer;
	struct r600_pipe_state          vgt;
	struct r600_pipe_state          spi;
	struct pipe_query		*current_render_cond;
	unsigned			current_render_cond_mode;
	struct pipe_query		*saved_render_cond;
	unsigned			saved_render_cond_mode;
	/* shader information */
	boolean				two_side;
	boolean				spi_dirty;
	unsigned			sprite_coord_enable;
	boolean				flatshade;
	boolean				export_16bpc;
	unsigned			nr_cbufs;

	struct u_upload_mgr	        *uploader;
	struct util_slab_mempool	pool_transfers;

	unsigned default_ps_gprs, default_vs_gprs;

	/* States based on r600_atom. */
	struct list_head		dirty_states;
	struct r600_command_buffer	start_cs_cmd; /* invariant state mostly */
	/** Compute specific registers initializations.  The start_cs_cmd atom
	 *  must be emitted before start_compute_cs_cmd. */
	struct r600_command_buffer	start_compute_cs_cmd;
	struct r600_surface_sync_cmd	surface_sync_cmd;
	struct r600_atom		r6xx_flush_and_inv_cmd;
	struct r600_alphatest_state	alphatest_state;
	struct r600_cb_misc_state	cb_misc_state;
	struct r600_db_misc_state	db_misc_state;
	/** Vertex buffers for fetch shaders */
	struct r600_vertexbuf_state	vertex_buffer_state;
	/** Vertex buffers for compute shaders */
	struct r600_vertexbuf_state	cs_vertex_buffer_state;
	struct r600_constbuf_state	vs_constbuf_state;
	struct r600_constbuf_state	ps_constbuf_state;
	struct r600_textures_info	vs_samplers;
	struct r600_textures_info	ps_samplers;
	struct r600_cs_shader_state	cs_shader_state;

	struct radeon_winsys_cs	*cs;

	struct r600_range	*range;
	unsigned		nblocks;
	struct r600_block	**blocks;
	struct list_head	dirty;
	struct list_head	enable_list;
	unsigned		pm4_dirty_cdwords;
	unsigned		ctx_pm4_ndwords;

	/* The list of active queries. Only one query of each type can be active. */
	int			num_occlusion_queries;

	/* Manage queries in two separate groups:
	 * The timer ones and the others (streamout, occlusion).
	 *
	 * We do this because we should only suspend non-timer queries for u_blitter,
	 * and later if the non-timer queries are suspended, the context flush should
	 * only suspend and resume the timer queries. */
	struct list_head	active_timer_queries;
	unsigned		num_cs_dw_timer_queries_suspend;
	struct list_head	active_nontimer_queries;
	unsigned		num_cs_dw_nontimer_queries_suspend;

	unsigned		num_cs_dw_streamout_end;

	unsigned		backend_mask;
	unsigned		max_db; /* for OQ */
	unsigned		flags;
	boolean			predicate_drawing;

	unsigned		num_so_targets;
	struct r600_so_target	*so_targets[PIPE_MAX_SO_BUFFERS];
	boolean			streamout_start;
	unsigned		streamout_append_bitmask;

	/* There is no scissor enable bit on r6xx, so we must use a workaround.
	 * These track the current scissor state. */
	bool			scissor_enable;
	struct pipe_scissor_state scissor_state;

	/* With rasterizer discard, there doesn't have to be a pixel shader.
	 * In that case, we bind this one: */
	void			*dummy_pixel_shader;

	boolean			dual_src_blend;

	/* Index buffer. */
	struct pipe_index_buffer index_buffer;
};

/* Emit one atom into the CS, mark it clean and unlink it from the dirty
 * list (if it was ever linked). */
static INLINE void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom)
{
	atom->emit(rctx, atom);
	atom->dirty = false;
	if (atom->head.next && atom->head.prev)
		LIST_DELINIT(&atom->head);
}

/* Queue an atom for emission; EMIT_EARLY atoms go to the head of the
 * dirty list, everything else to the tail.  Idempotent while dirty. */
static INLINE void r600_atom_dirty(struct r600_context *rctx, struct r600_atom *state)
{
	if (!state->dirty) {
		if (state->flags & EMIT_EARLY) {
			LIST_ADD(&state->head, &rctx->dirty_states);
		} else {
			LIST_ADDTAIL(&state->head, &rctx->dirty_states);
		}
		state->dirty = true;
	}
}

/* evergreen_state.c */
void evergreen_init_state_functions(struct r600_context *rctx);
void evergreen_init_atom_start_cs(struct r600_context *rctx);
void evergreen_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader);
void evergreen_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader);
void evergreen_fetch_shader(struct pipe_context *ctx, struct r600_vertex_element *ve);
void *evergreen_create_db_flush_dsa(struct r600_context *rctx);
void evergreen_polygon_offset_update(struct r600_context *rctx);
boolean evergreen_is_format_supported(struct pipe_screen *screen,
				      enum pipe_format format,
				      enum pipe_texture_target target,
				      unsigned sample_count,
				      unsigned usage);
void evergreen_init_color_surface(struct r600_context *rctx,
				  struct r600_surface *surf);
void evergreen_update_dual_export_state(struct r600_context * rctx);

/* r600_blit.c */
void r600_copy_buffer(struct pipe_context *ctx, struct
		      pipe_resource *dst, unsigned dstx,
		      struct pipe_resource *src, const struct pipe_box *src_box);
void r600_init_blit_functions(struct r600_context *rctx);
void r600_blit_uncompress_depth(struct pipe_context *ctx,
		struct r600_resource_texture *texture,
		struct r600_resource_texture *staging,
		unsigned first_level, unsigned last_level,
		unsigned first_layer, unsigned last_layer);
void r600_flush_depth_textures(struct r600_context *rctx,
			       struct r600_samplerview_state *textures);

/* r600_buffer.c */
bool r600_init_resource(struct r600_screen *rscreen,
			struct r600_resource *res,
			unsigned size, unsigned alignment,
			unsigned bind, unsigned usage);
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ);

/* r600_pipe.c */
void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
		unsigned flags);

/* r600_query.c */
void r600_init_query_functions(struct r600_context *rctx);
void r600_suspend_nontimer_queries(struct r600_context *ctx);
void r600_resume_nontimer_queries(struct r600_context *ctx);
void r600_suspend_timer_queries(struct r600_context *ctx);
void r600_resume_timer_queries(struct r600_context *ctx);

/* r600_resource.c */
void r600_init_context_resource_functions(struct r600_context *r600);

/* r600_shader.c */
int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *shader);
#ifdef HAVE_OPENCL
int r600_compute_shader_create(struct pipe_context * ctx,
	LLVMModuleRef mod,  struct r600_bytecode * bytecode);
#endif
void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader);

/* r600_state.c */
void r600_set_scissor_state(struct r600_context *rctx,
			    const struct pipe_scissor_state *state);
void r600_update_sampler_states(struct r600_context *rctx);
void r600_init_state_functions(struct r600_context *rctx);
void r600_init_atom_start_cs(struct r600_context *rctx);
void r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader);
void r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader);
void r600_fetch_shader(struct pipe_context *ctx, struct r600_vertex_element *ve);
void *r600_create_db_flush_dsa(struct r600_context *rctx);
void r600_polygon_offset_update(struct r600_context *rctx);
void r600_adjust_gprs(struct r600_context *rctx);
boolean r600_is_format_supported(struct pipe_screen *screen,
				 enum pipe_format format,
				 enum pipe_texture_target target,
				 unsigned sample_count,
				 unsigned usage);
void r600_update_dual_export_state(struct r600_context * rctx);

/* r600_texture.c */
void r600_init_screen_texture_functions(struct pipe_screen *screen);
void r600_init_surface_functions(struct r600_context *r600);
uint32_t r600_translate_texformat(struct pipe_screen *screen, enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p);
unsigned r600_texture_get_offset(struct r600_resource_texture *rtex,
				 unsigned level, unsigned layer);

/* r600_translate.c */
void r600_translate_index_buffer(struct r600_context *r600,
				 struct pipe_index_buffer *ib,
				 unsigned count);

/* r600_state_common.c */
void r600_init_atom(struct r600_atom *atom,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw, enum r600_atom_flags flags);
void r600_init_common_atoms(struct r600_context *rctx);
unsigned r600_get_cb_flush_flags(struct r600_context *rctx);
void r600_texture_barrier(struct pipe_context *ctx);
void r600_set_index_buffer(struct pipe_context *ctx,
			   const struct pipe_index_buffer *ib);
void r600_vertex_buffers_dirty(struct r600_context *rctx);
void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
			     const struct pipe_vertex_buffer *input);
void r600_sampler_views_dirty(struct r600_context *rctx,
			      struct r600_samplerview_state *state);
void r600_set_sampler_views(struct r600_context *rctx,
			    struct r600_textures_info *dst,
			    unsigned count,
			    struct pipe_sampler_view **views);
void *r600_create_vertex_elements(struct pipe_context *ctx,
				  unsigned count,
				  const struct pipe_vertex_element *elements);
void r600_delete_vertex_element(struct pipe_context *ctx, void *state);
void r600_bind_blend_state(struct pipe_context *ctx, void *state);
void r600_set_blend_color(struct pipe_context *ctx,
			  const struct pipe_blend_color *state);
void r600_bind_dsa_state(struct pipe_context *ctx, void *state);
void r600_set_max_scissor(struct r600_context *rctx);
void r600_bind_rs_state(struct pipe_context *ctx, void *state);
void r600_delete_rs_state(struct pipe_context *ctx, void *state);
void r600_sampler_view_destroy(struct pipe_context *ctx,
			       struct pipe_sampler_view *state);
void r600_delete_state(struct pipe_context *ctx, void *state);
void r600_bind_vertex_elements(struct pipe_context *ctx, void *state);
void *r600_create_shader_state_ps(struct pipe_context *ctx,
				  const struct pipe_shader_state *state);
void *r600_create_shader_state_vs(struct pipe_context *ctx,
				  const struct pipe_shader_state *state);
void r600_bind_ps_shader(struct pipe_context *ctx, void *state);
void r600_bind_vs_shader(struct pipe_context *ctx, void *state);
void r600_delete_ps_shader(struct pipe_context *ctx, void *state);
void r600_delete_vs_shader(struct pipe_context *ctx, void *state);
void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state);
void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
			      struct pipe_constant_buffer *cb);
struct pipe_stream_output_target *
r600_create_so_target(struct pipe_context *ctx,
		      struct pipe_resource *buffer,
		      unsigned buffer_offset,
		      unsigned buffer_size);
void r600_so_target_destroy(struct pipe_context *ctx,
			    struct pipe_stream_output_target *target);
void r600_set_so_targets(struct pipe_context *ctx,
			 unsigned num_targets,
			 struct pipe_stream_output_target **targets,
			 unsigned append_bitmask);
void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
			       const struct pipe_stencil_ref *state);
void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info);
uint32_t r600_translate_stencil_op(int s_op);
uint32_t r600_translate_fill(uint32_t func);
unsigned r600_tex_wrap(unsigned wrap);
unsigned r600_tex_filter(unsigned filter);
unsigned r600_tex_mipfilter(unsigned filter);
unsigned r600_tex_compare(unsigned compare);

/*
 * Helpers for building command buffers
 */

#define PKT3_SET_CONFIG_REG	0x68
#define PKT3_SET_CONTEXT_REG	0x69
#define PKT3_SET_CTL_CONST      0x6F
#define PKT3_SET_LOOP_CONST                    0x6C

#define R600_CONFIG_REG_OFFSET	0x08000
#define R600_CONTEXT_REG_OFFSET 0x28000
#define R600_CTL_CONST_OFFSET   0x3CFF0
#define R600_LOOP_CONST_OFFSET                 0X0003E200
#define EG_LOOP_CONST_OFFSET               0x0003A200

#define PKT_TYPE_S(x)                   (((x) & 0x3) << 30)
#define PKT_COUNT_S(x)                  (((x) & 0x3FFF) << 16)
#define PKT3_IT_OPCODE_S(x)             (((x) & 0xFF) << 8)
#define PKT3_PREDICATE(x)               (((x) >> 0) & 0x1)
#define PKT3(op, count, predicate) (PKT_TYPE_S(3) | PKT_COUNT_S(count) | PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))

#define RADEON_CP_PACKET3_COMPUTE_MODE 0x00000002

/*Evergreen Compute packet3*/
#define PKT3C(op, count, predicate) (PKT_TYPE_S(3) | PKT3_IT_OPCODE_S(op) | PKT_COUNT_S(count) | PKT3_PREDICATE(predicate) | RADEON_CP_PACKET3_COMPUTE_MODE)

/* Append one raw dword to the command buffer.  The caller is responsible
 * for having reserved enough space (see the *_seq helpers' asserts). */
static INLINE void r600_store_value(struct r600_command_buffer *cb, unsigned value)
{
	cb->buf[cb->atom.num_dw++] = value;
}

/* Begin a SET_CONFIG_REG packet writing `num` consecutive registers
 * starting at `reg`; follow with `num` r600_store_value() calls. */
static INLINE void r600_store_config_reg_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
{
	assert(reg < R600_CONTEXT_REG_OFFSET);
	assert(cb->atom.num_dw+2+num <= cb->max_num_dw);
	cb->buf[cb->atom.num_dw++] = PKT3(PKT3_SET_CONFIG_REG, num, 0);
	cb->buf[cb->atom.num_dw++] = (reg - R600_CONFIG_REG_OFFSET) >> 2;
}

/**
 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
 * shaders.
 */
static INLINE void r600_store_context_reg_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
{
	assert(reg >= R600_CONTEXT_REG_OFFSET && reg < R600_CTL_CONST_OFFSET);
	assert(cb->atom.num_dw+2+num <= cb->max_num_dw);
	cb->buf[cb->atom.num_dw++] = PKT3(PKT3_SET_CONTEXT_REG, num, 0) | cb->pkt_flags;
	cb->buf[cb->atom.num_dw++] = (reg - R600_CONTEXT_REG_OFFSET) >> 2;
}

/**
 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
 * shaders.
 */
static INLINE void r600_store_ctl_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
{
	assert(reg >= R600_CTL_CONST_OFFSET);
	assert(cb->atom.num_dw+2+num <= cb->max_num_dw);
	cb->buf[cb->atom.num_dw++] = PKT3(PKT3_SET_CTL_CONST, num, 0) | cb->pkt_flags;
	cb->buf[cb->atom.num_dw++] = (reg - R600_CTL_CONST_OFFSET) >> 2;
}

/* Begin a SET_LOOP_CONST packet for r6xx/r7xx loop-const registers. */
static INLINE void r600_store_loop_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
{
	assert(reg >= R600_LOOP_CONST_OFFSET);
	assert(cb->atom.num_dw+2+num <= cb->max_num_dw);
	cb->buf[cb->atom.num_dw++] = PKT3(PKT3_SET_LOOP_CONST, num, 0);
	cb->buf[cb->atom.num_dw++] = (reg - R600_LOOP_CONST_OFFSET) >> 2;
}

/**
 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
 * shaders.
 */
static INLINE void eg_store_loop_const_seq(struct r600_command_buffer *cb, unsigned reg, unsigned num)
{
	assert(reg >= EG_LOOP_CONST_OFFSET);
	assert(cb->atom.num_dw+2+num <= cb->max_num_dw);
	cb->buf[cb->atom.num_dw++] = PKT3(PKT3_SET_LOOP_CONST, num, 0) | cb->pkt_flags;
	cb->buf[cb->atom.num_dw++] = (reg - EG_LOOP_CONST_OFFSET) >> 2;
}

/* Single-register convenience wrappers around the *_seq helpers. */
static INLINE void r600_store_config_reg(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_config_reg_seq(cb, reg, 1);
	r600_store_value(cb, value);
}

static INLINE void r600_store_context_reg(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_context_reg_seq(cb, reg, 1);
	r600_store_value(cb, value);
}

static INLINE void r600_store_ctl_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_ctl_const_seq(cb, reg, 1);
	r600_store_value(cb, value);
}

static INLINE void r600_store_loop_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	r600_store_loop_const_seq(cb, reg, 1);
	r600_store_value(cb, value);
}

static INLINE void eg_store_loop_const(struct r600_command_buffer *cb, unsigned reg, unsigned value)
{
	eg_store_loop_const_seq(cb, reg, 1);
	r600_store_value(cb, value);
}

void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw, enum r600_atom_flags flags);
void r600_release_command_buffer(struct r600_command_buffer *cb);

/*
 * Helpers for emitting state into a command stream directly.
 */

/* Register a relocation for `rbo` in the CS; returns the reloc index
 * scaled by 4 (the dword offset form expected by the packet stream). */
static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r600_resource *rbo,
					     enum radeon_bo_usage usage)
{
	assert(usage);
	return ctx->ws->cs_add_reloc(ctx->cs, rbo->cs_buf, usage, rbo->domains) * 4;
}

/* Append one raw dword directly to the winsys CS. */
static INLINE void r600_write_value(struct radeon_winsys_cs *cs, unsigned value)
{
	cs->buf[cs->cdw++] = value;
}

/* Copy `num` dwords from ptr into the CS. */
static INLINE void r600_write_array(struct radeon_winsys_cs *cs, unsigned num, unsigned *ptr)
{
	assert(cs->cdw+num <= RADEON_MAX_CMDBUF_DWORDS);
	memcpy(&cs->buf[cs->cdw], ptr, num * sizeof(ptr[0]));
	cs->cdw += num;
}

static INLINE void r600_write_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
	assert(reg < R600_CONTEXT_REG_OFFSET);
	assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS);
	cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONFIG_REG, num, 0);
	cs->buf[cs->cdw++] = (reg - R600_CONFIG_REG_OFFSET) >> 2;
}

static INLINE void r600_write_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
	assert(reg >= R600_CONTEXT_REG_OFFSET && reg < R600_CTL_CONST_OFFSET);
	assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS);
	cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, num, 0);
	cs->buf[cs->cdw++] = (reg - R600_CONTEXT_REG_OFFSET) >> 2;
}

static INLINE void r600_write_compute_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
	r600_write_context_reg_seq(cs, reg, num);
	/* Set the compute bit on the packet header */
	cs->buf[cs->cdw - 2] |= RADEON_CP_PACKET3_COMPUTE_MODE;
}

static INLINE void r600_write_ctl_const_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num)
{
	assert(reg >= R600_CTL_CONST_OFFSET);
	assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS);
	cs->buf[cs->cdw++] = PKT3(PKT3_SET_CTL_CONST, num, 0);
	cs->buf[cs->cdw++] = (reg - R600_CTL_CONST_OFFSET) >> 2;
}

/* Single-register convenience wrappers around the *_seq helpers. */
static INLINE void r600_write_config_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	r600_write_config_reg_seq(cs, reg, 1);
	r600_write_value(cs, value);
}

static INLINE void r600_write_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	r600_write_context_reg_seq(cs, reg, 1);
	r600_write_value(cs, value);
}

static INLINE void r600_write_compute_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	r600_write_compute_context_reg_seq(cs, reg, 1);
	r600_write_value(cs, value);
}

static INLINE void r600_write_ctl_const(struct radeon_winsys_cs *cs, unsigned reg, unsigned value)
{
	r600_write_ctl_const_seq(cs, reg, 1);
	r600_write_value(cs, value);
}

/*
 * common helpers
 */

/* Convert a float to fixed point with frac_bits fractional bits
 * (truncating conversion; no rounding or overflow check). */
static INLINE uint32_t S_FIXED(float value, uint32_t frac_bits)
{
	return value * (1 << frac_bits);
}
#define ALIGN_DIVUP(x, y) (((x) + (y) - 1) / (y))

/* Map an anisotropy level (1..16+) to the 0..4 hardware encoding. */
static inline unsigned r600_tex_aniso_filter(unsigned filter)
{
	if (filter <= 1)   return 0;
	if (filter <= 2)   return 1;
	if (filter <= 4)   return 2;
	if (filter <= 8)   return 3;
	 /* else */        return 4;
}

/* 12.4 fixed-point */
static INLINE unsigned r600_pack_float_12p4(float x)
{
	return x <= 0    ? 0 :
	       x >= 4096 ? 0xffff : x * 16;
}

/* Return the GPU virtual address of a resource's buffer. */
static INLINE uint64_t r600_resource_va(struct pipe_screen *screen, struct pipe_resource *resource)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct r600_resource *rresource = (struct r600_resource*)resource;

	return rscreen->ws->buffer_get_virtual_address(rresource->cs_buf);
}

#endif