/* radeon_common_context.h — revision d61f07318c8678901b948fdaa8ccdf37aa3203e9 */
#ifndef COMMON_CONTEXT_H
#define COMMON_CONTEXT_H

#include "main/mm.h"
#include "math/m_vector.h"
#include "texmem.h"
#include "tnl/t_context.h"
#include "main/colormac.h"

#include "radeon_debug.h"
#include "radeon_screen.h"
#include "radeon_drm.h"
#include "dri_util.h"
#include "tnl/t_vertex.h"

#include "dri_metaops.h"
struct radeon_context;

#include "radeon_bocs_wrapper.h"

/* This union is used to avoid warnings/miscompilation
   with float to uint32_t casts due to strict-aliasing */
typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;

struct radeon_context;
typedef struct radeon_context radeonContextRec;
typedef struct radeon_context *radeonContextPtr;


/* Bit flags naming the hardware texture units. */
#define TEX_0   0x1
#define TEX_1   0x2
#define TEX_2   0x4
#define TEX_3   0x8
#define TEX_4   0x10
#define TEX_5   0x20

/* Rasterizing fallbacks */
/* See corresponding strings in r200_swtcl.c */
#define RADEON_FALLBACK_TEXTURE         0x0001
#define RADEON_FALLBACK_DRAW_BUFFER     0x0002
#define RADEON_FALLBACK_STENCIL         0x0004
#define RADEON_FALLBACK_RENDER_MODE     0x0008
#define RADEON_FALLBACK_BLEND_EQ        0x0010
#define RADEON_FALLBACK_BLEND_FUNC      0x0020
#define RADEON_FALLBACK_DISABLE         0x0040
#define RADEON_FALLBACK_BORDER_MODE     0x0080
#define RADEON_FALLBACK_DEPTH_BUFFER    0x0100
#define RADEON_FALLBACK_STENCIL_BUFFER  0x0200

#define R200_FALLBACK_TEXTURE           0x01
#define R200_FALLBACK_DRAW_BUFFER       0x02
#define R200_FALLBACK_STENCIL           0x04
#define R200_FALLBACK_RENDER_MODE       0x08
#define R200_FALLBACK_DISABLE           0x10
#define R200_FALLBACK_BORDER_MODE       0x20

/* Reasons for falling back from hardware TCL to software TNL. */
#define RADEON_TCL_FALLBACK_RASTER            0x1   /* rasterization */
#define RADEON_TCL_FALLBACK_UNFILLED          0x2   /* unfilled tris */
#define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE     0x4   /* twoside tris */
#define RADEON_TCL_FALLBACK_MATERIAL          0x8   /* material in vb */
#define RADEON_TCL_FALLBACK_TEXGEN_0          0x10  /* texgen, unit 0 */
#define RADEON_TCL_FALLBACK_TEXGEN_1          0x20  /* texgen, unit 1 */
64#define RADEON_TCL_FALLBACK_TEXGEN_2 0x40 /* texgen, unit 2 */ 65#define RADEON_TCL_FALLBACK_TCL_DISABLE 0x80 /* user disable */ 66#define RADEON_TCL_FALLBACK_FOGCOORDSPEC 0x100 /* fogcoord, sep. spec light */ 67 68/* The blit width for texture uploads 69 */ 70#define BLIT_WIDTH_BYTES 1024 71 72/* Use the templated vertex format: 73 */ 74#define COLOR_IS_RGBA 75#define TAG(x) radeon##x 76#include "tnl_dd/t_dd_vertex.h" 77#undef TAG 78 79#define RADEON_RB_CLASS 0xdeadbeef 80 81struct radeon_renderbuffer 82{ 83 struct gl_renderbuffer base; 84 struct radeon_bo *bo; 85 unsigned int cpp; 86 /* unsigned int offset; */ 87 unsigned int pitch; 88 89 uint32_t draw_offset; /* FBO */ 90 /* boo Xorg 6.8.2 compat */ 91 int has_surface; 92 93 GLuint pf_pending; /**< sequence number of pending flip */ 94 GLuint vbl_pending; /**< vblank sequence number of pending flip */ 95 __DRIdrawable *dPriv; 96}; 97 98struct radeon_framebuffer 99{ 100 struct gl_framebuffer base; 101 102 struct radeon_renderbuffer *color_rb[2]; 103 104 GLuint vbl_waited; 105 106 /* buffer swap */ 107 int64_t swap_ust; 108 int64_t swap_missed_ust; 109 110 GLuint swap_count; 111 GLuint swap_missed_count; 112 113 /* Drawable page flipping state */ 114 GLboolean pf_active; 115 GLint pf_current_page; 116 GLint pf_num_pages; 117 118}; 119 120 121struct radeon_colorbuffer_state { 122 GLuint clear; 123 int roundEnable; 124 struct gl_renderbuffer *rb; 125 uint32_t draw_offset; /* offset into color renderbuffer - FBOs */ 126}; 127 128struct radeon_depthbuffer_state { 129 GLuint clear; 130 struct gl_renderbuffer *rb; 131}; 132 133struct radeon_scissor_state { 134 drm_clip_rect_t rect; 135 GLboolean enabled; 136 137 GLuint numClipRects; /* Cliprects active */ 138 GLuint numAllocedClipRects; /* Cliprects available */ 139 drm_clip_rect_t *pClipRects; 140}; 141 142struct radeon_stencilbuffer_state { 143 GLuint clear; /* rb3d_stencilrefmask value */ 144}; 145 146struct radeon_state_atom { 147 struct radeon_state_atom *next, 
*prev; 148 const char *name; /* for debug */ 149 int cmd_size; /* size in bytes */ 150 GLuint idx; 151 GLuint is_tcl; 152 GLuint *cmd; /* one or more cmd's */ 153 GLuint *lastcmd; /* one or more cmd's */ 154 GLboolean dirty; /* dirty-mark in emit_state_list */ 155 int (*check) (GLcontext *, struct radeon_state_atom *atom); /* is this state active? */ 156 void (*emit) (GLcontext *, struct radeon_state_atom *atom); 157}; 158 159struct radeon_hw_state { 160 /* Head of the linked list of state atoms. */ 161 struct radeon_state_atom atomlist; 162 int max_state_size; /* Number of bytes necessary for a full state emit. */ 163 int max_post_flush_size; /* Number of bytes necessary for post flushing emits */ 164 GLboolean is_dirty, all_dirty; 165}; 166 167 168/* Texture related */ 169typedef struct _radeon_texture_image radeon_texture_image; 170 171struct _radeon_texture_image { 172 struct gl_texture_image base; 173 174 /** 175 * If mt != 0, the image is stored in hardware format in the 176 * given mipmap tree. In this case, base.Data may point into the 177 * mapping of the buffer object that contains the mipmap tree. 178 * 179 * If mt == 0, the image is stored in normal memory pointed to 180 * by base.Data. 181 */ 182 struct _radeon_mipmap_tree *mt; 183 struct radeon_bo *bo; 184 185 int mtlevel; /** if mt != 0, this is the image's level in the mipmap tree */ 186 int mtface; /** if mt != 0, this is the image's face in the mipmap tree */ 187}; 188 189 190static INLINE radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image) 191{ 192 return (radeon_texture_image*)image; 193} 194 195 196typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr; 197 198#define RADEON_TXO_MICRO_TILE (1 << 3) 199 200/* Texture object in locally shared texture space. 201 */ 202struct radeon_tex_obj { 203 struct gl_texture_object base; 204 struct _radeon_mipmap_tree *mt; 205 206 /** 207 * This is true if we've verified that the mipmap tree above is complete 208 * and so on. 
209 */ 210 GLboolean validated; 211 /* Minimum LOD to be used during rendering */ 212 unsigned minLod; 213 /* Miximum LOD to be used during rendering */ 214 unsigned maxLod; 215 216 GLuint override_offset; 217 GLboolean image_override; /* Image overridden by GLX_EXT_tfp */ 218 GLuint tile_bits; /* hw texture tile bits used on this texture */ 219 struct radeon_bo *bo; 220 221 GLuint pp_txfilter; /* hardware register values */ 222 GLuint pp_txformat; 223 GLuint pp_txformat_x; 224 GLuint pp_txsize; /* npot only */ 225 GLuint pp_txpitch; /* npot only */ 226 GLuint pp_border_color; 227 GLuint pp_cubic_faces; /* cube face 1,2,3,4 log2 sizes */ 228 229 GLuint pp_txfilter_1; /* r300 */ 230 231 /* r700 texture states */ 232 GLuint SQ_TEX_RESOURCE0; 233 GLuint SQ_TEX_RESOURCE1; 234 GLuint SQ_TEX_RESOURCE2; 235 GLuint SQ_TEX_RESOURCE3; 236 GLuint SQ_TEX_RESOURCE4; 237 GLuint SQ_TEX_RESOURCE5; 238 GLuint SQ_TEX_RESOURCE6; 239 240 GLuint SQ_TEX_SAMPLER0; 241 GLuint SQ_TEX_SAMPLER1; 242 GLuint SQ_TEX_SAMPLER2; 243 244 GLuint TD_PS_SAMPLER0_BORDER_RED; 245 GLuint TD_PS_SAMPLER0_BORDER_GREEN; 246 GLuint TD_PS_SAMPLER0_BORDER_BLUE; 247 GLuint TD_PS_SAMPLER0_BORDER_ALPHA; 248 249 GLboolean border_fallback; 250 251 252}; 253 254static INLINE radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj) 255{ 256 return (radeonTexObj*)texObj; 257} 258 259/* occlusion query */ 260struct radeon_query_object { 261 struct gl_query_object Base; 262 struct radeon_bo *bo; 263 int curr_offset; 264 GLboolean emitted_begin; 265 266 /* Double linked list of not flushed query objects */ 267 struct radeon_query_object *prev, *next; 268}; 269 270/* Need refcounting on dma buffers: 271 */ 272struct radeon_dma_buffer { 273 int refcount; /* the number of retained regions in buf */ 274 drmBufPtr buf; 275}; 276 277struct radeon_aos { 278 struct radeon_bo *bo; /** Buffer object where vertex data is stored */ 279 int offset; /** Offset into buffer object, in bytes */ 280 int components; /** Number of 
components per vertex */ 281 int stride; /** Stride in dwords (may be 0 for repeating) */ 282 int count; /** Number of vertices */ 283}; 284 285#define DMA_BO_FREE_TIME 100 286 287struct radeon_dma_bo { 288 struct radeon_dma_bo *next, *prev; 289 struct radeon_bo *bo; 290 int expire_counter; 291}; 292 293struct radeon_dma { 294 /* Active dma region. Allocations for vertices and retained 295 * regions come from here. Also used for emitting random vertices, 296 * these may be flushed by calling flush_current(); 297 */ 298 struct radeon_dma_bo free; 299 struct radeon_dma_bo wait; 300 struct radeon_dma_bo reserved; 301 size_t current_used; /** Number of bytes allocated and forgotten about */ 302 size_t current_vertexptr; /** End of active vertex region */ 303 size_t minimum_size; 304 305 /** 306 * If current_vertexptr != current_used then flush must be non-zero. 307 * flush must be called before non-active vertex allocations can be 308 * performed. 309 */ 310 void (*flush) (GLcontext *); 311}; 312 313/* radeon_swtcl.c 314 */ 315struct radeon_swtcl_info { 316 317 GLuint RenderIndex; 318 GLuint vertex_size; 319 GLubyte *verts; 320 321 /* Fallback rasterization functions 322 */ 323 GLuint hw_primitive; 324 GLenum render_primitive; 325 GLuint numverts; 326 327 struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX]; 328 GLuint vertex_attr_count; 329 330 GLuint emit_prediction; 331 struct radeon_bo *bo; 332}; 333 334#define RADEON_MAX_AOS_ARRAYS 16 335struct radeon_tcl_info { 336 struct radeon_aos aos[RADEON_MAX_AOS_ARRAYS]; 337 GLuint aos_count; 338 struct radeon_bo *elt_dma_bo; /** Buffer object that contains element indices */ 339 int elt_dma_offset; /** Offset into this buffer object, in bytes */ 340}; 341 342struct radeon_ioctl { 343 GLuint vertex_offset; 344 GLuint vertex_max; 345 struct radeon_bo *bo; 346 GLuint vertex_size; 347}; 348 349#define RADEON_MAX_PRIMS 64 350 351struct radeon_prim { 352 GLuint start; 353 GLuint end; 354 GLuint prim; 355}; 356 357static INLINE 
GLuint radeonPackColor(GLuint cpp, 358 GLubyte r, GLubyte g, 359 GLubyte b, GLubyte a) 360{ 361 switch (cpp) { 362 case 2: 363 return PACK_COLOR_565(r, g, b); 364 case 4: 365 return PACK_COLOR_8888(a, r, g, b); 366 default: 367 return 0; 368 } 369} 370 371#define MAX_CMD_BUF_SZ (16*1024) 372 373#define MAX_DMA_BUF_SZ (64*1024) 374 375struct radeon_store { 376 GLuint statenr; 377 GLuint primnr; 378 char cmd_buf[MAX_CMD_BUF_SZ]; 379 int cmd_used; 380 int elts_start; 381}; 382 383struct radeon_dri_mirror { 384 __DRIcontext *context; /* DRI context */ 385 __DRIscreen *screen; /* DRI screen */ 386 387 drm_context_t hwContext; 388 drm_hw_lock_t *hwLock; 389 int hwLockCount; 390 int fd; 391 int drmMinor; 392}; 393 394typedef void (*radeon_tri_func) (radeonContextPtr, 395 radeonVertex *, 396 radeonVertex *, radeonVertex *); 397 398typedef void (*radeon_line_func) (radeonContextPtr, 399 radeonVertex *, radeonVertex *); 400 401typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *); 402 403#define RADEON_MAX_BOS 32 404struct radeon_state { 405 struct radeon_colorbuffer_state color; 406 struct radeon_depthbuffer_state depth; 407 struct radeon_scissor_state scissor; 408 struct radeon_stencilbuffer_state stencil; 409}; 410 411/** 412 * This structure holds the command buffer while it is being constructed. 413 * 414 * The first batch of commands in the buffer is always the state that needs 415 * to be re-emitted when the context is lost. This batch can be skipped 416 * otherwise. 
417 */ 418struct radeon_cmdbuf { 419 struct radeon_cs_manager *csm; 420 struct radeon_cs *cs; 421 int size; /** # of dwords total */ 422 unsigned int flushing:1; /** whether we're currently in FlushCmdBufLocked */ 423}; 424 425struct radeon_context { 426 GLcontext *glCtx; 427 radeonScreenPtr radeonScreen; /* Screen private DRI data */ 428 429 /* Texture object bookkeeping 430 */ 431 int texture_depth; 432 float initialMaxAnisotropy; 433 uint32_t texture_row_align; 434 uint32_t texture_rect_row_align; 435 uint32_t texture_compressed_row_align; 436 437 struct radeon_dma dma; 438 struct radeon_hw_state hw; 439 /* Rasterization and vertex state: 440 */ 441 GLuint TclFallback; 442 GLuint Fallback; 443 GLuint NewGLState; 444 DECLARE_RENDERINPUTS(tnl_index_bitset); /* index of bits for last tnl_install_attrs */ 445 446 /* Drawable, cliprect and scissor information */ 447 GLuint numClipRects; /* Cliprects for the draw buffer */ 448 drm_clip_rect_t *pClipRects; 449 unsigned int lastStamp; 450 drm_radeon_sarea_t *sarea; /* Private SAREA data */ 451 452 /* Mirrors of some DRI state */ 453 struct radeon_dri_mirror dri; 454 455 /* Busy waiting */ 456 GLuint do_usleeps; 457 GLuint do_irqs; 458 GLuint irqsEmitted; 459 drm_radeon_irq_wait_t iw; 460 461 /* Derived state - for r300 only */ 462 struct radeon_state state; 463 464 struct radeon_swtcl_info swtcl; 465 struct radeon_tcl_info tcl; 466 /* Configuration cache 467 */ 468 driOptionCache optionCache; 469 470 struct radeon_cmdbuf cmdbuf; 471 472 struct radeon_debug debug; 473 474 drm_clip_rect_t fboRect; 475 GLboolean constant_cliprect; /* use for FBO or DRI2 rendering */ 476 GLboolean front_cliprects; 477 478 /** 479 * Set if rendering has occured to the drawable's front buffer. 480 * 481 * This is used in the DRI2 case to detect that glFlush should also copy 482 * the contents of the fake front buffer to the real front buffer. 
483 */ 484 GLboolean front_buffer_dirty; 485 486 /** 487 * Track whether front-buffer rendering is currently enabled 488 * 489 * A separate flag is used to track this in order to support MRT more 490 * easily. 491 */ 492 GLboolean is_front_buffer_rendering; 493 494 /** 495 * Track whether front-buffer is the current read target. 496 * 497 * This is closely associated with is_front_buffer_rendering, but may 498 * be set separately. The DRI2 fake front buffer must be referenced 499 * either way. 500 */ 501 GLboolean is_front_buffer_reading; 502 503 struct dri_metaops meta; 504 505 struct { 506 struct radeon_query_object *current; 507 struct radeon_state_atom queryobj; 508 } query; 509 510 struct { 511 void (*get_lock)(radeonContextPtr radeon); 512 void (*update_viewport_offset)(GLcontext *ctx); 513 void (*emit_cs_header)(struct radeon_cs *cs, radeonContextPtr rmesa); 514 void (*swtcl_flush)(GLcontext *ctx, uint32_t offset); 515 void (*pre_emit_atoms)(radeonContextPtr rmesa); 516 void (*pre_emit_state)(radeonContextPtr rmesa); 517 void (*fallback)(GLcontext *ctx, GLuint bit, GLboolean mode); 518 void (*free_context)(GLcontext *ctx); 519 void (*emit_query_finish)(radeonContextPtr radeon); 520 void (*update_scissor)(GLcontext *ctx); 521 } vtbl; 522}; 523 524#define RADEON_CONTEXT(glctx) ((radeonContextPtr)(ctx->DriverCtx)) 525 526static inline __DRIdrawable* radeon_get_drawable(radeonContextPtr radeon) 527{ 528 return radeon->dri.context->driDrawablePriv; 529} 530 531static inline __DRIdrawable* radeon_get_readable(radeonContextPtr radeon) 532{ 533 return radeon->dri.context->driReadablePriv; 534} 535 536/** 537 * This function takes a float and packs it into a uint32_t 538 */ 539static INLINE uint32_t radeonPackFloat32(float fl) 540{ 541 union { 542 float fl; 543 uint32_t u; 544 } u; 545 546 u.fl = fl; 547 return u.u; 548} 549 550/* This is probably wrong for some values, I need to test this 551 * some more. Range checking would be a good idea also.. 
552 * 553 * But it works for most things. I'll fix it later if someone 554 * else with a better clue doesn't 555 */ 556static INLINE uint32_t radeonPackFloat24(float f) 557{ 558 float mantissa; 559 int exponent; 560 uint32_t float24 = 0; 561 562 if (f == 0.0) 563 return 0; 564 565 mantissa = frexpf(f, &exponent); 566 567 /* Handle -ve */ 568 if (mantissa < 0) { 569 float24 |= (1 << 23); 570 mantissa = mantissa * -1.0; 571 } 572 /* Handle exponent, bias of 63 */ 573 exponent += 62; 574 float24 |= (exponent << 16); 575 /* Kill 7 LSB of mantissa */ 576 float24 |= (radeonPackFloat32(mantissa) & 0x7FFFFF) >> 7; 577 578 return float24; 579} 580 581GLboolean radeonInitContext(radeonContextPtr radeon, 582 struct dd_function_table* functions, 583 const __GLcontextModes * glVisual, 584 __DRIcontext * driContextPriv, 585 void *sharedContextPrivate); 586 587void radeonCleanupContext(radeonContextPtr radeon); 588GLboolean radeonUnbindContext(__DRIcontext * driContextPriv); 589void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable, 590 GLboolean front_only); 591GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv, 592 __DRIdrawable * driDrawPriv, 593 __DRIdrawable * driReadPriv); 594extern void radeonDestroyContext(__DRIcontext * driContextPriv); 595 596#endif 597