#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
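
/*
 * Illustration (editor's sketch, not from the BSpec): in a nearly full
 * ring the software tail trails just behind the hardware head, so e.g.
 * head == 0x48 and tail == 0x44 would put both pointers in the same
 * 64-byte cacheline with head > tail, exactly the layout the quoted
 * restriction forbids. Always keeping I915_RING_FREE_SPACE (one full
 * cacheline) unused means the tail stops short of ever producing that
 * state.
 */
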
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (__ring)->id))

#define GEN8_RING_SEMAPHORE_INIT do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while (0)
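
/*
 * Worked example (illustrative; using the ids and sizes defined in this
 * file: RCS=0, VCS=1, BCS=2, I915_NUM_RINGS=5, seqno_size=8):
 *
 *	GEN8_SIGNAL_OFFSET(vcs, BCS) = base + (1 * 5 * 8) + (8 * 2) = base + 0x38
 *	GEN8_WAIT_OFFSET(bcs, VCS)   = base + (1 * 5 * 8) + (8 * 2) = base + 0x38
 *
 * where vcs/bcs stand for the engines with those ids. The slot VCS
 * signals on behalf of BCS is thus exactly the slot BCS polls when
 * waiting on VCS; compare the signal/wait tables in struct
 * intel_engine_cs below.
 */
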
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_ACTIVE_LOOP,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u64 max_acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
};

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	struct intel_engine_cs *ring;

	/*
	 * FIXME: This backpointer is an artifact of the history of how the
	 * execlist patches came into being. It will get removed once the basic
	 * code has landed.
	 */
	struct intel_context *FIXME_lrc_ctx;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct intel_engine_cs {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	struct drm_device *dev;
	struct intel_ringbuffer *buffer;

	struct intel_hw_status_page status_page;

	unsigned	irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void		(*irq_put)(struct intel_engine_cs *ring);

	int		(*init)(struct intel_engine_cs *ring);

	int		(*init_context)(struct intel_engine_cs *ring);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_engine_cs *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_engine_cs *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_engine_cs *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_engine_cs *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
					       u64 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *        signal to     signal to    signal to    signal to    signal to
	 *          RCS            VCS          BCS          VECS         VCS2
	 *      -------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *       sync from     sync from    sync from    sync from    sync from
	 *          RCS            VCS          BCS          VECS         VCS2
	 *      -------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		union {
			struct {
				/* our mbox written by others */
				u32		wait[I915_NUM_RINGS];
				/* mboxes this ring signals to */
				u32		signal[I915_NUM_RINGS];
			} mbox;
			u64		signal_ggtt[I915_NUM_RINGS];
		};

		/* AKA wait() */
		int	(*sync_to)(struct intel_engine_cs *ring,
				   struct intel_engine_cs *to,
				   u32 seqno);
		int	(*signal)(struct intel_engine_cs *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	spinlock_t execlist_lock;
	struct list_head execlist_queue;
	u8 next_context_status_buffer;
	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
	int		(*emit_request)(struct intel_ringbuffer *ringbuf);
	int		(*emit_flush)(struct intel_ringbuffer *ringbuf,
				      u32 invalidate_domains,
				      u32 flush_domains);
	int		(*emit_bb_start)(struct intel_ringbuffer *ringbuf,
					 u64 offset, unsigned flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
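
/*
 * To make the get_cmd_length_mask() contract above concrete, here is a
 * sketch of a plausible hook (the client and mask values are illustrative,
 * not lifted from any particular ring). A client whose commands keep their
 * dword length in bits 5:0 of the header would return 0x3f, and anything
 * unrecognized would return 0 so the parser rejects the batch:
 *
 *	static u32 example_get_cmd_length_mask(u32 cmd_header)
 *	{
 *		if ((cmd_header >> 29) == 0)
 *			return 0x3f;
 *		return 0;
 *	}
 */
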
bool intel_ring_initialized(struct intel_engine_cs *ring);

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
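
/*
 * Worked example for the mapping above (illustrative): the pointer
 * subtraction works because the engines sit in one contiguous array, so
 * (other - ring) mirrors id arithmetic. With ring == vcs (id 1) and
 * other == rcs (id 0):
 *
 *	idx = (0 - 1) - 1 = -2;  -2 + I915_NUM_RINGS = 3
 *
 * which matches the "vcs -> ... 3 = rcs" line in the comment.
 */
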
static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
			       struct intel_ringbuffer *ringbuf);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
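
/*
 * Typical emission sequence (editor's sketch; error handling trimmed):
 * reserve ring space, write the dwords, then fold the software tail back
 * into the ring. Publishing the tail to the hardware is the separate
 * __intel_ring_advance() step declared below.
 *
 *	if (intel_ring_begin(ring, 2))
 *		return;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */
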
int __intel_ring_space(int head, int tail, int size);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */