intel_batchbuffer.c revision f75843a517bd188639e6866db2a7b04de3524e16
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"

/* Relocations in kernel space:
 *  - pass dma buffer separately
 *  - memory manager knows how to patch
 *  - pass list of dependent buffers
 *  - pass relocation list
 *
 * Either:
 *  - get back an offset for buffer to fire
 *  - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 *
 */

/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information. Like the old drawable
 * lock but irq-driven. X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?]. For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer. X server must hold the lock while changing
 * cliprects??? Make per-drawable. Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp. All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization. Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
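/* For reference, the per-buffer relocation records handed to the kernel
 * by the GEM execbuffer path look roughly like the following (a sketch of
 * struct drm_i915_gem_relocation_entry from i915_drm.h; consult the
 * installed header for the authoritative layout):
 *
 *    struct drm_i915_gem_relocation_entry {
 *       __u32 target_handle;     -- buffer object the reloc points at
 *       __u32 delta;             -- value added to the target's offset
 *       __u64 offset;            -- byte offset in the batch to patch
 *       __u64 presumed_offset;   -- offset assumed when the batch was built
 *       __u32 read_domains;
 *       __u32 write_domain;
 *    };
 */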
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   if (!batch->buffer && intel->ttm == GL_TRUE)
      batch->buffer = malloc (intel->maxBatchSize);

   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096);
   if (batch->buffer)
      batch->map = batch->buffer;
   else {
      dri_bo_map(batch->buf, GL_TRUE);
      batch->map = batch->buf->virtual;
   }
   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
   batch->cliprect_mode = IGNORE_CLIPRECTS;
}

struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   intel_batchbuffer_reset(batch);

   return batch;
}

void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->buffer)
      free (batch->buffer);
   else {
      if (batch->map) {
         dri_bo_unmap(batch->buf);
         batch->map = NULL;
      }
   }
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
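/* Note on the two backing-store paths above and in do_flush_locked()
 * below: with the TTM/GEM memory manager (intel->ttm) the batch is
 * accumulated in a malloc'ed shadow buffer (batch->buffer) and uploaded
 * into the buffer object with dri_bo_subdata() at flush time; on the
 * classic path the buffer object itself is mapped and commands are
 * written into it directly.
 */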
/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used, GLboolean allow_unlock)
{
   struct intel_context *intel = batch->intel;
   int ret = 0;

   if (batch->buffer)
      dri_bo_subdata (batch->buf, 0, used, batch->buffer);
   else
      dri_bo_unmap(batch->buf);

   batch->map = NULL;
   batch->ptr = NULL;

   /* Throw away non-effective packets. Won't work once we have
    * hardware contexts which would preserve state changes beyond a
    * single buffer.
    */

   if (!(intel->numClipRects == 0 &&
         batch->cliprect_mode == LOOP_CLIPRECTS)) {
      if (intel->ttm == GL_TRUE) {
         struct drm_i915_gem_execbuffer *execbuf;

         execbuf = dri_process_relocs(batch->buf);
         ret = intel_exec_ioctl(batch->intel,
                                used,
                                batch->cliprect_mode != LOOP_CLIPRECTS,
                                allow_unlock,
                                execbuf);
      } else {
         dri_process_relocs(batch->buf);
         ret = intel_batch_ioctl(batch->intel,
                                 batch->buf->offset,
                                 used,
                                 batch->cliprect_mode != LOOP_CLIPRECTS,
                                 allow_unlock);
      }
   }

   dri_post_submit(batch->buf);

   if (intel->numClipRects == 0 &&
       batch->cliprect_mode == LOOP_CLIPRECTS) {
      if (allow_unlock) {
         /* If we are not doing any actual user-visible rendering,
          * do a sched_yield to keep the app from pegging the cpu while
          * achieving nothing.
          */
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   if (ret != 0) {
      UNLOCK_HARDWARE(intel);
      exit(1);
   }
   intel->vtbl.new_batch(intel);
}

void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   GLboolean was_locked = intel->locked;

   if (used == 0)
      return;

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   /* Emit a flush if the bufmgr doesn't do it for us. */
   if (!intel->ttm) {
      *(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Round batchbuffer usage to 2 DWORDs. */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush. What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()). To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock. This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place. */
   batch->ptr = batch->map;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      int irq;

      fprintf(stderr, "waiting for idle\n");
      LOCK_HARDWARE(intel);
      irq = intelEmitIrqLocked(intel);
      UNLOCK_HARDWARE(intel);
      intelWaitIrq(intel, irq);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}
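/* Worked example of the 2-DWORD rounding in _intel_batchbuffer_flush()
 * above (numbers are illustrative): with 24 bytes of commands emitted,
 * (used & 4) == 0, so one noop DWORD is written (used becomes 28) before
 * MI_BATCH_BUFFER_END brings it to 32; with 28 bytes no pad is needed and
 * MI_BATCH_BUFFER_END brings it to 32 directly. Either way the final
 * batch length is a multiple of 8 bytes.
 */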
/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   if (batch->ptr - batch->map > batch->buf->size)
      _mesa_printf ("bad relocation ptr %p map %p offset %d size %d\n",
                    batch->ptr, batch->map, batch->ptr - batch->map,
                    batch->buf->size);
   ret = intel_bo_emit_reloc(batch->buf, read_domains, write_domain,
                             delta, batch->ptr - batch->map, buffer);

   /*
    * Using the old buffer offset, write in what the right data would be, in case
    * the buffer doesn't move and we can short-circuit the relocation processing
    * in the kernel
    */
   intel_batchbuffer_emit_dword (batch, buffer->offset + delta);

   return GL_TRUE;
}

void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes,
                       enum cliprect_mode cliprect_mode)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, cliprect_mode);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
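/* Illustrative usage sketch (hypothetical, not part of the driver): a
 * typical caller reserves space, writes command DWORDs, emits relocations
 * for referenced buffer objects, and flushes.
 * intel_batchbuffer_require_space() and intel_batchbuffer_emit_dword() are
 * the intel_batchbuffer.h helpers already used above; example_emit_packet()
 * and the zero command DWORD are placeholders, and I915_GEM_DOMAIN_RENDER
 * comes from the DRM headers.
 */
static void
example_emit_packet(struct intel_batchbuffer *batch, dri_bo *target)
{
   /* Make sure two DWORDs fit; this may flush the current batch first. */
   intel_batchbuffer_require_space(batch, 2 * 4, IGNORE_CLIPRECTS);

   /* Command header (placeholder value, not a real packet). */
   intel_batchbuffer_emit_dword(batch, 0x0);

   /* Reference 'target': records a relocation and writes the presumed
    * offset so the kernel can skip patching if the buffer did not move.
    */
   intel_batchbuffer_emit_reloc(batch, target,
                                I915_GEM_DOMAIN_RENDER,
                                I915_GEM_DOMAIN_RENDER,
                                0);

   /* Submit the batch to the hardware. */
   _intel_batchbuffer_flush(batch, __FILE__, __LINE__);
}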