intel_batchbuffer.c revision 145523ba3acb95a9ff390430a9e0a3fa958cae1b
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "intel_decode.h"
#include "intel_reg.h"

/* Relocations in kernel space:
 *  - pass dma buffer separately
 *  - memory manager knows how to patch
 *  - pass list of dependent buffers
 *  - pass relocation list
 *
 * Either:
 *  - get back an offset for buffer to fire
 *  - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 */

/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
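/* Life cycle implemented below:
 *
 *   intel_batchbuffer_reset() allocates a fresh buffer object (and, when
 *   running on TTM, a malloc'd shadow buffer that is uploaded with
 *   dri_bo_subdata() at flush time).
 *
 *   intel_batchbuffer_emit_dword() / _emit_reloc() / _data() append dwords
 *   and record relocations against the current buffer.
 *
 *   _intel_batchbuffer_flush() pads and terminates the batch, submits it to
 *   the kernel through the exec/batch ioctl, and then resets for the next
 *   batch.
 */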
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   if (!batch->buffer && intel->ttm == GL_TRUE)
      batch->buffer = malloc (intel->maxBatchSize);

   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096,
                             DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED);
   if (batch->buffer)
      batch->map = batch->buffer;
   else {
      dri_bo_map(batch->buf, GL_TRUE);
      batch->map = batch->buf->virtual;
   }
   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
   batch->cliprect_mode = IGNORE_CLIPRECTS;

   /* account batchbuffer in aperture */
   dri_bufmgr_check_aperture_space(batch->buf);
}

struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   intel_batchbuffer_reset(batch);

   return batch;
}

void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->buffer)
      free (batch->buffer);
   else {
      if (batch->map) {
         dri_bo_unmap(batch->buf);
         batch->map = NULL;
      }
   }
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}


/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used, GLboolean allow_unlock)
{
   struct intel_context *intel = batch->intel;
   int ret = 0;

   if (batch->buffer)
      dri_bo_subdata (batch->buf, 0, used, batch->buffer);
   else
      dri_bo_unmap(batch->buf);

   batch->map = NULL;
   batch->ptr = NULL;

   /* Throw away non-effective packets.  Won't work once we have
    * hardware contexts which would preserve state changes beyond a
    * single buffer.
    */

   if (!(intel->numClipRects == 0 &&
         batch->cliprect_mode == LOOP_CLIPRECTS)) {
      if (intel->ttm == GL_TRUE) {
         struct drm_i915_gem_execbuffer *execbuf;

         execbuf = dri_process_relocs(batch->buf);
         ret = intel_exec_ioctl(batch->intel,
                                used,
                                batch->cliprect_mode != LOOP_CLIPRECTS,
                                allow_unlock,
                                execbuf);
      } else {
         dri_process_relocs(batch->buf);
         ret = intel_batch_ioctl(batch->intel,
                                 batch->buf->offset,
                                 used,
                                 batch->cliprect_mode != LOOP_CLIPRECTS,
                                 allow_unlock);
      }
   }

   dri_post_submit(batch->buf);

   if (intel->numClipRects == 0 &&
       batch->cliprect_mode == LOOP_CLIPRECTS) {
      if (allow_unlock) {
         /* If we are not doing any actual user-visible rendering,
          * do a sched_yield to keep the app from pegging the cpu while
          * achieving nothing.
          */
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   if (ret != 0) {
      UNLOCK_HARDWARE(intel);
      exit(1);
   }
   intel->vtbl.new_batch(intel);
}
void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   GLboolean was_locked = intel->locked;

   if (used == 0)
      return;

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   /* Emit a flush if the bufmgr doesn't do it for us. */
   if (!intel->ttm) {
      *(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Pad the batch so that, counting the MI_BATCH_BUFFER_END below, its
    * length is a multiple of 2 DWORDs.
    */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place. */
   batch->ptr = batch->map;

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      int irq;

      fprintf(stderr, "waiting for idle\n");
      LOCK_HARDWARE(intel);
      irq = intelEmitIrqLocked(intel);
      UNLOCK_HARDWARE(intel);
      intelWaitIrq(intel, irq);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}
/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   if (batch->ptr - batch->map > batch->buf->size)
      _mesa_printf ("bad relocation ptr %p map %p offset %d size %d\n",
                    batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);

   ret = dri_emit_reloc(batch->buf, read_domains, write_domain,
                        delta, batch->ptr - batch->map, buffer);

   /*
    * Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword (batch, buffer->offset + delta);

   return GL_TRUE;
}

void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes,
                       enum cliprect_mode cliprect_mode)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, cliprect_mode);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
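
/* Illustrative sketch (not part of the driver): one way a caller can drive
 * the entry points above directly, without the OUT_BATCH()-style convenience
 * macros usually layered on top of them in intel_batchbuffer.h.  The function
 * name, the zero opcode dword and the relocation target are placeholders, so
 * the block is kept compiled out.
 */
#if 0
static void
example_emit_packet(struct intel_batchbuffer *batch, dri_bo *target,
                    uint32_t read_domains, uint32_t write_domain)
{
   /* Reserve room up front so both dwords land in the same batch. */
   intel_batchbuffer_require_space(batch, 2 * sizeof(GLuint), IGNORE_CLIPRECTS);

   /* Placeholder command dword. */
   intel_batchbuffer_emit_dword(batch, 0);

   /* Record a relocation; emit_reloc() also writes the presumed offset of
    * `target` so the kernel can skip patching if the buffer doesn't move. */
   intel_batchbuffer_emit_reloc(batch, target, read_domains, write_domain, 0);

   /* Submit.  Callers normally go through an intel_batchbuffer_flush()
    * wrapper macro that supplies __FILE__/__LINE__ for the DEBUG_BATCH
    * message. */
   _intel_batchbuffer_flush(batch, __FILE__, __LINE__);
}
#endif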