intel_batchbuffer.c revision f3687284c12f34268172b9c60e2effd697162129
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"

/* Relocations in kernel space:
 *  - pass dma buffer separately
 *  - memory manager knows how to patch
 *  - pass list of dependent buffers
 *  - pass relocation list
 *
 * Either:
 *  - get back an offset for buffer to fire
 *  - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 *
 */

/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
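/* Illustrative sketch (not compiled): roughly the information each
 * relocation emitted by intel_batchbuffer_emit_reloc() below carries up to
 * the kernel on the GEM execbuffer path.  The real definition is
 * struct drm_i915_gem_relocation_entry in i915_drm.h; this copy exists only
 * to be read alongside the code in this file, and its name is made up.
 */
#if 0
struct example_relocation_entry {
   uint32_t target_handle;    /* handle of the buffer being pointed at */
   uint32_t delta;            /* constant added to the target's offset */
   uint64_t offset;           /* byte offset in the batch where the pointer
                               * (presumed address + delta) gets written */
   uint64_t presumed_offset;  /* guess at the target's current address; if it
                               * still matches at exec time the kernel can
                               * skip patching this entry */
   uint32_t read_domains;     /* e.g. I915_GEM_DOMAIN_RENDER */
   uint32_t write_domain;     /* 0 if the target is only read */
};
#endif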
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   if (!batch->buffer && intel->ttm == GL_TRUE)
      batch->buffer = malloc (intel->maxBatchSize);

   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096);
   if (batch->buffer)
      batch->map = batch->buffer;
   else {
      dri_bo_map(batch->buf, GL_TRUE);
      batch->map = batch->buf->virtual;
   }
   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
   batch->cliprect_mode = IGNORE_CLIPRECTS;
}

struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));

   batch->intel = intel;
   intel_batchbuffer_reset(batch);

   return batch;
}

void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->buffer)
      free (batch->buffer);
   else {
      if (batch->map) {
         dri_bo_unmap(batch->buf);
         batch->map = NULL;
      }
   }
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
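/* Illustrative sketch (not compiled): the expected lifecycle of a
 * batchbuffer from the driver's point of view.  It assumes the
 * intel_batchbuffer_emit_dword() helper and the intel_batchbuffer_flush()
 * wrapper from intel_batchbuffer.h; real callers normally emit through the
 * BEGIN_BATCH/OUT_BATCH/ADVANCE_BATCH macros instead, and the function name
 * here is made up.
 */
#if 0
static void
example_batchbuffer_lifecycle(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = intel_batchbuffer_alloc(intel);

   /* Fill the batch with command DWORDs, then submit it. */
   intel_batchbuffer_emit_dword(batch, 0 /* MI_NOOP */);
   intel_batchbuffer_flush(batch);

   /* flush() resets the batch for reuse; free it when tearing down. */
   intel_batchbuffer_free(batch);
}
#endif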
/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used, GLboolean allow_unlock)
{
   struct intel_context *intel = batch->intel;
   int ret = 0;
   unsigned int num_cliprects = 0;
   struct drm_clip_rect *cliprects = NULL;
   int x_off = 0, y_off = 0;

   if (batch->buffer)
      dri_bo_subdata (batch->buf, 0, used, batch->buffer);
   else
      dri_bo_unmap(batch->buf);

   batch->map = NULL;
   batch->ptr = NULL;

   if (batch->cliprect_mode == LOOP_CLIPRECTS) {
      intel_get_cliprects(intel, &cliprects, &num_cliprects, &x_off, &y_off);
   }

   /* Dispatch the batchbuffer, if it has some effect (nonzero cliprects).
    * Can't short-circuit like this once we have hardware contexts, but we
    * should always be in DRI2 mode by then anyway.
    */
   if ((batch->cliprect_mode != LOOP_CLIPRECTS ||
        num_cliprects != 0) && !intel->no_hw) {
      ret = dri_bo_exec(batch->buf, used, cliprects, num_cliprects,
                        (x_off & 0xffff) | (y_off << 16));
   }

   if (batch->cliprect_mode == LOOP_CLIPRECTS && num_cliprects == 0) {
      if (allow_unlock) {
         /* If we are not doing any actual user-visible rendering,
          * do a sched_yield to keep the app from pegging the cpu while
          * achieving nothing.
          */
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   if (ret != 0) {
      UNLOCK_HARDWARE(intel);
      exit(1);
   }
   intel->vtbl.new_batch(intel);
}

void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   GLboolean was_locked = intel->locked;

   if (used == 0) {
      batch->cliprect_mode = IGNORE_CLIPRECTS;
      return;
   }

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   /* Emit a flush if the bufmgr doesn't do it for us. */
   if (intel->always_flush_cache || !intel->ttm) {
      *(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Round batchbuffer usage to 2 DWORDs. */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place. */
   batch->ptr = batch->map;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "waiting for idle\n");
      dri_bo_map(batch->buf, GL_TRUE);
      dri_bo_unmap(batch->buf);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}
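/* Illustrative sketch (not compiled): the invariant the padding in
 * _intel_batchbuffer_flush() establishes before submission.  The helper name
 * is made up and nothing in the driver calls it.
 */
#if 0
static GLboolean
example_batch_end_is_valid(const GLuint *map, GLuint used)
{
   /* 'used' is in bytes.  After padding, the batch is QWORD (8-byte)
    * aligned and its final DWORD is MI_BATCH_BUFFER_END.
    */
   return (used & 7) == 0 && map[used / 4 - 1] == MI_BATCH_BUFFER_END;
}
#endif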
/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   if (batch->ptr - batch->map > batch->buf->size)
      _mesa_printf ("bad relocation ptr %p map %p offset %d size %d\n",
                    batch->ptr, batch->map,
                    (int)(batch->ptr - batch->map), (int)batch->buf->size);

   ret = dri_bo_emit_reloc(batch->buf, read_domains, write_domain,
                           delta, batch->ptr - batch->map, buffer);
   (void) ret;

   /*
    * Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword (batch, buffer->offset + delta);

   return GL_TRUE;
}

void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes,
                       enum cliprect_mode cliprect_mode)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, cliprect_mode);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
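/* Illustrative sketch (not compiled): how a caller combines the helpers in
 * this file to emit a packet that references another buffer.  The packet
 * layout and opcode here are made up; real packets go through the
 * OUT_BATCH/OUT_RELOC macros in intel_batchbuffer.h, which expand to these
 * same emit_dword()/emit_reloc() calls.
 */
#if 0
static void
example_emit_packet(struct intel_batchbuffer *batch, dri_bo *dst)
{
   /* Three DWORDs: opcode, flags, then an address the kernel may patch. */
   intel_batchbuffer_require_space(batch, 3 * 4, IGNORE_CLIPRECTS);

   intel_batchbuffer_emit_dword(batch, 0 /* hypothetical opcode */);
   intel_batchbuffer_emit_dword(batch, 0 /* hypothetical flags */);

   /* Records a relocation at the current write pointer and emits the
    * buffer's presumed address, so the kernel can skip the patch if the
    * buffer has not moved.
    */
   intel_batchbuffer_emit_reloc(batch, dst,
                                I915_GEM_DOMAIN_RENDER,   /* read domains */
                                I915_GEM_DOMAIN_RENDER,   /* write domain */
                                0 /* delta */);
}
#endif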