intel_batchbuffer.c revision 22409756d4ed941f2ec6729ab0c312149749106f
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"

/* Drop any previous hardware buffer and start a fresh, empty batch. */
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   batch->buf = dri_bo_alloc(intel->bufmgr, "batchbuffer",
                             intel->maxBatchSize, 4096);
   batch->map = batch->buffer;
   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->reserved_space = BATCH_RESERVED;
   batch->dirty_state = ~0;
}

/* Allocate the CPU-side staging buffer and an initial hardware buffer. */
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   batch->buffer = malloc(intel->maxBatchSize);
   intel_batchbuffer_reset(batch);

   return batch;
}

/* Release both the staging buffer and the hardware buffer. */
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   free(batch->buffer);
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
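
/* A minimal lifecycle sketch (illustrative, not code from this file): the
 * context code is assumed to own a single batchbuffer, stored in the
 * intel->batch field that _intel_batchbuffer_flush() below relies on:
 *
 *    intel->batch = intel_batchbuffer_alloc(intel);    // context creation
 *    ... emit commands, flush as the batch fills ...
 *    intel_batchbuffer_free(intel->batch);             // context teardown
 *
 * intel_batchbuffer_reset() normally isn't called by outside code; flushing
 * resets the batch for reuse at the end of _intel_batchbuffer_flush().
 */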

/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
{
   struct intel_context *intel = batch->intel;
   int ret = 0;
   int x_off = 0, y_off = 0;

   dri_bo_subdata(batch->buf, 0, used, batch->buffer);

   batch->ptr = NULL;

   if (!intel->no_hw)
      ret = dri_bo_exec(batch->buf, used, NULL, 0,
                        (x_off & 0xffff) | (y_off << 16));

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   /* A failed submission leaves the GPU state unrecoverable; give up. */
   if (ret != 0) {
      exit(1);
   }
   intel->vtbl.new_batch(intel);
}

void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;

   if (intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch->buf;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

   if (used == 0)
      return;

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   batch->reserved_space = 0;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(batch);
      used = batch->ptr - batch->map;
   }

   /* Pad with a noop if needed so that the batch, including the
    * MI_BATCH_BUFFER_END emitted below, ends on an even DWORD count
    * (a multiple of 8 bytes).
    */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;
   assert(used <= batch->buf->size);

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved, which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place.
    */
   batch->ptr = batch->map;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!intel->no_batch_wrap);

   do_flush_locked(batch, used);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "waiting for idle\n");
      dri_bo_map(batch->buf, GL_TRUE);
      dri_bo_unmap(batch->buf);
   }

   /* Reset the buffer so it is ready for new commands. */
   intel_batchbuffer_reset(batch);
}
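
/* Flushes are normally requested through a wrapper macro rather than by
 * calling _intel_batchbuffer_flush() directly.  A sketch of that wrapper,
 * assuming it lives in intel_batchbuffer.h as the file/line parameters
 * suggest:
 *
 *    #define intel_batchbuffer_flush(batch) \
 *       _intel_batchbuffer_flush(batch, __FILE__, __LINE__)
 *
 * This lets the DEBUG_BATCH message above report the call site that
 * triggered the flush.
 */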

/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   assert(delta < buffer->size);

   if (batch->ptr - batch->map > batch->buf->size)
      printf("bad relocation ptr %p map %p offset %d size %lu\n",
             batch->ptr, batch->map, (int) (batch->ptr - batch->map),
             batch->buf->size);
   ret = dri_bo_emit_reloc(batch->buf, read_domains, write_domain,
                           delta, batch->ptr - batch->map, buffer);

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(batch, buffer->offset + delta);

   return GL_TRUE;
}

GLboolean
intel_batchbuffer_emit_reloc_fenced(struct intel_batchbuffer *batch,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t delta)
{
   int ret;

   assert(delta < buffer->size);

   if (batch->ptr - batch->map > batch->buf->size)
      printf("bad relocation ptr %p map %p offset %d size %lu\n",
             batch->ptr, batch->map, (int) (batch->ptr - batch->map),
             batch->buf->size);
   ret = drm_intel_bo_emit_reloc_fence(batch->buf, batch->ptr - batch->map,
                                       buffer, delta,
                                       read_domains, write_domain);

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(batch, buffer->offset + delta);

   return GL_TRUE;
}

void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}

/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (intel->gen >= 6) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */
      ADVANCE_BATCH();
   } else if (intel->gen >= 4) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */
      OUT_BATCH(0); /* write data */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }
}
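
/* Sketch of how driver code drives the helpers above (illustrative: CMD_FOO
 * is a hypothetical command; OUT_RELOC and the I915_GEM_DOMAIN_* flags are
 * assumed to come from intel_batchbuffer.h and libdrm rather than being
 * defined here):
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(CMD_FOO);
 *    OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
 *    ADVANCE_BATCH();
 *
 * OUT_RELOC() lands in intel_batchbuffer_emit_reloc(), which records the
 * relocation with the kernel and then writes the buffer's current offset
 * into the batch as a guess, letting the kernel skip patching when the
 * buffer doesn't move.
 */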