/* intel_batchbuffer.c, revision e72b87736d8453e79bb6da48ba4cfcc2e97c8e14 */
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_decode.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"

void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      drm_intel_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   batch->buf = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
                                   intel->maxBatchSize, 4096);
   batch->map = batch->buffer;
   batch->size = intel->maxBatchSize;
   batch->ptr = batch->map;
   batch->reserved_space = BATCH_RESERVED;
   batch->dirty_state = ~0;
   batch->state_batch_offset = batch->size;
}

struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));

   batch->intel = intel;
   batch->buffer = malloc(intel->maxBatchSize);
   intel_batchbuffer_reset(batch);

   return batch;
}

void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   free(batch->buffer);
   drm_intel_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
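/* A minimal sketch (not part of this file) of the batchbuffer lifecycle as
 * a caller would drive it: allocate once per context, emit commands, flush,
 * and free at context teardown.  The example function name is hypothetical;
 * the intel_batchbuffer_* entry points and the intel->batch field are the
 * ones used elsewhere in this file.
 */
#if 0
static void
example_batchbuffer_lifecycle(struct intel_context *intel)
{
   /* Allocate the batchbuffer, typically at context creation. */
   intel->batch = intel_batchbuffer_alloc(intel);

   /* ...commands are accumulated into intel->batch here... */

   /* Submit the accumulated commands; the batch is reset afterwards. */
   _intel_batchbuffer_flush(intel->batch, __FILE__, __LINE__);

   /* Release the batchbuffer at context destruction. */
   intel_batchbuffer_free(intel->batch);
}
#endif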
/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
{
   struct intel_context *intel = batch->intel;
   int ret = 0;
   int x_off = 0, y_off = 0;

   drm_intel_bo_subdata(batch->buf, 0, used, batch->buffer);
   if (batch->state_batch_offset != batch->size) {
      drm_intel_bo_subdata(batch->buf,
                           batch->state_batch_offset,
                           batch->size - batch->state_batch_offset,
                           batch->buffer + batch->state_batch_offset);
   }

   batch->ptr = NULL;

   if (!intel->no_hw) {
      /* Capture the return value so the error check below is not dead code. */
      ret = drm_intel_bo_exec(batch->buf, used, NULL, 0,
                              (x_off & 0xffff) | (y_off << 16));
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      drm_intel_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID, GL_TRUE);
      drm_intel_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }

   if (ret != 0) {
      exit(1);
   }
   intel->vtbl.new_batch(intel);
}

void
_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
                         int line)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;

   if (intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch->buf;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

   if (used == 0)
      return;

   if (INTEL_DEBUG & DEBUG_BATCH)
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              used);

   batch->reserved_space = 0;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(batch);
      used = batch->ptr - batch->map;
   }

   /* Round batchbuffer usage to 2 DWORDs, so that together with the
    * MI_BATCH_BUFFER_END below the batch ends on a QWord boundary.
    */
   if ((used & 4) == 0) {
      *(GLuint *) (batch->ptr) = 0; /* noop */
      batch->ptr += 4;
      used = batch->ptr - batch->map;
   }

   /* Mark the end of the buffer. */
   *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
   batch->ptr += 4;
   used = batch->ptr - batch->map;
   assert(used <= batch->buf->size);

   /* Workaround for recursive batchbuffer flushing: If the window is
    * moved, we can get into a case where we try to flush during a
    * flush.  What happens is that when we try to grab the lock for
    * the first flush, we detect that the window moved, which then
    * causes another flush (from the intel_draw_buffer() call in
    * intelUpdatePageFlipping()).  To work around this we reset the
    * batchbuffer tail pointer before trying to get the lock.  This
    * prevents the nested buffer flush, but a better fix would be to
    * avoid that in the first place.
    */
   batch->ptr = batch->map;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!intel->no_batch_wrap);

   do_flush_locked(batch, used);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "waiting for idle\n");
      drm_intel_bo_map(batch->buf, GL_TRUE);
      drm_intel_bo_unmap(batch->buf);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}
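/* A minimal sketch (not part of this file) of the emit pattern that
 * _intel_batchbuffer_flush() finalizes: reserve space first so the batch
 * cannot wrap mid-packet, write DWORDs, then flush.  The function name is
 * hypothetical; intel_batchbuffer_require_space() and
 * intel_batchbuffer_emit_dword() are the helpers used by
 * intel_batchbuffer_data() and intel_batchbuffer_emit_reloc() below.
 */
#if 0
static void
example_emit_and_flush(struct intel_batchbuffer *batch)
{
   /* Reserve room for two DWORDs (8 bytes) up front. */
   intel_batchbuffer_require_space(batch, 2 * 4);

   intel_batchbuffer_emit_dword(batch, MI_FLUSH);
   intel_batchbuffer_emit_dword(batch, 0); /* noop */

   _intel_batchbuffer_flush(batch, __FILE__, __LINE__);
}
#endif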
/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             drm_intel_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   assert(delta < buffer->size);

   if (batch->ptr - batch->map > batch->buf->size)
      fprintf(stderr, "bad relocation ptr %p map %p offset %td size %lu\n",
              batch->ptr, batch->map, batch->ptr - batch->map,
              batch->buf->size);
   drm_intel_bo_emit_reloc(batch->buf, batch->ptr - batch->map,
                           buffer, delta,
                           read_domains, write_domain);

   /*
    * Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(batch, buffer->offset + delta);

   return GL_TRUE;
}

GLboolean
intel_batchbuffer_emit_reloc_fenced(struct intel_batchbuffer *batch,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t delta)
{
   assert(delta < buffer->size);

   if (batch->ptr - batch->map > batch->buf->size)
      fprintf(stderr, "bad relocation ptr %p map %p offset %td size %lu\n",
              batch->ptr, batch->map, batch->ptr - batch->map,
              batch->buf->size);
   drm_intel_bo_emit_reloc_fence(batch->buf, batch->ptr - batch->map,
                                 buffer, delta,
                                 read_domains, write_domain);

   /*
    * Using the old buffer offset, write in what the right data would
    * be, in case the buffer doesn't move and we can short-circuit the
    * relocation processing in the kernel.
    */
   intel_batchbuffer_emit_dword(batch, buffer->offset + delta);

   return GL_TRUE;
}

void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}

/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (intel->gen >= 6) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL);
      OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */
      ADVANCE_BATCH();
   } else if (intel->gen >= 4) {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL |
                PIPE_CONTROL_WRITE_FLUSH |
                PIPE_CONTROL_NO_WRITE);
      OUT_BATCH(0); /* write address */
      OUT_BATCH(0); /* write data */
      OUT_BATCH(0); /* write data */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }
}
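/* A minimal sketch (not part of this file) of how a relocation is emitted
 * inside a command packet: the caller writes the packet's fixed DWORDs with
 * intel_batchbuffer_emit_dword() and emits the buffer address through
 * intel_batchbuffer_emit_reloc(), which records the relocation with libdrm
 * and writes the bo's current offset as a best guess.  The function name
 * and the placeholder header DWORD are hypothetical; I915_GEM_DOMAIN_RENDER
 * comes from i915_drm.h.
 */
#if 0
static void
example_emit_packet_with_reloc(struct intel_batchbuffer *batch,
                               drm_intel_bo *bo)
{
   intel_batchbuffer_require_space(batch, 2 * 4);

   /* Packet header; a real packet would use an opcode from intel_reg.h. */
   intel_batchbuffer_emit_dword(batch, 0 /* hypothetical opcode */);

   /* Address of 'bo', read by the render engine, no write domain. */
   intel_batchbuffer_emit_reloc(batch, bo,
                                I915_GEM_DOMAIN_RENDER, 0, 0);
}
#endif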