intel_batchbuffer.c revision b2f1aa2389473ed09170713301b042661d70a48e
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "intel_decode.h"
#include "i915_debug.h"

/* Relocations in kernel space:
 *  - pass dma buffer separately
 *  - memory manager knows how to patch
 *  - pass list of dependent buffers
 *  - pass relocation list
 *
 * Either:
 *  - get back an offset for buffer to fire
 *  - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 *
 */

/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
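/* A minimal sketch (not part of the driver) of how a command that
 * references a buffer object is typically built on top of this file's
 * entry points.  example_emit_destination() and its packet layout are
 * hypothetical; only intel_batchbuffer_require_space() and
 * intel_batchbuffer_emit_reloc() below are real.  The point is that a
 * relocation is just a 4-byte hole in the batch plus a
 * (buffer, flags, delta) record that do_flush_locked() patches once the
 * buffer has been validated and has a final offset.
 */
#if 0
static void
example_emit_destination(struct intel_batchbuffer *batch, dri_bo *dest)
{
   /* Reserve room for two dwords: a command dword and the address dword. */
   intel_batchbuffer_require_space(batch, 2 * 4, 0);

   /* Plain dword: written directly into the mapped batch. */
   *(GLuint *) batch->ptr = 0x02000000;        /* hypothetical command */
   batch->ptr += 4;

   /* Relocated dword: record the target buffer and leave a hole that is
    * filled with dest->offset + delta at flush time.
    */
   intel_batchbuffer_emit_reloc(batch, dest,
                                DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE, 0);
}
#endif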
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   batch->buf = dri_bo_alloc(intel->intelScreen->bufmgr, "batchbuffer",
                             intel->intelScreen->maxBatchSize, 4096,
                             DRM_BO_FLAG_MEM_TT);
   dri_bo_map(batch->buf, GL_TRUE);
   batch->map = batch->buf->virtual;
   batch->size = intel->intelScreen->maxBatchSize;
   batch->ptr = batch->map;
}

struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);

   batch->intel = intel;
   batch->last_fence = NULL;
   intel_batchbuffer_reset(batch);

   return batch;
}

void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->last_fence) {
      dri_fence_wait(batch->last_fence);
      dri_fence_unreference(batch->last_fence);
      batch->last_fence = NULL;
   }
   if (batch->map) {
      dri_bo_unmap(batch->buf);
      batch->map = NULL;
   }
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
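/* A minimal usage sketch (hypothetical caller, not part of the driver) of
 * the batchbuffer lifecycle implemented above: one batchbuffer is
 * allocated per context, filled with commands, flushed to the hardware,
 * optionally waited on, and freed at context teardown.
 */
#if 0
static void
example_batchbuffer_lifecycle(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = intel_batchbuffer_alloc(intel);

   /* ... fill the batch via intel_batchbuffer_data() and
    * intel_batchbuffer_emit_reloc() ...
    */

   /* Submit what has accumulated; the buffer is reset afterwards and can
    * be refilled immediately.
    */
   intel_batchbuffer_flush(batch);

   /* glFinish()-style behaviour: flush, then block on the last fence that
    * covered actual rendering.
    */
   intel_batchbuffer_finish(batch);

   intel_batchbuffer_free(batch);
}
#endif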
static int
relocation_sort(const void *a_in, const void *b_in)
{
   const struct buffer_reloc *a = a_in, *b = b_in;

   /* Report equal buffers as equal: qsort() requires a consistent
    * comparison, and the validation loop below relies on relocations
    * against the same buffer ending up adjacent.
    */
   if (a->buf == b->buf)
      return 0;

   return (intptr_t) a->buf < (intptr_t) b->buf ? -1 : 1;
}


/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used,
                GLboolean ignore_cliprects, GLboolean allow_unlock)
{
   GLuint *ptr;
   GLuint i;
   struct intel_context *intel = batch->intel;
   dri_fence *fo;
   GLboolean performed_rendering = GL_FALSE;

   assert(batch->buf->virtual != NULL);
   ptr = batch->buf->virtual;

   /* Sort our relocation list in terms of referenced buffer pointer.
    * This lets us uniquely validate the buffers with the sum of all the
    * flags, while avoiding O(n^2) on number of relocations.
    */
   qsort(batch->reloc, batch->nr_relocs, sizeof(batch->reloc[0]),
         relocation_sort);

   /* Perform the necessary validations of buffers, and enter the relocations
    * in the batchbuffer.
    */
   for (i = 0; i < batch->nr_relocs; i++) {
      struct buffer_reloc *r = &batch->reloc[i];

      if (r->validate_flags & DRM_BO_FLAG_WRITE)
         performed_rendering = GL_TRUE;

      /* If this is the first time we've seen this buffer in the relocation
       * list, figure out our flags and validate it.
       */
      if (i == 0 || batch->reloc[i - 1].buf != r->buf) {
         uint32_t validate_flags;
         int j, ret;

         /* Accumulate the flags we need for validating this buffer. */
         validate_flags = r->validate_flags;
         for (j = i + 1; j < batch->nr_relocs; j++) {
            if (batch->reloc[j].buf != r->buf)
               break;
            validate_flags |= batch->reloc[j].validate_flags;
         }

         /* Validate.  If we fail, fence to clear the unfenced list and bail
          * out.
          */
         ret = dri_bo_validate(r->buf, validate_flags);
         if (ret != 0) {
            dri_bo_unmap(batch->buf);
            fo = dri_fence_validated(intel->intelScreen->bufmgr,
                                     "batchbuffer failure fence", GL_TRUE);
            dri_fence_unreference(fo);
            goto done;
         }
      }
      ptr[r->offset / 4] = r->buf->offset + r->delta;
      dri_bo_unreference(r->buf);
   }

   dri_bo_unmap(batch->buf);
   batch->map = NULL;
   batch->ptr = NULL;

   dri_bo_validate(batch->buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE);

   batch->list_count = 0;
   batch->nr_relocs = 0;
   batch->flags = 0;

   /* Throw away non-effective packets.  Won't work once we have
    * hardware contexts which would preserve statechanges beyond a
    * single buffer.
    */
   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
      intel_batch_ioctl(batch->intel,
                        batch->buf->offset,
                        used, ignore_cliprects, allow_unlock);
   }

   /* Associate a fence with the validated buffers, and note that we included
    * a flush at the end.
    */
   fo = dri_fence_validated(intel->intelScreen->bufmgr,
                            "Batch fence", GL_TRUE);

   if (performed_rendering) {
      dri_fence_unreference(batch->last_fence);
      batch->last_fence = fo;
   } else {
      /* If we didn't validate any buffers for writing by the card, we don't
       * need to track the fence for glFinish().
       */
      dri_fence_unreference(fo);
   }

   if (intel->numClipRects == 0 && !ignore_cliprects) {
      if (allow_unlock) {
         /* If we are not doing any actual user-visible rendering,
          * do a sched_yield to keep the app from pegging the cpu while
          * achieving nothing.
          */
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
      intel->vtbl.lost_hardware(intel);
   }

done:
   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(ptr, used / 4, batch->buf->offset);
      dri_bo_unmap(batch->buf);
   }
}


void
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   GLboolean was_locked = intel->locked;

   if (used == 0)
      return;

   /* Add the MI_BATCH_BUFFER_END.  An MI_FLUSH is always emitted first;
    * this is a performance drain that we would like to avoid eventually.
    * The batch must end on an 8-byte (qword) boundary, so a zero dword
    * (MI_NOOP) pads it out when necessary.
    */
   if (used & 4) {
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = 0;
      ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
      used += 8;
   }

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
                   GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}

void
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
   intel_batchbuffer_flush(batch);
   if (batch->last_fence != NULL)
      dri_fence_wait(batch->last_fence);
}
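/* Worked example (illustration only) of the end-of-batch padding done in
 * intel_batchbuffer_flush() above, assuming the hardware requirement that
 * the batch length be a multiple of 8 bytes:
 *
 *   used = 0x64 (25 dwords, qword-misaligned):
 *      MI_FLUSH, MI_NOOP, MI_BATCH_BUFFER_END  ->  used += 12  ->  0x70
 *   used = 0x60 (24 dwords, qword-aligned):
 *      MI_FLUSH, MI_BATCH_BUFFER_END           ->  used +=  8  ->  0x68
 */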
/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             GLuint flags, GLuint delta)
{
   struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];

   assert(batch->nr_relocs <= MAX_RELOCS);

   dri_bo_reference(buffer);
   r->buf = buffer;
   r->offset = batch->ptr - batch->map;
   r->delta = delta;
   r->validate_flags = flags;

   batch->ptr += 4;
   return GL_TRUE;
}


void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes, GLuint flags)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, flags);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
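/* A minimal sketch (hypothetical caller) of intel_batchbuffer_data():
 * copying a pre-built, dword-aligned packet into the batch in one shot
 * instead of emitting it dword by dword.  The packet contents here are
 * made up, and the flags argument is assumed to carry the usual batch
 * flags (e.g. INTEL_BATCH_CLIPRECTS) or 0.
 */
#if 0
static void
example_copy_packet(struct intel_batchbuffer *batch)
{
   static const GLuint packet[4] = {
      0x02000000,               /* hypothetical command dword */
      0x00000000,
      0x00000000,
      0x00000000
   };

   /* Size must be a multiple of 4 bytes; require_space() makes sure the
    * packet fits in the current batch before the copy.
    */
   intel_batchbuffer_data(batch, packet, sizeof(packet), 0);
}
#endif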