intel_batchbuffer.c revision e3a6e60040b7f6ea7965e52f8f9881ed31e0347c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "intel_decode.h"
#include "intel_reg.h"

/* Relocations in kernel space:
 *    - pass dma buffer separately
 *    - memory manager knows how to patch
 *    - pass list of dependent buffers
 *    - pass relocation list
 *
 * Either:
 *    - get back an offset for buffer to fire
 *    - memory manager knows how to fire buffer
 *
 * Really want the buffer to be AGP and pinned.
 *
 */

/* Cliprect fence: The highest fence protecting a dma buffer
 * containing explicit cliprect information.  Like the old drawable
 * lock but irq-driven.  X server must wait for this fence to expire
 * before changing cliprects [and then doing sw rendering?].  For
 * other dma buffers, the scheduler will grab current cliprect info
 * and mix into buffer.  X server must hold the lock while changing
 * cliprects???  Make per-drawable.  Need cliprects in shared memory
 * -- beats storing them with every cmd buffer in the queue.
 *
 * ==> X server must wait for this fence to expire before touching the
 * framebuffer with new cliprects.
 *
 * ==> Cliprect-dependent buffers associated with a
 * cliprect-timestamp.  All of the buffers associated with a timestamp
 * must go to hardware before any buffer with a newer timestamp.
 *
 * ==> Dma should be queued per-drawable for correct X/GL
 * synchronization.  Or can fences be used for this?
 *
 * Applies to: Blit operations, metaops, X server operations -- X
 * server automatically waits on its own dma to complete before
 * modifying cliprects ???
 */
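
/* Illustrative only, not part of this driver: a minimal sketch of how a
 * state emitter would stream a packet that references another buffer
 * object, so that intel_batchbuffer_emit_reloc() (defined below) records
 * a relocation for the memory manager to patch at submit time.  The
 * command dwords are placeholders, the DRM_BO_FLAG_MEM_TT |
 * DRM_BO_FLAG_WRITE placement flags are an assumption, and
 * intel_batchbuffer_emit_dword()/intel_batchbuffer_require_space() are
 * assumed from intel_batchbuffer.h.  Kept under #if 0 so it does not
 * affect the build.
 */
#if 0
static void
example_emit_relocated_packet(struct intel_context *intel, dri_bo *target)
{
   struct intel_batchbuffer *batch = intel->batch;

   /* Reserve three dwords; the batch is flushed first if they would
    * not fit.
    */
   intel_batchbuffer_require_space(batch, 3 * 4, 0);

   intel_batchbuffer_emit_dword(batch, 0);   /* placeholder command dword */
   intel_batchbuffer_emit_dword(batch, 0);   /* placeholder parameter dword */

   /* This slot becomes the GPU address of `target` once the kernel has
    * validated the buffer and patched the relocation.
    */
   intel_batchbuffer_emit_reloc(batch, target,
                                DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE, 0);
}
#endif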

void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   batch->buf = dri_bo_alloc(intel->intelScreen->bufmgr, "batchbuffer",
                             intel->intelScreen->maxBatchSize, 4096,
                             DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
                             DRM_BO_FLAG_CACHED_MAPPED);
   dri_bo_map(batch->buf, GL_TRUE);
   batch->map = batch->buf->virtual;
   batch->size = intel->intelScreen->maxBatchSize;
   batch->ptr = batch->map;
   batch->dirty_state = ~0;
   batch->id = batch->intel->intelScreen->batch_id++;
}

struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = calloc(1, sizeof(*batch));

   batch->intel = intel;
   batch->last_fence = NULL;
   intel_batchbuffer_reset(batch);

   return batch;
}

void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   if (batch->last_fence) {
      dri_fence_wait(batch->last_fence);
      dri_fence_unreference(batch->last_fence);
      batch->last_fence = NULL;
   }
   if (batch->map) {
      dri_bo_unmap(batch->buf);
      batch->map = NULL;
   }
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}



/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used,
                GLboolean ignore_cliprects, GLboolean allow_unlock)
{
   struct intel_context *intel = batch->intel;
   void *start;
   GLuint count;

   dri_bo_unmap(batch->buf);
   start = dri_process_relocs(batch->buf, &count);

   batch->map = NULL;
   batch->ptr = NULL;
   batch->flags = 0;

   /* Throw away non-effective packets.  Won't work once we have
    * hardware contexts which would preserve state changes beyond a
    * single buffer.
    */

   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
      if (intel->intelScreen->ttm == GL_TRUE) {
         intel_exec_ioctl(batch->intel,
                          used, ignore_cliprects, allow_unlock,
                          start, count, &batch->last_fence);
      } else {
         intel_batch_ioctl(batch->intel,
                           batch->buf->offset,
                           used, ignore_cliprects, allow_unlock);
      }
   }

   dri_post_submit(batch->buf, &batch->last_fence);

   if (intel->numClipRects == 0 && !ignore_cliprects) {
      if (allow_unlock) {
         /* If we are not doing any actual user-visible rendering,
          * do a sched_yield to keep the app from pegging the cpu while
          * achieving nothing.
          */
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
      intel->vtbl.lost_hardware(intel);
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      dri_bo_map(batch->buf, GL_FALSE);
      intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
                   intel->intelScreen->deviceID);
      dri_bo_unmap(batch->buf);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }
}

void
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   GLboolean was_locked = intel->locked;

   if (used == 0)
      return;

   /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH - this is a
    * performance drain that we would like to avoid.
    */
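   /* The batch must end on a QWord (8-byte) boundary: if appending the
    * flush and MI_BATCH_BUFFER_END dwords would leave the length
    * misaligned, an extra all-zero dword (MI_NOOP) is emitted between
    * them as padding.
    */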
   if (used & 4) {
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = 0;
      ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
      used += 8;
   }

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
                   GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   if (INTEL_DEBUG & DEBUG_SYNC) {
      fprintf(stderr, "waiting for idle\n");
      if (batch->last_fence != NULL)
         dri_fence_wait(batch->last_fence);
   }

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}

void
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
   intel_batchbuffer_flush(batch);
   if (batch->last_fence != NULL)
      dri_fence_wait(batch->last_fence);
}


/* This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             GLuint flags, GLuint delta)
{
   dri_emit_reloc(batch->buf, flags, delta, batch->ptr - batch->map, buffer);
   batch->ptr += 4;

   return GL_TRUE;
}

void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes, GLuint flags)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, flags);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
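
/* Illustrative only, not part of this driver: a minimal sketch of the
 * expected lifecycle of a batchbuffer from a caller's point of view,
 * using only the functions defined above.  The all-zero payload dwords
 * rely on MI_NOOP being encoded as 0 on this hardware; kept under
 * #if 0 so it does not affect the build.
 */
#if 0
static void
example_batchbuffer_lifecycle(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = intel_batchbuffer_alloc(intel);
   static const GLuint noop[2] = { 0, 0 };   /* two MI_NOOPs */

   /* Copy raw dwords into the batch; the length must be dword-aligned. */
   intel_batchbuffer_data(batch, noop, sizeof(noop), 0);

   /* Submit the batch and block on its fence until the hardware is idle. */
   intel_batchbuffer_finish(batch);

   intel_batchbuffer_free(batch);
}
#endif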