Lines Matching defs:batch

43 struct cached_batch_item *item = intel->batch.cached_items;
51 intel->batch.cached_items = NULL;
62 * the buffer, and the kernel doesn't let us write to the batch.
64 intel->batch.workaround_bo = drm_intel_bo_alloc(intel->bufmgr,
73 if (intel->batch.last_bo != NULL) {
74 drm_intel_bo_unreference(intel->batch.last_bo);
75 intel->batch.last_bo = NULL;
77 intel->batch.last_bo = intel->batch.bo;
81 intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
84 intel->batch.reserved_space = BATCH_RESERVED;
85 intel->batch.state_batch_offset = intel->batch.bo->size;
86 intel->batch.used = 0;
87 intel->batch.needs_sol_reset = false;
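
The matches at source lines 73-87 come from the batch reset path: the previous batch bo is dropped (retained briefly as last_bo), a fresh bo is allocated, and the per-batch bookkeeping is cleared. Below is a minimal sketch of that pattern against libdrm's drm_intel API, assuming made-up BATCH_SZ / BATCH_RESERVED values and a simplified batch struct of my own; error handling and the debug/annotation paths are left out.

#include <stdbool.h>
#include <stdint.h>
#include <intel_bufmgr.h>

#define BATCH_SZ       (8192 * sizeof(uint32_t))  /* assumed batch size       */
#define BATCH_RESERVED 16                         /* assumed reserved bytes   */

struct batch_sketch {                             /* hypothetical, not the driver's struct */
   drm_intel_bo *bo, *last_bo;
   unsigned used;                 /* dwords written so far                   */
   unsigned reserved_space;       /* bytes kept free for the closing commands */
   unsigned long state_batch_offset;  /* state space grows down from bo->size */
   bool needs_sol_reset;
};

/* Mirrors the reset shown above: drop the old buffer, allocate a new one,
 * and clear the per-batch bookkeeping. */
static void
batch_reset_sketch(drm_intel_bufmgr *bufmgr, struct batch_sketch *b)
{
   if (b->last_bo != NULL) {
      drm_intel_bo_unreference(b->last_bo);
      b->last_bo = NULL;
   }
   b->last_bo = b->bo;

   b->bo = drm_intel_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);

   b->reserved_space = BATCH_RESERVED;
   b->state_batch_offset = b->bo->size;
   b->used = 0;
   b->needs_sol_reset = false;
}
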
93 intel->batch.saved.used = intel->batch.used;
94 intel->batch.saved.reloc_count =
95 drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
101 drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
103 intel->batch.used = intel->batch.saved.used;
105 /* Cached batch state is dead, since we just cleared some unknown part of the
114 drm_intel_bo_unreference(intel->batch.last_bo);
115 drm_intel_bo_unreference(intel->batch.bo);
116 drm_intel_bo_unreference(intel->batch.workaround_bo);
124 struct intel_batchbuffer *batch = &intel->batch;
131 ret = drm_intel_bo_map(batch->bo, false);
134 batch->bo->virtual,
135 batch->bo->offset,
136 batch->used);
143 batch->map,
144 batch->bo->offset,
145 batch->used);
153 drm_intel_bo_unmap(batch->bo);
165 struct intel_batchbuffer *batch = &intel->batch;
168 ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
169 if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
170 ret = drm_intel_bo_subdata(batch->bo,
171 batch->state_batch_offset,
172 batch->bo->size - batch->state_batch_offset,
173 (char *)batch->map + batch->state_batch_offset);
179 if (intel->gen < 6 || !batch->is_blit) {
185 if (batch->needs_sol_reset)
191 if (intel->hw_ctx == NULL || batch->is_blit) {
192 ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
195 ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
196 4 * batch->used, flags);
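
Source lines 165-196 are the flush path: the CPU-side map holding 'used' dwords is uploaded into the batch bo with drm_intel_bo_subdata(), and the batch is then submitted on either the render or the blit ring (or via drm_intel_gem_bo_context_exec() when a hardware context exists). Here is a rough sketch of just the upload-and-exec step, assuming libdrm and ignoring the hardware-context, state-batch and SOL-reset cases; the helper name is made up.

#include <stdbool.h>
#include <stdint.h>
#include <intel_bufmgr.h>
#include <i915_drm.h>

static int
submit_batch_sketch(drm_intel_bo *batch_bo, const uint32_t *map,
                    unsigned used_dwords, bool is_blit)
{
   unsigned int ring = is_blit ? I915_EXEC_BLT : I915_EXEC_RENDER;
   int ret;

   /* Copy the CPU-assembled command dwords into the batch buffer object. */
   ret = drm_intel_bo_subdata(batch_bo, 0, 4 * used_dwords, map);
   if (ret != 0)
      return ret;

   /* Submit the batch on the chosen ring; the size argument is in bytes. */
   return drm_intel_bo_mrb_exec(batch_bo, 4 * used_dwords, NULL, 0, 0, ring);
}
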
219 if (intel->batch.used == 0)
223 intel->first_post_swapbuffers_batch = intel->batch.bo;
229 4*intel->batch.used);
231 intel->batch.reserved_space = 0;
238 if (intel->batch.used & 1) {
252 drm_intel_bo_wait_rendering(intel->batch.bo);
273 ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
298 ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
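
Source lines 273 and 298 record relocations: each call registers the target bo at the current byte offset in the batch (4 * used), and the dword written right afterwards holds the target's presumed address, which the kernel patches at execbuf time if the buffer ended up elsewhere. A hedged sketch of that pattern follows, with a hypothetical emit_reloc_sketch() helper standing in for the driver's emit_reloc/OUT_RELOC pair.

#include <stdint.h>
#include <intel_bufmgr.h>

static int
emit_reloc_sketch(drm_intel_bo *batch_bo, uint32_t *map, unsigned *used,
                  drm_intel_bo *target, uint32_t read_domains,
                  uint32_t write_domain, uint32_t delta)
{
   /* Record the relocation at the current byte offset in the batch... */
   int ret = drm_intel_bo_emit_reloc(batch_bo, 4 * *used, target, delta,
                                     read_domains, write_domain);
   if (ret != 0)
      return ret;

   /* ...then emit the target's presumed address; the kernel rewrites this
    * dword at execbuf time if the buffer was placed somewhere else. */
   map[(*used)++] = target->offset + delta;
   return 0;
}
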
320 __memcpy(intel->batch.map + intel->batch.used, data, bytes);
321 intel->batch.used += bytes >> 2;
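
Source lines 320-321 append raw data to the batch: the copy is byte-sized, but 'used' is a dword index, hence the 'bytes >> 2' increment. A small sketch of the same bookkeeping, with a hypothetical helper name:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void
batch_data_sketch(uint32_t *map, unsigned *used, const void *data,
                  unsigned bytes)
{
   assert((bytes & 3) == 0);           /* the command stream is dword-aligned */
   memcpy(map + *used, data, bytes);   /* map is uint32_t *, so this lands at
                                        * dword offset *used                  */
   *used += bytes >> 2;                /* 'used' counts dwords, not bytes     */
}
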
327 struct cached_batch_item **prev = &intel->batch.cached_items, *item;
328 uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
329 uint32_t *start = intel->batch.map + intel->batch.emit;
336 old = intel->batch.map + item->header;
339 if (prev != &intel->batch.cached_items) {
341 item->next = intel->batch.cached_items;
342 intel->batch.cached_items = item;
344 intel->batch.used = intel->batch.emit;
357 item->next = intel->batch.cached_items;
358 intel->batch.cached_items = item;
362 item->header = intel->batch.emit;
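
Source lines 327-362 implement a small duplicate-state cache: after a state packet is emitted, it is compared against the copy cached for the same packet header, and if nothing changed, 'used' is rewound to 'emit' so the redundant packet is dropped from the batch. The sketch below shows only that comparison-and-rewind step (with assumed field widths); the MRU reordering and the insertion of new cache entries seen in the listing are elided.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct cached_batch_item {
   struct cached_batch_item *next;
   uint16_t header;                /* dword offset of the cached packet */
   uint16_t size;                  /* packet size in bytes              */
};

/* If the packet just written at dword offset 'emit' is byte-identical to a
 * cached copy, rewind 'used' so the duplicate never reaches the hardware. */
static bool
skip_duplicate_packet(uint32_t *map, unsigned *used, unsigned emit,
                      struct cached_batch_item *cache)
{
   uint32_t sz = (*used - emit) * sizeof(uint32_t);
   uint32_t *start = map + emit;

   for (struct cached_batch_item *item = cache; item; item = item->next) {
      uint32_t *old = map + item->header;
      if (old[0] == start[0] && item->size == sz &&
          memcmp(old, start, sz) == 0) {
         *used = emit;
         return true;
      }
   }
   return false;
}
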
420 OUT_RELOC(intel->batch.workaround_bo,
466 if (!intel->batch.need_workaround_flush)
480 OUT_RELOC(intel->batch.workaround_bo,
485 intel->batch.need_workaround_flush = false;
498 if (intel->batch.is_blit) {