/* r600_buffer.c — revision 1235becaa1cf7e29f580900592563c3329d326de */
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * 23 * Authors: 24 * Jerome Glisse 25 * Corbin Simpson <MostAwesomeDude@gmail.com> 26 */ 27#include <pipe/p_screen.h> 28#include <util/u_format.h> 29#include <util/u_math.h> 30#include <util/u_inlines.h> 31#include <util/u_memory.h> 32#include <util/u_upload_mgr.h> 33#include "state_tracker/drm_driver.h" 34#include <xf86drm.h> 35#include "radeon_drm.h" 36#include "r600.h" 37#include "r600_pipe.h" 38 39extern struct u_resource_vtbl r600_buffer_vtbl; 40 41u32 r600_domain_from_usage(unsigned usage) 42{ 43 u32 domain = RADEON_GEM_DOMAIN_GTT; 44 45 if (usage & PIPE_BIND_RENDER_TARGET) { 46 domain |= RADEON_GEM_DOMAIN_VRAM; 47 } 48 if (usage & PIPE_BIND_DEPTH_STENCIL) { 49 domain |= RADEON_GEM_DOMAIN_VRAM; 50 } 51 if (usage & PIPE_BIND_SAMPLER_VIEW) { 52 domain |= RADEON_GEM_DOMAIN_VRAM; 53 } 54 /* also need BIND_BLIT_SOURCE/DESTINATION ? */ 55 if (usage & PIPE_BIND_VERTEX_BUFFER) { 56 domain |= RADEON_GEM_DOMAIN_GTT; 57 } 58 if (usage & PIPE_BIND_INDEX_BUFFER) { 59 domain |= RADEON_GEM_DOMAIN_GTT; 60 } 61 if (usage & PIPE_BIND_CONSTANT_BUFFER) { 62 domain |= RADEON_GEM_DOMAIN_VRAM; 63 } 64 65 return domain; 66} 67 68struct pipe_resource *r600_buffer_create(struct pipe_screen *screen, 69 const struct pipe_resource *templ) 70{ 71 struct r600_resource_buffer *rbuffer; 72 struct radeon_ws_bo *bo; 73 /* XXX We probably want a different alignment for buffers and textures. 
*/ 74 unsigned alignment = 4096; 75 76 rbuffer = CALLOC_STRUCT(r600_resource_buffer); 77 if (rbuffer == NULL) 78 return NULL; 79 80 rbuffer->magic = R600_BUFFER_MAGIC; 81 rbuffer->user_buffer = NULL; 82 rbuffer->num_ranges = 0; 83 rbuffer->r.base.b = *templ; 84 pipe_reference_init(&rbuffer->r.base.b.reference, 1); 85 rbuffer->r.base.b.screen = screen; 86 rbuffer->r.base.vtbl = &r600_buffer_vtbl; 87 rbuffer->r.size = rbuffer->r.base.b.width0; 88 rbuffer->r.domain = r600_domain_from_usage(rbuffer->r.base.b.bind); 89 bo = radeon_ws_bo((struct radeon*)screen->winsys, rbuffer->r.base.b.width0, alignment, rbuffer->r.base.b.bind); 90 if (bo == NULL) { 91 FREE(rbuffer); 92 return NULL; 93 } 94 rbuffer->r.bo = bo; 95 return &rbuffer->r.base.b; 96} 97 98struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen, 99 void *ptr, unsigned bytes, 100 unsigned bind) 101{ 102 struct r600_resource_buffer *rbuffer; 103 104 rbuffer = CALLOC_STRUCT(r600_resource_buffer); 105 if (rbuffer == NULL) 106 return NULL; 107 108 rbuffer->magic = R600_BUFFER_MAGIC; 109 pipe_reference_init(&rbuffer->r.base.b.reference, 1); 110 rbuffer->r.base.vtbl = &r600_buffer_vtbl; 111 rbuffer->r.base.b.screen = screen; 112 rbuffer->r.base.b.target = PIPE_BUFFER; 113 rbuffer->r.base.b.format = PIPE_FORMAT_R8_UNORM; 114 rbuffer->r.base.b.usage = PIPE_USAGE_IMMUTABLE; 115 rbuffer->r.base.b.bind = bind; 116 rbuffer->r.base.b.width0 = bytes; 117 rbuffer->r.base.b.height0 = 1; 118 rbuffer->r.base.b.depth0 = 1; 119 rbuffer->r.base.b.flags = 0; 120 rbuffer->num_ranges = 0; 121 rbuffer->r.bo = NULL; 122 rbuffer->user_buffer = ptr; 123 return &rbuffer->r.base.b; 124} 125 126static void r600_buffer_destroy(struct pipe_screen *screen, 127 struct pipe_resource *buf) 128{ 129 struct r600_resource_buffer *rbuffer = r600_buffer(buf); 130 131 if (rbuffer->r.bo) { 132 radeon_ws_bo_reference((struct radeon*)screen->winsys, &rbuffer->r.bo, NULL); 133 } 134 FREE(rbuffer); 135} 136 137static void 
*r600_buffer_transfer_map(struct pipe_context *pipe, 138 struct pipe_transfer *transfer) 139{ 140 struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource); 141 int write = 0; 142 uint8_t *data; 143 int i; 144 boolean flush = FALSE; 145 146 if (rbuffer->user_buffer) 147 return (uint8_t*)rbuffer->user_buffer + transfer->box.x; 148 149 if (transfer->usage & PIPE_TRANSFER_DISCARD) { 150 for (i = 0; i < rbuffer->num_ranges; i++) { 151 if ((transfer->box.x >= rbuffer->ranges[i].start) && 152 (transfer->box.x < rbuffer->ranges[i].end)) 153 flush = TRUE; 154 155 if (flush) { 156 radeon_ws_bo_reference((struct radeon*)pipe->winsys, &rbuffer->r.bo, NULL); 157 rbuffer->num_ranges = 0; 158 rbuffer->r.bo = radeon_ws_bo((struct radeon*)pipe->winsys, 159 rbuffer->r.base.b.width0, 0, 160 rbuffer->r.base.b.bind); 161 break; 162 } 163 } 164 } 165 if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) { 166 /* FIXME */ 167 } 168 if (transfer->usage & PIPE_TRANSFER_WRITE) { 169 write = 1; 170 } 171 data = radeon_ws_bo_map((struct radeon*)pipe->winsys, rbuffer->r.bo, transfer->usage, pipe); 172 if (!data) 173 return NULL; 174 175 return (uint8_t*)data + transfer->box.x; 176} 177 178static void r600_buffer_transfer_unmap(struct pipe_context *pipe, 179 struct pipe_transfer *transfer) 180{ 181 struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource); 182 183 if (rbuffer->r.bo) 184 radeon_ws_bo_unmap((struct radeon*)pipe->winsys, rbuffer->r.bo); 185} 186 187static void r600_buffer_transfer_flush_region(struct pipe_context *pipe, 188 struct pipe_transfer *transfer, 189 const struct pipe_box *box) 190{ 191 struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource); 192 unsigned i; 193 unsigned offset = transfer->box.x + box->x; 194 unsigned length = box->width; 195 196 assert(box->x + box->width <= transfer->box.width); 197 198 if (rbuffer->user_buffer) 199 return; 200 201 /* mark the range as used */ 202 for(i = 0; i < rbuffer->num_ranges; ++i) { 203 
if(offset <= rbuffer->ranges[i].end && rbuffer->ranges[i].start <= (offset+box->width)) { 204 rbuffer->ranges[i].start = MIN2(rbuffer->ranges[i].start, offset); 205 rbuffer->ranges[i].end = MAX2(rbuffer->ranges[i].end, (offset+length)); 206 return; 207 } 208 } 209 210 rbuffer->ranges[rbuffer->num_ranges].start = offset; 211 rbuffer->ranges[rbuffer->num_ranges].end = offset+length; 212 rbuffer->num_ranges++; 213} 214 215unsigned r600_buffer_is_referenced_by_cs(struct pipe_context *context, 216 struct pipe_resource *buf, 217 unsigned face, unsigned level) 218{ 219 /* FIXME */ 220 return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE; 221} 222 223struct pipe_resource *r600_buffer_from_handle(struct pipe_screen *screen, 224 struct winsys_handle *whandle) 225{ 226 struct radeon *rw = (struct radeon*)screen->winsys; 227 struct r600_resource *rbuffer; 228 struct radeon_ws_bo *bo = NULL; 229 230 bo = radeon_ws_bo_handle(rw, whandle->handle); 231 if (bo == NULL) { 232 return NULL; 233 } 234 235 rbuffer = CALLOC_STRUCT(r600_resource); 236 if (rbuffer == NULL) { 237 radeon_ws_bo_reference(rw, &bo, NULL); 238 return NULL; 239 } 240 241 pipe_reference_init(&rbuffer->base.b.reference, 1); 242 rbuffer->base.b.target = PIPE_BUFFER; 243 rbuffer->base.b.screen = screen; 244 rbuffer->base.vtbl = &r600_buffer_vtbl; 245 rbuffer->bo = bo; 246 return &rbuffer->base.b; 247} 248 249struct u_resource_vtbl r600_buffer_vtbl = 250{ 251 u_default_resource_get_handle, /* get_handle */ 252 r600_buffer_destroy, /* resource_destroy */ 253 r600_buffer_is_referenced_by_cs, /* is_buffer_referenced */ 254 u_default_get_transfer, /* get_transfer */ 255 u_default_transfer_destroy, /* transfer_destroy */ 256 r600_buffer_transfer_map, /* transfer_map */ 257 r600_buffer_transfer_flush_region, /* transfer_flush_region */ 258 r600_buffer_transfer_unmap, /* transfer_unmap */ 259 u_default_transfer_inline_write /* transfer_inline_write */ 260}; 261 262int r600_upload_index_buffer(struct r600_pipe_context 
*rctx, struct r600_drawl *draw) 263{ 264 struct pipe_resource *upload_buffer = NULL; 265 unsigned index_offset = draw->index_buffer_offset; 266 int ret = 0; 267 268 if (r600_buffer_is_user_buffer(draw->index_buffer)) { 269 ret = u_upload_buffer(rctx->upload_ib, 270 index_offset, 271 draw->count * draw->index_size, 272 draw->index_buffer, 273 &index_offset, 274 &upload_buffer); 275 if (ret) { 276 goto done; 277 } 278 draw->index_buffer_offset = index_offset; 279 280 /* Transfer ownership. */ 281 pipe_resource_reference(&draw->index_buffer, upload_buffer); 282 pipe_resource_reference(&upload_buffer, NULL); 283 } 284 285done: 286 return ret; 287} 288 289int r600_upload_user_buffers(struct r600_pipe_context *rctx) 290{ 291 enum pipe_error ret = PIPE_OK; 292 int i, nr; 293 294 nr = rctx->vertex_elements->count; 295 296 for (i = 0; i < nr; i++) { 297 struct pipe_vertex_buffer *vb = 298 &rctx->vertex_buffer[rctx->vertex_elements->elements[i].vertex_buffer_index]; 299 300 if (r600_buffer_is_user_buffer(vb->buffer)) { 301 struct pipe_resource *upload_buffer = NULL; 302 unsigned offset = 0; /*vb->buffer_offset * 4;*/ 303 unsigned size = vb->buffer->width0; 304 unsigned upload_offset; 305 ret = u_upload_buffer(rctx->upload_vb, 306 offset, size, 307 vb->buffer, 308 &upload_offset, &upload_buffer); 309 if (ret) 310 return ret; 311 312 pipe_resource_reference(&vb->buffer, NULL); 313 vb->buffer = upload_buffer; 314 vb->buffer_offset = upload_offset; 315 } 316 } 317 return ret; 318} 319