r600_query.c revision 29e55bc5f1b6d7375b6a86e24ca4ae58e399011e
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
#include "r600_hw_context_priv.h"

static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP ||
	       type == PIPE_QUERY_TIMESTAMP_DISJOINT;
}

static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;
	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		ctx->ws->buffer_unmap(buf->buf);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		ctx->ws->buffer_unmap(buf->buf);
		break;
	default:
		assert(0);
	}
	return buf;
}
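/* Note on the top-bit pre-fill above. One occlusion result slot holds, for
 * each DB backend, a pair of 64-bit ZPASS counters:
 *
 *     DB0: u64 begin; u64 end;
 *     DB1: u64 begin; u64 end;
 *     ... (16 bytes per backend, hence result_size = 16 * max_db)
 *
 * The GPU sets bit 63 of each counter once it has written the result, which
 * is what r600_query_read_result() tests further down. Pre-setting that bit
 * for backends missing from backend_mask makes the CPU readback treat them
 * as "done, contributing zero" instead of waiting for data that never comes.
 */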
static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	r600_need_cs_space(ctx, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);
	va += query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);

	if (r600_is_timer_query(query->type)) {
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	} else {
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
	}
}
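/* Note on CS sizing: the begin above reserves num_cs_dw * 2 because a query
 * that has begun must always be able to emit its matching end packets in the
 * same command stream. The num_cs_dw_*_queries_suspend counters track that
 * outstanding space so that a flush can suspend all active queries (emit
 * their ends) and resume them in the next CS.
 */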
static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		r600_need_cs_space(ctx, query->num_cs_dw, FALSE);
	}

	va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);

	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (r600_is_timer_query(query->type)) {
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		} else {
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
		}
	}
}
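/* Each result slot written above is split into a begin half and an end half:
 * begin data lands at results_end, end data at results_end + result_size/2
 * (occlusion queries instead interleave per-DB begin/end counter pairs, so
 * their end counters start 8 bytes into each pair). results_end only
 * advances here, once both halves of a slot have been emitted.
 */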
static void r600_emit_query_predication(struct r600_context *ctx, struct r600_query *query,
					int operation, bool flag_wait)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	if (operation == PREDICATION_OP_CLEAR) {
		r600_need_cs_space(ctx, 3, FALSE);

		cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = PRED_OP(PREDICATION_OP_CLEAR);
	} else {
		struct r600_query_buffer *qbuf;
		unsigned count;
		uint32_t op;

		/* Find how many results there are. */
		count = 0;
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			count += qbuf->results_end / query->result_size;
		}

		r600_need_cs_space(ctx, 5 * count, TRUE);

		op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
		     (flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);

		/* emit predicate packets for all data blocks */
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			unsigned results_base = 0;
			uint64_t va = r600_resource_va(&ctx->screen->screen, &qbuf->buf->b.b.b);

			while (results_base < qbuf->results_end) {
				cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
				cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL;
				cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF);
				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, qbuf->buf, RADEON_USAGE_READ);
				results_base += query->result_size;

				/* set CONTINUE bit for all packets except the first */
				op |= PREDICATION_CONTINUE;
			}
		}
	}
}

static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *query;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	query->buffer.buf = r600_new_query_buffer(rctx, query_type);
	if (!query->buffer.buf) {
		FREE(query);
		return NULL;
	}
	return (struct pipe_query*)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}

static void r600_update_occlusion_query_state(struct r600_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (rctx->atom_db_misc_state.occlusion_query_enabled != enable) {
			rctx->atom_db_misc_state.occlusion_query_enabled = enable;
			r600_atom_dirty(rctx, &rctx->atom_db_misc_state.atom);
		}
	}
}
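/* begin_query below recycles the query's buffer chain: buffers chained up by
 * earlier begin/end cycles are released, and if the current buffer is still
 * referenced by the CS or busy on the GPU it is replaced with a fresh one,
 * so that the eventual result readback (the buffer_map in
 * r600_get_query_buffer_result) does not have to stall on old work.
 */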
static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_update_occlusion_query_state(rctx, rquery->type, 1);

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type)) {
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	} else {
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	}
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type)) {
		LIST_DELINIT(&rquery->list);
	}

	r600_update_occlusion_query_state(rctx, rquery->type, -1);
}

static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
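/* Worked example for r600_query_read_result(): an occlusion result block for
 * one DB holds the dwords { begin_lo, begin_hi, end_lo, end_hi }, so the
 * calls below with (start_index=0, end_index=2, test_status_bit=true)
 * reassemble both 64-bit counters, check bit 63 of each (the "result
 * written" flag) and return end - start, i.e. the samples that passed
 * between the begin and end events.
 */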
static boolean r600_get_query_buffer_result(struct r600_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union r600_query_result *result)
{
	unsigned results_base = 0;
	char *map;

	map = ctx->ws->buffer_map(qbuf->buf->buf, ctx->cs,
				  PIPE_TRANSFER_READ |
				  (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	default:
		assert(0);
	}

	ctx->ws->buffer_unmap(qbuf->buf->buf);
	return TRUE;
}

static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query,
				     boolean wait, void *vresult)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	boolean *result_b = (boolean*)vresult;
	uint64_t *result_u64 = (uint64_t*)vresult;
	union r600_query_result result;
	struct pipe_query_data_so_statistics *result_so =
		(struct pipe_query_data_so_statistics*)vresult;
	struct r600_query_buffer *qbuf;

	memset(&result, 0, sizeof(result));

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, &result)) {
			return FALSE;
		}
	}

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		*result_u64 = result.u64;
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		*result_b = result.b;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		*result_u64 = (1000000 * result.u64) / rctx->screen->info.r600_clock_crystal_freq;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		*result_so = result.so;
		break;
	default:
		assert(0);
	}
	return TRUE;
}
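/* r600_clock_crystal_freq is reported by the kernel in kHz, so the
 * TIME_ELAPSED conversion above (ticks * 1000000 / freq) should yield
 * nanoseconds, the unit the Gallium query interface expects.
 *
 * render_condition below maps Gallium conditional rendering onto the
 * predication packets emitted in r600_emit_query_predication: occlusion
 * queries use PREDICATION_OP_ZPASS, streamout queries
 * PREDICATION_OP_PRIMCOUNT, and a NULL query clears the predicate; the
 * *_WAIT render-condition modes become the packet's wait hint.
 */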
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  uint mode)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	bool wait_flag = false;

	rctx->current_render_cond = query;
	rctx->current_render_cond_mode = mode;

	if (query == NULL) {
		if (rctx->predicate_drawing) {
			rctx->predicate_drawing = false;
			r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
		}
		return;
	}

	if (mode == PIPE_RENDER_COND_WAIT ||
	    mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
		wait_flag = true;
	}

	rctx->predicate_drawing = true;

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
		break;
	default:
		assert(0);
	}
}

void r600_suspend_nontimer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}

void r600_resume_nontimer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_suspend_timer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
		r600_emit_query_end(ctx, query);
	}

	assert(ctx->num_cs_dw_timer_queries_suspend == 0);
}

void r600_resume_timer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_timer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_init_query_functions(struct r600_context *rctx)
{
	rctx->context.create_query = r600_create_query;
	rctx->context.destroy_query = r600_destroy_query;
	rctx->context.begin_query = r600_begin_query;
	rctx->context.end_query = r600_end_query;
	rctx->context.get_query_result = r600_get_query_result;

	if (rctx->screen->info.r600_num_backends > 0)
		rctx->context.render_condition = r600_render_condition;
}