r600_query.c revision e363dd5c7d8ba40984d937ad7487abbb5be439bc
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"

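/* Timer queries are suspended and resumed separately from the other query
 * types, so classify a type as "timer" here; the context keeps separate
 * CS-space accounting for the two groups. */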
static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP ||
	       type == PIPE_QUERY_TIMESTAMP_DISJOINT;
}

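/* GPU_FINISHED and TIMESTAMP are "write once" queries: all of their work
 * happens in end_query, so begin_query has nothing to emit for them. */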
static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

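/* Allocate a 4 KB staging buffer for query results. For occlusion queries,
 * pre-set the top status bit of both counters for every disabled DB
 * backend, so that readback does not wait forever on backends that will
 * never write a result. */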
static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;
	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		ctx->ws->buffer_unmap(buf->buf);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		ctx->ws->buffer_unmap(buf->buf);
		break;
	default:
		assert(0);
	}
	return buf;
}

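/* Emit the "begin" half of a query. If the current results buffer is full,
 * chain it behind a fresh one; CS space is reserved for 2 * num_cs_dw so
 * that the matching end packets always fit in the same command stream. */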
static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	r600_need_cs_space(ctx, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);
	va += query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);

	if (r600_is_timer_query(query->type)) {
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	} else {
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
	}
}

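/* Emit the "end" half of a query and advance the write offset by one full
 * result slot. For queries without a begin, this also reserves the CS
 * space that begin_query would otherwise have reserved. */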
static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		r600_need_cs_space(ctx, query->num_cs_dw, FALSE);
	}

	va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);
	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (r600_is_timer_query(query->type)) {
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		} else {
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
		}
	}
}

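/* Set up predication for conditional rendering. Each result pair written
 * so far gets its own SET_PREDICATION packet pointing into the results
 * buffer; the CONTINUE bit on every packet after the first makes the
 * hardware combine the partial results. */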
static void r600_emit_query_predication(struct r600_context *ctx, struct r600_query *query,
					int operation, bool flag_wait)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	if (operation == PREDICATION_OP_CLEAR) {
		r600_need_cs_space(ctx, 3, FALSE);

		cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = PRED_OP(PREDICATION_OP_CLEAR);
	} else {
		struct r600_query_buffer *qbuf;
		unsigned count;
		uint32_t op;

		/* Find how many results there are. */
		count = 0;
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			count += qbuf->results_end / query->result_size;
		}

		r600_need_cs_space(ctx, 5 * count, TRUE);

		op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
				(flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);

		/* emit predicate packets for all data blocks */
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			unsigned results_base = 0;
			uint64_t va = r600_resource_va(&ctx->screen->screen, &qbuf->buf->b.b.b);

			while (results_base < qbuf->results_end) {
				cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
				cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL;
				cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF);
				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, qbuf->buf, RADEON_USAGE_READ);
				results_base += query->result_size;

				/* set CONTINUE bit for all packets except the first */
				op |= PREDICATION_CONTINUE;
			}
		}
	}
}

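/* result_size is the footprint of one begin/end result pair in the results
 * buffer; num_cs_dw is the CS space needed to emit one begin or one end. */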
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *query;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	query->buffer.buf = r600_new_query_buffer(rctx, query_type);
	if (!query->buffer.buf) {
		FREE(query);
		return NULL;
	}
	return (struct pipe_query*)query;
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}

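/* Track the number of active occlusion queries and toggle the DB occlusion
 * counters (via the db_misc_state atom) when the first one starts or the
 * last one ends. */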
static void r600_update_occlusion_query_state(struct r600_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (rctx->db_misc_state.occlusion_query_enabled != enable) {
			rctx->db_misc_state.occlusion_query_enabled = enable;
			r600_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	}
}

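/* Beginning a query invalidates all previously gathered results, so the
 * old buffer chain is discarded and the query starts writing at offset 0
 * of a (possibly new) idle buffer. */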
static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_update_occlusion_query_state(rctx, rquery->type, 1);

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type)) {
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	} else {
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	}
}

static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type)) {
		LIST_DELINIT(&rquery->list);
	}

	r600_update_occlusion_query_state(rctx, rquery->type, -1);
}

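/* Read one (start, end) pair of 64-bit values from a mapped results buffer
 * and return end - start. When test_status_bit is set, bit 63 of both
 * values must be set (the hardware is expected to set it once the write
 * has landed); otherwise the result counts as not ready and 0 is
 * returned. */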
static uint64_t r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

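/* Accumulate the results stored in one buffer of the chain into *result.
 * With wait == FALSE the buffer is mapped with DONTBLOCK, so a buffer the
 * GPU is still writing makes this return FALSE ("not ready"). */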
static boolean r600_get_query_buffer_result(struct r600_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union r600_query_result *result)
{
	unsigned results_base = 0;
	char *map;

	map = ctx->ws->buffer_map(qbuf->buf->buf, ctx->cs,
				  PIPE_TRANSFER_READ |
				  (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	default:
		assert(0);
	}

	ctx->ws->buffer_unmap(qbuf->buf->buf);
	return TRUE;
}

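/* Walk the whole buffer chain, accumulate the raw results, and convert
 * them into the type that pipe_context::get_query_result must return. */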
static boolean r600_get_query_result(struct pipe_context *ctx,
					struct pipe_query *query,
					boolean wait, void *vresult)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	boolean *result_b = (boolean*)vresult;
	uint64_t *result_u64 = (uint64_t*)vresult;
	union r600_query_result result;
	struct pipe_query_data_so_statistics *result_so =
		(struct pipe_query_data_so_statistics*)vresult;
	struct r600_query_buffer *qbuf;

	memset(&result, 0, sizeof(result));

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, &result)) {
			return FALSE;
		}
	}

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		*result_u64 = result.u64;
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		*result_b = result.b;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		*result_u64 = (1000000 * result.u64) / rctx->screen->info.r600_clock_crystal_freq;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		*result_so = result.so;
		break;
	default:
		assert(0);
	}
	return TRUE;
}

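/* pipe_context::render_condition hook: a NULL query clears predication,
 * otherwise the query's results drive ZPASS or PRIMCOUNT predication with
 * the wait behavior selected by the mode. */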
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  uint mode)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	bool wait_flag = false;

	rctx->current_render_cond = query;
	rctx->current_render_cond_mode = mode;

	if (query == NULL) {
		if (rctx->predicate_drawing) {
			rctx->predicate_drawing = false;
			r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
		}
		return;
	}

	if (mode == PIPE_RENDER_COND_WAIT ||
	    mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
		wait_flag = true;
	}

	rctx->predicate_drawing = true;

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
		break;
	default:
		assert(0);
	}
}

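/* Queries must not span a command stream flush, so the context suspends
 * all active queries (emits their end packets) before flushing and resumes
 * them (emits new begin packets) afterwards. The num_cs_dw_*_suspend
 * counters track the CS space the pending end packets will need. */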
void r600_suspend_nontimer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}

void r600_resume_nontimer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_suspend_timer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
		r600_emit_query_end(ctx, query);
	}

	assert(ctx->num_cs_dw_timer_queries_suspend == 0);
}

void r600_resume_timer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_timer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}

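/* render_condition is only installed when the kernel reports a nonzero
 * backend count (older kernels may not), since predication relies on the
 * per-backend query results. */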
void r600_init_query_functions(struct r600_context *rctx)
{
	rctx->context.create_query = r600_create_query;
	rctx->context.destroy_query = r600_destroy_query;
	rctx->context.begin_query = r600_begin_query;
	rctx->context.end_query = r600_end_query;
	rctx->context.get_query_result = r600_get_query_result;

	if (rctx->screen->info.r600_num_backends > 0)
	    rctx->context.render_condition = r600_render_condition;
}