r600_query.c revision 44f14ebd7b9ba7186342039d2602fdd6ea5077f5
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"

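/* Timer queries are suspended and resumed separately from other query
 * types (see the suspend/resume helpers at the end of this file). */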
static bool r600_is_timer_query(unsigned type)
{
	return type == PIPE_QUERY_TIME_ELAPSED ||
	       type == PIPE_QUERY_TIMESTAMP ||
	       type == PIPE_QUERY_TIMESTAMP_DISJOINT;
}

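/* GPU_FINISHED and TIMESTAMP queries only sample a value when they end;
 * every other type brackets the measured range with a begin event. */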
static bool r600_query_needs_begin(unsigned type)
{
	return type != PIPE_QUERY_GPU_FINISHED &&
	       type != PIPE_QUERY_TIMESTAMP;
}

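/* Allocate a 4 KB results buffer and pre-initialize it according to the
 * query type. */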
static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, unsigned type)
{
	unsigned j, i, num_results, buf_size = 4096;
	uint32_t *results;
	/* Queries are normally read by the CPU after
	 * being written by the GPU, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, buf_size);

	switch (type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		results = ctx->ws->buffer_map(buf->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);

		/* Set top bits for unused backends. */
		num_results = buf_size / (16 * ctx->max_db);
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * ctx->max_db;
		}
		ctx->ws->buffer_unmap(buf->cs_buf);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
	case PIPE_QUERY_TIMESTAMP:
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		results = ctx->ws->buffer_map(buf->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
		memset(results, 0, buf_size);
		ctx->ws->buffer_unmap(buf->cs_buf);
		break;
	default:
		assert(0);
	}
	return buf;
}

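/* Emit the packets that begin a query.  If the current results buffer is
 * full, a fresh buffer is allocated and the old one is chained via
 * query->buffer.previous so earlier results are not lost. */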
static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	r600_need_cs_space(ctx, query->num_cs_dw * 2, TRUE);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.buf = r600_new_query_buffer(ctx, query->type);
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
	}

	/* emit begin query */
	va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);
	va += query->buffer.results_end;

	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);

	if (r600_is_timer_query(query->type)) {
		ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
	} else {
		ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
	}
}

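/* Emit the packets that end a query.  End values are written into the
 * second half of the current result slot, next to the matching begin
 * values. */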
static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *query)
{
	struct radeon_winsys_cs *cs = ctx->cs;
	uint64_t va;

	/* The queries which need begin already called this in begin_query. */
	if (!r600_query_needs_begin(query->type)) {
		r600_need_cs_space(ctx, query->num_cs_dw, FALSE);
	}

	va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf);
	/* emit end query */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		va += query->buffer.results_end + 8;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += query->buffer.results_end + query->result_size/2;
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += query->buffer.results_end + query->result_size/2;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = 0;
		break;
	default:
		assert(0);
	}
	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);

	query->buffer.results_end += query->result_size;

	if (r600_query_needs_begin(query->type)) {
		if (r600_is_timer_query(query->type)) {
			ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
		} else {
			ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
		}
	}
}

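/* Program SET_PREDICATION for conditional rendering.  One packet is
 * emitted per stored result block; every packet after the first carries
 * the CONTINUE bit so the blocks are combined into a single predicate. */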
static void r600_emit_query_predication(struct r600_context *ctx, struct r600_query *query,
					int operation, bool flag_wait)
{
	struct radeon_winsys_cs *cs = ctx->cs;

	if (operation == PREDICATION_OP_CLEAR) {
		r600_need_cs_space(ctx, 3, FALSE);

		cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
		cs->buf[cs->cdw++] = 0;
		cs->buf[cs->cdw++] = PRED_OP(PREDICATION_OP_CLEAR);
	} else {
		struct r600_query_buffer *qbuf;
		unsigned count;
		uint32_t op;

		/* Find how many results there are. */
		count = 0;
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			count += qbuf->results_end / query->result_size;
		}

		r600_need_cs_space(ctx, 5 * count, TRUE);

		op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE |
				(flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW);

		/* emit predicate packets for all data blocks */
		for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
			unsigned results_base = 0;
			uint64_t va = r600_resource_va(&ctx->screen->screen, &qbuf->buf->b.b);

			while (results_base < qbuf->results_end) {
				cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0);
				cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL;
				cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF);
				cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
				cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, qbuf->buf, RADEON_USAGE_READ);
				results_base += query->result_size;

				/* set CONTINUE bit for all packets except the first */
				op |= PREDICATION_CONTINUE;
			}
		}
	}
}

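/* Create a query object and size its result storage.  Occlusion queries
 * need a begin/end counter pair for every DB; the other types use a
 * fixed-size result slot. */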
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *query;

	query = CALLOC_STRUCT(r600_query);
	if (query == NULL)
		return NULL;

	query->type = query_type;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		query->result_size = 16 * rctx->max_db;
		query->num_cs_dw = 6;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 16;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 8;
		query->num_cs_dw = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw = 6;
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	query->buffer.buf = r600_new_query_buffer(rctx, query_type);
	if (!query->buffer.buf) {
		FREE(query);
		return NULL;
	}
	return (struct pipe_query*)query;
}

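/* Free a query and every buffer in its chain. */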
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_query *rquery = (struct r600_query*)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
	FREE(query);
}

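/* Track how many occlusion queries are active and dirty the
 * db_misc_state atom when the DB counters have to be toggled. */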
static void r600_update_occlusion_query_state(struct r600_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
		bool enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		enable = rctx->num_occlusion_queries != 0;

		if (rctx->db_misc_state.occlusion_query_enabled != enable) {
			rctx->db_misc_state.occlusion_query_enabled = enable;
			r600_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	}
}

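/* pipe_context::begin_query: drop results from any previous use of the
 * query, emit the begin packets and put the query on the matching active
 * list so it can be suspended around a flush. */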
static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *prev = rquery->buffer.previous;

	if (!r600_query_needs_begin(rquery->type)) {
		assert(0);
		return;
	}

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
		FREE(qbuf);
	}

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
	    rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
		pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
		rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
	}

	rquery->buffer.results_end = 0;
	rquery->buffer.previous = NULL;

	r600_update_occlusion_query_state(rctx, rquery->type, 1);

	r600_emit_query_begin(rctx, rquery);

	if (r600_is_timer_query(rquery->type)) {
		LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
	} else {
		LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
	}
}

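/* pipe_context::end_query: emit the end packets and take the query off
 * the active list. */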
static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	r600_emit_query_end(rctx, rquery);

	if (r600_query_needs_begin(rquery->type)) {
		LIST_DELINIT(&rquery->list);
	}

	r600_update_occlusion_query_state(rctx, rquery->type, -1);
}

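/* Read one 64-bit begin/end pair from a mapped results buffer and return
 * the difference.  With test_status_bit, the top bit of each value must
 * be set (marking the value as valid) for the pair to count. */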
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}

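/* Accumulate the results stored in one buffer of the chain into *result.
 * Returns FALSE if wait is false and the buffer can't be mapped without
 * blocking. */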
static boolean r600_get_query_buffer_result(struct r600_context *ctx,
					    struct r600_query *query,
					    struct r600_query_buffer *qbuf,
					    boolean wait,
					    union pipe_query_result *result)
{
	unsigned results_base = 0;
	char *map;

	map = ctx->ws->buffer_map(qbuf->buf->cs_buf, ctx->cs,
				  PIPE_TRANSFER_READ |
				  (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
	if (!map)
		return FALSE;

	/* count all results across all data blocks */
	switch (query->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, true);
			results_base += 16;
		}
		break;
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 0, 2, true) != 0;
			results_base += 16;
		}
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 2, false);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_TIMESTAMP:
	{
		uint32_t *current_result = (uint32_t*)map;
		result->u64 = (uint64_t)current_result[0] |
			      (uint64_t)current_result[1] << 32;
		break;
	}
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 2, 6, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		while (results_base != qbuf->results_end) {
			result->u64 +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_STATISTICS:
		while (results_base != qbuf->results_end) {
			result->so_statistics.num_primitives_written +=
				r600_query_read_result(map + results_base, 2, 6, true);
			result->so_statistics.primitives_storage_needed +=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		while (results_base != qbuf->results_end) {
			result->b = result->b ||
				r600_query_read_result(map + results_base, 2, 6, true) !=
				r600_query_read_result(map + results_base, 0, 4, true);
			results_base += query->result_size;
		}
		break;
	default:
		assert(0);
	}

	ctx->ws->buffer_unmap(qbuf->buf->cs_buf);
	return TRUE;
}

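/* pipe_context::get_query_result: sum up every buffer in the chain and
 * convert timestamp values from crystal clock ticks to nanoseconds. */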
static boolean r600_get_query_result(struct pipe_context *ctx,
					struct pipe_query *query,
					boolean wait, union pipe_query_result *result)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	struct r600_query_buffer *qbuf;

	util_query_clear_result(result, rquery->type);

	for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
		if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
			return FALSE;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
	}
	return TRUE;
}

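/* pipe_context::render_condition: start or stop predicated drawing based
 * on an occlusion or streamout query.  A NULL query clears the
 * predication state. */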
static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  uint mode)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;
	bool wait_flag = false;

	rctx->current_render_cond = query;
	rctx->current_render_cond_mode = mode;

	if (query == NULL) {
		if (rctx->predicate_drawing) {
			rctx->predicate_drawing = false;
			r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
		}
		return;
	}

	if (mode == PIPE_RENDER_COND_WAIT ||
	    mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
		wait_flag = true;
	}

	rctx->predicate_drawing = true;

	switch (rquery->type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
		break;
	default:
		assert(0);
	}
}

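/* Active queries are suspended (their end packets emitted) before a CS
 * flush and resumed (begun again) in the new CS.  Timer and non-timer
 * queries live on separate lists because they are suspended at different
 * points. */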
void r600_suspend_nontimer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_end(ctx, query);
	}
	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}

void r600_resume_nontimer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}

void r600_suspend_timer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
		r600_emit_query_end(ctx, query);
	}

	assert(ctx->num_cs_dw_timer_queries_suspend == 0);
}

void r600_resume_timer_queries(struct r600_context *ctx)
{
	struct r600_query *query;

	assert(ctx->num_cs_dw_timer_queries_suspend == 0);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
		r600_emit_query_begin(ctx, query);
	}
}

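/* Hook the query entry points into the pipe_context.  render_condition
 * is only installed when the kernel reports the number of backends,
 * presumably because predication needs valid per-backend ZPASS data. */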
void r600_init_query_functions(struct r600_context *rctx)
{
	rctx->context.create_query = r600_create_query;
	rctx->context.destroy_query = r600_destroy_query;
	rctx->context.begin_query = r600_begin_query;
	rctx->context.end_query = r600_end_query;
	rctx->context.get_query_result = r600_get_query_result;

	if (rctx->screen->info.r600_num_backends > 0)
	    rctx->context.render_condition = r600_render_condition;
}