/* r128_state.c -- State support for r128 -*- linux-c -*-
 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
 */
/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"

/* ================================================================
 * CCE hardware state programming functions
 */

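/* Emit up to three cliprects at a time using the AUX1-AUX3 scissor
 * registers, then enable the corresponding scissors (in OR mode) via
 * AUX_SC_CNTL.  The "- 1" on the right/bottom edges accounts for the
 * scissor registers being inclusive while drm_clip_rect x2/y2 are
 * exclusive.
 */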
static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
				 struct drm_clip_rect *boxes, int count)
{
	u32 aux_sc_cntl = 0x00000000;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING((count < 3 ? count : 3) * 5 + 2);

	if (count >= 1) {
		OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
		OUT_RING(boxes[0].x1);
		OUT_RING(boxes[0].x2 - 1);
		OUT_RING(boxes[0].y1);
		OUT_RING(boxes[0].y2 - 1);

		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
	}
	if (count >= 2) {
		OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
		OUT_RING(boxes[1].x1);
		OUT_RING(boxes[1].x2 - 1);
		OUT_RING(boxes[1].y1);
		OUT_RING(boxes[1].y2 - 1);

		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
	}
	if (count >= 3) {
		OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
		OUT_RING(boxes[2].x1);
		OUT_RING(boxes[2].x2 - 1);
		OUT_RING(boxes[2].y1);
		OUT_RING(boxes[2].y2 - 1);

		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
	}

	OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
	OUT_RING(aux_sc_cntl);

	ADVANCE_RING();
}

static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
	OUT_RING(ctx->scale_3d_cntl);

	ADVANCE_RING();
}

static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(13);

	OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
	OUT_RING(ctx->dst_pitch_offset_c);
	OUT_RING(ctx->dp_gui_master_cntl_c);
	OUT_RING(ctx->sc_top_left_c);
	OUT_RING(ctx->sc_bottom_right_c);
	OUT_RING(ctx->z_offset_c);
	OUT_RING(ctx->z_pitch_c);
	OUT_RING(ctx->z_sten_cntl_c);
	OUT_RING(ctx->tex_cntl_c);
	OUT_RING(ctx->misc_3d_state_cntl_reg);
	OUT_RING(ctx->texture_clr_cmp_clr_c);
	OUT_RING(ctx->texture_clr_cmp_msk_c);
	OUT_RING(ctx->fog_color_c);

	ADVANCE_RING();
}

static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(3);

	OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
	OUT_RING(ctx->setup_cntl);
	OUT_RING(ctx->pm4_vc_fpu_setup);

	ADVANCE_RING();
}

static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5);

	OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
	OUT_RING(ctx->dp_write_mask);

	OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
	OUT_RING(ctx->sten_ref_mask_c);
	OUT_RING(ctx->plane_3d_mask_c);

	ADVANCE_RING();
}

static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
	OUT_RING(ctx->window_xy_offset);

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
			     2 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	OUT_RING(ctx->tex_size_pitch_c);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
		OUT_RING(tex->tex_offset[i]);

	OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
	OUT_RING(ctx->constant_color_c);
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
		OUT_RING(tex->tex_offset[i]);

	OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}

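/* Emit only the state blocks whose dirty flags are set in the SAREA,
 * clearing each flag as the corresponding block is uploaded.  The
 * texture cache flush is a one-shot request, so the flush bit is
 * cleared from the saved tex_cntl_c afterwards.
 */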
static void r128_emit_state(drm_r128_private_t *dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("dirty=0x%08x\n", dirty);

	if (dirty & R128_UPLOAD_CORE) {
		r128_emit_core(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
	}

	if (dirty & R128_UPLOAD_CONTEXT) {
		r128_emit_context(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
	}

	if (dirty & R128_UPLOAD_SETUP) {
		r128_emit_setup(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
	}

	if (dirty & R128_UPLOAD_MASKS) {
		r128_emit_masks(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
	}

	if (dirty & R128_UPLOAD_WINDOW) {
		r128_emit_window(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
	}

	if (dirty & R128_UPLOAD_TEX0) {
		r128_emit_tex0(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
	}

	if (dirty & R128_UPLOAD_TEX1) {
		r128_emit_tex1(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
	}

	/* Turn off the texture cache flushing */
	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;

	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
}

#if R128_PERFORMANCE_BOXES
/* ================================================================
 * Performance monitoring functions
 */

static void r128_clear_box(drm_r128_private_t *dev_priv,
			   int x, int y, int w, int h, int r, int g, int b)
{
	u32 pitch, offset;
	u32 fb_bpp, color;
	RING_LOCALS;

	switch (dev_priv->fb_bpp) {
	case 16:
		fb_bpp = R128_GMC_DST_16BPP;
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case 24:
		fb_bpp = R128_GMC_DST_24BPP;
		color = ((r << 16) | (g << 8) | b);
		break;
	case 32:
		fb_bpp = R128_GMC_DST_32BPP;
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	default:
		return;
	}

	offset = dev_priv->back_offset;
	pitch = dev_priv->back_pitch >> 3;

	BEGIN_RING(6);

	OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
	OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_SOLID_COLOR |
		 fb_bpp |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_P |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);

	OUT_RING((pitch << 21) | (offset >> 5));
	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);

	ADVANCE_RING();
}

static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
{
	if (atomic_read(&dev_priv->idle_count) == 0)
		r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
	else
		atomic_set(&dev_priv->idle_count, 0);
}

#endif

/* ================================================================
 * CCE command dispatch functions
 */

static void r128_print_dirty(const char *msg, unsigned int flags)
{
	DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
		 msg,
		 flags,
		 (flags & R128_UPLOAD_CORE) ? "core, " : "",
		 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
		 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
		 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
		 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
		 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
		 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
		 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
		 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}

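/* Clear the requested buffers for every cliprect.  If page flipping is
 * active and the back buffer is currently being scanned out, the FRONT
 * and BACK flags are swapped so the clears land on the buffers the
 * client actually means.
 */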
static void r128_cce_dispatch_clear(struct drm_device *dev,
				    drm_r128_clear_t *clear)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(R128_FRONT | R128_BACK);
		if (tmp & R128_FRONT)
			flags |= R128_BACK;
		if (tmp & R128_BACK)
			flags |= R128_FRONT;
	}

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
			  pbox[i].x1, pbox[i].y1, pbox[i].x2,
			  pbox[i].y2, flags);

		if (flags & (R128_FRONT | R128_BACK)) {
			BEGIN_RING(2);

			OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
			OUT_RING(clear->color_mask);

			ADVANCE_RING();
		}

		if (flags & R128_FRONT) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_BACK) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_DEPTH) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(clear->clear_depth);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}
	}
}

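/* Blit the back buffer to the front buffer for each cliprect using the
 * 2D engine, taking the current page-flip state into account, then
 * bump the frame counter used for client-side throttling.
 */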
static void r128_cce_dispatch_swap(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->color_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS |
			 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(dev_priv->front_pitch_offset_c);
		} else {
			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(dev_priv->back_pitch_offset_c);
		}

		OUT_RING((x << 16) | y);
		OUT_RING((x << 16) | y);
		OUT_RING((w << 16) | h);

		ADVANCE_RING();
	}

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}

static void r128_cce_dispatch_flip(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	BEGIN_RING(4);

	R128_WAIT_UNTIL_PAGE_FLIPPED();
	OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));

	if (dev_priv->current_page == 0)
		OUT_RING(dev_priv->back_offset);
	else
		OUT_RING(dev_priv->front_offset);

	ADVANCE_RING();

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
	    1 - dev_priv->current_page;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}

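/* Emit any dirty state and then issue the vertex buffer once per group
 * of up to three cliprects, since the hardware provides only three
 * auxiliary scissor registers.
 */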
static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = buf->bus_address;
	int size = buf->used;
	int prim = buf_priv->prim;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);

	if (0)
		r128_print_dirty("dispatch_vertex", sarea_priv->dirty);

	if (buf->used) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
			r128_emit_state(dev_priv);

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			/* Emit the vertex buffer rendering commands */
			BEGIN_RING(5);

			OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
			OUT_RING(offset);
			OUT_RING(size);
			OUT_RING(format);
			OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
				 (size << R128_CCE_VC_CNTL_NUM_SHIFT));

			ADVANCE_RING();

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

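/* Dispatch a client buffer through the PM4 indirect interface.  The
 * buffer is padded to an even dword count with a Type-2 packet if
 * needed, and its age is written out when it is to be discarded so the
 * buffer can be reclaimed after the CCE has processed it.
 */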
static void r128_cce_dispatch_indirect(struct drm_device *dev,
				       struct drm_buf *buf, int start, int end)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;
	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		int offset = buf->bus_address + start;
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CCE packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *)
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
			data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
		}

		buf_priv->dispatched = 1;

		/* Fire off the indirect buffer */
		BEGIN_RING(3);

		OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
		OUT_RING(offset);
		OUT_RING(dwords);

		ADVANCE_RING();
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the indirect buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;
}

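/* Build an indexed-primitive packet header in place in the client
 * buffer and dispatch it as an indirect buffer, once per group of up
 * to three cliprects.  An odd index count leaves a stale trailing
 * index, which is masked off below.
 */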
static void r128_cce_dispatch_indices(struct drm_device *dev,
				      struct drm_buf *buf,
				      int start, int end, int count)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
	int prim = buf_priv->prim;
	u32 *data;
	int dwords;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);

	if (0)
		r128_print_dirty("dispatch_indices", sarea_priv->dirty);

	if (start != end) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
			r128_emit_state(dev_priv);

		dwords = (end - start + 3) / sizeof(u32);

		data = (u32 *) ((char *)dev->agp_buffer_map->handle
				+ buf->offset + start);

		data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
						  dwords - 2));

		data[1] = cpu_to_le32(offset);
		data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
		data[3] = cpu_to_le32(format);
		data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
				       (count << 16)));

		if (count & 0x1) {
#ifdef __LITTLE_ENDIAN
			data[dwords - 1] &= 0x0000ffff;
#else
			data[dwords - 1] &= 0xffff0000;
#endif
		}

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			r128_cce_dispatch_indirect(dev, buf, start, end);

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

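/* Upload texture data from a host buffer with a HOSTDATA_BLT packet,
 * flushing the pixel cache before and after the blit so the texture
 * data cannot be mixed with in-flight pixel data.
 */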
static int r128_cce_dispatch_blit(struct drm_device *dev,
				  struct drm_file *file_priv,
				  drm_r128_blit_t *blit)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	u32 *data;
	int dword_shift, dwords;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch (blit->format) {
	case R128_DATATYPE_ARGB8888:
		dword_shift = 0;
		break;
	case R128_DATATYPE_ARGB1555:
	case R128_DATATYPE_RGB565:
	case R128_DATATYPE_ARGB4444:
	case R128_DATATYPE_YVYU422:
	case R128_DATATYPE_VYUY422:
		dword_shift = 1;
		break;
	case R128_DATATYPE_CI8:
	case R128_DATATYPE_RGB8:
		dword_shift = 2;
		break;
	default:
		DRM_ERROR("invalid blit format %d\n", blit->format);
		return -EINVAL;
	}

	/* Flush the pixel cache, and mark the contents as Read Invalid.
	 * This ensures no pixel data gets mixed up with the texture
	 * data from the host data blit, otherwise part of the texture
	 * image may be corrupted.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	/* Dispatch the indirect buffer.
	 */
	buf = dma->buflist[blit->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", blit->idx);
		return -EINVAL;
	}

	buf_priv->discard = 1;

	dwords = (blit->width * blit->height) >> dword_shift;

	data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);

	data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
	data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
			       R128_GMC_BRUSH_NONE |
			       (blit->format << 8) |
			       R128_GMC_SRC_DATATYPE_COLOR |
			       R128_ROP3_S |
			       R128_DP_SRC_SOURCE_HOST_DATA |
			       R128_GMC_CLR_CMP_CNTL_DIS |
			       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));

	data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
	data[3] = cpu_to_le32(0xffffffff);
	data[4] = cpu_to_le32(0xffffffff);
	data[5] = cpu_to_le32((blit->y << 16) | blit->x);
	data[6] = cpu_to_le32((blit->height << 16) | blit->width);
	data[7] = cpu_to_le32(dwords);

	buf->used = (dwords + 8) * sizeof(u32);

	r128_cce_dispatch_indirect(dev, buf, 0, buf->used);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	return 0;
}

/* ================================================================
 * Tiled depth buffer management
 *
 * FIXME: These should all set the destination write mask for when we
 * have hardware stencil support.
 */

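/* Write a horizontal span of depth values, one PAINT_MULTI packet per
 * pixel, optionally skipping pixels whose mask byte is zero.
 */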
static int r128_cce_dispatch_write_span(struct drm_device *dev,
					drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	u32 *buffer;
	u8 *mask;
	int i, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
		return -EFAULT;
	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
		return -EFAULT;

	buffer_size = depth->n * sizeof(u32);
	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;
	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
		kfree(buffer);
		return -EFAULT;
	}

	mask_size = depth->n * sizeof(u8);
	if (depth->mask) {
		mask = kmalloc(mask_size, GFP_KERNEL);
		if (mask == NULL) {
			kfree(buffer);
			return -ENOMEM;
		}
		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
			kfree(buffer);
			kfree(mask);
			return -EFAULT;
		}

		for (i = 0; i < count; i++, x++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x << 16) | y);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		kfree(mask);
	} else {
		for (i = 0; i < count; i++, x++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x << 16) | y);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	kfree(buffer);

	return 0;
}

static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
					  drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	u32 *buffer;
	u8 *mask;
	int i, xbuf_size, ybuf_size, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = kmalloc(xbuf_size, GFP_KERNEL);
	if (x == NULL)
		return -ENOMEM;
	y = kmalloc(ybuf_size, GFP_KERNEL);
	if (y == NULL) {
		kfree(x);
		return -ENOMEM;
	}
	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}
	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (buffer == NULL) {
		kfree(x);
		kfree(y);
		return -ENOMEM;
	}
	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
		kfree(x);
		kfree(y);
		kfree(buffer);
		return -EFAULT;
	}

	if (depth->mask) {
		mask_size = depth->n * sizeof(u8);
		mask = kmalloc(mask_size, GFP_KERNEL);
		if (mask == NULL) {
			kfree(x);
			kfree(y);
			kfree(buffer);
			return -ENOMEM;
		}
		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
			kfree(x);
			kfree(y);
			kfree(buffer);
			kfree(mask);
			return -EFAULT;
		}

		for (i = 0; i < count; i++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x[i] << 16) | y[i]);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		kfree(mask);
	} else {
		for (i = 0; i < count; i++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x[i] << 16) | y[i]);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	kfree(x);
	kfree(y);
	kfree(buffer);

	return 0;
}

static int r128_cce_dispatch_read_span(struct drm_device *dev,
				       drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
		return -EFAULT;
	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
		return -EFAULT;

	BEGIN_RING(7);

	OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
	OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
		 R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_NONE |
		 (dev_priv->depth_fmt << 8) |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_S |
		 R128_DP_SRC_SOURCE_MEMORY |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

	OUT_RING(dev_priv->depth_pitch_offset_c);
	OUT_RING(dev_priv->span_pitch_offset_c);

	OUT_RING((x << 16) | y);
	OUT_RING((0 << 16) | 0);
	OUT_RING((count << 16) | 1);

	ADVANCE_RING();

	return 0;
}

static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
					 drm_r128_depth_t *depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	int i, xbuf_size, ybuf_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	if (count > dev_priv->depth_pitch)
		count = dev_priv->depth_pitch;

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = kmalloc(xbuf_size, GFP_KERNEL);
	if (x == NULL)
		return -ENOMEM;
	y = kmalloc(ybuf_size, GFP_KERNEL);
	if (y == NULL) {
		kfree(x);
		return -ENOMEM;
	}
	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}
	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
		kfree(x);
		kfree(y);
		return -EFAULT;
	}

	for (i = 0; i < count; i++) {
		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->depth_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

		OUT_RING(dev_priv->depth_pitch_offset_c);
		OUT_RING(dev_priv->span_pitch_offset_c);

		OUT_RING((x[i] << 16) | y[i]);
		OUT_RING((i << 16) | 0);
		OUT_RING((1 << 16) | 1);

		ADVANCE_RING();
	}

	kfree(x);
	kfree(y);

	return 0;
}

/* ================================================================
 * Polygon stipple
 */

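/* Upload a 32x32 polygon stipple pattern into the brush data registers. */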
static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(33);

	OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
	for (i = 0; i < 32; i++)
		OUT_RING(stipple[i]);

	ADVANCE_RING();
}

/* ================================================================
 * IOCTL functions
 */

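/* Most of the ioctls below verify that the caller holds the hardware
 * lock and that the CCE has been initialized, and check for ring space
 * before dispatching any commands.
 */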
static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv;
	drm_r128_clear_t *clear = data;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	sarea_priv = dev_priv->sarea_priv;

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_clear(dev, clear);
	COMMIT_RING();

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;

	return 0;
}

static int r128_do_init_pageflip(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
	dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL,
		   dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

static int r128_do_cleanup_pageflip(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);

	if (dev_priv->current_page != 0) {
		r128_cce_dispatch_flip(dev);
		COMMIT_RING();
	}

	dev_priv->page_flipping = 0;
	return 0;
}

/* Swapping and flipping are different operations and need different ioctls.
 * They can and should be intermixed to support multiple 3D windows.
 */

static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (!dev_priv->page_flipping)
		r128_do_init_pageflip(dev);

	r128_cce_dispatch_flip(dev);

	COMMIT_RING();
	return 0;
}

static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_swap(dev);
	dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
					R128_UPLOAD_MASKS);

	COMMIT_RING();
	return 0;
}

static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_vertex_t *vertex = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (vertex->prim < 0 ||
	    vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", vertex->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	buf->used = vertex->count;
	buf_priv->prim = vertex->prim;
	buf_priv->discard = vertex->discard;

	r128_cce_dispatch_vertex(dev, buf);

	COMMIT_RING();
	return 0;
}

static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indices_t *elts = data;
	int count;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
		  elts->idx, elts->start, elts->end, elts->discard);

	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (elts->prim < 0 ||
	    elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", elts->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts->idx);
		return -EINVAL;
	}

	count = (elts->end - elts->start) / sizeof(u16);
	elts->start -= R128_INDEX_PRIM_OFFSET;

	if (elts->start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
		return -EINVAL;
	}
	if (elts->start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
		return -EINVAL;
	}

	buf->used = elts->end;
	buf_priv->prim = elts->prim;
	buf_priv->discard = elts->discard;

	r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);

	COMMIT_RING();
	return 0;
}

static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_blit_t *blit = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);

	if (blit->idx < 0 || blit->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  blit->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	ret = r128_cce_dispatch_blit(dev, file_priv, blit);

	COMMIT_RING();
	return ret;
}

static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_depth_t *depth = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	ret = -EINVAL;
	switch (depth->func) {
	case R128_WRITE_SPAN:
		ret = r128_cce_dispatch_write_span(dev, depth);
		break;
	case R128_WRITE_PIXELS:
		ret = r128_cce_dispatch_write_pixels(dev, depth);
		break;
	case R128_READ_SPAN:
		ret = r128_cce_dispatch_read_span(dev, depth);
		break;
	case R128_READ_PIXELS:
		ret = r128_cce_dispatch_read_pixels(dev, depth);
		break;
	}

	COMMIT_RING();
	return ret;
}

static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_stipple_t *stipple = data;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	r128_cce_dispatch_stipple(dev, mask);

	COMMIT_RING();
	return 0;
}

static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indirect_t *indirect = data;
#if 0
	RING_LOCALS;
#endif

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
		  indirect->idx, indirect->start, indirect->end,
		  indirect->discard);

	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	buf = dma->buflist[indirect->idx];
	buf_priv = buf->dev_private;

	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
		return -EINVAL;
	}

	if (indirect->start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect->start, buf->used);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect->end;
	buf_priv->discard = indirect->discard;

#if 0
	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);
	RADEON_WAIT_UNTIL_3D_IDLE();
	ADVANCE_RING();
#endif

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);

	COMMIT_RING();
	return 0;
}

static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_getparam_t *param = data;
	int value;

	DEV_INIT_TEST_WITH_RETURN(dev_priv);

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param->param) {
	case R128_PARAM_IRQ_NR:
		value = drm_dev_to_irq(dev);
		break;
	default:
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	if (dev->dev_private) {
		drm_r128_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping)
			r128_do_cleanup_pageflip(dev);
	}
}
void r128_driver_lastclose(struct drm_device *dev)
{
	r128_do_cleanup_cce(dev);
}

struct drm_ioctl_desc r128_ioctls[] = {
	DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
};

int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);