i915_dma.c revision 19966754328d99ee003ddfc7a8c31ceb115483ac
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Cannot allocate hardware status page\n");
		return -ENOMEM;
	}
	dev_priv->render_ring.status_page.page_addr
		= dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);

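	/*
	 * On 965 the bus address can be wider than 32 bits: the OR below
	 * folds address bits 32-35 into bits 4-7 of the HWS_PGA value.
	 */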
	if (IS_I965G(dev))
		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
					     0xf0;

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->render_ring.status_page.gfx_addr) {
		dev_priv->render_ring.status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = &dev_priv->render_ring;

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
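	/* Free space runs from tail to head, wrapping at the ring size. */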
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found, assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (dev_priv->render_ring.gem_object != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->render_ring.size = init->ring_size;

		dev_priv->render_ring.map.offset = init->ring_start;
		dev_priv->render_ring.map.size = init->ring_size;
		dev_priv->render_ring.map.type = 0;
		dev_priv->render_ring.map.flags = 0;
		dev_priv->render_ring.map.mtrr = 0;

		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);

		if (dev_priv->render_ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("cannot ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	struct intel_ring_buffer *ring;
	DRM_DEBUG_DRIVER("%s\n", __func__);

	ring = &dev_priv->render_ring;

	if (ring->map.handle == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Cannot find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
				ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		ring->setup_status_page(dev, ring);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
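	/* Bits 31:29 of the command dword select the instruction class. */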
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		cmd = buffer[i];

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			OUT_RING(buffer[i]);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *boxes,
	      int i, int DR1, int DR4)
{
	struct drm_clip_rect box = boxes[i];

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

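	/*
	 * MI_STORE_DWORD_INDEX writes the counter into the hardware status
	 * page at the breadcrumb slot, where READ_BREADCRUMB() can see it.
	 */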
	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	int nbox = batch->num_cliprects;
	int i = 0, count;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

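		/*
		 * 830 and 845 lack MI_BATCH_BUFFER_START, so chain to the
		 * batch with the older two-address MI_BATCH_BUFFER instead.
		 */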
		if (!IS_I830(dev) && !IS_845G(dev)) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}


	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
		BEGIN_LP_RING(2);
		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
		OUT_RING(MI_NOOP);
		ADVANCE_LP_RING();
	}
	i915_emit_breadcrumb(dev);

	return 0;
}

static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);
	ADVANCE_LP_RING();

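	/* Point the display base at the other buffer (front <-> back). */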
	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
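	/* The ring is idle once all but the 8 reserved tail bytes are free. */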
	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
				      dev_priv->render_ring.size - 8);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = dev_priv->has_gem;
		break;
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring = &dev_priv->render_ring;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("cannot ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	ring->status_page.page_addr = dev_priv->hws_map.handle;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (IS_I965G(dev))
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (IS_I965G(dev))
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Set up MCHBAR if possible; note whether we need to disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
#define PTE_MAPPING_TYPE_DCACHE		(1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED		(3 << 1)
#define PTE_MAPPING_TYPE_MASK		(3 << 1)
#define PTE_VALID			(1 << 0)

/**
 * i915_gtt_to_phys - take a GTT address and turn it into a physical one
 * @dev: drm device
 * @gtt_addr: address to translate
 *
 * Some chip functions require allocations from stolen space but need the
 * physical address of the memory in question.  We use this routine
 * to get a physical address suitable for register programming from a given
 * GTT address.
 */
static unsigned long i915_gtt_to_phys(struct drm_device *dev,
				      unsigned long gtt_addr)
{
	unsigned long *gtt;
	unsigned long entry, phys;
	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
	int gtt_offset, gtt_size;

	if (IS_I965G(dev)) {
		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
			gtt_offset = 2*1024*1024;
			gtt_size = 2*1024*1024;
		} else {
			gtt_offset = 512*1024;
			gtt_size = 512*1024;
		}
	} else {
		gtt_bar = 3;
		gtt_offset = 0;
		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
	}

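	/*
	 * On 965 and newer the GTT lives at a fixed offset inside the MMIO
	 * BAR; older chips expose it through a BAR (3) of its own.
	 */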
	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
			 gtt_size);
	if (!gtt) {
		DRM_ERROR("ioremap of GTT failed\n");
		return 0;
	}

	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));

	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);

	/* Mask out these reserved bits on this hardware. */
	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
	    IS_I945G(dev) || IS_I945GM(dev)) {
		entry &= ~PTE_ADDRESS_MASK_HIGH;
	}

	/* If it's not a mapping type we know, then bail. */
	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
	    (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED)	{
		iounmap(gtt);
		return 0;
	}

	if (!(entry & PTE_VALID)) {
		DRM_ERROR("bad GTT entry in stolen space\n");
		iounmap(gtt);
		return 0;
	}

	iounmap(gtt);

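	/* Reassemble a 36-bit address: PTE bits 4-7 hold address bits 32-35. */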
	phys = (entry & PTE_ADDRESS_MASK) |
		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));

	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);

	return phys;
}

static void i915_warn_stolen(struct drm_device *dev)
{
	DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
	DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}

static void i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	unsigned long cfb_base;
	unsigned long ll_base = 0;

	/* Leave 1M for line length buffer & misc. */
	compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
	if (!compressed_fb) {
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		i915_warn_stolen(dev);
		return;
	}

	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
	if (!compressed_fb) {
		i915_warn_stolen(dev);
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		return;
	}

	cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
	if (!cfb_base) {
		DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
		drm_mm_put_block(compressed_fb);
		return;
	}

	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
		compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
						    4096, 0);
		if (!compressed_llb) {
			i915_warn_stolen(dev);
			return;
		}

		compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
		if (!compressed_llb) {
			i915_warn_stolen(dev);
			return;
		}

		ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
		if (!ll_base) {
			DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
			drm_mm_put_block(compressed_fb);
			drm_mm_put_block(compressed_llb);
			return;
		}
	}

	dev_priv->cfb_size = size;

	intel_disable_fbc(dev);
	dev_priv->compressed_fb = compressed_fb;
	if (IS_IRONLAKE_M(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
		dev_priv->compressed_llb = compressed_llb;
	}

	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
		  ll_base, size >> 20);
}

static void i915_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_mm_put_block(dev_priv->compressed_fb);
	if (dev_priv->compressed_llb)
		drm_mm_put_block(dev_priv->compressed_llb);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "i915: switched on\n");
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_ERR "i915: switched off\n");
		drm_kms_helper_poll_disable(dev);
		i915_suspend(dev, pmm);
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static int i915_load_modeset_init(struct drm_device *dev,
				  unsigned long prealloc_size,
				  unsigned long agp_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/* Basic memrange allocator for stolen space (aka mm.vram) */
	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_init_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		goto out;

	/* Try to set up FBC with a reasonable compressed buffer size */
	if (I915_HAS_FBC(dev) && i915_powersave) {
		int cfb_size;

		/* Try to get an 8M buffer... */
		if (prealloc_size > (9*1024*1024))
			cfb_size = 8*1024*1024;
		else /* fall back to 7/8 of the stolen space */
			cfb_size = prealloc_size * 7 / 8;
		i915_setup_compression(dev, cfb_size);
	}

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	ret = intel_init_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have more than one VGA card, then disable the extra VGA resources */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret)
		goto cleanup_ringbuffer;

	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     i915_switcheroo_can_switch);
	if (ret)
		goto cleanup_vga_client;

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

	intel_modeset_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);
	return 0;

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->c_m = 1;
	} else {
		dev_priv->c_m = 2;
	}
}

struct v_table {
	u8 vid;
	unsigned long vd; /* in .1 mil */
	unsigned long vm; /* in .1 mil */
	u8 pvid;
};

static struct v_table v_table[] = {
	{ 0, 16125, 15000, 0x7f, },
	{ 1, 16000, 14875, 0x7e, },
	{ 2, 15875, 14750, 0x7d, },
	{ 3, 15750, 14625, 0x7c, },
	{ 4, 15625, 14500, 0x7b, },
	{ 5, 15500, 14375, 0x7a, },
	{ 6, 15375, 14250, 0x79, },
	{ 7, 15250, 14125, 0x78, },
	{ 8, 15125, 14000, 0x77, },
	{ 9, 15000, 13875, 0x76, },
	{ 10, 14875, 13750, 0x75, },
	{ 11, 14750, 13625, 0x74, },
	{ 12, 14625, 13500, 0x73, },
	{ 13, 14500, 13375, 0x72, },
	{ 14, 14375, 13250, 0x71, },
	{ 15, 14250, 13125, 0x70, },
	{ 16, 14125, 13000, 0x6f, },
	{ 17, 14000, 12875, 0x6e, },
	{ 18, 13875, 12750, 0x6d, },
	{ 19, 13750, 12625, 0x6c, },
	{ 20, 13625, 12500, 0x6b, },
	{ 21, 13500, 12375, 0x6a, },
	{ 22, 13375, 12250, 0x69, },
	{ 23, 13250, 12125, 0x68, },
	{ 24, 13125, 12000, 0x67, },
	{ 25, 13000, 11875, 0x66, },
	{ 26, 12875, 11750, 0x65, },
	{ 27, 12750, 11625, 0x64, },
	{ 28, 12625, 11500, 0x63, },
	{ 29, 12500, 11375, 0x62, },
	{ 30, 12375, 11250, 0x61, },
	{ 31, 12250, 11125, 0x60, },
	{ 32, 12125, 11000, 0x5f, },
	{ 33, 12000, 10875, 0x5e, },
	{ 34, 11875, 10750, 0x5d, },
	{ 35, 11750, 10625, 0x5c, },
	{ 36, 11625, 10500, 0x5b, },
	{ 37, 11500, 10375, 0x5a, },
	{ 38, 11375, 10250, 0x59, },
	{ 39, 11250, 10125, 0x58, },
	{ 40, 11125, 10000, 0x57, },
	{ 41, 11000, 9875, 0x56, },
	{ 42, 10875, 9750, 0x55, },
	{ 43, 10750, 9625, 0x54, },
	{ 44, 10625, 9500, 0x53, },
	{ 45, 10500, 9375, 0x52, },
	{ 46, 10375, 9250, 0x51, },
	{ 47, 10250, 9125, 0x50, },
	{ 48, 10125, 9000, 0x4f, },
	{ 49, 10000, 8875, 0x4e, },
	{ 50, 9875, 8750, 0x4d, },
	{ 51, 9750, 8625, 0x4c, },
	{ 52, 9625, 8500, 0x4b, },
	{ 53, 9500, 8375, 0x4a, },
	{ 54, 9375, 8250, 0x49, },
	{ 55, 9250, 8125, 0x48, },
	{ 56, 9125, 8000, 0x47, },
	{ 57, 9000, 7875, 0x46, },
	{ 58, 8875, 7750, 0x45, },
	{ 59, 8750, 7625, 0x44, },
	{ 60, 8625, 7500, 0x43, },
	{ 61, 8500, 7375, 0x42, },
	{ 62, 8375, 7250, 0x41, },
	{ 63, 8250, 7125, 0x40, },
	{ 64, 8125, 7000, 0x3f, },
	{ 65, 8000, 6875, 0x3e, },
	{ 66, 7875, 6750, 0x3d, },
	{ 67, 7750, 6625, 0x3c, },
	{ 68, 7625, 6500, 0x3b, },
	{ 69, 7500, 6375, 0x3a, },
	{ 70, 7375, 6250, 0x39, },
	{ 71, 7250, 6125, 0x38, },
	{ 72, 7125, 6000, 0x37, },
	{ 73, 7000, 5875, 0x36, },
	{ 74, 6875, 5750, 0x35, },
	{ 75, 6750, 5625, 0x34, },
	{ 76, 6625, 5500, 0x33, },
	{ 77, 6500, 5375, 0x32, },
	{ 78, 6375, 5250, 0x31, },
	{ 79, 6250, 5125, 0x30, },
	{ 80, 6125, 5000, 0x2f, },
	{ 81, 6000, 4875, 0x2e, },
	{ 82, 5875, 4750, 0x2d, },
	{ 83, 5750, 4625, 0x2c, },
	{ 84, 5625, 4500, 0x2b, },
	{ 85, 5500, 4375, 0x2a, },
	{ 86, 5375, 4250, 0x29, },
	{ 87, 5250, 4125, 0x28, },
	{ 88, 5125, 4000, 0x27, },
	{ 89, 5000, 3875, 0x26, },
	{ 90, 4875, 3750, 0x25, },
	{ 91, 4750, 3625, 0x24, },
	{ 92, 4625, 3500, 0x23, },
	{ 93, 4500, 3375, 0x22, },
	{ 94, 4375, 3250, 0x21, },
	{ 95, 4250, 3125, 0x20, },
	{ 96, 4125, 3000, 0x1f, },
	{ 97, 4125, 3000, 0x1e, },
	{ 98, 4125, 3000, 0x1d, },
	{ 99, 4125, 3000, 0x1c, },
	{ 100, 4125, 3000, 0x1b, },
	{ 101, 4125, 3000, 0x1a, },
	{ 102, 4125, 3000, 0x19, },
	{ 103, 4125, 3000, 0x18, },
	{ 104, 4125, 3000, 0x17, },
	{ 105, 4125, 3000, 0x16, },
	{ 106, 4125, 3000, 0x15, },
	{ 107, 4125, 3000, 0x14, },
	{ 108, 4125, 3000, 0x13, },
	{ 109, 4125, 3000, 0x12, },
	{ 110, 4125, 3000, 0x11, },
	{ 111, 4125, 3000, 0x10, },
	{ 112, 4125, 3000, 0x0f, },
	{ 113, 4125, 3000, 0x0e, },
	{ 114, 4125, 3000, 0x0d, },
	{ 115, 4125, 3000, 0x0c, },
	{ 116, 4125, 3000, 0x0b, },
	{ 117, 4125, 3000, 0x0a, },
	{ 118, 4125, 3000, 0x09, },
	{ 119, 4125, 3000, 0x08, },
	{ 120, 1125, 0, 0x07, },
	{ 121, 1000, 0, 0x06, },
	{ 122, 875, 0, 0x05, },
	{ 123, 750, 0, 0x04, },
	{ 124, 625, 0, 0x03, },
	{ 125, 500, 0, 0x02, },
	{ 126, 375, 0, 0x01, },
	{ 127, 0, 0, 0x00, },
};

struct cparams {
	int i;
	int t;
	int m;
	int c;
};

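/* (i, t) key each row by c_m and r_t; (m, c) are the fitted coefficients. */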
static struct cparams cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->last_time1;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->last_count1) {
		diff = ~0UL - dev_priv->last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->c_m &&
		    cparams[i].t == dev_priv->r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

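	/* Scale the per-ms event count by the platform coefficients. */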
	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->last_count1 = total_count;
	dev_priv->last_time1 = now;

	return ret;
}

unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

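	/* Linear fit: TSFS slope times the TR1 reading, minus the intercept. */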
	return ((m * x) / 127) - b;
}

static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	unsigned long val = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(v_table); i++) {
		if (v_table[i].pvid == pxvid) {
			if (IS_MOBILE(dev_priv->dev))
				val = v_table[i].vm;
			else
				val = v_table[i].vd;
		}
	}

	return val;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->last_count2) {
		diff = ~0UL - dev_priv->last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->last_count2;
	}

	dev_priv->last_count2 = count;
	dev_priv->last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->gfx_power = diff;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	i915_update_gfx_val(dev_priv);

	return dev_priv->gfx_power + state2;
}


/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = i915_chipset_val(dev_priv);
	graphics_val = i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

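	/* Delay values run inverse to frequency: decrementing raises the cap. */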
	if (dev_priv->max_delay > dev_priv->fmax)
		dev_priv->max_delay--;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->max_delay < dev_priv->min_delay)
		dev_priv->max_delay++;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = false;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	ret = dev_priv->busy;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->max_delay = dev_priv->fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
		ret = false;

out_unlock:
	spin_unlock(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	resource_size_t base, size;
	int ret = 0, mmio_bar;
	uint32_t agp_size, prealloc_size;
	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = (struct intel_device_info *) flags;

	/* Add register map (needed for suspend/resume) */
	mmio_bar = IS_I9XX(dev) ? 0 : 1;
	base = pci_resource_start(dev->pdev, mmio_bar);
	size = pci_resource_len(dev->pdev, mmio_bar);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	dev_priv->regs = ioremap(base, size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base,
				     dev->agp->agp_info.aper_size * 1024*1024);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 dev->agp->agp_info.aper_size *
					 1024 * 1024,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	dev_priv->mm.gtt = intel_gtt_get();
	if (!dev_priv->mm.gtt) {
		DRM_ERROR("Failed to initialize GTT\n");
		ret = -ENODEV;
		goto out_iomapfree;
	}

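	/* Stolen entries are the BIOS-reserved slice of the GTT; mappable
	 * entries form the CPU-visible aperture.
	 */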
	prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	dev_priv->wq = create_singlethread_workqueue("i915");
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_iomapfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	if (prealloc_size > agp_size * 3 / 4) {
		DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
			  "memory stolen.\n",
			  prealloc_size / 1024, agp_size / 1024);
		DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
			  "updating the BIOS to fix).\n");
		dev_priv->has_gem = 0;
	}

	if (dev_priv->has_gem == 0 &&
	    drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
		ret = -ENODEV;
		goto out_iomapfree;
	}

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_opregion_setup(dev);

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0)
			goto out_workqueue_free;
	}

	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_IRONLAKE(dev))
		i915_ironlake_get_mem_freq(dev);

2005	/* On the 945G/GM, the chipset reports the MSI capability on the
2006	 * integrated graphics even though the support isn't actually there
2007	 * according to the published specs.  It doesn't appear to function
2008	 * correctly in testing on 945G.
2009	 * This may be a side effect of MSI having been made available for PEG
2010	 * and the registers being closely associated.
2011	 *
2012	 * According to chipset errata, on the 965GM, MSI interrupts may
2013	 * be lost or delayed, but we use them anyway to avoid
2014	 * stuck interrupts on some machines.
2015	 */
2016	if (!IS_I945G(dev) && !IS_I945GM(dev))
2017		pci_enable_msi(dev->pdev);
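	/*
	 * The return value of pci_enable_msi() is deliberately ignored: if
	 * MSI cannot be enabled, the device simply keeps signalling legacy
	 * line-based interrupts.
	 */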
2018
2019	spin_lock_init(&dev_priv->user_irq_lock);
2020	spin_lock_init(&dev_priv->error_lock);
2021	dev_priv->trace_irq_seqno = 0;
2022
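	/*
	 * drm_vblank_init() allocates the per-pipe vblank bookkeeping; if it
	 * fails, too much is already live for the goto ladder below, so the
	 * error path unwinds through the full i915_driver_unload() instead.
	 */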
2023	ret = drm_vblank_init(dev, I915_NUM_PIPE);
2024
2025	if (ret) {
2026		(void) i915_driver_unload(dev);
2027		return ret;
2028	}
2029
2030	/* Start out suspended */
2031	dev_priv->mm.suspended = 1;
2032
2033	intel_detect_pch(dev);
2034
2035	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2036		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
2037		if (ret < 0) {
2038			DRM_ERROR("failed to init modeset\n");
2039			goto out_workqueue_free;
2040		}
2041	}
2042
2043	/* Must be done after probing outputs */
2044	intel_opregion_init(dev);
2045	acpi_video_register();
2046
2047	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
2048		    (unsigned long) dev);
2049
2050	spin_lock(&mchdev_lock);
2051	i915_mch_dev = dev_priv;
2052	dev_priv->mchdev_lock = &mchdev_lock;
2053	spin_unlock(&mchdev_lock);
2054
2055	return 0;
2056
2057out_workqueue_free:
2058	destroy_workqueue(dev_priv->wq);
2059out_iomapfree:
2060	io_mapping_free(dev_priv->mm.gtt_mapping);
2061out_rmmap:
2062	iounmap(dev_priv->regs);
2063put_bridge:
2064	pci_dev_put(dev_priv->bridge_dev);
2065free_priv:
2066	kfree(dev_priv);
2067	return ret;
2068}
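
/*
 * The cleanup labels at the end of i915_driver_load() form the usual
 * kernel unwind ladder: each label releases exactly one resource and
 * falls through to the labels below it, so a goto from any point in the
 * function frees everything acquired so far, in reverse order.  A
 * minimal sketch of the idiom, with hypothetical helpers:
 *
 *	int load_example(void)
 *	{
 *		int ret;
 *
 *		ret = acquire_a();
 *		if (ret)
 *			return ret;
 *		ret = acquire_b();
 *		if (ret)
 *			goto release_a;
 *		ret = acquire_c();
 *		if (ret)
 *			goto release_b;
 *		return 0;
 *
 *	release_b:
 *		release_b();
 *	release_a:
 *		release_a();
 *		return ret;
 *	}
 */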
2069
2070int i915_driver_unload(struct drm_device *dev)
2071{
2072	struct drm_i915_private *dev_priv = dev->dev_private;
2073	int ret;
2074
2075	spin_lock(&mchdev_lock);
2076	i915_mch_dev = NULL;
2077	spin_unlock(&mchdev_lock);
2078
2079	mutex_lock(&dev->struct_mutex);
2080	ret = i915_gpu_idle(dev);
2081	if (ret)
2082		DRM_ERROR("failed to idle hardware: %d\n", ret);
2083	mutex_unlock(&dev->struct_mutex);
2084
2085	/* Cancel the retire work handler, which should be idle now. */
2086	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2087
2088	io_mapping_free(dev_priv->mm.gtt_mapping);
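	/*
	 * Passing the index returned by mtrr_add() back to mtrr_del() drops
	 * the usage count taken at load time; base and size are only used
	 * to look the region up when no index is supplied.
	 */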
2089	if (dev_priv->mm.gtt_mtrr >= 0) {
2090		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
2091			 dev->agp->agp_info.aper_size * 1024 * 1024);
2092		dev_priv->mm.gtt_mtrr = -1;
2093	}
2094
2095	acpi_video_unregister();
2096
2097	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2098		intel_modeset_cleanup(dev);
2099
2100		/*
2101		 * free the memory space allocated for the child device
2102		 * config parsed from VBT
2103		 */
2104		if (dev_priv->child_dev && dev_priv->child_dev_num) {
2105			kfree(dev_priv->child_dev);
2106			dev_priv->child_dev = NULL;
2107			dev_priv->child_dev_num = 0;
2108		}
2109
2110		vga_switcheroo_unregister_client(dev->pdev);
2111		vga_client_register(dev->pdev, NULL, NULL, NULL);
2112	}
2113
2114	/* Free error state after interrupts are fully disabled. */
2115	del_timer_sync(&dev_priv->hangcheck_timer);
2116	cancel_work_sync(&dev_priv->error_work);
2117	i915_destroy_error_state(dev);
2118
2119	if (dev->pdev->msi_enabled)
2120		pci_disable_msi(dev->pdev);
2121
2122	if (dev_priv->regs != NULL)
2123		iounmap(dev_priv->regs);
2124
2125	intel_opregion_fini(dev);
2126
2127	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2128		/* Flush any outstanding unpin_work. */
2129		flush_workqueue(dev_priv->wq);
2130
2131		i915_gem_free_all_phys_object(dev);
2132
2133		mutex_lock(&dev->struct_mutex);
2134		i915_gem_cleanup_ringbuffer(dev);
2135		mutex_unlock(&dev->struct_mutex);
2136		if (I915_HAS_FBC(dev) && i915_powersave)
2137			i915_cleanup_compression(dev);
2138		drm_mm_takedown(&dev_priv->mm.vram);
2139
2140		intel_cleanup_overlay(dev);
2141	}
2142
2143	intel_teardown_mchbar(dev);
2144
2145	destroy_workqueue(dev_priv->wq);
2146
2147	pci_dev_put(dev_priv->bridge_dev);
2148	kfree(dev->dev_private);
2149
2150	return 0;
2151}
2152
2153int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
2154{
2155	struct drm_i915_file_private *i915_file_priv;
2156
2157	DRM_DEBUG_DRIVER("\n");
2158	i915_file_priv = kmalloc(sizeof(*i915_file_priv),
2159				 GFP_KERNEL);
2160
2161	if (!i915_file_priv)
2162		return -ENOMEM;
2163
2164	file_priv->driver_priv = i915_file_priv;
2165
2166	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
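	/*
	 * mm.request_list collects this client's outstanding GEM requests;
	 * the throttle ioctl walks it to keep one client from queueing too
	 * far ahead of the GPU.
	 */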
2167
2168	return 0;
2169}
2170
2171/**
2172 * i915_driver_lastclose - clean up after all DRM clients have exited
2173 * @dev: DRM device
2174 *
2175 * Take care of cleaning up after all DRM clients have exited.  In the
2176 * mode setting case, we want to restore the kernel's initial mode (just
2177 * in case the last client left us in a bad state).
2178 *
2179 * Additionally, in the non-mode setting case, we'll tear down the AGP
2180 * and DMA structures, since the kernel won't be using them, and clean
2181 * up any GEM state.
2182 */
2183void i915_driver_lastclose(struct drm_device * dev)
2184{
2185	drm_i915_private_t *dev_priv = dev->dev_private;
2186
2187	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
2188		drm_fb_helper_restore();
2189		vga_switcheroo_process_delayed_switch();
2190		return;
2191	}
2192
2193	i915_gem_lastclose(dev);
2194
2195	if (dev_priv->agp_heap)
2196		i915_mem_takedown(&(dev_priv->agp_heap));
2197
2198	i915_dma_cleanup(dev);
2199}
2200
2201void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
2202{
2203	drm_i915_private_t *dev_priv = dev->dev_private;
2204	i915_gem_release(dev, file_priv);
2205	if (!drm_core_check_feature(dev, DRIVER_MODESET))
2206		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
2207}
2208
2209void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
2210{
2211	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
2212
2213	kfree(i915_file_priv);
2214}
2215
2216struct drm_ioctl_desc i915_ioctls[] = {
2217	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2218	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
2219	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
2220	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
2221	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
2222	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
2223	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
2224	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2225	DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
2226	DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
2227	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2228	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
2229	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2230	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2231	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
2232	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
2233	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2234	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2235	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
2236	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
2237	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
2238	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
2239	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
2240	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
2241	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2242	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2243	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
2244	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
2245	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
2246	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
2247	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
2248	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
2249	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
2250	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
2251	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
2252	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
2253	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
2254	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
2255	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2256	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
2257};
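
/*
 * Flag summary for the table above: DRM_AUTH requires an authenticated
 * client, DRM_MASTER restricts the ioctl to the current DRM master,
 * DRM_ROOT_ONLY additionally requires CAP_SYS_ADMIN, DRM_CONTROL_ALLOW
 * permits the call on control nodes, and DRM_UNLOCKED means the handler
 * runs without the global DRM lock.
 */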
2258
2259int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
2260
2261/**
2262 * Determine if the device really is AGP or not.
2263 *
2264 * All Intel graphics chipsets are treated as AGP, even if they are really
2265 * PCI-e.
2266 *
2267 * \param dev   The device to be tested.
2268 *
2269 * \returns
2270 * A value of 1 is always returned to indicate that every i9x5 is AGP.
2271 */
2272int i915_driver_device_is_agp(struct drm_device * dev)
2273{
2274	return 1;
2275}
2276