/* i915_debugfs.c — revision c724e8a9407683a8a2ee8eb00b972badf237bbe1 */
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Eric Anholt <eric@anholt.net>
25 *    Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include <linux/seq_file.h>
30#include <linux/debugfs.h>
31#include <linux/slab.h>
32#include "drmP.h"
33#include "drm.h"
34#include "intel_drv.h"
35#include "intel_ringbuffer.h"
36#include "i915_drm.h"
37#include "i915_drv.h"
38
39#define DRM_I915_RING_DEBUG 1
40
41
42#if defined(CONFIG_DEBUG_FS)
43
/*
 * Selectors passed via drm_info_list.data to i915_gem_object_list_info();
 * each names one of the dev_priv->mm object lists to dump.
 */
enum {
	ACTIVE_LIST,
	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
	DEFERRED_FREE_LIST,
};
51
/* Map a boolean-ish integer to a human-readable "yes"/"no" string. */
static const char *yesno(int v)
{
	if (v)
		return "yes";
	return "no";
}
56
/*
 * debugfs: dump the chipset generation and the per-device feature flags
 * from the intel_device_info table, one "flag: yes/no" line each.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
/* B(x) prints one info->x flag; local helper, removed again below. */
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	B(is_mobile);
	B(is_i85x);
	B(is_i915g);
	B(is_i945gm);
	B(is_g33);
	B(need_gfx_hws);
	B(is_g4x);
	B(is_pineview);
	B(is_broadwater);
	B(is_crestline);
	B(has_fbc);
	B(has_rc6);
	B(has_pipe_cxsr);
	B(has_hotplug);
	B(cursor_needs_physical);
	B(has_overlay);
	B(overlay_needs_physical);
	B(supports_tv);
	B(has_bsd_ring);
	B(has_blt_ring);
#undef B

	return 0;
}
89
90static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
91{
92	if (obj_priv->user_pin_count > 0)
93		return "P";
94	else if (obj_priv->pin_count > 0)
95		return "p";
96	else
97		return " ";
98}
99
100static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
101{
102    switch (obj_priv->tiling_mode) {
103    default:
104    case I915_TILING_NONE: return " ";
105    case I915_TILING_X: return "X";
106    case I915_TILING_Y: return "Y";
107    }
108}
109
/*
 * Print a one-line summary of a GEM object: pointer, pin/tiling flags,
 * size, read/write domains, last rendering seqno and dirty/purgeable
 * state, followed by optional name/fence/GTT-offset/mappable/ring
 * annotations.  No trailing newline; the caller adds it.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_rendering_seqno,
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	/* Only bound objects carry a GTT offset worth printing. */
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable)
		seq_printf(m, " (mappable)");
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
135
/*
 * debugfs: dump every object on one of the mm object lists (selected by
 * the enum value stashed in info_ent->data), then a size/count summary.
 * Takes struct_mutex; returns -EINTR if interrupted, -EINVAL for an
 * unknown list selector.
 */
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	case PINNED_LIST:
		seq_printf(m, "Pinned:\n");
		head = &dev_priv->mm.pinned_list;
		break;
	case FLUSHING_LIST:
		seq_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;
		break;
	case DEFERRED_FREE_LIST:
		seq_printf(m, "Deferred free:\n");
		head = &dev_priv->mm.deferred_free_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj_priv, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, obj_priv);
		seq_printf(m, "\n");
		total_obj_size += obj_priv->base.size;
		/* NOTE(review): gtt_space is dereferenced unconditionally —
		 * presumably everything on these lists is GTT-bound; confirm
		 * for the pinned/deferred-free lists. */
		total_gtt_size += obj_priv->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
192
/*
 * debugfs: dump the aggregate GEM object/memory accounting counters
 * kept in dev_priv->mm.  Takes struct_mutex so the counters are read
 * consistently.
 */
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
	seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count);
	seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory);
	seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used);
	seq_printf(m, "%zu mappable gtt total\n", dev_priv->mm.mappable_gtt_total);
	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
220
221
/*
 * debugfs: for each CRTC, report whether a page flip is outstanding
 * (queued vs. waiting for vblank), the stall-check state, and the GTT
 * offsets of the old and pending framebuffer objects.  The per-crtc
 * unpin_work pointer is sampled under dev->event_lock.
 */
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		/* only pipes/planes A and B exist on these chipsets */
		const char *pipe = crtc->pipe ? "B" : "A";
		const char *plane = crtc->plane ? "B" : "A";
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %s (plane %s)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
				if(obj_priv)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
				if(obj_priv)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
269
270static int i915_gem_request_info(struct seq_file *m, void *data)
271{
272	struct drm_info_node *node = (struct drm_info_node *) m->private;
273	struct drm_device *dev = node->minor->dev;
274	drm_i915_private_t *dev_priv = dev->dev_private;
275	struct drm_i915_gem_request *gem_request;
276	int ret, count;
277
278	ret = mutex_lock_interruptible(&dev->struct_mutex);
279	if (ret)
280		return ret;
281
282	count = 0;
283	if (!list_empty(&dev_priv->render_ring.request_list)) {
284		seq_printf(m, "Render requests:\n");
285		list_for_each_entry(gem_request,
286				    &dev_priv->render_ring.request_list,
287				    list) {
288			seq_printf(m, "    %d @ %d\n",
289				   gem_request->seqno,
290				   (int) (jiffies - gem_request->emitted_jiffies));
291		}
292		count++;
293	}
294	if (!list_empty(&dev_priv->bsd_ring.request_list)) {
295		seq_printf(m, "BSD requests:\n");
296		list_for_each_entry(gem_request,
297				    &dev_priv->bsd_ring.request_list,
298				    list) {
299			seq_printf(m, "    %d @ %d\n",
300				   gem_request->seqno,
301				   (int) (jiffies - gem_request->emitted_jiffies));
302		}
303		count++;
304	}
305	if (!list_empty(&dev_priv->blt_ring.request_list)) {
306		seq_printf(m, "BLT requests:\n");
307		list_for_each_entry(gem_request,
308				    &dev_priv->blt_ring.request_list,
309				    list) {
310			seq_printf(m, "    %d @ %d\n",
311				   gem_request->seqno,
312				   (int) (jiffies - gem_request->emitted_jiffies));
313		}
314		count++;
315	}
316	mutex_unlock(&dev->struct_mutex);
317
318	if (count == 0)
319		seq_printf(m, "No requests\n");
320
321	return 0;
322}
323
324static void i915_ring_seqno_info(struct seq_file *m,
325				 struct intel_ring_buffer *ring)
326{
327	if (ring->get_seqno) {
328		seq_printf(m, "Current sequence (%s): %d\n",
329			   ring->name, ring->get_seqno(ring));
330		seq_printf(m, "Waiter sequence (%s):  %d\n",
331			   ring->name, ring->waiting_seqno);
332		seq_printf(m, "IRQ sequence (%s):     %d\n",
333			   ring->name, ring->irq_seqno);
334	}
335}
336
/*
 * debugfs: dump the seqno state of all three rings under struct_mutex.
 */
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_ring_seqno_info(m, &dev_priv->render_ring);
	i915_ring_seqno_info(m, &dev_priv->bsd_ring);
	i915_ring_seqno_info(m, &dev_priv->blt_ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
356
357
/*
 * debugfs: dump the interrupt enable/identity/mask registers — the
 * legacy IER/IIR/IMR + pipe-stat set on pre-Ironlake, or the split
 * north/south-display and graphics (DE/SDE/GT) set on PCH platforms —
 * plus the received-IRQ count and per-ring seqno state.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		seq_printf(m, "Pipe A stat:         %08x\n",
			   I915_READ(PIPEASTAT));
		seq_printf(m, "Pipe B stat:         %08x\n",
			   I915_READ(PIPEBSTAT));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	i915_ring_seqno_info(m, &dev_priv->render_ring);
	i915_ring_seqno_info(m, &dev_priv->bsd_ring);
	i915_ring_seqno_info(m, &dev_priv->blt_ring);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
409
/*
 * debugfs: dump every fence register slot, either "unused" or a
 * describe_obj() line for the object currently occupying it.
 */
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fenced object[%2d] = ", i);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, to_intel_bo(obj));
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
437
/*
 * debugfs: hex-dump the hardware status page of the ring selected by
 * info_ent->data (render/BSD/BLT), four dwords per line.  Prints
 * nothing if the status page has not been set up.
 */
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	volatile u32 *hws;
	int i;

	switch ((uintptr_t)node->info_ent->data) {
	case RING_RENDER: ring = &dev_priv->render_ring; break;
	case RING_BSD: ring = &dev_priv->bsd_ring; break;
	case RING_BLT: ring = &dev_priv->blt_ring; break;
	default: return -EINVAL;
	}

	hws = (volatile u32 *)ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	/* NOTE(review): the extra "/ 4" limits this to the first 256
	 * dwords (1KiB) of the 4KiB status page — confirm whether that
	 * is intentional or the bound should be 4096 / sizeof(u32). */
	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
465
/*
 * Hex-dump an object's contents through the GTT aperture, one page at a
 * time via a write-combining io_mapping.  Offsets printed are relative
 * to the start of each page.  Caller must hold struct_mutex and the
 * object must be GTT-bound.
 */
static void i915_dump_object(struct seq_file *m,
			     struct io_mapping *mapping,
			     struct drm_i915_gem_object *obj_priv)
{
	int page, page_count, i;

	page_count = obj_priv->base.size / PAGE_SIZE;
	for (page = 0; page < page_count; page++) {
		u32 *mem = io_mapping_map_wc(mapping,
					     obj_priv->gtt_offset + page * PAGE_SIZE);
		for (i = 0; i < PAGE_SIZE; i += 4)
			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
		io_mapping_unmap(mem);
	}
}
481
/*
 * debugfs: dump the contents of every active object that is in the
 * COMMAND read domain (i.e. batch buffers currently referenced by the
 * GPU), via the GTT mapping.
 */
static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		obj = &obj_priv->base;
		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
		    seq_printf(m, "--- gtt_offset = 0x%08x\n",
			       obj_priv->gtt_offset);
		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
508
/*
 * debugfs: hex-dump the full contents of the selected ring buffer
 * (render/BSD/BLT per info_ent->data) through its CPU mapping, one
 * dword per line.
 */
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret;

	switch ((uintptr_t)node->info_ent->data) {
	case RING_RENDER: ring = &dev_priv->render_ring; break;
	case RING_BSD: ring = &dev_priv->bsd_ring; break;
	case RING_BLT: ring = &dev_priv->blt_ring; break;
	default: return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* gem_object is only set once the ring has been initialized */
	if (!ring->gem_object) {
		seq_printf(m, "No ringbuffer setup\n");
	} else {
		u8 *virt = ring->virtual_start;
		uint32_t off;

		for (off = 0; off < ring->size; off += 4) {
			uint32_t *ptr = (uint32_t *)(virt + off);
			seq_printf(m, "%08x :  %08x\n", off, *ptr);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
543
/*
 * debugfs: dump the selected ring's control registers (head, tail,
 * size, active head, control, start).  Quietly does nothing for a ring
 * that was never initialized (size == 0).
 */
static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;

	switch ((uintptr_t)node->info_ent->data) {
	case RING_RENDER: ring = &dev_priv->render_ring; break;
	case RING_BSD: ring = &dev_priv->bsd_ring; break;
	case RING_BLT: ring = &dev_priv->blt_ring; break;
	default: return -EINVAL;
	}

	if (ring->size == 0)
	    return 0;

	seq_printf(m, "Ring %s:\n", ring->name);
	seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
	seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
	seq_printf(m, "  Size :    %08x\n", ring->size);
	seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
	seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
	seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));

	return 0;
}
571
572static const char *ring_str(int ring)
573{
574	switch (ring) {
575	case RING_RENDER: return "render";
576	case RING_BSD: return "bsd";
577	case RING_BLT: return "blt";
578	default: return "";
579	}
580}
581
/*
 * Pin annotation for an error-state buffer: " P" for positive (user)
 * pin counts, " p" for negative, "" when unpinned.
 */
static const char *pin_flag(int pinned)
{
	if (pinned == 0)
		return "";
	return pinned > 0 ? " P" : " p";
}
591
592static const char *tiling_flag(int tiling)
593{
594	switch (tiling) {
595	default:
596	case I915_TILING_NONE: return "";
597	case I915_TILING_X: return " X";
598	case I915_TILING_Y: return " Y";
599	}
600}
601
/* " dirty" annotation when the buffer was dirty at hang time. */
static const char *dirty_flag(int dirty)
{
	if (dirty)
		return " dirty";
	return "";
}
606
/* " purgeable" annotation when the buffer was marked purgeable. */
static const char *purgeable_flag(int purgeable)
{
	if (purgeable)
		return " purgeable";
	return "";
}
611
/*
 * Print @count entries from an error-state buffer array under a
 * "name [count]:" heading: offset, size, domains, seqno, and the
 * pin/tiling/dirty/purgeable/ring flags, plus optional name and fence.
 */
static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, "  %08x %8zd %04x %04x %08x%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->seqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   ring_str(err->ring));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}
641
/*
 * debugfs: dump the most recently captured GPU-hang error state —
 * timestamp, error registers, per-ring command-stream state, the
 * active/pinned buffer lists, raw batchbuffer and ringbuffer contents,
 * and any overlay state.  The whole dump runs under error_lock so the
 * state cannot be replaced while being printed.
 */
static int i915_error_state(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, page, offset, elt;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (!dev_priv->first_error) {
		seq_printf(m, "no error state collected\n");
		goto out;
	}

	error = dev_priv->first_error;

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	/* BLT/BSD command streamers only captured on gen6+ */
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "Blitter command stream:\n");
		seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
		seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
		seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
		seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
		seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
		seq_printf(m, "Video (BSD) command stream:\n");
		seq_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
		seq_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
		seq_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
		seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
		seq_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
	}
	seq_printf(m, "Render command stream:\n");
	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
	}
	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
	seq_printf(m, "  seqno: 0x%08x\n", error->seqno);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	/* raw dword dump of each captured batchbuffer */
	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
		if (error->batchbuffer[i]) {
			struct drm_i915_error_object *obj = error->batchbuffer[i];

			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	/* raw dword dump of the captured ringbuffer */
	if (error->ringbuffer) {
		struct drm_i915_error_object *obj = error->ringbuffer;

		seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
		offset = 0;
		for (page = 0; page < obj->page_count; page++) {
			for (elt = 0; elt < PAGE_SIZE/4; elt++) {
				seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
				offset += 4;
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

out:
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return 0;
}
737
/*
 * debugfs: report the render-standby delay fields of CRSTANDVID
 * (with-context in bits 13:8, without-context in bits 5:0).
 */
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay = I915_READ16(CRSTANDVID);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
749
/*
 * debugfs: report requested vs. current P-state and VID, decoded from
 * the MEMSWCTL and MEMSTAT_ILK registers.
 */
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 rgvswctl = I915_READ16(MEMSWCTL);
	u16 rgvstat = I915_READ16(MEMSTAT_ILK);

	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
		   MEMSTAT_VID_SHIFT);
	seq_printf(m, "Current P-state: %d\n",
		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);

	return 0;
}
767
/*
 * debugfs: dump the 16 PXVFREQ P-state/VID frequency table registers,
 * with the VID field decoded from each.
 */
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int i;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	return 0;
}
784
/* Convert a VID map value to millivolts: 1250mV at map 0, stepping
 * down 25mV per increment. */
static inline int MAP_TO_MV(int map)
{
	return 1250 - 25 * map;
}
789
/*
 * debugfs: dump the 32 INTTOEXT (internal-to-external voltage mapping)
 * registers starting at INTTOEXT_BASE_ILK.
 */
static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int i;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	return 0;
}
805
/*
 * debugfs: decode the dynamic render power-control configuration from
 * MEMMODECTL / MCHBAR_RENDER_STANDBY / CRSTANDVID — boost, HW/SW
 * control, P-state limits, RS1/RS2 VIDs and render-standby enable.
 */
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
	u16 crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	/* RCX_SW_EXIT set means software forced the render core out of standby */
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");

	return 0;
}
838
/*
 * debugfs: report whether framebuffer compression is active, and if it
 * was disabled, the recorded reason from dev_priv->no_fbc_reason.
 */
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}
883
/*
 * debugfs: report whether panel self-refresh is enabled, reading the
 * chipset-specific enable bit (WM1_LP on gen5, FW_BLC_SELF on
 * 945/Crestline, INSTPM on 915GM, DSPFW3 on Pineview).
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (IS_GEN5(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
905
/*
 * debugfs: report the energy-monitor readings — GMCH temperature and
 * chipset/GFX power — sampled under struct_mutex via the i915_*_val()
 * helpers.
 */
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
930
/*
 * debugfs: dump the GFXEC register (MMIO offset 0x112f4).
 * NOTE(review): 0x112f4 is a magic offset with no symbolic name in the
 * register headers — confirm its definition against the chipset docs.
 */
static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	return 0;
}
941
/*
 * debugfs: emit the raw ACPI OpRegion contents (OPREGION_SIZE bytes),
 * or nothing if the platform exposes no OpRegion header.
 */
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
961
/*
 * debugfs: describe the fbcon framebuffer and then every user-created
 * framebuffer (skipping the fbcon one in the fb_list walk), including
 * each backing object via describe_obj().  Held under the mode_config
 * mutex since it walks the fb list.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel);
	describe_obj(m, to_intel_bo(fb->obj));
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		/* already printed above */
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel);
		describe_obj(m, to_intel_bo(fb->obj));
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
1003
1004static int
1005i915_wedged_open(struct inode *inode,
1006		 struct file *filp)
1007{
1008	filp->private_data = inode->i_private;
1009	return 0;
1010}
1011
/*
 * debugfs read for i915_wedged: format the current wedged counter into
 * a small buffer and copy it out via simple_read_from_buffer().
 */
static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof (buf),
		       "wedged :  %d\n",
		       atomic_read(&dev_priv->mm.wedged));

	/* snprintf returns the would-be length; clamp if it truncated */
	if (len > sizeof (buf))
		len = sizeof (buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
1032
1033static ssize_t
1034i915_wedged_write(struct file *filp,
1035		  const char __user *ubuf,
1036		  size_t cnt,
1037		  loff_t *ppos)
1038{
1039	struct drm_device *dev = filp->private_data;
1040	char buf[20];
1041	int val = 1;
1042
1043	if (cnt > 0) {
1044		if (cnt > sizeof (buf) - 1)
1045			return -EINVAL;
1046
1047		if (copy_from_user(buf, ubuf, cnt))
1048			return -EFAULT;
1049		buf[cnt] = 0;
1050
1051		val = simple_strtoul(buf, NULL, 0);
1052	}
1053
1054	DRM_INFO("Manually setting wedged to %d\n", val);
1055	i915_handle_error(dev, val);
1056
1057	return cnt;
1058}
1059
/* File operations for the i915_wedged debugfs node (read current state,
 * write to inject a hang via i915_handle_error()). */
static const struct file_operations i915_wedged_fops = {
	.owner = THIS_MODULE,
	.open = i915_wedged_open,
	.read = i915_wedged_read,
	.write = i915_wedged_write,
	.llseek = default_llseek,
};
1067
1068/* As the drm_debugfs_init() routines are called before dev->dev_private is
1069 * allocated we need to hook into the minor for release. */
1070static int
1071drm_add_fake_info_node(struct drm_minor *minor,
1072		       struct dentry *ent,
1073		       const void *key)
1074{
1075	struct drm_info_node *node;
1076
1077	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
1078	if (node == NULL) {
1079		debugfs_remove(ent);
1080		return -ENOMEM;
1081	}
1082
1083	node->minor = minor;
1084	node->dent = ent;
1085	node->info_ent = (void *) key;
1086	list_add(&node->list, &minor->debugfs_nodes.list);
1087
1088	return 0;
1089}
1090
1091static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
1092{
1093	struct drm_device *dev = minor->dev;
1094	struct dentry *ent;
1095
1096	ent = debugfs_create_file("i915_wedged",
1097				  S_IRUGO | S_IWUSR,
1098				  root, dev,
1099				  &i915_wedged_fops);
1100	if (IS_ERR(ent))
1101		return PTR_ERR(ent);
1102
1103	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
1104}
1105
/* Table of read-only debugfs files; the fourth field, when present, is
 * passed to the show function as its node->info_ent->data argument
 * (list selector or ring id). */
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RING_RENDER},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)RING_BLT},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)RING_BSD},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_RENDER},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_RENDER},
	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BSD},
	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BSD},
	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BLT},
	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BLT},
	{"i915_batchbuffers", i915_batchbuffer_info, 0},
	{"i915_error_state", i915_error_state, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
1143
1144int i915_debugfs_init(struct drm_minor *minor)
1145{
1146	int ret;
1147
1148	ret = i915_wedged_create(minor->debugfs_root, minor);
1149	if (ret)
1150		return ret;
1151
1152	return drm_debugfs_create_files(i915_debugfs_list,
1153					I915_DEBUGFS_ENTRIES,
1154					minor->debugfs_root, minor);
1155}
1156
/* Tear down everything i915_debugfs_init() created. */
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	/* The wedged node was registered via drm_add_fake_info_node() with
	 * &i915_wedged_fops as its key (stored in node->info_ent); removal
	 * matches on that pointer, so the cast here only supplies the key —
	 * it is never dereferenced as a real drm_info_list entry. */
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
}
1164
1165#endif /* CONFIG_DEBUG_FS */
1166