hw_breakpoint.c revision 0d352e3d006c9589f22580212c3822cf62b6d775
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/system.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

/* Debug architecture version. */
static u8 debug_arch;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;

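/*
 * The breakpoint and watchpoint registers are accessed via CP14.
 * read_wb_reg()/write_wb_reg() take a register index of the form
 * (OP2 << 4) + CRm, where OP2 selects the bank (BVR/BCR/WVR/WCR) and
 * CRm selects the breakpoint/watchpoint number. The macros below
 * expand each index into the corresponding ARM_DBG_READ/ARM_DBG_WRITE
 * access.
 */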
#define READ_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_READ(c ## M, OP2, VAL); \
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_WRITE(c ## M, OP2, VAL);\
		break

#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)

static u32 read_wb_reg(int n)
{
	u32 val = 0;

	switch (n) {
	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint "
				"register %d\n", n);
	}

	return val;
}

static void write_wb_reg(int n, u32 val)
{
	switch (n) {
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint "
				"register %d\n", n);
	}
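	/* Ensure the debug register write has taken effect. */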
	isb();
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
	    "CPUID feature registers not supported. "
	    "Assuming v6 debug is present.\n"))
		return ARM_DEBUG_ARCH_V6;

	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

static int debug_arch_supported(void)
{
	u8 arch = get_debug_arch();

	/* We don't support the memory-mapped interface. */
	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
		arch >= ARM_DEBUG_ARCH_V7_1;
}

/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 28) & 0xf) + 1;
}

/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * On debug architectures prior to 7.1, when a watchpoint fires, the
	 * only way to work out which watchpoint it was is by disassembling
	 * the faulting instruction and working out the address of the memory
	 * access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Providing we have more than 1 breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
		return 1;

	return get_num_wrp_resources();
}

/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();
	return core_has_mismatch_brps() ? brps - 1 : brps;
}

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int enable_monitor_mode(void)
{
	u32 dscr;
	int ret = 0;

	ARM_DBG_READ(c1, 0, dscr);

	/* Ensure that halting mode is disabled. */
	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
			"halting debug mode enabled. Unable to access hardware resources.\n")) {
		ret = -EPERM;
		goto out;
	}

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/* Write to the corresponding DSCR. */
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
	case ARM_DEBUG_ARCH_V7_1:
		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Check that the write made it through. */
	ARM_DBG_READ(c1, 0, dscr);
	if (!(dscr & ARM_DSCR_MDBGEN))
		ret = -EPERM;

out:
	return ret;
}

int hw_breakpoint_slots(int type)
{
	if (!debug_arch_supported())
		return 0;

	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}

u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base, ret = 0;
	u32 addr, ctrl;

	/* Ensure that we are in monitor mode and halting mode is disabled. */
	ret = enable_monitor_mode();
	if (ret)
		goto out;

	addr = info->address;
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		ctrl_base = ARM_BASE_WCR;
		val_base = ARM_BASE_WVR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
		ret = -EBUSY;
		goto out;
	}

	/* Override the breakpoint data with the step data. */
	if (info->step_ctrl.enabled) {
		addr = info->trigger & ~0x3;
		ctrl = encode_ctrl_reg(info->step_ctrl);
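		/*
		 * Watchpoints are stepped using the reserved mismatch
		 * breakpoint (the BRP immediately after the usable ones),
		 * so redirect the register writes there.
		 */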
		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
			i = 0;
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
		}
	}

	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, ctrl);

out:
	return ret;
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
		return;

	/* Ensure that we disable the mismatch breakpoint. */
	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
	    info->step_ctrl.enabled) {
		i = 0;
		base = ARM_BASE_BCR + core_num_brps;
	}

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
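		/*
		 * Fall through: an 8 byte length is only valid for
		 * watchpoints, and only if the hardware supports an
		 * 8-bit byte-address select.
		 */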
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;

	/* Privilege */
	info->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	/* Mismatch */
	info->ctrl.mismatch = 0;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = info->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
		/* Allow single byte watchpoint. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
	default:
		ret = -EINVAL;
		goto out;
	}

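	/*
	 * The value register must hold a (double)word-aligned address, so
	 * program the aligned address and shift the byte-address select
	 * bits in the control register to cover the offset bytes.
	 */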
	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

	/*
	 * Currently we rely on an overflow handler to take
	 * care of single-stepping the breakpoint when it fires.
	 * In the case of userspace breakpoints on a core with V7 debug,
	 * we can use the mismatch feature as a poor-man's hardware
	 * single-step, but this only works for per-task breakpoints.
	 */
	if (WARN_ONCE(!bp->overflow_handler &&
		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
		 || !bp->hw.bp_target),
			"overflow handler required but none found\n")) {
		ret = -EINVAL;
	}
out:
	return ret;
}

/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	arch_uninstall_hw_breakpoint(bp);
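	/*
	 * Reprogram the slot as a mismatch breakpoint on the triggering
	 * address: the core will then debug-trap on the next instruction
	 * executed from any other address, giving us a single-step.
	 */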
	info->step_ctrl.mismatch  = 1;
	info->step_ctrl.len	  = ARM_BREAKPOINT_LEN_4;
	info->step_ctrl.type	  = ARM_BREAKPOINT_EXECUTE;
	info->step_ctrl.privilege = info->ctrl.privilege;
	info->step_ctrl.enabled	  = 1;
	info->trigger		  = addr;
	arch_install_hw_breakpoint(bp);
}

static void disable_single_step(struct perf_event *bp)
{
	arch_uninstall_hw_breakpoint(bp);
	counter_arch_bp(bp)->step_ctrl.enabled = 0;
	arch_install_hw_breakpoint(bp);
}

static void watchpoint_handler(unsigned long addr, unsigned int fsr,
			       struct pt_regs *regs)
{
	int i, access;
	u32 val, ctrl_reg, alignment_mask;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/*
		 * The DFAR is an unknown value on debug architectures prior
		 * to 7.1. Since we only allow a single watchpoint on these
		 * older CPUs, we can set the trigger to the lowest possible
		 * faulting address.
		 */
		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
			BUG_ON(i > 0);
			info->trigger = wp->attr.bp_addr;
		} else {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;

			/* Check if the watchpoint value matches. */
			val = read_wb_reg(ARM_BASE_WVR + i);
			if (val != (addr & ~alignment_mask))
				goto unlock;

			/* Possible match, check the byte address select. */
			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
			decode_ctrl_reg(ctrl_reg, &ctrl);
			if (!((1 << (addr & alignment_mask)) & ctrl.len))
				goto unlock;

			/* Check that the access type matches. */
			access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
				 HW_BREAKPOINT_R;
			if (!(access & hw_breakpoint_type(wp)))
				goto unlock;

			/* We have a winner. */
			info->trigger = addr;
		}

		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);

		/*
		 * If no overflow handler is present, insert a temporary
		 * mismatch breakpoint so we can single-step over the
		 * watchpoint trigger.
		 */
		if (!wp->overflow_handler)
			enable_single_step(wp, instruction_pointer(regs));

unlock:
		rcu_read_unlock();
	}
}

static void watchpoint_single_step_handler(unsigned long pc)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		if (!info->step_ctrl.enabled)
			goto unlock;

		/*
		 * Restore the original watchpoint if we've completed the
		 * single-step.
		 */
		if (info->trigger != pc)
			disable_single_step(wp);

unlock:
		rcu_read_unlock();
	}
}

static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	u32 ctrl_reg, val, addr;
	struct perf_event *bp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);

	/* The exception entry code places the amended lr in the PC. */
	addr = regs->ARM_pc;

	/* Check the currently installed breakpoints first. */
	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		info = counter_arch_bp(bp);

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(ARM_BASE_BVR + i);
		if (val != (addr & ~0x3))
			goto mismatch;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if ((1 << (addr & 0x3)) & ctrl.len) {
			info->trigger = addr;
			pr_debug("breakpoint fired: address = 0x%x\n", addr);
			perf_bp_event(bp, regs);
			if (!bp->overflow_handler)
				enable_single_step(bp, addr);
			goto unlock;
		}

mismatch:
		/* If we're stepping a breakpoint, it can now be restored. */
		if (info->step_ctrl.enabled)
			disable_single_step(bp);
unlock:
		rcu_read_unlock();
	}

	/* Handle any pending watchpoint single-step breakpoints. */
	watchpoint_single_step_handler(addr);
}

/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	int ret = 0;
	u32 dscr;

	preempt_disable();

	if (interrupts_enabled(regs))
		local_irq_enable();

	/* We only handle watchpoints and hardware breakpoints. */
	ARM_DBG_READ(c1, 0, dscr);

	/* Perform perf callbacks. */
	switch (ARM_DSCR_MOE(dscr)) {
	case ARM_ENTRY_BREAKPOINT:
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
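		/* Fall through to the synchronous watchpoint handler. */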
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, fsr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
	}

	preempt_enable();

	return ret;
}

/*
 * One-time initialisation.
 */
static cpumask_t debug_err_mask;

static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
{
	int cpu = smp_processor_id();

	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
		   instr, cpu);

	/* Set the error flag for this CPU and skip the faulting instruction. */
	cpumask_set_cpu(cpu, &debug_err_mask);
	instruction_pointer(regs) += 4;
	return 0;
}

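/*
 * Undef hook matching MRC/MCR accesses to CP14 (the debug registers).
 * If these trap (e.g. because DBGSWENABLE is driven low), debug_reg_trap()
 * flags the offending CPU in debug_err_mask.
 */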
static struct undef_hook debug_reg_hook = {
	.instr_mask	= 0x0fe80f10,
	.instr_val	= 0x0e000e10,
	.fn		= debug_reg_trap,
};

static void reset_ctrl_regs(void *unused)
{
	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
	u32 dbg_power;

	/*
	 * v7 debug contains save and restore registers so that debug state
	 * can be maintained across low-power modes without leaving the debug
	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
	 * the debug registers out of reset, so we must unlock the OS Lock
	 * Access Register to avoid taking undefined instruction exceptions
	 * later on.
	 */
	switch (debug_arch) {
	case ARM_DEBUG_ARCH_V7_ECP14:
		/*
		 * Ensure sticky power-down is clear (i.e. debug logic is
		 * powered up).
		 */
		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
		if ((dbg_power & 0x1) == 0)
			err = -EPERM;
		break;
	case ARM_DEBUG_ARCH_V7_1:
		/*
		 * Ensure the OS double lock is clear.
		 */
		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
		if ((dbg_power & 0x1) == 1)
			err = -EPERM;
		break;
	}

	if (err) {
		pr_warning("CPU %d debug is powered down!\n", cpu);
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
		return;
	}

	/*
	 * Unconditionally clear the lock by writing a value
	 * other than 0xC5ACCE55 to the access register.
	 */
	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
	isb();

	/*
	 * Clear any configured vector-catch events before
	 * enabling monitor mode.
	 */
	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
	isb();

	if (enable_monitor_mode())
		return;

	/* We must also reset any reserved registers. */
	raw_num_brps = get_num_brp_resources();
	for (i = 0; i < raw_num_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(ARM_BASE_WCR + i, 0UL);
		write_wb_reg(ARM_BASE_WVR + i, 0UL);
	}
}

static int __cpuinit dbg_reset_notify(struct notifier_block *self,
				      unsigned long action, void *cpu)
{
	if (action == CPU_ONLINE)
		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};

static int __init arch_hw_breakpoint_init(void)
{
	u32 dscr;

	debug_arch = get_debug_arch();

	if (!debug_arch_supported()) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	/*
	 * We need to tread carefully here because DBGSWENABLE may be
	 * driven low on this core and there isn't an architected way to
	 * determine that.
	 */
	register_undef_hook(&debug_reg_hook);

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	on_each_cpu(reset_ctrl_regs, NULL, 1);
	unregister_undef_hook(&debug_reg_hook);
	if (!cpumask_empty(&debug_err_mask)) {
		core_num_brps = 0;
		core_num_wrps = 0;
		return 0;
	}

	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
		"", core_num_wrps);

	ARM_DBG_READ(c1, 0, dscr);
	if (dscr & ARM_DSCR_HDBGEN) {
		max_watchpoint_len = 4;
		pr_warning("halting debug mode enabled. Assuming maximum watchpoint size of %u bytes.\n",
			   max_watchpoint_len);
	} else {
		/* Work out the maximum supported watchpoint length. */
		max_watchpoint_len = get_max_wp_len();
		pr_info("maximum watchpoint size is %u bytes.\n",
				max_watchpoint_len);
	}

	/* Register debug fault handler. */
	hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
			"watchpoint debug exception");
	hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
			"breakpoint debug exception");

	/* Register hotplug notifier. */
	register_cpu_notifier(&dbg_reset_nb);
	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
1037