/* hw_breakpoint.c — revision 7f4050a07be8ce5fad069722326ccd550577a93a */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP (per-CPU slot table). */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP (per-CPU slot table). */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/*
 * Number of BRP/WRP registers on this CPU, as probed at init time.
 * Both are zeroed again if probing detects broken debug register access.
 */
static int core_num_brps;
static int core_num_wrps;

/* Debug architecture version (DBGDIDR[19:16]), cached at init time. */
static u8 debug_arch;

/* Maximum supported watchpoint length.
 */
static u8 max_watchpoint_len;

/*
 * The CP14 breakpoint/watchpoint value and control registers are addressed
 * by (op2, CRm). We flatten that pair into a single index n = (op2 << 4) + m
 * so they can be selected with a switch statement.
 */
#define READ_WB_REG_CASE(OP2, M, VAL)           \
        case ((OP2 << 4) + M):                  \
                ARM_DBG_READ(c ## M, OP2, VAL); \
                break

#define WRITE_WB_REG_CASE(OP2, M, VAL)          \
        case ((OP2 << 4) + M):                  \
                ARM_DBG_WRITE(c ## M, OP2, VAL);\
                break

#define GEN_READ_WB_REG_CASES(OP2, VAL)         \
        READ_WB_REG_CASE(OP2, 0, VAL);          \
        READ_WB_REG_CASE(OP2, 1, VAL);          \
        READ_WB_REG_CASE(OP2, 2, VAL);          \
        READ_WB_REG_CASE(OP2, 3, VAL);          \
        READ_WB_REG_CASE(OP2, 4, VAL);          \
        READ_WB_REG_CASE(OP2, 5, VAL);          \
        READ_WB_REG_CASE(OP2, 6, VAL);          \
        READ_WB_REG_CASE(OP2, 7, VAL);          \
        READ_WB_REG_CASE(OP2, 8, VAL);          \
        READ_WB_REG_CASE(OP2, 9, VAL);          \
        READ_WB_REG_CASE(OP2, 10, VAL);         \
        READ_WB_REG_CASE(OP2, 11, VAL);         \
        READ_WB_REG_CASE(OP2, 12, VAL);         \
        READ_WB_REG_CASE(OP2, 13, VAL);         \
        READ_WB_REG_CASE(OP2, 14, VAL);         \
        READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)        \
        WRITE_WB_REG_CASE(OP2, 0, VAL);         \
        WRITE_WB_REG_CASE(OP2, 1, VAL);         \
        WRITE_WB_REG_CASE(OP2, 2, VAL);         \
        WRITE_WB_REG_CASE(OP2, 3, VAL);         \
        WRITE_WB_REG_CASE(OP2, 4, VAL);         \
        WRITE_WB_REG_CASE(OP2, 5, VAL);         \
        WRITE_WB_REG_CASE(OP2, 6, VAL);         \
        WRITE_WB_REG_CASE(OP2, 7, VAL);         \
        WRITE_WB_REG_CASE(OP2, 8, VAL);         \
        WRITE_WB_REG_CASE(OP2, 9, VAL);         \
        WRITE_WB_REG_CASE(OP2, 10, VAL);        \
        WRITE_WB_REG_CASE(OP2, 11, VAL);        \
        WRITE_WB_REG_CASE(OP2, 12, VAL);        \
        WRITE_WB_REG_CASE(OP2, 13, VAL);        \
        WRITE_WB_REG_CASE(OP2, 14, VAL);        \
        WRITE_WB_REG_CASE(OP2, 15, VAL)

/*
 * Read the debug register selected by the flattened index n.
 * Returns 0 (and warns) for an unknown register index.
 */
static u32 read_wb_reg(int n)
{
        u32 val = 0;

        switch (n) {
        GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
        GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
        GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
        GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
        default:
                pr_warning("attempt to read from unknown breakpoint "
                                "register %d\n", n);
        }

        return val;
}

/*
 * Write val to the debug register selected by the flattened index n.
 * The isb() ensures the register update is visible before we continue.
 */
static void write_wb_reg(int n, u32 val)
{
        switch (n) {
        GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
        GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
        GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
        GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
        default:
                pr_warning("attempt to write to unknown breakpoint "
                                "register %d\n", n);
        }
        isb();
}

/* Determine debug architecture from DBGDIDR[19:16]. */
static u8 get_debug_arch(void)
{
        u32 didr;

        /* Do we implement the extended CPUID interface? */
        if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
                pr_warning("CPUID feature registers not supported. "
                                "Assuming v6 debug is present.\n");
                return ARM_DEBUG_ARCH_V6;
        }

        ARM_DBG_READ(c0, 0, didr);
        return (didr >> 16) & 0xf;
}

/* Return the debug architecture version cached at init time. */
u8 arch_get_debug_arch(void)
{
        return debug_arch;
}

/* Is this debug architecture one we know how to drive via CP14? */
static int debug_arch_supported(void)
{
        u8 arch = get_debug_arch();

        /* We don't support the memory-mapped interface. */
        return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
                arch >= ARM_DEBUG_ARCH_V7_1;
}

/*
 * Can we determine the watchpoint access type from the fsr?
 * Always no for now: the FSR access bit is not reliably updated on
 * debug exceptions, so we never trust it.
 */
static int debug_exception_updates_fsr(void)
{
        return 0;
}

/* Determine number of WRP registers available (DBGDIDR[31:28] + 1). */
static int get_num_wrp_resources(void)
{
        u32 didr;
        ARM_DBG_READ(c0, 0, didr);
        return ((didr >> 28) & 0xf) + 1;
}

/* Determine number of BRP registers available (DBGDIDR[27:24] + 1). */
static int get_num_brp_resources(void)
{
        u32 didr;
        ARM_DBG_READ(c0, 0, didr);
        return ((didr >> 24) & 0xf) + 1;
}

/*
 * Does this core support mismatch breakpoints? Requires v7 debug
 * (ECP14) or later and at least two BRPs, since one BRP is reserved
 * for single-stepping via a mismatch breakpoint.
 */
static int core_has_mismatch_brps(void)
{
        return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
                get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available.
 */
static int get_num_wrps(void)
{
        /*
         * On debug architectures prior to 7.1, when a watchpoint fires, the
         * only way to work out which watchpoint it was is by disassembling
         * the faulting instruction and working out the address of the memory
         * access.
         *
         * Furthermore, we can only do this if the watchpoint was precise
         * since imprecise watchpoints prevent us from calculating register
         * based addresses.
         *
         * Providing we have more than 1 breakpoint register, we only report
         * a single watchpoint register for the time being. This way, we always
         * know which watchpoint fired. In the future we can either add a
         * disassembler and address generation emulator, or we can insert a
         * check to see if the DFAR is set on watchpoint exception entry
         * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
         * that it is set on some implementations].
         */
        if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
                return 1;

        return get_num_wrp_resources();
}

/*
 * Determine number of usable BRPs available. One BRP is held back for
 * mismatch-based single-stepping on cores that support it.
 */
static int get_num_brps(void)
{
        int brps = get_num_brp_resources();
        return core_has_mismatch_brps() ? brps - 1 : brps;
}

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 *
 * Returns 0 on success, -ENODEV for an unknown debug architecture or
 * -EPERM if the MDBGEN write did not stick (e.g. DBGSWENABLE driven low).
 */
static int enable_monitor_mode(void)
{
        u32 dscr;
        ARM_DBG_READ(c1, 0, dscr);

        /* If monitor mode is already enabled, just return. */
        if (dscr & ARM_DSCR_MDBGEN)
                goto out;

        /* Write to the corresponding DSCR. */
        switch (get_debug_arch()) {
        case ARM_DEBUG_ARCH_V6:
        case ARM_DEBUG_ARCH_V6_1:
                ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
                break;
        case ARM_DEBUG_ARCH_V7_ECP14:
        case ARM_DEBUG_ARCH_V7_1:
                /* v7 uses DBGDSCRext (c2, op2=2); isb() makes it take effect. */
                ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
                isb();
                break;
        default:
                return -ENODEV;
        }

        /* Check that the write made it through. */
        ARM_DBG_READ(c1, 0, dscr);
        if (WARN_ONCE(!(dscr & ARM_DSCR_MDBGEN),
                "Failed to enable monitor mode on CPU %d.\n",
                smp_processor_id()))
                return -EPERM;

out:
        return 0;
}

/*
 * Report the number of available breakpoint (TYPE_INST) or watchpoint
 * (TYPE_DATA) slots to the generic hw_breakpoint layer.
 */
int hw_breakpoint_slots(int type)
{
        if (!debug_arch_supported())
                return 0;

        /*
         * We can be called early, so don't rely on
         * our static variables being initialised.
         */
        switch (type) {
        case TYPE_INST:
                return get_num_brps();
        case TYPE_DATA:
                return get_num_wrps();
        default:
                pr_warning("unknown slot type: %d\n", type);
                return 0;
        }
}

/*
 * Check if 8-bit byte-address select is available by probing whether an
 * 8-byte length encoding sticks in WCR0. This clobbers WRP 0 (the probe
 * control word has enabled == 0, so the watchpoint stays disarmed).
 */
static u8 get_max_wp_len(void)
{
        u32 ctrl_reg;
        struct arch_hw_breakpoint_ctrl ctrl;
        u8 size = 4;

        if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
                goto out;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.len = ARM_BREAKPOINT_LEN_8;
        ctrl_reg = encode_ctrl_reg(ctrl);

        write_wb_reg(ARM_BASE_WVR, 0);
        write_wb_reg(ARM_BASE_WCR, ctrl_reg);
        if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
                size = 8;

out:
        return size;
}

/* Return the maximum watchpoint length probed at init time (4 or 8 bytes). */
u8 arch_get_max_wp_len(void)
{
        return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slot, **slots;
        int i, max_slots, ctrl_base, val_base, ret = 0;
        u32 addr, ctrl;

        /* Ensure that we are in monitor mode and halting mode is disabled. */
        ret = enable_monitor_mode();
        if (ret)
                goto out;

        addr = info->address;
        ctrl = encode_ctrl_reg(info->ctrl) | 0x1;       /* bit 0 = enable */

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                ctrl_base = ARM_BASE_BCR;
                val_base = ARM_BASE_BVR;
                slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
                max_slots = core_num_brps;
        } else {
                /* Watchpoint */
                ctrl_base = ARM_BASE_WCR;
                val_base = ARM_BASE_WVR;
                slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
                max_slots = core_num_wrps;
        }

        /* Claim the first free slot on this CPU. */
        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];

                if (!*slot) {
                        *slot = bp;
                        break;
                }
        }

        if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * Override the breakpoint data with the step data. When stepping a
         * watchpoint, program the mismatch breakpoint reserved past the
         * usable BRPs instead of the watchpoint registers.
         */
        if (info->step_ctrl.enabled) {
                addr = info->trigger & ~0x3;
                ctrl = encode_ctrl_reg(info->step_ctrl);
                if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
                        i = 0;
                        ctrl_base = ARM_BASE_BCR + core_num_brps;
                        val_base = ARM_BASE_BVR + core_num_brps;
                }
        }

        /* Setup the address register. */
        write_wb_reg(val_base + i, addr);

        /* Setup the control register. */
        write_wb_reg(ctrl_base + i, ctrl);

out:
        return ret;
}

/*
 * Remove a perf counter breakpoint: release its per-CPU slot and clear
 * the corresponding control register (or the mismatch BRP when a stepped
 * watchpoint is being torn down).
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        struct perf_event **slot, **slots;
        int i, max_slots, base;

        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                base = ARM_BASE_BCR;
                slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
                max_slots = core_num_brps;
        } else {
                /* Watchpoint */
                base = ARM_BASE_WCR;
                slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
                max_slots = core_num_wrps;
        }

        /* Remove the breakpoint. */
        for (i = 0; i < max_slots; ++i) {
                slot = &slots[i];

                if (*slot == bp) {
                        *slot = NULL;
                        break;
                }
        }

        if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
                return;

        /* Ensure that we disable the mismatch breakpoint. */
        if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
            info->step_ctrl.enabled) {
                i = 0;
                base = ARM_BASE_BCR + core_num_brps;
        }

        /* Reset the control register. */
        write_wb_reg(base + i, 0);
}

/*
 * Convert an arch length encoding into bytes. Returns 0 for an
 * unrecognised encoding.
 */
static int get_hbp_len(u8 hbp_len)
{
        unsigned int len_in_bytes = 0;

        switch (hbp_len) {
        case ARM_BREAKPOINT_LEN_1:
                len_in_bytes = 1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                len_in_bytes = 2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
        }

        return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        unsigned int len;
        unsigned long va;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        va = info->address;
        len = get_hbp_len(info->ctrl.len);

        /* Both the start and the last byte must lie above TASK_SIZE. */
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 *
 * Returns 0 on success or -EINVAL for an encoding with no generic
 * equivalent.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
                           int *gen_len, int *gen_type)
{
        /* Type */
        switch (ctrl.type) {
        case ARM_BREAKPOINT_EXECUTE:
                *gen_type = HW_BREAKPOINT_X;
                break;
        case ARM_BREAKPOINT_LOAD:
                *gen_type = HW_BREAKPOINT_R;
                break;
        case ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
                *gen_type = HW_BREAKPOINT_RW;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (ctrl.len) {
        case ARM_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
        case ARM_BREAKPOINT_LEN_2:
                *gen_len = HW_BREAKPOINT_LEN_2;
                break;
        case ARM_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
        case ARM_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_X:
                info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                info->ctrl.type = ARM_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                info->ctrl.type = ARM_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (bp->attr.bp_len) {
        case HW_BREAKPOINT_LEN_1:
                info->ctrl.len = ARM_BREAKPOINT_LEN_1;
                break;
        case HW_BREAKPOINT_LEN_2:
                info->ctrl.len = ARM_BREAKPOINT_LEN_2;
                break;
        case HW_BREAKPOINT_LEN_4:
                info->ctrl.len = ARM_BREAKPOINT_LEN_4;
                break;
        case HW_BREAKPOINT_LEN_8:
                /*
                 * 8-byte lengths are only valid for watchpoints, and only
                 * when the hardware supports 8-bit byte-address select.
                 */
                info->ctrl.len = ARM_BREAKPOINT_LEN_8;
                if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
                        && max_watchpoint_len >= 8)
                        break;
                /* Otherwise fall through and reject the request. */
        default:
                return -EINVAL;
        }

        /*
         * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
         * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
         * by the hardware and must be aligned to the appropriate number of
         * bytes.
         */
        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
            info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
            info->ctrl.len != ARM_BREAKPOINT_LEN_4)
                return -EINVAL;

        /* Address */
        info->address = bp->attr.bp_addr;

        /* Privilege: always match user, add kernel if the address requires it. */
        info->ctrl.privilege = ARM_BREAKPOINT_USER;
        if (arch_check_bp_in_kernelspace(bp))
                info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

        /* Enabled? */
        info->ctrl.enabled = !bp->attr.disabled;

        /* Mismatch: only set later when single-stepping. */
        info->ctrl.mismatch = 0;

        return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int ret = 0;
        u32 offset, alignment_mask = 0x3;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp);
        if (ret)
                goto out;

        /* Check address alignment. */
        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                alignment_mask = 0x7;
        offset = info->address & alignment_mask;
        switch (offset) {
        case 0:
                /* Aligned */
                break;
        case 1:
        case 2:
                /* Allow halfword watchpoints and breakpoints. */
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
                        break;
                /* Fall through: may still be a valid single-byte watchpoint. */
        case 3:
                /* Allow single byte watchpoint. */
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
                        break;
                /* Fall through */
        default:
                ret = -EINVAL;
                goto out;
        }

        /*
         * Align the address and shift the length into the byte-address
         * select field so the hardware watches the originally requested
         * bytes.
         */
        info->address &= ~alignment_mask;
        info->ctrl.len <<= offset;

        if (!bp->overflow_handler) {
                /*
                 * Mismatch breakpoints are required for single-stepping
                 * breakpoints.
                 */
                if (!core_has_mismatch_brps())
                        return -EINVAL;

                /* We don't allow mismatch breakpoints in kernel space. */
                if (arch_check_bp_in_kernelspace(bp))
                        return -EPERM;

                /*
                 * Per-cpu breakpoints are not supported by our stepping
                 * mechanism.
                 */
                if (!bp->hw.bp_target)
                        return -EINVAL;

                /*
                 * We only support specific access types if the fsr
                 * reports them.
                 */
                if (!debug_exception_updates_fsr() &&
                    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
                     info->ctrl.type == ARM_BREAKPOINT_STORE))
                        return -EINVAL;
        }

out:
        return ret;
}

/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /*
         * Reinstall the event as a mismatch breakpoint on addr: it fires
         * on the first instruction executed at any OTHER address, i.e.
         * once we have stepped past the triggering instruction.
         */
        arch_uninstall_hw_breakpoint(bp);
        info->step_ctrl.mismatch  = 1;
        info->step_ctrl.len       = ARM_BREAKPOINT_LEN_4;
        info->step_ctrl.type      = ARM_BREAKPOINT_EXECUTE;
        info->step_ctrl.privilege = info->ctrl.privilege;
        info->step_ctrl.enabled   = 1;
        info->trigger             = addr;
        arch_install_hw_breakpoint(bp);
}

/* Restore the original breakpoint/watchpoint once the step has completed. */
static void disable_single_step(struct perf_event *bp)
{
        arch_uninstall_hw_breakpoint(bp);
        counter_arch_bp(bp)->step_ctrl.enabled = 0;
        arch_install_hw_breakpoint(bp);
}

/*
 * Watchpoint exception handler: work out which installed watchpoint(s)
 * match the faulting access, report them to perf and, if needed, set up
 * a mismatch step over the triggering instruction.
 */
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
                               struct pt_regs *regs)
{
        int i, access;
        u32 val, ctrl_reg, alignment_mask;
        struct perf_event *wp, **slots;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();

                wp = slots[i];

                if (wp == NULL)
                        goto unlock;

                info = counter_arch_bp(wp);
                /*
                 * The DFAR is an unknown value on debug architectures prior
                 * to 7.1. Since we only allow a single watchpoint on these
                 * older CPUs, we can set the trigger to the lowest possible
                 * faulting address.
                 */
                if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
                        BUG_ON(i > 0);
                        info->trigger = wp->attr.bp_addr;
                } else {
                        if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                                alignment_mask = 0x7;
                        else
                                alignment_mask = 0x3;

                        /* Check if the watchpoint value matches. */
                        val = read_wb_reg(ARM_BASE_WVR + i);
                        if (val != (addr & ~alignment_mask))
                                goto unlock;

                        /* Possible match, check the byte address select. */
                        ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
                        decode_ctrl_reg(ctrl_reg, &ctrl);
                        if (!((1 << (addr & alignment_mask)) & ctrl.len))
                                goto unlock;

                        /* Check that the access type matches. */
                        if (debug_exception_updates_fsr()) {
                                access = (fsr & ARM_FSR_ACCESS_MASK) ?
                                          HW_BREAKPOINT_W : HW_BREAKPOINT_R;
                                if (!(access & hw_breakpoint_type(wp)))
                                        goto unlock;
                        }

                        /* We have a winner. */
                        info->trigger = addr;
                }

                pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
                perf_bp_event(wp, regs);

                /*
                 * If no overflow handler is present, insert a temporary
                 * mismatch breakpoint so we can single-step over the
                 * watchpoint trigger.
                 */
                if (!wp->overflow_handler)
                        enable_single_step(wp, instruction_pointer(regs));

unlock:
                rcu_read_unlock();
        }
}

/*
 * Called from the breakpoint handler: restore any watchpoint whose
 * single-step over the triggering instruction (at trigger != pc) has
 * completed.
 */
static void watchpoint_single_step_handler(unsigned long pc)
{
        int i;
        struct perf_event *wp, **slots;
        struct arch_hw_breakpoint *info;

        slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

        for (i = 0; i < core_num_wrps; ++i) {
                rcu_read_lock();

                wp = slots[i];

                if (wp == NULL)
                        goto unlock;

                info = counter_arch_bp(wp);
                if (!info->step_ctrl.enabled)
                        goto unlock;

                /*
                 * Restore the original watchpoint if we've completed the
                 * single-step.
                 */
                if (info->trigger != pc)
                        disable_single_step(wp);

unlock:
                rcu_read_unlock();
        }
}

/*
 * Breakpoint exception handler: match the faulting PC against the
 * installed breakpoints, report hits to perf and drive the
 * mismatch-based single-step state machine.
 */
static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
        int i;
        u32 ctrl_reg, val, addr;
        struct perf_event *bp, **slots;
        struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;

        slots = (struct perf_event **)__get_cpu_var(bp_on_reg);

        /* The exception entry code places the amended lr in the PC. */
        addr = regs->ARM_pc;

        /* Check the currently installed breakpoints first. */
        for (i = 0; i < core_num_brps; ++i) {
                rcu_read_lock();

                bp = slots[i];

                if (bp == NULL)
                        goto unlock;

                info = counter_arch_bp(bp);

                /* Check if the breakpoint value matches. */
                val = read_wb_reg(ARM_BASE_BVR + i);
                if (val != (addr & ~0x3))
                        goto mismatch;

                /* Possible match, check the byte address select to confirm. */
                ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
                decode_ctrl_reg(ctrl_reg, &ctrl);
                if ((1 << (addr & 0x3)) & ctrl.len) {
                        info->trigger = addr;
                        pr_debug("breakpoint fired: address = 0x%x\n", addr);
                        perf_bp_event(bp, regs);
                        if (!bp->overflow_handler)
                                enable_single_step(bp, addr);
                        goto unlock;
                }

mismatch:
                /* If we're stepping a breakpoint, it can now be restored. */
                if (info->step_ctrl.enabled)
                        disable_single_step(bp);
unlock:
                rcu_read_unlock();
        }

        /* Handle any pending watchpoint single-step breakpoints. */
        watchpoint_single_step_handler(addr);
}

/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 *
 * Returns 0 if the debug exception was handled, 1 to report an
 * unhandled fault back to the abort handler.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
                                 struct pt_regs *regs)
{
        int ret = 0;
        u32 dscr;

        preempt_disable();

        if (interrupts_enabled(regs))
                local_irq_enable();

        /* We only handle watchpoints and hardware breakpoints. */
        ARM_DBG_READ(c1, 0, dscr);

        /* Perform perf callbacks. */
        switch (ARM_DSCR_MOE(dscr)) {
        case ARM_ENTRY_BREAKPOINT:
                breakpoint_handler(addr, regs);
                break;
        case ARM_ENTRY_ASYNC_WATCHPOINT:
                WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
                /* Fall through: handle it as a synchronous watchpoint. */
        case ARM_ENTRY_SYNC_WATCHPOINT:
                watchpoint_handler(addr, fsr, regs);
                break;
        default:
                ret = 1; /* Unhandled fault. */
        }

        preempt_enable();

        return ret;
}

/*
 * One-time initialisation.
 */
static cpumask_t debug_err_mask;

/*
 * Undef hook run while probing: a CP14 debug register access faulted
 * (e.g. DBGSWENABLE tied low). Record the failure for this CPU and skip
 * the faulting instruction so the probe can continue.
 */
static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
{
        int cpu = smp_processor_id();

        pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
                        instr, cpu);

        /* Set the error flag for this CPU and skip the faulting instruction. */
        cpumask_set_cpu(cpu, &debug_err_mask);
        instruction_pointer(regs) += 4;
        return 0;
}

/* Matches CP14 MRC/MCR debug register accesses during probing. */
static struct undef_hook debug_reg_hook = {
        .instr_mask     = 0x0fe80f10,
        .instr_val      = 0x0e000e10,
        .fn             = debug_reg_trap,
};

/*
 * Bring this CPU's debug registers into a known state: unlock the OS
 * lock where required, clear vector catch and all BRP/WRP pairs, and
 * enable monitor mode. Failures are recorded in debug_err_mask.
 * Runs on each CPU (via on_each_cpu / smp_call_function_single).
 */
static void reset_ctrl_regs(void *unused)
{
        int i, raw_num_brps, err = 0, cpu = smp_processor_id();
        u32 val;

        /*
         * v7 debug contains save and restore registers so that debug state
         * can be maintained across low-power modes without leaving the debug
         * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
         * the debug registers out of reset, so we must unlock the OS Lock
         * Access Register to avoid taking undefined instruction exceptions
         * later on.
         */
        switch (debug_arch) {
        case ARM_DEBUG_ARCH_V6:
        case ARM_DEBUG_ARCH_V6_1:
                /* ARMv6 cores clear the registers out of reset. */
                goto out_mdbgen;
        case ARM_DEBUG_ARCH_V7_ECP14:
                /*
                 * Ensure sticky power-down is clear (i.e. debug logic is
                 * powered up).
                 */
                asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (val));
                if ((val & 0x1) == 0)
                        err = -EPERM;

                /*
                 * Check whether we implement OS save and restore.
                 */
                asm volatile("mrc p14, 0, %0, c1, c1, 4" : "=r" (val));
                if ((val & 0x9) == 0)
                        goto clear_vcr;
                break;
        case ARM_DEBUG_ARCH_V7_1:
                /*
                 * Ensure the OS double lock is clear.
                 */
                asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (val));
                if ((val & 0x1) == 1)
                        err = -EPERM;
                break;
        }

        if (err) {
                pr_warning("CPU %d debug is powered down!\n", cpu);
                cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
                return;
        }

        /*
         * Unconditionally clear the OS lock by writing a value
         * other than 0xC5ACCE55 to the access register.
         */
        asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
        isb();

        /*
         * Clear any configured vector-catch events before
         * enabling monitor mode.
         */
clear_vcr:
        asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
        isb();

        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
                pr_warning("CPU %d failed to disable vector catch\n", cpu);
                return;
        }

        /*
         * The control/value register pairs are UNKNOWN out of reset so
         * clear them to avoid spurious debug events.
         */
        raw_num_brps = get_num_brp_resources();
        for (i = 0; i < raw_num_brps; ++i) {
                write_wb_reg(ARM_BASE_BCR + i, 0UL);
                write_wb_reg(ARM_BASE_BVR + i, 0UL);
        }

        for (i = 0; i < core_num_wrps; ++i) {
                write_wb_reg(ARM_BASE_WCR + i, 0UL);
                write_wb_reg(ARM_BASE_WVR + i, 0UL);
        }

        if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
                pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
                return;
        }

        /*
         * Have a crack at enabling monitor mode. We don't actually need
         * it yet, but reporting an error early is useful if it fails.
         */
out_mdbgen:
        if (enable_monitor_mode())
                cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}

/* CPU hotplug callback: re-initialise debug state on a newly-online CPU. */
static int __cpuinit dbg_reset_notify(struct notifier_block *self,
                                      unsigned long action, void *cpu)
{
        if (action == CPU_ONLINE)
                smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_reset_nb = {
        .notifier_call = dbg_reset_notify,
};

/*
 * One-time initialisation: probe the debug architecture, reset the debug
 * registers on every CPU, and hook the debug fault handlers. Returns 0
 * even when hw_breakpoint support is unavailable (the slot counts are
 * simply left at zero).
 */
static int __init arch_hw_breakpoint_init(void)
{
        debug_arch = get_debug_arch();

        if (!debug_arch_supported()) {
                pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
                return 0;
        }

        /* Determine how many BRPs/WRPs are available. */
        core_num_brps = get_num_brps();
        core_num_wrps = get_num_wrps();

        /*
         * We need to tread carefully here because DBGSWENABLE may be
         * driven low on this core and there isn't an architected way to
         * determine that.
         */
        register_undef_hook(&debug_reg_hook);

        /*
         * Reset the breakpoint resources. We assume that a halting
         * debugger will leave the world in a nice state for us.
         */
        on_each_cpu(reset_ctrl_regs, NULL, 1);
        unregister_undef_hook(&debug_reg_hook);
        if (!cpumask_empty(&debug_err_mask)) {
                core_num_brps = 0;
                core_num_wrps = 0;
                return 0;
        }

        pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
                core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
                "", core_num_wrps);

        /* Work out the maximum supported watchpoint length. */
        max_watchpoint_len = get_max_wp_len();
        pr_info("maximum watchpoint size is %u bytes.\n",
                        max_watchpoint_len);

        /* Register debug fault handler. */
        hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
                        TRAP_HWBKPT, "watchpoint debug exception");
        hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
                        TRAP_HWBKPT, "breakpoint debug exception");

        /* Register hotplug notifier. */
        register_cpu_notifier(&dbg_reset_nb);
        return 0;
}
arch_initcall(arch_hw_breakpoint_init);

/* Required by the perf PMU interface; hw breakpoints have nothing to read. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}