/* hw_breakpoint.c, revision 0daa034e696ac601061cbf60fda41ad39678ae14 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP (NULL when the slot is free). */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP (NULL when the slot is free). */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

/* Debug architecture version. */
static u8 debug_arch;

/* Maximum supported watchpoint length.
 */
static u8 max_watchpoint_len;

#define READ_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_READ(c ## M, OP2, VAL);	\
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_WRITE(c ## M, OP2, VAL);\
		break

#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)

/*
 * Read a breakpoint/watchpoint register. n packs the register bank
 * (ARM_OP2_{BVR,BCR,WVR,WCR} in the high nibble) and the register
 * index (low nibble); an unknown index is reported and reads as 0.
 */
static u32 read_wb_reg(int n)
{
	u32 val = 0;

	switch (n) {
	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint "
			   "register %d\n", n);
	}

	return val;
}

/*
 * Write a breakpoint/watchpoint register; n is encoded as for
 * read_wb_reg(). The isb() ensures the write has taken effect before
 * any following instructions execute.
 */
static void write_wb_reg(int n, u32 val)
{
	switch (n) {
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint "
			   "register %d\n", n);
	}
	isb();
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warn_once("CPUID feature registers not supported. "
			     "Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

	ARM_DBG_READ(c0, 0, didr);
	/* DBGDIDR[19:16] is the debug architecture version field. */
	return (didr >> 16) & 0xf;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

static int debug_arch_supported(void)
{
	u8 arch = get_debug_arch();

	/* We don't support the memory-mapped interface. */
	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
		arch >= ARM_DEBUG_ARCH_V7_1;
}

/*
 * Can we determine the watchpoint access type from the fsr?
 * Hard-wired to "no": watchpoint_handler() therefore never trusts the
 * FSR access bit, and load/store-only watchpoints without an overflow
 * handler are rejected in arch_validate_hwbkpt_settings().
 */
static int debug_exception_updates_fsr(void)
{
	return 0;
}

/* Determine number of WRP registers available (DBGDIDR[31:28] + 1). */
static int get_num_wrp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 28) & 0xf) + 1;
}

/* Determine number of BRP registers available (DBGDIDR[27:24] + 1). */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available.
 */
static int get_num_wrps(void)
{
	/*
	 * On debug architectures prior to 7.1, when a watchpoint fires, the
	 * only way to work out which watchpoint it was is by disassembling
	 * the faulting instruction and working out the address of the memory
	 * access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Providing we have more than 1 breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
		return 1;

	return get_num_wrp_resources();
}

/*
 * Determine number of usable BRPs available. One BRP is held back for
 * the step/mismatch breakpoint when the core supports mismatching.
 */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();
	return core_has_mismatch_brps() ? brps - 1 : brps;
}

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int monitor_mode_enabled(void)
{
	u32 dscr;
	ARM_DBG_READ(c1, 0, dscr);
	return !!(dscr & ARM_DSCR_MDBGEN);
}

/*
 * Enable monitor debug mode by setting DSCR.MDBGEN, using the write
 * encoding appropriate to the debug architecture. Returns 0 on success,
 * -ENODEV for an unknown architecture, -EPERM if the write did not stick
 * (e.g. an external debugger owns the debug logic).
 */
static int enable_monitor_mode(void)
{
	u32 dscr;
	ARM_DBG_READ(c1, 0, dscr);

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/* Write to the corresponding DSCR. */
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
	case ARM_DEBUG_ARCH_V7_1:
		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
		isb();
		break;
	default:
		return -ENODEV;
	}

	/* Check that the write made it through. */
	ARM_DBG_READ(c1, 0, dscr);
	if (WARN_ONCE(!(dscr & ARM_DSCR_MDBGEN),
		      "Failed to enable monitor mode on CPU %d.\n",
		      smp_processor_id()))
		return -EPERM;

out:
	return 0;
}

/*
 * Report the number of available slots for the given perf breakpoint
 * type (TYPE_INST -> BRPs, TYPE_DATA -> WRPs), or 0 if unsupported.
 */
int hw_breakpoint_slots(int type)
{
	if (!debug_arch_supported())
		return 0;

	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	/* Probe: write an 8-byte BAS mask and see if all bits stick. */
	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}

u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base;
	u32 addr, ctrl;

	addr = info->address;
	/* Bit 0 of the control register is the enable bit. */
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		ctrl_base = ARM_BASE_WCR;
		val_base = ARM_BASE_WVR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Claim the first free slot on this CPU. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
		return -EBUSY;

	/* Override the breakpoint data with the step data. */
	if (info->step_ctrl.enabled) {
		addr = info->trigger & ~0x3;
		ctrl = encode_ctrl_reg(info->step_ctrl);
		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
			/*
			 * Stepping a watchpoint: program the reserved
			 * mismatch BRP (the one held back by get_num_brps())
			 * instead of a WRP.
			 */
			i = 0;
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
		}
	}

	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, ctrl);
	return 0;
}

/*
 * Remove a previously-installed breakpoint/watchpoint: free its slot on
 * this CPU and clear the corresponding control register.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
		return;

	/* Ensure that we disable the mismatch breakpoint. */
	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
	    info->step_ctrl.enabled) {
		i = 0;
		base = ARM_BASE_BCR + core_num_brps;
	}

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}

/*
 * Convert an ARM_BREAKPOINT_LEN_* encoding to a length in bytes
 * (0 for an unrecognised encoding).
 */
static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	/*
	 * NOTE(review): this only requires the start and the last byte to
	 * be >= TASK_SIZE, which both && operands already imply when len
	 * is non-zero — presumably intentional, but a range straddling
	 * TASK_SIZE cannot occur here anyway given breakpoint lengths <= 8.
	 */
	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 *
 * Returns 0 on success, -EINVAL for an unrecognised type or length.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		/* 8-byte is only valid for watchpoints, and only when the
		 * hardware supports an 8-bit byte-address select. */
		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
		/* Else, fall through to reject. */
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;

	/* Privilege */
	info->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	/* Mismatch */
	info->ctrl.mismatch = 0;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Ensure that we are in monitor debug mode. */
	if (!monitor_mode_enabled())
		return -ENODEV;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = info->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
		/* Fall through */
	case 3:
		/* Allow single byte watchpoint. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
		/* Fall through */
	default:
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Align the address to the register and widen the byte-address
	 * select (len) by the offset so the right bytes are still matched.
	 */
	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

	if (!bp->overflow_handler) {
		/*
		 * Mismatch breakpoints are required for single-stepping
		 * breakpoints.
		 */
		if (!core_has_mismatch_brps())
			return -EINVAL;

		/* We don't allow mismatch breakpoints in kernel space. */
		if (arch_check_bp_in_kernelspace(bp))
			return -EPERM;

		/*
		 * Per-cpu breakpoints are not supported by our stepping
		 * mechanism.
		 */
		if (!bp->hw.bp_target)
			return -EINVAL;

		/*
		 * We only support specific access types if the fsr
		 * reports them.
		 */
		if (!debug_exception_updates_fsr() &&
		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
		     info->ctrl.type == ARM_BREAKPOINT_STORE))
			return -EINVAL;
	}

out:
	return ret;
}

/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/*
	 * Reinstall bp as a mismatch breakpoint on addr: it will fire on
	 * the first instruction executed at any *other* address, i.e.
	 * once we have stepped past the trigger.
	 */
	arch_uninstall_hw_breakpoint(bp);
	info->step_ctrl.mismatch = 1;
	info->step_ctrl.len = ARM_BREAKPOINT_LEN_4;
	info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE;
	info->step_ctrl.privilege = info->ctrl.privilege;
	info->step_ctrl.enabled = 1;
	info->trigger = addr;
	arch_install_hw_breakpoint(bp);
}

/* Restore the original breakpoint/watchpoint once the step is done. */
static void disable_single_step(struct perf_event *bp)
{
	arch_uninstall_hw_breakpoint(bp);
	counter_arch_bp(bp)->step_ctrl.enabled = 0;
	arch_install_hw_breakpoint(bp);
}

/*
 * Data abort entry point: match the faulting address against each
 * installed watchpoint, deliver perf events and set up single-stepping
 * where required. Each slot is inspected under rcu_read_lock().
 */
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
			       struct pt_regs *regs)
{
	int i, access;
	u32 val, ctrl_reg, alignment_mask;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/*
		 * The DFAR is an unknown value on debug architectures prior
		 * to 7.1. Since we only allow a single watchpoint on these
		 * older CPUs, we can set the trigger to the lowest possible
		 * faulting address.
		 */
		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
			BUG_ON(i > 0);
			info->trigger = wp->attr.bp_addr;
		} else {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;

			/* Check if the watchpoint value matches. */
			val = read_wb_reg(ARM_BASE_WVR + i);
			if (val != (addr & ~alignment_mask))
				goto unlock;

			/* Possible match, check the byte address select. */
			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
			decode_ctrl_reg(ctrl_reg, &ctrl);
			if (!((1 << (addr & alignment_mask)) & ctrl.len))
				goto unlock;

			/* Check that the access type matches. */
			if (debug_exception_updates_fsr()) {
				access = (fsr & ARM_FSR_ACCESS_MASK) ?
					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
				if (!(access & hw_breakpoint_type(wp)))
					goto unlock;
			}

			/* We have a winner. */
			info->trigger = addr;
		}

		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);

		/*
		 * If no overflow handler is present, insert a temporary
		 * mismatch breakpoint so we can single-step over the
		 * watchpoint trigger.
		 */
		if (!wp->overflow_handler)
			enable_single_step(wp, instruction_pointer(regs));

unlock:
		rcu_read_unlock();
	}
}

/*
 * Called from the breakpoint handler: restore any watchpoint whose
 * mismatch-step breakpoint has completed (pc has moved past the trigger).
 */
static void watchpoint_single_step_handler(unsigned long pc)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		if (!info->step_ctrl.enabled)
			goto unlock;

		/*
		 * Restore the original watchpoint if we've completed the
		 * single-step.
		 */
		if (info->trigger != pc)
			disable_single_step(wp);

unlock:
		rcu_read_unlock();
	}
}

/*
 * Prefetch abort entry point: match the faulting pc against each
 * installed breakpoint and deliver perf events.
 */
static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	u32 ctrl_reg, val, addr;
	struct perf_event *bp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);

	/* The exception entry code places the amended lr in the PC. */
	addr = regs->ARM_pc;

	/* Check the currently installed breakpoints first. */
	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		info = counter_arch_bp(bp);

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(ARM_BASE_BVR + i);
		if (val != (addr & ~0x3))
			goto mismatch;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if ((1 << (addr & 0x3)) & ctrl.len) {
			info->trigger = addr;
			pr_debug("breakpoint fired: address = 0x%x\n", addr);
			perf_bp_event(bp, regs);
			if (!bp->overflow_handler)
				enable_single_step(bp, addr);
			goto unlock;
		}

mismatch:
		/* If we're stepping a breakpoint, it can now be restored. */
		if (info->step_ctrl.enabled)
			disable_single_step(bp);
unlock:
		rcu_read_unlock();
	}

	/* Handle any pending watchpoint single-step breakpoints. */
	watchpoint_single_step_handler(addr);
}

/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
 *
 * Returns 0 if the exception was handled, 1 for an unhandled fault.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	int ret = 0;
	u32 dscr;

	preempt_disable();

	if (interrupts_enabled(regs))
		local_irq_enable();

	/* We only handle watchpoints and hardware breakpoints. */
	ARM_DBG_READ(c1, 0, dscr);

	/* Perform perf callbacks. */
	switch (ARM_DSCR_MOE(dscr)) {
	case ARM_ENTRY_BREAKPOINT:
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
		/* Fall through: handle it as a synchronous watchpoint. */
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, fsr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
	}

	preempt_enable();

	return ret;
}

/*
 * One-time initialisation.
 */
static cpumask_t debug_err_mask;

/*
 * Undef-instruction hook used while probing the debug registers: if a
 * cp14 access faults (e.g. DBGSWENABLE driven low), record the CPU in
 * debug_err_mask and skip the 4-byte faulting instruction.
 */
static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
{
	int cpu = smp_processor_id();

	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
		   instr, cpu);

	/* Set the error flag for this CPU and skip the faulting instruction. */
	cpumask_set_cpu(cpu, &debug_err_mask);
	instruction_pointer(regs) += 4;
	return 0;
}

static struct undef_hook debug_reg_hook = {
	.instr_mask	= 0x0fe80f10,
	.instr_val	= 0x0e000e10,
	.fn		= debug_reg_trap,
};

/*
 * Per-CPU debug logic reset: unlock the OS lock where implemented,
 * clear vector-catch events and all BRP/WRP pairs, then enable monitor
 * mode. Failures are recorded in debug_err_mask.
 */
static void reset_ctrl_regs(void *unused)
{
	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
	u32 val;

	/*
	 * v7 debug contains save and restore registers so that debug state
	 * can be maintained across low-power modes without leaving the debug
	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
	 * the debug registers out of reset, so we must unlock the OS Lock
	 * Access Register to avoid taking undefined instruction exceptions
	 * later on.
	 */
	switch (debug_arch) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		/* ARMv6 cores clear the registers out of reset. */
		goto out_mdbgen;
	case ARM_DEBUG_ARCH_V7_ECP14:
		/*
		 * Ensure sticky power-down is clear (i.e. debug logic is
		 * powered up).
		 */
		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (val));
		if ((val & 0x1) == 0)
			err = -EPERM;

		/*
		 * Check whether we implement OS save and restore.
		 */
		asm volatile("mrc p14, 0, %0, c1, c1, 4" : "=r" (val));
		if ((val & 0x9) == 0)
			goto clear_vcr;
		break;
	case ARM_DEBUG_ARCH_V7_1:
		/*
		 * Ensure the OS double lock is clear.
		 */
		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (val));
		if ((val & 0x1) == 1)
			err = -EPERM;
		break;
	}

	if (err) {
		pr_warning("CPU %d debug is powered down!\n", cpu);
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
		return;
	}

	/*
	 * Unconditionally clear the OS lock by writing a value
	 * other than 0xC5ACCE55 to the access register.
	 */
	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
	isb();

	/*
	 * Clear any configured vector-catch events before
	 * enabling monitor mode.
	 */
clear_vcr:
	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
	isb();

	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
		pr_warning("CPU %d failed to disable vector catch\n", cpu);
		return;
	}

	/*
	 * The control/value register pairs are UNKNOWN out of reset so
	 * clear them to avoid spurious debug events.
	 */
	raw_num_brps = get_num_brp_resources();
	for (i = 0; i < raw_num_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(ARM_BASE_WCR + i, 0UL);
		write_wb_reg(ARM_BASE_WVR + i, 0UL);
	}

	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
		pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
		return;
	}

	/*
	 * Have a crack at enabling monitor mode. We don't actually need
	 * it yet, but reporting an error early is useful if it fails.
	 */
out_mdbgen:
	if (enable_monitor_mode())
		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}

/* CPU hotplug callback: re-reset the debug registers on a new CPU. */
static int __cpuinit dbg_reset_notify(struct notifier_block *self,
				      unsigned long action, void *cpu)
{
	if (action == CPU_ONLINE)
		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};

static int __init arch_hw_breakpoint_init(void)
{
	debug_arch = get_debug_arch();

	if (!debug_arch_supported()) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	/*
	 * We need to tread carefully here because DBGSWENABLE may be
	 * driven low on this core and there isn't an architected way to
	 * determine that.
	 */
	register_undef_hook(&debug_reg_hook);

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	on_each_cpu(reset_ctrl_regs, NULL, 1);
	unregister_undef_hook(&debug_reg_hook);
	if (!cpumask_empty(&debug_err_mask)) {
		/* A CPU faulted or failed reset: disable the facility. */
		core_num_brps = 0;
		core_num_wrps = 0;
		return 0;
	}

	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
		"", core_num_wrps);

	/* Work out the maximum supported watchpoint length. */
	max_watchpoint_len = get_max_wp_len();
	pr_info("maximum watchpoint size is %u bytes.\n",
		max_watchpoint_len);

	/* Register debug fault handler. */
	hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "watchpoint debug exception");
	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			 TRAP_HWBKPT, "breakpoint debug exception");

	/* Register hotplug notifier. */
	register_cpu_notifier(&dbg_reset_nb);
	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

/* Nothing to do: breakpoint counters are read via the exception path. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}