perf_event.h revision e87eaf040ab639e94ed0a58ff0eac68d1d38fb0a
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
	PERF_COUNT_SW_DUMMY			= 9,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};
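
/*
 * Example (illustrative, not part of the ABI): the enums above combine
 * into attr.type/attr.config. For PERF_TYPE_HARDWARE and
 * PERF_TYPE_SOFTWARE, attr.config is the enum value itself; for
 * PERF_TYPE_HW_CACHE it is the (id | op << 8 | result << 16)
 * composition, e.g. L1-D read misses:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HW_CACHE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CACHE_L1D |
 *			  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *			  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
 *	};
 */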

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,
	PERF_SAMPLE_WEIGHT			= 1U << 14,
	PERF_SAMPLE_DATA_SRC			= 1U << 15,
	PERF_SAMPLE_IDENTIFIER			= 1U << 16,
	PERF_SAMPLE_TRANSACTION			= 1U << 17,

	PERF_SAMPLE_MAX = 1U << 18,		/* non-ABI */
};

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << 7, /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX	= 1U << 8, /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX	= 1U << 9, /* not in transaction */

	PERF_SAMPLE_BRANCH_MAX		= 1U << 10, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};
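
/*
 * Example (illustrative, not part of the ABI): requesting a branch stack
 * restricted to user-space call branches; the priv level bits given here
 * override the event's own priv level:
 *
 *	attr.sample_type	|= PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type	 = PERF_SAMPLE_BRANCH_USER |
 *				   PERF_SAMPLE_BRANCH_ANY_CALL;
 */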

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION	= (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION	= (1 << 1), /* From transaction */
	PERF_TXN_SYNC		= (1 << 2), /* Instruction is related */
	PERF_TXN_ASYNC		= (1 << 3), /* Instruction not related */
	PERF_TXN_RETRY		= (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT	= (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE	= (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ	= (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX		= (1 << 8), /* non-ABI */

	/* bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK	= (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT	= 32,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,
	PERF_FORMAT_GROUP		= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,	/* non-ABI */
};

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */
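
/*
 * Example (illustrative, not part of the ABI): with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING a
 * non-group counter reads back three u64 values, enough to scale away
 * counter multiplexing (ignoring u64 overflow in the multiply), where
 * fd is a perf event fd:
 *
 *	struct { __u64 value, time_enabled, time_running; } rf;
 *
 *	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *		scaled = rf.value * rf.time_enabled / rf.time_running;
 */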

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled	: 1, /* off by default */
				inherit		: 1, /* children inherit it */
				pinned		: 1, /* must always be on PMU */
				exclusive	: 1, /* only group on PMU */
				exclude_user	: 1, /* don't count user */
				exclude_kernel	: 1, /* ditto kernel */
				exclude_hv	: 1, /* ditto hypervisor */
				exclude_idle	: 1, /* don't count when idle */
				mmap		: 1, /* include mmap data */
				comm		: 1, /* include comm data */
				freq		: 1, /* use freq, not period */
				inherit_stat	: 1, /* per task counts */
				enable_on_exec	: 1, /* next exec enables */
				task		: 1, /* trace fork/exit */
				watermark	: 1, /* wakeup_watermark */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip	: 2, /* skid constraint */
				mmap_data	: 1, /* non-exec mmap data */
				sample_id_all	: 1, /* sample_type all events */

				exclude_host	: 1, /* don't count in host */
				exclude_guest	: 1, /* don't count in guest */

				exclude_callchain_kernel : 1, /* exclude kernel callchains */
				exclude_callchain_user   : 1, /* exclude user callchains */
				mmap2		: 1, /* include mmap with inode data */

				__reserved_1	: 40;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	/* Align to u64. */
	__u32	__reserved_2;
};

#define perf_flags(attr)	(*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
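
/*
 * Example (illustrative, not part of the ABI): the usual
 * reset/enable/disable sequence on a group leader fd;
 * PERF_IOC_FLAG_GROUP extends each operation to all group members:
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	... run the workload under measurement ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */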

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, idx, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier();
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_user_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     idx = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_user_rdpmc && idx) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(idx - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */
	union {
		__u64	capabilities;
		struct {
			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
				cap_user_time		: 1, /* The time_* fields are used */
				cap_user_time_zero	: 1, /* The time_zero field is used */
				cap_____res		: 59;
		};
	};

	/*
	 * If cap_user_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_user_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot = (cyc >> time_shift);
	 *   rem = cyc & ((1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *           ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if idx), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (idx)
	 *     running += delta;
	 *
	 *   quot = count / running;
	 *   rem  = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;
	/*
	 * If cap_user_time_zero, the hardware clock (e.g. TSC) can be
	 * calculated from sample timestamps.
	 *
	 *   time = timestamp - time_zero;
	 *   quot = time / time_mult;
	 *   rem  = time % time_mult;
	 *   cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
	 *
	 * And vice versa:
	 *
	 *   quot = cyc >> time_shift;
	 *   rem  = cyc & ((1 << time_shift) - 1);
	 *   timestamp = time_zero + quot * time_mult +
	 *               ((rem * time_mult) >> time_shift);
	 */
	__u64	time_zero;
	__u32	size;			/* Header size up to __reserved[] fields. */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u8	__reserved[118*8+4];	/* align to 1k. */
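
	/*
	 * Example (illustrative, not part of the ABI): an x86-only
	 * rendition of the self-monitoring count read described above,
	 * assuming GCC-style inline asm for RDPMC and omitting the
	 * seqlock retry loop for brevity:
	 *
	 *   static inline __u64 rdpmc(__u32 counter)
	 *   {
	 *     __u32 lo, hi;
	 *
	 *     asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	 *     return (__u64)hi << 32 | lo;
	 *   }
	 *
	 *   __u64 count = pc->offset;
	 *   if (pc->cap_user_rdpmc && pc->index) {
	 *     __s64 pmc = rdpmc(pc->index - 1);
	 *     pmc <<= 64 - pc->pmc_width;   // sign extend to 64 bit
	 *     pmc >>= 64 - pc->pmc_width;
	 *     count += pmc;
	 *   }
	 */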

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an smp_rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data, after issuing
	 * an smp_mb() to separate the data read from the ->data_tail store.
	 * In this case the kernel will not over-write unread data.
	 *
	 * See perf_output_put_handle() for the data ordering.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};
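
/*
 * Example (illustrative, not part of the ABI): a sketch of draining the
 * mmap()ed data area. Here "pc" is the mapped perf_event_mmap_page,
 * "base" points at the data pages following it, "data_size" is their
 * power-of-two size, and consume() is a hypothetical record handler;
 * __sync_synchronize() stands in for the smp_rmb()/smp_mb() required by
 * the data_head/data_tail comment above. A real consumer must also
 * handle records that wrap at the end of the buffer:
 *
 *	__u64 head = pc->data_head;
 *	__sync_synchronize();		// rmb: data_head before records
 *
 *	while (pc->data_tail != head) {
 *		struct perf_event_header *hdr = (struct perf_event_header *)
 *			(base + (pc->data_tail & (data_size - 1)));
 *
 *		consume(hdr);
 *
 *		__sync_synchronize();	// mb: finish reads before tail store
 *		pc->data_tail += hdr->size;
 *	}
 */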

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * carry the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below. They are stashed
	 * just after the perf_event_header and the fields already present for
	 * the record type, i.e. at the end of the payload. That way a newer
	 * perf.data file will be supported by older perf tools, with these new
	 * optional fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			id;       } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_READ			= 8,
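
	/*
	 * Example (illustrative, not part of the ABI): the fixed position
	 * of PERF_SAMPLE_IDENTIFIER noted above means the event id can be
	 * recovered from any record without knowing its layout; for the
	 * non-SAMPLE records it is the last u64 of the sample_id trailer:
	 *
	 *	__u64 id = *(__u64 *)((char *)hdr + hdr->size - sizeof(__u64));
	 *
	 * (for PERF_RECORD_SAMPLE it is instead the first u64 after the
	 * header, see below)
	 */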

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	#
	 *
	 *	{ u64			id;	  } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 *
	 *	{ u64			nr;
	 *	  { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64			abi; # enum perf_sample_regs_abi
	 *	  u64			regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64			size;
	 *	  char			data[size];
	 *	  u64			dyn_size; } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ u64			weight;   } && PERF_SAMPLE_WEIGHT
	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	/*
	 * The MMAP2 records are an augmented version of MMAP; they add
	 * maj, min, ino numbers to be used to uniquely identify each mapping.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	u32				maj;
	 *	u32				min;
	 *	u64				ino;
	 *	u64				ino_generation;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2			= 10,

	PERF_RECORD_MAX,			/* non-ABI */
};

#define PERF_MAX_STACK_DEPTH		127

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
#define PERF_FLAG_FD_OUTPUT		(1U << 1)
#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC		(1U << 3) /* O_CLOEXEC */

union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_op:5,	/* type of opcode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_snoop:5,	/* snoop mode */
			mem_lock:2,	/* lock instr */
			mem_dtlb:7,	/* tlb access */
			mem_rsvd:31;
	};
};

/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA		0x01   /* not available */
#define PERF_MEM_LVL_HIT	0x02   /* hit level */
#define PERF_MEM_LVL_MISS	0x04   /* miss level */
#define PERF_MEM_LVL_L1		0x08   /* L1 */
#define PERF_MEM_LVL_LFB	0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20   /* L2 */
#define PERF_MEM_LVL_L3		0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

/* locked instruction */
#define PERF_MEM_LOCK_NA	0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x01 /* not available */
#define PERF_MEM_TLB_HIT	0x02 /* hit level */
#define PERF_MEM_TLB_MISS	0x04 /* miss level */
#define PERF_MEM_TLB_L1		0x08 /* L1 */
#define PERF_MEM_TLB_L2		0x10 /* L2 */
#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
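
/*
 * Example (illustrative, not part of the ABI): testing a
 * PERF_SAMPLE_DATA_SRC value for an L1 load hit with the PERF_MEM_S()
 * helper above, where data_src is the u64 taken from the sample:
 *
 *	union perf_mem_data_src ds = { .val = data_src };
 *
 *	if ((ds.val & PERF_MEM_S(OP, LOAD)) &&
 *	    (ds.val & PERF_MEM_S(LVL, HIT)) &&
 *	    (ds.val & PERF_MEM_S(LVL, L1)))
 *		l1_load_hits++;		// hypothetical counter
 */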

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		in_tx:1,    /* in transaction */
		abort:1,    /* transaction abort */
		reserved:60;
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */