#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"
enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};

static enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};
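/*
 * Example (illustrative, not used by the code): a CPU reporting
 * LBR_FORMAT_EIP_FLAGS2 makes lbr_desc[lbr_format] evaluate to
 * LBR_EIP_FLAGS | LBR_TSX, so intel_pmu_lbr_read_64() below strips
 * both the mispredict flag and the two TSX flags off the FROM value.
 */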
/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */

/*
 * Following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the constraint perf code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */
#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_ANY		 \
	(LBR_JCC	|\
	 LBR_REL_CALL	|\
	 LBR_IND_CALL	|\
	 LBR_RETURN	|\
	 LBR_REL_JMP	|\
	 LBR_IND_JMP	|\
	 LBR_FAR)
#define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT	BIT_ULL(61)

#define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))
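/*
 * Illustrative MSR_LAST_BRANCH_FROM_x layout for the TSX-flagged
 * formats (sketch, derived from the flag definitions above):
 *
 *   bit  63    - mispredict flag
 *   bit  62    - in-transaction flag
 *   bit  61    - transaction-abort flag
 *   bits 60:59 - top of the sign-extended FROM address
 *                (LBR_FROM_SIGNEXT_2MSB)
 */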
/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
};
#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY       \
	(X86_BR_CALL    |\
	 X86_BR_RET     |\
	 X86_BR_SYSCALL |\
	 X86_BR_SYSRET  |\
	 X86_BR_INT     |\
	 X86_BR_IRET    |\
	 X86_BR_JCC     |\
	 X86_BR_JMP	 |\
	 X86_BR_IRQ	 |\
	 X86_BR_ABORT	 |\
	 X86_BR_IND_CALL |\
	 X86_BR_IND_JMP  |\
	 X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)
static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
 * otherwise it becomes near impossible to get a reliable stack.
 */
static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, lbr_select = 0, orig_debugctl;
	/*
	 * No need to unfreeze manually, as v4 can do that as part
	 * of the GLOBAL_STATUS ack.
	 */
	if (pmi && x86_pmu.version >= 4)
		return;

	/*
	 * No need to reprogram LBR_SELECT in a PMI, as it
	 * did not change.
	 */
	if (cpuc->lbr_sel)
		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
	if (!pmi && cpuc->lbr_sel)
		wrmsrl(MSR_LBR_SELECT, lbr_select);

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= DEBUGCTLMSR_LBR;
	/*
	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
	 * may cause superfluous increase/decrease of LBR_TOS.
	 */
	if (!(lbr_select & LBR_CALL_STACK))
		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
	if (orig_debugctl != debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}
static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, 0);
	}
}
void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}
/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);
	return tos;
}
/*
 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 * TSX is not supported they have no consistent behavior:
 *
 * - For wrmsr(), bits 61:62 are considered part of the sign extension.
 * - For HW updates (branch captures) bits 61:62 are always OFF and are not
 *   part of the sign extension.
 *
 * Therefore, if:
 *
 *   1) LBR has TSX format
 *   2) CPU has no TSX support enabled
 *
 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
 * value from rdmsr() must be converted to have a 61 bits sign extension,
 * ignoring the TSX flags.
 */
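/*
 * Worked example with an illustrative kernel address: the canonical
 * value 0xffff880102030405 written via wrmsr() reads back unchanged,
 * so bits 62:61 are set although no transaction was involved;
 * lbr_from_signext_quirk_rd() masks them, giving 0x9fff880102030405
 * with a consistent 61-bit sign extension. Conversely,
 * lbr_from_signext_quirk_wr() copies sign bits 60:59 (both set here)
 * into 62:61, reconstructing 0xffff880102030405 before the write.
 */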
static inline bool lbr_from_signext_quirk_needed(void)
{
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
			   boot_cpu_has(X86_FEATURE_RTM);

	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
}

DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Sign extend into bits 61:62 while preserving bit 63.
		 *
		 * Quirk is enabled when TSX is disabled. Therefore TSX bits
		 * in val are always OFF and must be changed to be sign
		 * extension bits. Since bits 59:60 are guaranteed to be
		 * part of the sign extension bits, we can just copy them
		 * to bits 61:62, respectively.
		 */
		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
	}
	return val;
}
/*
 * If quirk is needed, ensure sign extension is 61 bits:
 */
u64 lbr_from_signext_quirk_rd(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Quirk is on when TSX is not enabled. Therefore TSX
		 * flags must be read as OFF.
		 */
		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
	}
	return val;
}
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = task_ctx->tos;
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		wrmsrl(x86_pmu.lbr_from + lbr_idx,
		       lbr_from_signext_quirk_wr(task_ctx->lbr_from[i]));
		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
}
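/*
 * Indexing example (illustrative numbers): with x86_pmu.lbr_nr = 16,
 * mask = 15. For tos = 2 the loop above touches ring indices
 * (2 - 0) & 15 = 2 and (2 - 1) & 15 = 1, so saved entry 0 (the most
 * recent branch) lands back at the hardware top-of-stack.
 */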
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos, val;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		rdmsrl(x86_pmu.lbr_from + lbr_idx, val);
		task_ctx->lbr_from[i] = lbr_from_signext_quirk_rd(val);
		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
}
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	/*
	 * If LBR callstack feature is enabled and the stack was saved when
	 * the task was scheduled out, restore the stack. Otherwise flush
	 * the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in) {
			__intel_pmu_lbr_restore(task_ctx);
			cpuc->lbr_context = ctx;
		} else {
			__intel_pmu_lbr_save(task_ctx);
		}
		return;
	}

	/*
	 * When sampling the branch stack in system-wide mode, it may be
	 * necessary to flush the stack on context switch. This happens
	 * when the branch stack does not tag its entries with the pid
	 * of the current task. Otherwise it becomes impossible to
	 * associate a branch entry with a task. This ambiguity is more
	 * likely to appear when the branch stack supports priv level
	 * filtering and the user sets it to monitor only at the user
	 * level (which could be a useful measurement in system-wide
	 * mode). In that case, the risk is high of having a branch
	 * stack with branches from multiple tasks.
	 */
	if (sched_in) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = ctx;
	}
}
static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}
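/*
 * Usage sketch (the exact tool invocation is an assumption): a
 * `perf record --call-graph lbr` session requests user-space LBR
 * call-stack mode, so br_sel carries both X86_BR_USER and
 * X86_BR_CALL_STACK and the per-task save/restore path is armed.
 */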
void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}
	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
					event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users++;
	}

	cpuc->lbr_users++;
	perf_sched_cb_inc(event->ctx->pmu);
}
void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
					event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users--;
	}

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);

	if (cpuc->enabled && !cpuc->lbr_users) {
		__intel_pmu_lbr_disable();
		/* avoid stale pointer */
		cpuc->lbr_context = NULL;
	}
}
void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable(pmi);
}
void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64 lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}
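/*
 * Layout sketch for the 32-bit format (illustrative value): one
 * 64-bit MSR packs both addresses, so msr_lastbranch.lbr =
 * 0x0804900008048000 splits into from = 0x08048000 (low half) and
 * to = 0x08049000 (high half) via the anonymous union above.
 */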
/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	bool need_info = false;
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;
	int num = x86_pmu.lbr_nr;

	if (cpuc->lbr_sel) {
		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
			num = tos;
	}

	for (i = 0; i < num; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		from = lbr_from_signext_quirk_rd(from);

		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}

		if (lbr_format == LBR_FORMAT_TIME) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
			cycles = ((to >> 48) & LBR_INFO_CYCLES);

			to = (u64)((((s64)to) << 16) >> 16);
		}

		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
		from = (u64)((((s64)from) << skip) >> skip);
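		/*
		 * Worked example (illustrative): with skip = 3, a raw
		 * value of 0x9000000000001000 becomes
		 * 0xf000000000001000; the arithmetic shift pair
		 * replicates bit 60 into the three flag bits consumed
		 * above, restoring a canonical sign-extended address.
		 */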
		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
}
void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}
/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

	/*
	 * stash actual user request into reg, it may
	 * be used by fixup code for some CPU
	 */
	event->hw.branch_reg.reg = mask;
	return 0;
}
/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}

	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 * But the 10th bit LBR_CALL_STACK does not operate
	 * in suppress mode.
	 */
	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
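	/*
	 * Worked example (illustrative): sampling user-space
	 * conditionals collects mask = LBR_USER | LBR_JCC = 0x006;
	 * XOR with (0x3ff & ~LBR_CALL_STACK) = 0x1ff yields
	 * reg->config = 0x1f9, i.e. ring0 and every branch class
	 * except Jcc suppressed.
	 */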
	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
		reg->config |= LBR_NO_INFO;

	return 0;
}
int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}
/*
 * return the type of control flow change at address "from"
 * instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, bytes_read, is64);
	insn_get_opcode(&insn);
	if (!insn.opcode.got)
		return X86_BR_ABORT;
	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
		insn_get_immediate(&insn);
		if (insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
		/* non-zero displacement: falls through to a regular call */
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
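		/*
		 * ModRM example (illustrative): for ff d0 (call *%rax),
		 * modrm = 0xd0 = 0b11010000, so the reg field
		 * (bits 5:3) decodes to ext = 2: a near indirect call.
		 */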
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transition) may
	 * occur on any instructions. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}
/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {
		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}
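/*
 * Compression example (illustrative): for entries [A, 0, 0, B] with
 * nr = 4, the loop above shifts B left twice and trims nr to 2,
 * leaving [A, B]; re-checking entry i without advancing handles two
 * adjacent discarded records.
 */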
/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
};
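/*
 * Note (sketch): because the NHM/WSM entries above over-capture -
 * extra jump classes pulled in by the errata - the SW filter in
 * intel_pmu_lbr_filter() trims the stream back to what the user
 * actually requested.
 */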
static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};
static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};
/* core */
void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("4-deep LBR, ");
}
/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
	pr_cont("16-deep LBR, ");
}
/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
	pr_cont("16-deep LBR, ");
}
/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	if (lbr_from_signext_quirk_needed())
		static_branch_enable(&lbr_from_quirk_key);

	pr_cont("16-deep LBR, ");
}
/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
	x86_pmu.lbr_nr	 = 32;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
	pr_cont("32-deep LBR, ");
}
/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_mask < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}
/* slm */
void __init intel_pmu_lbr_init_slm(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}
/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
}