LCOV - code coverage report
Current view: top level - arch/x86/include/asm - perf_event.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit  Total  Coverage
Lines:             0      4     0.0 %
Functions:         0      0        -

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _ASM_X86_PERF_EVENT_H
       3             : #define _ASM_X86_PERF_EVENT_H
       4             : 
       5             : /*
       6             :  * Performance event hw details:
       7             :  */
       8             : 
       9             : #define INTEL_PMC_MAX_GENERIC                                  32
      10             : #define INTEL_PMC_MAX_FIXED                                     4
      11             : #define INTEL_PMC_IDX_FIXED                                    32
      12             : 
      13             : #define X86_PMC_IDX_MAX                                        64
      14             : 
      15             : #define MSR_ARCH_PERFMON_PERFCTR0                             0xc1
      16             : #define MSR_ARCH_PERFMON_PERFCTR1                             0xc2
      17             : 
      18             : #define MSR_ARCH_PERFMON_EVENTSEL0                           0x186
      19             : #define MSR_ARCH_PERFMON_EVENTSEL1                           0x187
      20             : 
      21             : #define ARCH_PERFMON_EVENTSEL_EVENT                     0x000000FFULL
      22             : #define ARCH_PERFMON_EVENTSEL_UMASK                     0x0000FF00ULL
      23             : #define ARCH_PERFMON_EVENTSEL_USR                       (1ULL << 16)
      24             : #define ARCH_PERFMON_EVENTSEL_OS                        (1ULL << 17)
      25             : #define ARCH_PERFMON_EVENTSEL_EDGE                      (1ULL << 18)
      26             : #define ARCH_PERFMON_EVENTSEL_PIN_CONTROL               (1ULL << 19)
      27             : #define ARCH_PERFMON_EVENTSEL_INT                       (1ULL << 20)
      28             : #define ARCH_PERFMON_EVENTSEL_ANY                       (1ULL << 21)
      29             : #define ARCH_PERFMON_EVENTSEL_ENABLE                    (1ULL << 22)
      30             : #define ARCH_PERFMON_EVENTSEL_INV                       (1ULL << 23)
      31             : #define ARCH_PERFMON_EVENTSEL_CMASK                     0xFF000000ULL
      32             : 
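/*
 * [Editor's illustration -- not part of the original header] A minimal
 * sketch of how the fields above compose: an EVENTSEL value counting the
 * architectural "unhalted core cycles" event (event code 0x3c, umask 0)
 * in both user and kernel mode, with the counter enabled.  The macro name
 * is hypothetical.
 */
#define EXAMPLE_CYCLES_EVENTSEL						\
	(0x3cULL			/* ARCH_PERFMON_EVENTSEL_EVENT */ | \
	 ARCH_PERFMON_EVENTSEL_USR	/* count at CPL > 0 */		| \
	 ARCH_PERFMON_EVENTSEL_OS	/* count at CPL == 0 */		| \
	 ARCH_PERFMON_EVENTSEL_ENABLE	/* turn the counter on */)
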
      33             : #define HSW_IN_TX                                       (1ULL << 32)
      34             : #define HSW_IN_TX_CHECKPOINTED                          (1ULL << 33)
      35             : #define ICL_EVENTSEL_ADAPTIVE                           (1ULL << 34)
      36             : #define ICL_FIXED_0_ADAPTIVE                            (1ULL << 32)
      37             : 
      38             : #define AMD64_EVENTSEL_INT_CORE_ENABLE                  (1ULL << 36)
      39             : #define AMD64_EVENTSEL_GUESTONLY                        (1ULL << 40)
      40             : #define AMD64_EVENTSEL_HOSTONLY                         (1ULL << 41)
      41             : 
      42             : #define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT               37
      43             : #define AMD64_EVENTSEL_INT_CORE_SEL_MASK                \
      44             :         (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)
      45             : 
      46             : #define AMD64_EVENTSEL_EVENT    \
      47             :         (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
      48             : #define INTEL_ARCH_EVENT_MASK   \
      49             :         (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
      50             : 
      51             : #define AMD64_L3_SLICE_SHIFT                            48
      52             : #define AMD64_L3_SLICE_MASK                             \
      53             :         (0xFULL << AMD64_L3_SLICE_SHIFT)
      54             : #define AMD64_L3_SLICEID_MASK                           \
      55             :         (0x7ULL << AMD64_L3_SLICE_SHIFT)
      56             : 
      57             : #define AMD64_L3_THREAD_SHIFT                           56
      58             : #define AMD64_L3_THREAD_MASK                            \
      59             :         (0xFFULL << AMD64_L3_THREAD_SHIFT)
      60             : #define AMD64_L3_F19H_THREAD_MASK                       \
      61             :         (0x3ULL << AMD64_L3_THREAD_SHIFT)
      62             : 
      63             : #define AMD64_L3_EN_ALL_CORES                           BIT_ULL(47)
      64             : #define AMD64_L3_EN_ALL_SLICES                          BIT_ULL(46)
      65             : 
      66             : #define AMD64_L3_COREID_SHIFT                           42
      67             : #define AMD64_L3_COREID_MASK                            \
      68             :         (0x7ULL << AMD64_L3_COREID_SHIFT)
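
/*
 * [Editor's sketch, mirroring how the kernel's AMD uncore driver widens
 * an L3 PMC event] Instead of selecting one slice/core through the ID
 * fields above, Zen 2 and later can set the EN_ALL_* bits to count across
 * every core and slice at once.  The macro name is hypothetical.
 */
#define EXAMPLE_AMD64_L3_ALL	(AMD64_L3_EN_ALL_CORES | AMD64_L3_EN_ALL_SLICES)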
      69             : 
      70             : #define X86_RAW_EVENT_MASK              \
      71             :         (ARCH_PERFMON_EVENTSEL_EVENT |  \
      72             :          ARCH_PERFMON_EVENTSEL_UMASK |  \
      73             :          ARCH_PERFMON_EVENTSEL_EDGE  |  \
      74             :          ARCH_PERFMON_EVENTSEL_INV   |  \
      75             :          ARCH_PERFMON_EVENTSEL_CMASK)
      76             : #define X86_ALL_EVENT_FLAGS                     \
      77             :         (ARCH_PERFMON_EVENTSEL_EDGE |           \
      78             :          ARCH_PERFMON_EVENTSEL_INV |            \
      79             :          ARCH_PERFMON_EVENTSEL_CMASK |          \
      80             :          ARCH_PERFMON_EVENTSEL_ANY |            \
      81             :          ARCH_PERFMON_EVENTSEL_PIN_CONTROL |    \
      82             :          HSW_IN_TX |                            \
      83             :          HSW_IN_TX_CHECKPOINTED)
      84             : #define AMD64_RAW_EVENT_MASK            \
      85             :         (X86_RAW_EVENT_MASK          |  \
      86             :          AMD64_EVENTSEL_EVENT)
      87             : #define AMD64_RAW_EVENT_MASK_NB         \
      88             :         (AMD64_EVENTSEL_EVENT        |  \
      89             :          ARCH_PERFMON_EVENTSEL_UMASK)
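
/*
 * [Editor's sketch] These masks act as filters for user-supplied raw
 * configs: AND-ing with the mask keeps only the architecturally defined
 * bits, which is how the x86 PMU code validates PERF_TYPE_RAW events.
 * The helper name is hypothetical.
 */
static inline u64 example_sanitize_raw_config(u64 config, bool amd)
{
	return config & (amd ? AMD64_RAW_EVENT_MASK : X86_RAW_EVENT_MASK);
}
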
      90             : #define AMD64_NUM_COUNTERS                              4
      91             : #define AMD64_NUM_COUNTERS_CORE                         6
      92             : #define AMD64_NUM_COUNTERS_NB                           4
      93             : 
      94             : #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL           0x3c
      95             : #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK         (0x00 << 8)
      96             : #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX         0
      97             : #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
      98             :                 (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
      99             : 
     100             : #define ARCH_PERFMON_BRANCH_MISSES_RETIRED              6
     101             : #define ARCH_PERFMON_EVENTS_COUNT                       7
     102             : 
     103             : #define PEBS_DATACFG_MEMINFO    BIT_ULL(0)
      104             : #define PEBS_DATACFG_GP         BIT_ULL(1)
     105             : #define PEBS_DATACFG_XMMS       BIT_ULL(2)
     106             : #define PEBS_DATACFG_LBRS       BIT_ULL(3)
     107             : #define PEBS_DATACFG_LBR_SHIFT  24
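
/*
 * [Editor's illustration] An adaptive PEBS data configuration requesting
 * the memory-info and GPR groups plus 8 LBR entries; the kernel's PEBS
 * setup writes the entry count minus one into the field at
 * PEBS_DATACFG_LBR_SHIFT.  The macro name is hypothetical.
 */
#define EXAMPLE_PEBS_DATACFG	(PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP | \
				 PEBS_DATACFG_LBRS | \
				 ((8ULL - 1) << PEBS_DATACFG_LBR_SHIFT))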
     108             : 
     109             : /*
     110             :  * Intel "Architectural Performance Monitoring" CPUID
     111             :  * detection/enumeration details:
     112             :  */
     113             : union cpuid10_eax {
     114             :         struct {
     115             :                 unsigned int version_id:8;
     116             :                 unsigned int num_counters:8;
     117             :                 unsigned int bit_width:8;
     118             :                 unsigned int mask_length:8;
     119             :         } split;
     120             :         unsigned int full;
     121             : };
     122             : 
     123             : union cpuid10_ebx {
     124             :         struct {
     125             :                 unsigned int no_unhalted_core_cycles:1;
     126             :                 unsigned int no_instructions_retired:1;
     127             :                 unsigned int no_unhalted_reference_cycles:1;
     128             :                 unsigned int no_llc_reference:1;
     129             :                 unsigned int no_llc_misses:1;
     130             :                 unsigned int no_branch_instruction_retired:1;
     131             :                 unsigned int no_branch_misses_retired:1;
     132             :         } split;
     133             :         unsigned int full;
     134             : };
     135             : 
     136             : union cpuid10_edx {
     137             :         struct {
     138             :                 unsigned int num_counters_fixed:5;
     139             :                 unsigned int bit_width_fixed:8;
     140             :                 unsigned int reserved1:2;
     141             :                 unsigned int anythread_deprecated:1;
     142             :                 unsigned int reserved2:16;
     143             :         } split;
     144             :         unsigned int full;
     145             : };
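
/*
 * [Editor's sketch] How these unions are typically filled in, assuming
 * the cpuid() helper from <asm/processor.h>; the function name is
 * hypothetical.
 */
static inline void example_enumerate_arch_perfmon(void)
{
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	union cpuid10_edx edx;
	unsigned int ecx_unused;

	cpuid(10, &eax.full, &ebx.full, &ecx_unused, &edx.full);
	/*
	 * eax.split.version_id:         architectural perfmon version
	 * eax.split.num_counters:       general-purpose counters per logical CPU
	 * edx.split.num_counters_fixed: fixed-function counters
	 */
}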
     146             : 
     147             : /*
     148             :  * Intel Architectural LBR CPUID detection/enumeration details:
     149             :  */
     150             : union cpuid28_eax {
     151             :         struct {
     152             :                 /* Supported LBR depth values */
     153             :                 unsigned int    lbr_depth_mask:8;
     154             :                 unsigned int    reserved:22;
     155             :                 /* Deep C-state Reset */
     156             :                 unsigned int    lbr_deep_c_reset:1;
     157             :                 /* IP values contain LIP */
     158             :                 unsigned int    lbr_lip:1;
     159             :         } split;
     160             :         unsigned int            full;
     161             : };
     162             : 
     163             : union cpuid28_ebx {
     164             :         struct {
     165             :                 /* CPL Filtering Supported */
     166             :                 unsigned int    lbr_cpl:1;
     167             :                 /* Branch Filtering Supported */
     168             :                 unsigned int    lbr_filter:1;
     169             :                 /* Call-stack Mode Supported */
     170             :                 unsigned int    lbr_call_stack:1;
     171             :         } split;
     172             :         unsigned int            full;
     173             : };
     174             : 
     175             : union cpuid28_ecx {
     176             :         struct {
     177             :                 /* Mispredict Bit Supported */
     178             :                 unsigned int    lbr_mispred:1;
     179             :                 /* Timed LBRs Supported */
     180             :                 unsigned int    lbr_timed_lbr:1;
     181             :                 /* Branch Type Field Supported */
     182             :                 unsigned int    lbr_br_type:1;
     183             :         } split;
     184             :         unsigned int            full;
     185             : };
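
/*
 * [Editor's sketch] Architectural LBR enumeration reads CPUID leaf 0x1c
 * into these unions, e.g. to check for call-stack mode support; assumes
 * the cpuid() helper from <asm/processor.h>, and the function name is
 * hypothetical.
 */
static inline bool example_arch_lbr_has_callstack(void)
{
	union cpuid28_eax eax;
	union cpuid28_ebx ebx;
	unsigned int ecx, edx;

	cpuid(28, &eax.full, &ebx.full, &ecx, &edx);
	return eax.split.lbr_depth_mask && ebx.split.lbr_call_stack;
}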
     186             : 
     187             : struct x86_pmu_capability {
     188             :         int             version;
     189             :         int             num_counters_gp;
     190             :         int             num_counters_fixed;
     191             :         int             bit_width_gp;
     192             :         int             bit_width_fixed;
     193             :         unsigned int    events_mask;
     194             :         int             events_mask_len;
     195             : };
     196             : 
     197             : /*
     198             :  * Fixed-purpose performance events:
     199             :  */
     200             : 
     201             : /* RDPMC offset for Fixed PMCs */
     202             : #define INTEL_PMC_FIXED_RDPMC_BASE              (1 << 30)
     203             : #define INTEL_PMC_FIXED_RDPMC_METRICS           (1 << 29)
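
/*
 * [Editor's sketch] RDPMC selects a fixed counter by setting bit 30 of
 * ECX, so fixed counter N is read as (INTEL_PMC_FIXED_RDPMC_BASE | N);
 * assumes the rdpmc() helper from <asm/msr.h>.  The function name is
 * hypothetical.
 */
static inline u64 example_read_fixed_counter(unsigned int n)
{
	u32 lo, hi;

	rdpmc(INTEL_PMC_FIXED_RDPMC_BASE | n, lo, hi);
	return lo | ((u64)hi << 32);
}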
     204             : 
     205             : /*
     206             :  * All the fixed-mode PMCs are configured via this single MSR:
     207             :  */
     208             : #define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
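
/*
 * [Editor's note] Each fixed counter owns a 4-bit field in this MSR:
 * bit 0 enables kernel-mode counting, bit 1 user-mode, bit 2 any-thread
 * (where supported) and bit 3 requests a PMI on overflow.  A sketch of
 * enabling fixed counter N for user+kernel counting:
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	ctrl |= 0x3ULL << (4 * N);
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 */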
     209             : 
     210             : /*
     211             :  * There is no event-code assigned to the fixed-mode PMCs.
     212             :  *
      213             :  * For a fixed-mode PMC that has an equivalent event on a general-purpose
      214             :  * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
      215             :  * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
      216             :  *
      217             :  * For a fixed-mode PMC that doesn't have an equivalent event, a
      218             :  * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
      219             :  * The pseudo event-code for a fixed-mode PMC must be 0x00.
      220             :  * The pseudo umask-code is 0xX, where X equals the index of the fixed
      221             :  * counter + 1; e.g., fixed counter 2 has the pseudo-encoding 0x0300.
     222             :  *
     223             :  * The counts are available in separate MSRs:
     224             :  */
     225             : 
     226             : /* Instr_Retired.Any: */
     227             : #define MSR_ARCH_PERFMON_FIXED_CTR0     0x309
     228             : #define INTEL_PMC_IDX_FIXED_INSTRUCTIONS        (INTEL_PMC_IDX_FIXED + 0)
     229             : 
     230             : /* CPU_CLK_Unhalted.Core: */
     231             : #define MSR_ARCH_PERFMON_FIXED_CTR1     0x30a
     232             : #define INTEL_PMC_IDX_FIXED_CPU_CYCLES  (INTEL_PMC_IDX_FIXED + 1)
     233             : 
     234             : /* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
     235             : #define MSR_ARCH_PERFMON_FIXED_CTR2     0x30b
     236             : #define INTEL_PMC_IDX_FIXED_REF_CYCLES  (INTEL_PMC_IDX_FIXED + 2)
     237             : #define INTEL_PMC_MSK_FIXED_REF_CYCLES  (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
     238             : 
     239             : /* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
     240             : #define MSR_ARCH_PERFMON_FIXED_CTR3     0x30c
     241             : #define INTEL_PMC_IDX_FIXED_SLOTS       (INTEL_PMC_IDX_FIXED + 3)
     242             : #define INTEL_PMC_MSK_FIXED_SLOTS       (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
     243             : 
     244             : /*
     245             :  * We model BTS tracing as another fixed-mode PMC.
     246             :  *
     247             :  * We choose the value 47 for the fixed index of BTS, since lower
     248             :  * values are used by actual fixed events and higher values are used
     249             :  * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
     250             :  */
     251             : #define INTEL_PMC_IDX_FIXED_BTS                 (INTEL_PMC_IDX_FIXED + 15)
     252             : 
     253             : /*
     254             :  * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
     255             :  * each TopDown metric event.
     256             :  *
     257             :  * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
     258             :  */
     259             : #define INTEL_PMC_IDX_METRIC_BASE               (INTEL_PMC_IDX_FIXED + 16)
     260             : #define INTEL_PMC_IDX_TD_RETIRING               (INTEL_PMC_IDX_METRIC_BASE + 0)
     261             : #define INTEL_PMC_IDX_TD_BAD_SPEC               (INTEL_PMC_IDX_METRIC_BASE + 1)
     262             : #define INTEL_PMC_IDX_TD_FE_BOUND               (INTEL_PMC_IDX_METRIC_BASE + 2)
     263             : #define INTEL_PMC_IDX_TD_BE_BOUND               (INTEL_PMC_IDX_METRIC_BASE + 3)
     264             : #define INTEL_PMC_IDX_TD_HEAVY_OPS              (INTEL_PMC_IDX_METRIC_BASE + 4)
     265             : #define INTEL_PMC_IDX_TD_BR_MISPREDICT          (INTEL_PMC_IDX_METRIC_BASE + 5)
     266             : #define INTEL_PMC_IDX_TD_FETCH_LAT              (INTEL_PMC_IDX_METRIC_BASE + 6)
     267             : #define INTEL_PMC_IDX_TD_MEM_BOUND              (INTEL_PMC_IDX_METRIC_BASE + 7)
     268             : #define INTEL_PMC_IDX_METRIC_END                INTEL_PMC_IDX_TD_MEM_BOUND
     269             : #define INTEL_PMC_MSK_TOPDOWN                   ((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
     270             :                                                 INTEL_PMC_MSK_FIXED_SLOTS)
     271             : 
     272             : /*
     273             :  * There is no event-code assigned to the TopDown events.
     274             :  *
     275             :  * For the slots event, use the pseudo code of the fixed counter 3.
     276             :  *
     277             :  * For the metric events, the pseudo event-code is 0x00.
     278             :  * The pseudo umask-code starts from the middle of the pseudo event
     279             :  * space, 0x80.
     280             :  */
     281             : #define INTEL_TD_SLOTS                          0x0400  /* TOPDOWN.SLOTS */
     282             : /* Level 1 metrics */
     283             : #define INTEL_TD_METRIC_RETIRING                0x8000  /* Retiring metric */
     284             : #define INTEL_TD_METRIC_BAD_SPEC                0x8100  /* Bad speculation metric */
     285             : #define INTEL_TD_METRIC_FE_BOUND                0x8200  /* FE bound metric */
     286             : #define INTEL_TD_METRIC_BE_BOUND                0x8300  /* BE bound metric */
     287             : /* Level 2 metrics */
     288             : #define INTEL_TD_METRIC_HEAVY_OPS               0x8400  /* Heavy Operations metric */
     289             : #define INTEL_TD_METRIC_BR_MISPREDICT           0x8500  /* Branch Mispredict metric */
     290             : #define INTEL_TD_METRIC_FETCH_LAT               0x8600  /* Fetch Latency metric */
     291             : #define INTEL_TD_METRIC_MEM_BOUND               0x8700  /* Memory bound metric */
     292             : 
     293             : #define INTEL_TD_METRIC_MAX                     INTEL_TD_METRIC_MEM_BOUND
     294             : #define INTEL_TD_METRIC_NUM                     8
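
/*
 * [Editor's check] The encodings follow from the scheme above: metric i
 * uses event=0x00, umask=0x80+i, so e.g. INTEL_TD_METRIC_BAD_SPEC is
 * ((0x80 + 1) << 8) == 0x8100, and the eight metrics span 0x8000..0x8700.
 */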
     295             : 
     296           0 : static inline bool is_metric_idx(int idx)
     297             : {
     298           0 :         return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
     299             : }
     300             : 
     301           0 : static inline bool is_topdown_idx(int idx)
     302             : {
     303           0 :         return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
     304             : }
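
/*
 * [Editor's note] The single unsigned comparison in is_metric_idx() folds
 * the two-sided range check (idx >= INTEL_PMC_IDX_METRIC_BASE &&
 * idx < INTEL_PMC_IDX_METRIC_BASE + INTEL_TD_METRIC_NUM) into one test:
 * any idx below the base wraps to a huge unsigned value and fails the '<'.
 */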
     305             : 
     306             : #define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)       \
      307             :                         (~(0x1ull << (bit)) & INTEL_PMC_MSK_TOPDOWN)
     308             : 
     309             : #define GLOBAL_STATUS_COND_CHG                  BIT_ULL(63)
     310             : #define GLOBAL_STATUS_BUFFER_OVF_BIT            62
     311             : #define GLOBAL_STATUS_BUFFER_OVF                BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
     312             : #define GLOBAL_STATUS_UNC_OVF                   BIT_ULL(61)
     313             : #define GLOBAL_STATUS_ASIF                      BIT_ULL(60)
     314             : #define GLOBAL_STATUS_COUNTERS_FROZEN           BIT_ULL(59)
     315             : #define GLOBAL_STATUS_LBRS_FROZEN_BIT           58
     316             : #define GLOBAL_STATUS_LBRS_FROZEN               BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
     317             : #define GLOBAL_STATUS_TRACE_TOPAPMI_BIT         55
     318             : #define GLOBAL_STATUS_TRACE_TOPAPMI             BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
     319             : #define GLOBAL_STATUS_PERF_METRICS_OVF_BIT      48
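
/*
 * [Editor's sketch] A PMI handler tests these bits against the value read
 * from MSR_CORE_PERF_GLOBAL_STATUS (0x38e, declared in <asm/msr-index.h>);
 * the function name is hypothetical.
 */
static inline bool example_ds_buffer_overflowed(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	return status & GLOBAL_STATUS_BUFFER_OVF;
}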
     320             : 
     321             : #define GLOBAL_CTRL_EN_PERF_METRICS             48
     322             : /*
     323             :  * We model guest LBR event tracing as another fixed-mode PMC like BTS.
     324             :  *
     325             :  * We choose bit 58 because it's used to indicate LBR stack frozen state
      326             :  * for architectural perfmon v4, and we unconditionally mask that bit in
      327             :  * handle_pmi_common(), so it'll never be set in the overflow handling.
     328             :  *
     329             :  * With this fake counter assigned, the guest LBR event user (such as KVM),
     330             :  * can program the LBR registers on its own, and we don't actually do anything
      331             :  * with them in the host context.
     332             :  */
     333             : #define INTEL_PMC_IDX_FIXED_VLBR        (GLOBAL_STATUS_LBRS_FROZEN_BIT)
     334             : 
     335             : /*
     336             :  * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
     337             :  * since it would claim bit 58 which is effectively Fixed26.
     338             :  */
     339             : #define INTEL_FIXED_VLBR_EVENT  0x1b00
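
/*
 * [Editor's check] The umask follows the fixed-counter rule above:
 * bit 58 - INTEL_PMC_IDX_FIXED (32) = fixed counter 26, and the pseudo
 * umask of fixed counter N is N + 1, so 26 + 1 = 27 = 0x1b -> 0x1b00.
 */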
     340             : 
     341             : /*
     342             :  * Adaptive PEBS v4
     343             :  */
     344             : 
     345             : struct pebs_basic {
     346             :         u64 format_size;
     347             :         u64 ip;
     348             :         u64 applicable_counters;
     349             :         u64 tsc;
     350             : };
     351             : 
     352             : struct pebs_meminfo {
     353             :         u64 address;
     354             :         u64 aux;
     355             :         u64 latency;
     356             :         u64 tsx_tuning;
     357             : };
     358             : 
     359             : struct pebs_gprs {
     360             :         u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
     361             :         u64 r8, r9, r10, r11, r12, r13, r14, r15;
     362             : };
     363             : 
     364             : struct pebs_xmm {
     365             :         u64 xmm[16*2];  /* two entries for each register */
     366             : };
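
/*
 * [Editor's sketch] The optional groups follow struct pebs_basic in the
 * order meminfo, GPRs, XMMs, LBRs, and basic.format_size echoes the
 * PEBS_DATACFG_* bits actually present; a parser walks the record along
 * these lines (the helper name is hypothetical):
 */
static inline struct pebs_meminfo *example_pebs_meminfo(struct pebs_basic *b)
{
	if (!(b->format_size & PEBS_DATACFG_MEMINFO))
		return NULL;
	return (struct pebs_meminfo *)(b + 1);	/* first optional group */
}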
     367             : 
     368             : /*
     369             :  * IBS cpuid feature detection
     370             :  */
     371             : 
     372             : #define IBS_CPUID_FEATURES              0x8000001b
     373             : 
     374             : /*
     375             :  * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
     376             :  * bit 0 is used to indicate the existence of IBS.
     377             :  */
     378             : #define IBS_CAPS_AVAIL                  (1U<<0)
     379             : #define IBS_CAPS_FETCHSAM               (1U<<1)
     380             : #define IBS_CAPS_OPSAM                  (1U<<2)
     381             : #define IBS_CAPS_RDWROPCNT              (1U<<3)
     382             : #define IBS_CAPS_OPCNT                  (1U<<4)
     383             : #define IBS_CAPS_BRNTRGT                (1U<<5)
     384             : #define IBS_CAPS_OPCNTEXT               (1U<<6)
     385             : #define IBS_CAPS_RIPINVALIDCHK          (1U<<7)
     386             : #define IBS_CAPS_OPBRNFUSE              (1U<<8)
     387             : #define IBS_CAPS_FETCHCTLEXTD           (1U<<9)
     388             : #define IBS_CAPS_OPDATA4                (1U<<10)
     389             : 
     390             : #define IBS_CAPS_DEFAULT                (IBS_CAPS_AVAIL         \
     391             :                                          | IBS_CAPS_FETCHSAM    \
     392             :                                          | IBS_CAPS_OPSAM)
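
/*
 * [Editor's sketch] Feature tests build on get_ibs_caps() (declared near
 * the bottom of this header; it returns 0 when IBS is absent).  The
 * function name is hypothetical.
 */
static inline bool example_ibs_op_sampling_ok(void)
{
	u32 caps = get_ibs_caps();

	return (caps & IBS_CAPS_AVAIL) && (caps & IBS_CAPS_OPSAM);
}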
     393             : 
     394             : /*
     395             :  * IBS APIC setup
     396             :  */
     397             : #define IBSCTL                          0x1cc
     398             : #define IBSCTL_LVT_OFFSET_VALID         (1ULL<<8)
     399             : #define IBSCTL_LVT_OFFSET_MASK          0x0F
     400             : 
     401             : /* IBS fetch bits/masks */
     402             : #define IBS_FETCH_RAND_EN       (1ULL<<57)
     403             : #define IBS_FETCH_VAL           (1ULL<<49)
     404             : #define IBS_FETCH_ENABLE        (1ULL<<48)
     405             : #define IBS_FETCH_CNT           0xFFFF0000ULL
     406             : #define IBS_FETCH_MAX_CNT       0x0000FFFFULL
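
/*
 * [Editor's sketch, assuming the 16-bit MaxCnt field counts in units of
 * 16 fetches, matching the kernel's max_period of IBS_FETCH_MAX_CNT << 4]
 * Arming fetch sampling for a given period; the function name is
 * hypothetical.
 */
static inline u64 example_ibs_fetch_ctl(u64 period)
{
	return IBS_FETCH_ENABLE | ((period >> 4) & IBS_FETCH_MAX_CNT);
}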
     407             : 
     408             : /*
     409             :  * IBS op bits/masks
     410             :  * The lower 7 bits of the current count are random bits
     411             :  * preloaded by hardware and ignored in software
     412             :  */
     413             : #define IBS_OP_CUR_CNT          (0xFFF80ULL<<32)
     414             : #define IBS_OP_CUR_CNT_RAND     (0x0007FULL<<32)
     415             : #define IBS_OP_CNT_CTL          (1ULL<<19)
     416             : #define IBS_OP_VAL              (1ULL<<18)
     417             : #define IBS_OP_ENABLE           (1ULL<<17)
     418             : #define IBS_OP_MAX_CNT          0x0000FFFFULL
     419             : #define IBS_OP_MAX_CNT_EXT      0x007FFFFFULL   /* not a register bit mask */
     420             : #define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20)     /* separate upper 7 bits */
     421             : #define IBS_RIP_INVALID         (1ULL<<38)
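
/*
 * [Editor's note] With IBS_CAPS_OPCNTEXT the logical max count is the
 * 23-bit IBS_OP_MAX_CNT_EXT: its low 16 bits land in IBS_OP_MAX_CNT and
 * its upper 7 bits at register bits 26:20 (IBS_OP_MAX_CNT_EXT_MASK),
 * which is why only the latter is a real register bit mask.
 */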
     422             : 
     423             : #ifdef CONFIG_X86_LOCAL_APIC
     424             : extern u32 get_ibs_caps(void);
     425             : #else
     426             : static inline u32 get_ibs_caps(void) { return 0; }
     427             : #endif
     428             : 
     429             : #ifdef CONFIG_PERF_EVENTS
     430             : extern void perf_events_lapic_init(void);
     431             : 
     432             : /*
     433             :  * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
     434             :  * unused and ABI specified to be 0, so nobody should care what we do with
     435             :  * them.
     436             :  *
     437             :  * EXACT - the IP points to the exact instruction that triggered the
     438             :  *         event (HW bugs exempt).
     439             :  * VM    - original X86_VM_MASK; see set_linear_ip().
     440             :  */
     441             : #define PERF_EFLAGS_EXACT       (1UL << 3)
     442             : #define PERF_EFLAGS_VM          (1UL << 5)
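
/*
 * [Editor's sketch] Consumers test the borrowed flag bits on the saved
 * pt_regs, e.g. to tell whether a sample's IP is precise; the function
 * name is hypothetical.
 */
static inline bool example_sample_ip_is_exact(struct pt_regs *regs)
{
	return regs->flags & PERF_EFLAGS_EXACT;
}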
     443             : 
     444             : struct pt_regs;
     445             : struct x86_perf_regs {
     446             :         struct pt_regs  regs;
     447             :         u64             *xmm_regs;
     448             : };
     449             : 
     450             : extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
     451             : extern unsigned long perf_misc_flags(struct pt_regs *regs);
     452             : #define perf_misc_flags(regs)   perf_misc_flags(regs)
     453             : 
     454             : #include <asm/stacktrace.h>
     455             : 
     456             : /*
     457             :  * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
     458             :  * and the comment with PERF_EFLAGS_EXACT.
     459             :  */
     460             : #define perf_arch_fetch_caller_regs(regs, __ip)         {       \
     461             :         (regs)->ip = (__ip);                                 \
     462             :         (regs)->sp = (unsigned long)__builtin_frame_address(0);      \
     463             :         (regs)->cs = __KERNEL_CS;                            \
      464             :         (regs)->flags = 0;                                   \
     465             : }
     466             : 
     467             : struct perf_guest_switch_msr {
     468             :         unsigned msr;
     469             :         u64 host, guest;
     470             : };
     471             : 
     472             : struct x86_pmu_lbr {
     473             :         unsigned int    nr;
     474             :         unsigned int    from;
     475             :         unsigned int    to;
     476             :         unsigned int    info;
     477             : };
     478             : 
     479             : extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
     480             : extern void perf_check_microcode(void);
     481             : extern int x86_perf_rdpmc_index(struct perf_event *event);
     482             : #else
     483             : static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
     484             : {
     485             :         memset(cap, 0, sizeof(*cap));
     486             : }
     487             : 
     488             : static inline void perf_events_lapic_init(void) { }
     489             : static inline void perf_check_microcode(void) { }
     490             : #endif
     491             : 
     492             : #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
     493             : extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
     494             : extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
     495             : #else
     496             : struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
     497             : static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
     498             : {
     499             :         return -1;
     500             : }
     501             : #endif
     502             : 
     503             : #ifdef CONFIG_CPU_SUP_INTEL
     504             :  extern void intel_pt_handle_vmx(int on);
     505             : #else
     506             : static inline void intel_pt_handle_vmx(int on)
     507             : {
     508             : 
     509             : }
     510             : #endif
     511             : 
     512             : #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
     513             :  extern void amd_pmu_enable_virt(void);
     514             :  extern void amd_pmu_disable_virt(void);
     515             : #else
     516             :  static inline void amd_pmu_enable_virt(void) { }
     517             :  static inline void amd_pmu_disable_virt(void) { }
     518             : #endif
     519             : 
     520             : #define arch_perf_out_copy_user copy_from_user_nmi
     521             : 
     522             : #endif /* _ASM_X86_PERF_EVENT_H */

Generated by: LCOV version 1.14