Diffstat (limited to 'arch/x86/include/asm/perf_event.h')
-rw-r--r--  arch/x86/include/asm/perf_event.h | 22 +++++++++++-----------
1 file changed, 11 insertions, 11 deletions
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 588f52ea810..c78f14a0df0 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,11 +5,10 @@
  * Performance event hw details:
  */
 
-#define X86_PMC_MAX_GENERIC				       32
-#define X86_PMC_MAX_FIXED					3
+#define INTEL_PMC_MAX_GENERIC				       32
+#define INTEL_PMC_MAX_FIXED					3
+#define INTEL_PMC_IDX_FIXED				       32
 
-#define X86_PMC_IDX_GENERIC				        0
-#define X86_PMC_IDX_FIXED				       32
 #define X86_PMC_IDX_MAX					       64
 
 #define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
@@ -48,8 +47,7 @@
 	(X86_RAW_EVENT_MASK          |  \
 	 AMD64_EVENTSEL_EVENT)
 #define AMD64_NUM_COUNTERS				4
-#define AMD64_NUM_COUNTERS_F15H				6
-#define AMD64_NUM_COUNTERS_MAX				AMD64_NUM_COUNTERS_F15H
+#define AMD64_NUM_COUNTERS_CORE				6
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
@@ -121,16 +119,16 @@ struct x86_pmu_capability {
 
 /* Instr_Retired.Any: */
 #define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS	(X86_PMC_IDX_FIXED + 0)
+#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)
 
 /* CPU_CLK_Unhalted.Core: */
 #define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES	(X86_PMC_IDX_FIXED + 1)
+#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)
 
 /* CPU_CLK_Unhalted.Ref: */
 #define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
-#define X86_PMC_IDX_FIXED_REF_CYCLES	(X86_PMC_IDX_FIXED + 2)
-#define X86_PMC_MSK_FIXED_REF_CYCLES	(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
+#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
+#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
 
 /*
  * We model BTS tracing as another fixed-mode PMC.
@@ -139,7 +137,7 @@ struct x86_pmu_capability {
  * values are used by actual fixed events and higher values are used
  * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
  */
-#define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)
 
 /*
  * IBS cpuid feature detection
@@ -234,6 +232,7 @@ struct perf_guest_switch_msr {
 
 extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
 extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
+extern void perf_check_microcode(void);
 #else
 static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
 {
@@ -247,6 +246,7 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 }
 
 static inline void perf_events_lapic_init(void)	{ }
+static inline void perf_check_microcode(void) { }
 #endif
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)