diff options
Diffstat (limited to 'arch/powerpc/perf/core-book3s.c')
| -rw-r--r-- | arch/powerpc/perf/core-book3s.c | 46 | 
1 file changed, 39 insertions, 7 deletions
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 7cd2dbd6e4c..0db88f501f9 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -82,6 +82,11 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)  	return 0;  } +static inline int siar_valid(struct pt_regs *regs) +{ +	return 1; +} +  #endif /* CONFIG_PPC32 */  /* @@ -106,14 +111,20 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)   * If we're not doing instruction sampling, give them the SDAR   * (sampled data address).  If we are doing instruction sampling, then   * only give them the SDAR if it corresponds to the instruction - * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC - * bit in MMCRA. + * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or + * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.   */  static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)  {  	unsigned long mmcra = regs->dsisr; -	unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ? -		POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC; +	unsigned long sdsync; + +	if (ppmu->flags & PPMU_SIAR_VALID) +		sdsync = POWER7P_MMCRA_SDAR_VALID; +	else if (ppmu->flags & PPMU_ALT_SIPR) +		sdsync = POWER6_MMCRA_SDSYNC; +	else +		sdsync = MMCRA_SDSYNC;  	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))  		*addrp = mfspr(SPRN_SDAR); @@ -230,6 +241,24 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)  	return !regs->softe;  } +/* + * On processors like P7+ that have the SIAR-Valid bit, marked instructions + * must be sampled only if the SIAR-valid bit is set. + * + * For unmarked instructions and for processors that don't have the SIAR-Valid + * bit, assume that SIAR is valid. 
+ */ +static inline int siar_valid(struct pt_regs *regs) +{ +	unsigned long mmcra = regs->dsisr; +	int marked = mmcra & MMCRA_SAMPLE_ENABLE; + +	if ((ppmu->flags & PPMU_SIAR_VALID) && marked) +		return mmcra & POWER7P_MMCRA_SIAR_VALID; + +	return 1; +} +  #endif /* CONFIG_PPC64 */  static void perf_event_interrupt(struct pt_regs *regs); @@ -1291,6 +1320,7 @@ struct pmu power_pmu = {  	.event_idx	= power_pmu_event_idx,  }; +  /*   * A counter has overflowed; update its count and record   * things if requested.  Note that interrupts are hard-disabled @@ -1324,7 +1354,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,  			left += period;  			if (left <= 0)  				left = period; -			record = 1; +			record = siar_valid(regs);  			event->hw.last_period = event->hw.sample_period;  		}  		if (left < 0x80000000LL) @@ -1374,8 +1404,10 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)  {  	unsigned long use_siar = regs->result; -	if (use_siar) +	if (use_siar && siar_valid(regs))  		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs); +	else if (use_siar) +		return 0;		// no valid instruction pointer  	else  		return regs->nip;  } @@ -1396,7 +1428,7 @@ static bool pmc_overflow(unsigned long val)  	 * PMCs because a user might set a period of less than 256 and we  	 * don't want to mistakenly reset them.  	 */ -	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) +	if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))  		return true;  	return false;  |