Diffstat (limited to 'arch/i386/kernel/nmi.c')
-rw-r--r--	arch/i386/kernel/nmi.c	26	++++++++++++++++++++++++++
1 file changed, 26 insertions, 0 deletions
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index eaafe233a5d..171194ccb7b 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -22,6 +22,7 @@
 #include <linux/percpu.h>
 #include <linux/dmi.h>
 #include <linux/kprobes.h>
+#include <linux/cpumask.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
@@ -42,6 +43,8 @@ int nmi_watchdog_enabled;
 static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
 static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
 
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
 /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
  */
@@ -907,6 +910,16 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 		touched = 1;
 	}
 
+	if (cpu_isset(cpu, backtrace_mask)) {
+		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
+
+		spin_lock(&lock);
+		printk("NMI backtrace for cpu %d\n", cpu);
+		dump_stack();
+		spin_unlock(&lock);
+		cpu_clear(cpu, backtrace_mask);
+	}
+
 	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;
 
 	/* if the apic timer isn't firing, this cpu isn't doing much */
@@ -1033,6 +1046,19 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
 
 #endif
 
+void __trigger_all_cpu_backtrace(void)
+{
+	int i;
+
+	backtrace_mask = cpu_online_map;
+	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
+	for (i = 0; i < 10 * 1000; i++) {
+		if (cpus_empty(backtrace_mask))
+			break;
+		mdelay(1);
+	}
+}
+
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
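
For readers following the patch: the two hunks cooperate through a simple publish-and-poll handshake. __trigger_all_cpu_backtrace() publishes cpu_online_map into backtrace_mask and busy-waits for up to 10 seconds; each CPU's next NMI watchdog tick sees its bit set, dumps its stack under a spinlock so the printks do not interleave, and clears its bit. Below is a minimal user-space sketch of the same handshake; it is illustrative only, and the worker threads, the trigger_all_worker_backtrace() name, and the pthread/C11-atomics scaffolding are assumptions, not part of the kernel patch.

/*
 * Illustrative user-space analogue of the backtrace_mask handshake above.
 * NOT kernel code: names and threading scaffolding are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 4

static atomic_ulong backtrace_mask;	/* one bit per worker, 0 = nothing pending */
static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER;

/* Each worker periodically checks its bit, like the per-CPU NMI tick. */
static void *worker(void *arg)
{
	int id = (int)(long)arg;

	for (;;) {
		if (atomic_load(&backtrace_mask) & (1UL << id)) {
			pthread_mutex_lock(&print_lock);	/* serialise the output */
			printf("backtrace for worker %d\n", id);
			pthread_mutex_unlock(&print_lock);
			atomic_fetch_and(&backtrace_mask, ~(1UL << id));
		}
		usleep(1000);	/* stand-in for the periodic NMI */
	}
	return NULL;
}

/* Initiator: publish the mask, then poll until everyone has reported or we time out. */
static void trigger_all_worker_backtrace(void)
{
	int i;

	atomic_store(&backtrace_mask, (1UL << NWORKERS) - 1);
	for (i = 0; i < 10 * 1000; i++) {	/* wait for up to 10 seconds */
		if (atomic_load(&backtrace_mask) == 0)
			break;
		usleep(1000);
	}
}

int main(void)
{
	pthread_t tid[NWORKERS];
	int i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)(long)i);

	trigger_all_worker_backtrace();
	printf("all workers reported (or the wait timed out)\n");
	return 0;
}

Compile with -pthread. The 1 ms poll in the worker stands in for the periodic NMI, and the bounded wait mirrors the 10 * 1000 iterations of mdelay(1) in the patch; in both cases the initiator never blocks forever on a CPU (or thread) that fails to respond.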