| field | value | date |
|---|---|---|
| author | Chuansheng Liu <chuansheng.liu@intel.com> | 2012-12-17 15:59:50 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-17 17:15:13 -0800 |
| commit | 0f34c400914f165b7b3812459be2d77b8aa1f1e4 (patch) | |
| tree | b45e4acaadd16362d08d107f24ac63b6e61e142c | |
| parent | 2bf0a8f67fae9906c6497886203f6e5cb7168df6 (diff) | |
watchdog: store the watchdog sample period as a variable
Currently the sample period is recomputed every time it is needed, through
get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5).
Instead, compute it once, store it in a variable, and mark that variable
__read_mostly.
Signed-off-by: liu chuansheng <chuansheng.liu@intel.com>
Cc: Don Zickus <dzickus@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | kernel/watchdog.c | 13 |

1 file changed, 8 insertions(+), 5 deletions(-)
```diff
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index c8c21be11ab..997c6a16ec2 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -31,6 +31,7 @@
 int watchdog_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
 static int __read_mostly watchdog_disabled;
+static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
@@ -116,7 +117,7 @@ static unsigned long get_timestamp(int this_cpu)
 	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
-static u64 get_sample_period(void)
+static void set_sample_period(void)
 {
 	/*
 	 * convert watchdog_thresh from seconds to ns
@@ -125,7 +126,7 @@ static u64 get_sample_period(void)
 	 * and hard thresholds) to increment before the
 	 * hardlockup detector generates a warning
 	 */
-	return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
+	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
 }
 
 /* Commands for resetting the watchdog */
@@ -275,7 +276,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
 	/* .. and repeat */
-	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
+	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
 
 	if (touch_ts == 0) {
 		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
@@ -356,7 +357,7 @@ static void watchdog_enable(unsigned int cpu)
 	hrtimer->function = watchdog_timer_fn;
 
 	/* done here because hrtimer_start can only pin to smp_processor_id() */
-	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
+	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
 		      HRTIMER_MODE_REL_PINNED);
 
 	/* initialize timestamp */
@@ -386,7 +387,7 @@ static int watchdog_should_run(unsigned int cpu)
 /*
  * The watchdog thread function - touches the timestamp.
  *
- * It only runs once every get_sample_period() seconds (4 seconds by
+ * It only runs once every sample_period seconds (4 seconds by
  * default) to reset the softlockup timestamp. If this gets delayed
  * for more than 2*watchdog_thresh seconds then the debug-printout
  * triggers in watchdog_timer_fn().
@@ -519,6 +520,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 	if (ret || !write)
 		return ret;
 
+	set_sample_period();
 	if (watchdog_enabled && watchdog_thresh)
 		watchdog_enable_all_cpus();
 	else
@@ -540,6 +542,7 @@ static struct smp_hotplug_thread watchdog_threads = {
 
 void __init lockup_detector_init(void)
 {
+	set_sample_period();
 	if (smpboot_register_percpu_thread(&watchdog_threads)) {
 		pr_err("Failed to create watchdog threads, disabled\n");
 		watchdog_disabled = -ENODEV;
```
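For reference, here is a minimal standalone sketch (not the kernel code itself) of the arithmetic that set_sample_period() caches, under the assumption that get_softlockup_thresh() returns watchdog_thresh * 2, as it does in this version of kernel/watchdog.c: with the default 10-second threshold, the hrtimer fires every 4 seconds.

```c
/*
 * Standalone sketch of the period calculation cached by the patch.
 * Assumption: get_softlockup_thresh() is watchdog_thresh * 2, i.e. the
 * soft-lockup threshold is twice the hard-lockup threshold.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static int watchdog_thresh = 10;   /* default threshold, in seconds */
static uint64_t sample_period;     /* computed once, read on every timer tick */

static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

static void set_sample_period(void)
{
	/* split 2*watchdog_thresh seconds into 5 hrtimer intervals */
	sample_period = get_softlockup_thresh() * (NSEC_PER_SEC / 5);
}

int main(void)
{
	set_sample_period();
	/* default: 20 * 200000000 ns = 4000000000 ns = 4 s */
	printf("sample_period = %llu ns\n", (unsigned long long)sample_period);
	return 0;
}
```

The point of the patch is simply that this product never changes between writes to watchdog_thresh, so recomputing it in the hrtimer callback is wasted work; caching it in a __read_mostly variable also keeps the value out of frequently written cache lines.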