Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c              2
-rw-r--r--  kernel/futex.c            11
-rw-r--r--  kernel/hrtimer.c         212
-rw-r--r--  kernel/posix-timers.c     10
-rw-r--r--  kernel/rtmutex.c           3
-rw-r--r--  kernel/sched.c             7
-rw-r--r--  kernel/sys.c              10
-rw-r--r--  kernel/time.c             18
-rw-r--r--  kernel/time/ntp.c          3
-rw-r--r--  kernel/time/tick-sched.c  21
-rw-r--r--  kernel/time/timer_list.c   8
11 files changed, 252 insertions, 53 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 30de644a40c..37b3e150ae3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -989,6 +989,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->prev_utime = cputime_zero;
 	p->prev_stime = cputime_zero;
 
+	p->default_timer_slack_ns = current->timer_slack_ns;
+
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 	p->last_switch_count = 0;
 	p->last_switch_timestamp = 0;
diff --git a/kernel/futex.c b/kernel/futex.c
index 7d1136e97c1..8af10027514 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1296,13 +1296,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 		if (!abs_time)
 			schedule();
 		else {
+			unsigned long slack;
+			slack = current->timer_slack_ns;
+			if (rt_task(current))
+				slack = 0;
 			hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
 						HRTIMER_MODE_ABS);
 			hrtimer_init_sleeper(&t, current);
-			t.timer.expires = *abs_time;
+			hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
 
-			hrtimer_start(&t.timer, t.timer.expires,
-						HRTIMER_MODE_ABS);
+			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
 			if (!hrtimer_active(&t.timer))
 				t.task = NULL;
@@ -1404,7 +1407,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
 				      HRTIMER_MODE_ABS);
 		hrtimer_init_sleeper(to, current);
-		to->timer.expires = *time;
+		hrtimer_set_expires(&to->timer, *time);
 	}
 
 	q.pi_state = NULL;
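The futex conversion is the pattern the whole series follows: a timer now carries a soft expiry and a hard expiry, ordinary tasks sleep with their per-task slack, and real-time tasks get a window of exactly zero. A minimal sketch of that idiom, using only the helpers this series introduces (the function name arm_with_slack is illustrative, not from the patch):

    /* Arm an hrtimer with an absolute [expiry, expiry + slack] window.
     * rt tasks must not be delayed, so their slack is forced to 0,
     * exactly as futex_wait() does above.
     */
    static void arm_with_slack(struct hrtimer *timer, ktime_t expiry)
    {
    	unsigned long slack = current->timer_slack_ns;

    	if (rt_task(current))
    		slack = 0;

    	hrtimer_set_expires_range_ns(timer, expiry, slack);
    	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
    }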
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cdec83e722f..51ee90bca2d 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -517,7 +517,7 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 		if (!base->first)
 			continue;
 		timer = rb_entry(base->first, struct hrtimer, node);
-		expires = ktime_sub(timer->expires, base->offset);
+		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 		if (expires.tv64 < cpu_base->expires_next.tv64)
 			cpu_base->expires_next = expires;
 	}
@@ -539,10 +539,10 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base)
 {
 	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
-	ktime_t expires = ktime_sub(timer->expires, base->offset);
+	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 	int res;
 
-	WARN_ON_ONCE(timer->expires.tv64 < 0);
+	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 
 	/*
 	 * When the callback is running, we do not reprogram the clock event
@@ -795,7 +795,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 	u64 orun = 1;
 	ktime_t delta;
 
-	delta = ktime_sub(now, timer->expires);
+	delta = ktime_sub(now, hrtimer_get_expires(timer));
 
 	if (delta.tv64 < 0)
 		return 0;
@@ -807,8 +807,8 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 		s64 incr = ktime_to_ns(interval);
 
 		orun = ktime_divns(delta, incr);
-		timer->expires = ktime_add_ns(timer->expires, incr * orun);
-		if (timer->expires.tv64 > now.tv64)
+		hrtimer_add_expires_ns(timer, incr * orun);
+		if (hrtimer_get_expires_tv64(timer) > now.tv64)
 			return orun;
 		/*
 		 * This (and the ktime_add() below) is the
@@ -816,7 +816,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 		 */
 		orun++;
 	}
-	timer->expires = ktime_add_safe(timer->expires, interval);
+	hrtimer_add_expires(timer, interval);
 
 	return orun;
 }
@@ -848,7 +848,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
 		 * We dont care about collisions. Nodes with
 		 * the same expiry time stay together.
 		 */
-		if (timer->expires.tv64 < entry->expires.tv64) {
+		if (hrtimer_get_expires_tv64(timer) <
+				hrtimer_get_expires_tv64(entry)) {
 			link = &(*link)->rb_left;
 		} else {
 			link = &(*link)->rb_right;
@@ -945,9 +946,10 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 }
 
 /**
- * hrtimer_start - (re)start an relative timer on the current CPU
+ * hrtimer_start_range_ns - (re)start a relative timer on the current CPU
  * @timer:	the timer to be added
  * @tim:	expiry time
+ * @delta_ns:	"slack" range for the timer
  * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
@@ -955,7 +957,8 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 *  1 when the timer was active
 */
 int
-hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
+			const enum hrtimer_mode mode)
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
@@ -983,7 +986,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 #endif
 	}
 
-	timer->expires = tim;
+	hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
 	timer_stats_hrtimer_set_start_info(timer);
 
@@ -1016,8 +1019,26 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
+
+/**
+ * hrtimer_start - (re)start a relative timer on the current CPU
+ * @timer:	the timer to be added
+ * @tim:	expiry time
+ * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
+ *
+ * Returns:
+ *  0 on success
+ *  1 when the timer was active
+ */
+int
+hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+{
+	return hrtimer_start_range_ns(timer, tim, 0, mode);
+}
 EXPORT_SYMBOL_GPL(hrtimer_start);
 
+
 /**
  * hrtimer_try_to_cancel - try to deactivate a timer
  * @timer:	hrtimer to stop
@@ -1077,7 +1098,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 	ktime_t rem;
 
 	base = lock_hrtimer_base(timer, &flags);
-	rem = ktime_sub(timer->expires, base->get_time());
+	rem = hrtimer_expires_remaining(timer);
 	unlock_hrtimer_base(timer, &flags);
 
 	return rem;
@@ -1109,7 +1130,7 @@ ktime_t hrtimer_get_next_event(void)
 				continue;
 
 			timer = rb_entry(base->first, struct hrtimer, node);
-			delta.tv64 = timer->expires.tv64;
+			delta.tv64 = hrtimer_get_expires_tv64(timer);
 			delta = ktime_sub(delta, base->get_time());
 			if (delta.tv64 < mindelta.tv64)
 				mindelta.tv64 = delta.tv64;
@@ -1310,10 +1331,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 			timer = rb_entry(node, struct hrtimer, node);
 
-			if (basenow.tv64 < timer->expires.tv64) {
+			/*
+			 * The immediate goal for using the softexpires is
+			 * minimizing wakeups, not running timers at the
+			 * earliest interrupt after their soft expiration.
+			 * This allows us to avoid using a Priority Search
+			 * Tree, which can answer a stabbing query for
+			 * overlapping intervals and instead use the simple
+			 * BST we already have.
+			 * We don't add extra wakeups by delaying timers that
+			 * are right-of a not yet expired timer, because that
+			 * timer will have to trigger a wakeup anyway.
+			 */
+
+			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
 				ktime_t expires;
 
-				expires = ktime_sub(timer->expires,
+				expires = ktime_sub(hrtimer_get_expires(timer),
 						    base->offset);
 				if (expires.tv64 < expires_next.tv64)
 					expires_next = expires;
@@ -1349,6 +1383,36 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 		raise_softirq(HRTIMER_SOFTIRQ);
 }
 
+/**
+ * hrtimer_peek_ahead_timers -- run soft-expired timers now
+ *
+ * hrtimer_peek_ahead_timers will peek at the timer queue of
+ * the current cpu and check if there are any timers for which
+ * the soft expires time has passed. If any such timers exist,
+ * they are run immediately and then removed from the timer queue.
+ *
+ */
+void hrtimer_peek_ahead_timers(void)
+{
+	unsigned long flags;
+	struct tick_device *td;
+	struct clock_event_device *dev;
+
+	if (!hrtimer_hres_active())
+		return;
+
+	local_irq_save(flags);
+	td = &__get_cpu_var(tick_cpu_device);
+	if (!td)
+		goto out;
+	dev = td->evtdev;
+	if (!dev)
+		goto out;
+	hrtimer_interrupt(dev);
+out:
+	local_irq_restore(flags);
+}
+
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
 	run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
@@ -1416,7 +1480,8 @@ void hrtimer_run_queues(void)
 			struct hrtimer *timer;
 
 			timer = rb_entry(node, struct hrtimer, node);
-			if (base->softirq_time.tv64 <= timer->expires.tv64)
+			if (base->softirq_time.tv64 <=
+					hrtimer_get_expires_tv64(timer))
 				break;
 
 			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
@@ -1464,7 +1529,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
 	do {
 		set_current_state(TASK_INTERRUPTIBLE);
-		hrtimer_start(&t->timer, t->timer.expires, mode);
+		hrtimer_start_expires(&t->timer, mode);
 		if (!hrtimer_active(&t->timer))
 			t->task = NULL;
@@ -1486,7 +1551,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
 	struct timespec rmt;
 	ktime_t rem;
 
-	rem = ktime_sub(timer->expires, timer->base->get_time());
+	rem = hrtimer_expires_remaining(timer);
 	if (rem.tv64 <= 0)
 		return 0;
 	rmt = ktime_to_timespec(rem);
@@ -1505,7 +1570,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
 
 	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
 				HRTIMER_MODE_ABS);
-	t.timer.expires.tv64 = restart->nanosleep.expires;
+	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
 	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
 		goto out;
@@ -1530,9 +1595,14 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 	struct restart_block *restart;
 	struct hrtimer_sleeper t;
 	int ret = 0;
+	unsigned long slack;
+
+	slack = current->timer_slack_ns;
+	if (rt_task(current))
+		slack = 0;
 
 	hrtimer_init_on_stack(&t.timer, clockid, mode);
-	t.timer.expires = timespec_to_ktime(*rqtp);
+	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
 	if (do_nanosleep(&t, mode))
 		goto out;
@@ -1552,7 +1622,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 	restart->fn = hrtimer_nanosleep_restart;
 	restart->nanosleep.index = t.timer.base->index;
 	restart->nanosleep.rmtp = rmtp;
-	restart->nanosleep.expires = t.timer.expires.tv64;
+	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
 
 	ret = -ERESTART_RESTARTBLOCK;
 out:
@@ -1753,3 +1823,103 @@ void __init hrtimers_init(void)
 #endif
 }
 
+/**
+ * schedule_hrtimeout_range - sleep until timeout
+ * @expires:	timeout value (ktime_t)
+ * @delta:	slack in expires timeout (ktime_t)
+ * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * The @delta argument gives the kernel the freedom to schedule the
+ * actual wakeup to a time that is both power and performance friendly.
+ * The kernel gives the normal best effort behavior for "@expires+@delta",
+ * but may decide to fire the timer earlier, but no earlier than @expires.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired otherwise -EINTR
+ */
+int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+			       const enum hrtimer_mode mode)
+{
+	struct hrtimer_sleeper t;
+
+	/*
+	 * Optimize when a zero timeout value is given. It does not
+	 * matter whether this is an absolute or a relative time.
+	 */
+	if (expires && !expires->tv64) {
+		__set_current_state(TASK_RUNNING);
+		return 0;
+	}
+
+	/*
+	 * A NULL parameter means "infinite"
+	 */
+	if (!expires) {
+		schedule();
+		__set_current_state(TASK_RUNNING);
+		return -EINTR;
+	}
+
+	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
+	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
+
+	hrtimer_init_sleeper(&t, current);
+
+	hrtimer_start_expires(&t.timer, mode);
+	if (!hrtimer_active(&t.timer))
+		t.task = NULL;
+
+	if (likely(t.task))
+		schedule();
+
+	hrtimer_cancel(&t.timer);
+	destroy_hrtimer_on_stack(&t.timer);
+
+	__set_current_state(TASK_RUNNING);
+
+	return !t.task ? 0 : -EINTR;
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
+
+/**
+ * schedule_hrtimeout - sleep until timeout
+ * @expires:	timeout value (ktime_t)
+ * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired otherwise -EINTR
+ */
+int __sched schedule_hrtimeout(ktime_t *expires,
+			       const enum hrtimer_mode mode)
+{
+	return schedule_hrtimeout_range(expires, 0, mode);
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout);
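schedule_hrtimeout_range() is the interface most code will end up calling. It mirrors schedule_timeout(): set the task state first, then sleep. A hypothetical driver-side wait, granting the kernel 1 ms of coalescing room on top of a 10 ms timeout (wait_for_card is an illustrative name, not part of this patch):

    /* Sleep up to 10 ms + 1 ms slack; never woken before the 10 ms. */
    static int wait_for_card(void)
    {
    	ktime_t timeout = ktime_set(0, 10 * NSEC_PER_MSEC);

    	set_current_state(TASK_INTERRUPTIBLE);
    	if (schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
    				     HRTIMER_MODE_REL) == 0)
    		return -ETIMEDOUT;	/* timer expired */

    	return 0;			/* woken early, e.g. by a signal */
    }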
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5131e547116..ee204586149 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -668,7 +668,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
 		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
 
-	remaining = ktime_sub(timer->expires, now);
+	remaining = ktime_sub(hrtimer_get_expires(timer), now);
 	/* Return 0 only, when the timer is expired and not pending */
 	if (remaining.tv64 <= 0) {
 		/*
@@ -762,7 +762,7 @@ common_timer_set(struct k_itimer *timr, int flags,
 	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
 	timr->it.real.timer.function = posix_timer_fn;
 
-	timer->expires = timespec_to_ktime(new_setting->it_value);
+	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
 
 	/* Convert interval */
 	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
@@ -771,14 +771,12 @@ common_timer_set(struct k_itimer *timr, int flags,
 	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
 		/* Setup correct expiry time for relative timers */
 		if (mode == HRTIMER_MODE_REL) {
-			timer->expires =
-				ktime_add_safe(timer->expires,
-					       timer->base->get_time());
+			hrtimer_add_expires(timer, timer->base->get_time());
 		}
 		return 0;
 	}
 
-	hrtimer_start(timer, timer->expires, mode);
+	hrtimer_start_expires(timer, mode);
 	return 0;
 }
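common_timer_get() charges overruns through hrtimer_forward(), whose arithmetic is worth tracing once. A worked example for a periodic timer with a 4 ms interval, a stored expiry of 100 ms, and now = 111 ms:

    /*
     * delta = now - expires          = 11 ms
     * orun  = delta / interval       = 2
     * expires += 2 * 4 ms            = 108 ms  (still <= now, so orun++)
     * expires += interval            = 112 ms  (final ktime_add_safe step)
     *
     * hrtimer_forward() returns 3: it_overrun grows by 3 and the
     * timer ends up armed 1 ms in the future.
     */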
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 6522ae5b14a..69d9cb921ff 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -631,8 +631,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	/* Setup the timer, when timeout != NULL */
 	if (unlikely(timeout)) {
-		hrtimer_start(&timeout->timer, timeout->timer.expires,
-			      HRTIMER_MODE_ABS);
+		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
 		if (!hrtimer_active(&timeout->timer))
 			timeout->task = NULL;
 	}
diff --git a/kernel/sched.c b/kernel/sched.c
index 6f230596bd0..eb3c7295361 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -226,9 +226,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 
 		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
 		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-		hrtimer_start(&rt_b->rt_period_timer,
-			      rt_b->rt_period_timer.expires,
-			      HRTIMER_MODE_ABS);
+		hrtimer_start_expires(&rt_b->rt_period_timer,
+				HRTIMER_MODE_ABS);
 	}
 	spin_unlock(&rt_b->rt_runtime_lock);
 }
@@ -1063,7 +1062,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
 	struct hrtimer *timer = &rq->hrtick_timer;
 	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
 
-	timer->expires = time;
+	hrtimer_set_expires(timer, time);
 
 	if (rq == this_rq()) {
 		hrtimer_restart(timer);
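Both call sites above replace hrtimer_start(timer, timer->expires, mode) with hrtimer_start_expires(), which restarts a timer at the expiry range it already stores. A sketch of what such a helper reduces to, assuming the accessors added elsewhere in this series (the real inline lives in include/linux/hrtimer.h and may differ in detail):

    /* Restart a timer at its stored [soft, hard] window instead of
     * collapsing the window to a single point as the old idiom did.
     */
    static inline int hrtimer_start_expires(struct hrtimer *timer,
    					enum hrtimer_mode mode)
    {
    	unsigned long delta;
    	ktime_t soft, hard;

    	soft = hrtimer_get_softexpires(timer);
    	hard = hrtimer_get_expires(timer);
    	delta = ktime_to_ns(ktime_sub(hard, soft));
    	return hrtimer_start_range_ns(timer, soft, delta, mode);
    }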
diff --git a/kernel/sys.c b/kernel/sys.c
index 0bc8fa3c228..fc71f99fb46 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1739,6 +1739,16 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 		case PR_SET_TSC:
 			error = SET_TSC_CTL(arg2);
 			break;
+		case PR_GET_TIMERSLACK:
+			error = current->timer_slack_ns;
+			break;
+		case PR_SET_TIMERSLACK:
+			if (arg2 <= 0)
+				current->timer_slack_ns =
+					current->default_timer_slack_ns;
+			else
+				current->timer_slack_ns = arg2;
+			break;
 		default:
 			error = -EINVAL;
 			break;
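Note that arg2 is an unsigned long, so the arg2 <= 0 branch only ever matches zero: passing 0 restores the default slack recorded at fork(). From userspace the knob looks like this (a hypothetical test program; the PR_* values are the ones defined in include/linux/prctl.h):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TIMERSLACK
    #define PR_SET_TIMERSLACK	29	/* from include/linux/prctl.h */
    #define PR_GET_TIMERSLACK	30
    #endif

    int main(void)
    {
    	/* allow this thread's timers to fire up to 50 us late */
    	prctl(PR_SET_TIMERSLACK, 50000UL, 0, 0, 0);
    	printf("slack now %ld ns\n", prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));

    	/* 0 resets to the default inherited at fork() */
    	prctl(PR_SET_TIMERSLACK, 0UL, 0, 0, 0);
    	return 0;
    }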
diff --git a/kernel/time.c b/kernel/time.c
index 6a08660b4fa..d63a4336fad 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -669,3 +669,21 @@ EXPORT_SYMBOL(get_jiffies_64);
 #endif
 
 EXPORT_SYMBOL(jiffies);
+
+/*
+ * Add two timespec values and do a safety check for overflow.
+ * It's assumed that both values are valid (>= 0)
+ */
+struct timespec timespec_add_safe(const struct timespec lhs,
+				  const struct timespec rhs)
+{
+	struct timespec res;
+
+	set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
+				lhs.tv_nsec + rhs.tv_nsec);
+
+	if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
+		res.tv_sec = TIME_T_MAX;
+
+	return res;
+}
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 1ad46f3df6e..9c114b726ab 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -142,8 +142,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		time_state = TIME_OOP;
 		printk(KERN_NOTICE "Clock: "
 		       "inserting leap second 23:59:60 UTC\n");
-		leap_timer.expires = ktime_add_ns(leap_timer.expires,
-						  NSEC_PER_SEC);
+		hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
 		res = HRTIMER_RESTART;
 		break;
 	case TIME_DEL:
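timespec_add_safe() clamps instead of wrapping, since a hard expiry of "soft + slack" must never move into the past. A worked example on a 32-bit time_t, where TIME_T_MAX is 2147483647:

    /*
     * lhs.tv_sec = 2147483000, rhs.tv_sec = 1000
     * 2147483000 + 1000 = 2147484000, which wraps to -2147483296
     * in a 32-bit time_t.  The wrapped sum is smaller than both
     * inputs, so the res.tv_sec < lhs.tv_sec check fires and the
     * result is clamped to TIME_T_MAX.
     */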
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b711ffcb106..a547be11cf9 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -300,7 +300,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 				goto out;
 			}
 
-			ts->idle_tick = ts->sched_timer.expires;
+			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
 			ts->tick_stopped = 1;
 			ts->idle_jiffies = last_jiffies;
 			rcu_enter_nohz();
@@ -431,21 +431,21 @@ void tick_nohz_restart_sched_tick(void)
 	ts->tick_stopped  = 0;
 	ts->idle_exittime = now;
 
 	hrtimer_cancel(&ts->sched_timer);
-	ts->sched_timer.expires = ts->idle_tick;
+	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
 
 	while (1) {
 		/* Forward the time to expire in the future */
 		hrtimer_forward(&ts->sched_timer, now, tick_period);
 
 		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-			hrtimer_start(&ts->sched_timer,
-				      ts->sched_timer.expires,
+			hrtimer_start_expires(&ts->sched_timer,
 				      HRTIMER_MODE_ABS);
 			/* Check, if the timer was already in the past */
 			if (hrtimer_active(&ts->sched_timer))
 				break;
 		} else {
-			if (!tick_program_event(ts->sched_timer.expires, 0))
+			if (!tick_program_event(
+				hrtimer_get_expires(&ts->sched_timer), 0))
 				break;
 		}
 		/* Update jiffies and reread time */
@@ -458,7 +458,7 @@ void tick_nohz_restart_sched_tick(void)
 static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
 {
 	hrtimer_forward(&ts->sched_timer, now, tick_period);
-	return tick_program_event(ts->sched_timer.expires, 0);
+	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
 }
 
 /*
@@ -541,7 +541,7 @@ static void tick_nohz_switch_to_nohz(void)
 	next = tick_init_jiffy_update();
 
 	for (;;) {
-		ts->sched_timer.expires = next;
+		hrtimer_set_expires(&ts->sched_timer, next);
 		if (!tick_program_event(next, 0))
 			break;
 		next = ktime_add(next, tick_period);
@@ -637,16 +637,15 @@ void tick_setup_sched_timer(void)
 	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
 	/* Get the next period (per cpu) */
-	ts->sched_timer.expires = tick_init_jiffy_update();
+	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 	offset = ktime_to_ns(tick_period) >> 1;
 	do_div(offset, num_possible_cpus());
 	offset *= smp_processor_id();
-	ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
+	hrtimer_add_expires_ns(&ts->sched_timer, offset);
 
 	for (;;) {
 		hrtimer_forward(&ts->sched_timer, now, tick_period);
-		hrtimer_start(&ts->sched_timer, ts->sched_timer.expires,
-			      HRTIMER_MODE_ABS);
+		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS);
 		/* Check, if the timer was already in the past */
 		if (hrtimer_active(&ts->sched_timer))
 			break;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a40e20fd000..122ee751d2d 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -65,9 +65,11 @@ print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
 	SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
 #endif
 	SEQ_printf(m, "\n");
-	SEQ_printf(m, " # expires at %Lu nsecs [in %Ld nsecs]\n",
-		(unsigned long long)ktime_to_ns(timer->expires),
-		(long long)(ktime_to_ns(timer->expires) - now));
+	SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
+		(unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)),
+		(unsigned long long)ktime_to_ns(hrtimer_get_expires(timer)),
+		(long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now),
+		(long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now));
 }
 
 static void
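With both ends of the expiry window printed, a /proc/timer_list entry now shows the slack directly; the width of the range is the timer's slack. An entry with 50 us of slack would read something like this (values are illustrative, derived from the format string above):

     # expires at 1500000000-1500050000 nsecs [in 40000 to 90000 nsecs]

A zero-slack timer, such as one owned by an rt task or started through plain hrtimer_start(), prints the same value twice, which makes coalescing candidates easy to spot.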