Diffstat (limited to 'kernel/rcutree_plugin.h')
 -rw-r--r--  kernel/rcutree_plugin.h | 223
 1 file changed, 44 insertions(+), 179 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3e4899459f3..7f3244c0df0 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -68,17 +68,21 @@ static void __init rcu_bootup_announce_oddness(void)
 	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
 #endif
 #if NUM_RCU_LVL_4 != 0
-	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
+	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
 #endif
+	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
+		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+	if (nr_cpu_ids != NR_CPUS)
+		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
 }
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
+struct rcu_state rcu_preempt_state =
+	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -233,18 +237,6 @@ static void rcu_preempt_note_context_switch(int cpu)
 }
 
 /*
- * Tree-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Check for preempted RCU readers blocking the current grace period
  * for the specified rcu_node structure.  If the caller needs a reliable
  * answer, it must hold the rcu_node's ->lock.
@@ -310,7 +302,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -398,8 +390,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 							 rnp->grphi,
 							 !!rnp->gp_tasks);
 			rcu_report_unblock_qs_rnp(rnp, flags);
-		} else
+		} else {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		}
 
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
@@ -418,38 +411,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-/*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		barrier();  /* critical section before exit code. */
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier();  /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
 
 /*
@@ -540,16 +501,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 }
 
 /*
- * Suppress preemptible RCU's CPU stall warnings by pushing the
- * time of the next stall-warning message comfortably far into the
- * future.
- */
-static void rcu_preempt_stall_reset(void)
-{
-	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-}
-
-/*
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
  * period that still has RCU readers blocked!  This function must be
@@ -650,14 +601,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Do CPU-offline processing for preemptible RCU.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-	rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
-}
-
-/*
  * Check for a quiescent state from the current CPU.  When a task blocks,
  * the task is recorded in the corresponding CPU's rcu_node structure,
  * which is checked elsewhere.
@@ -677,15 +620,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
-/*
- * Process callbacks for preemptible RCU.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-	__rcu_process_callbacks(&rcu_preempt_state,
-				&__get_cpu_var(rcu_preempt_data));
-}
-
 #ifdef CONFIG_RCU_BOOST
 
 static void rcu_preempt_do_callbacks(void)
@@ -824,9 +758,9 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 	int must_wait = 0;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	if (list_empty(&rnp->blkd_tasks))
+	if (list_empty(&rnp->blkd_tasks)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	else {
+	} else {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
 		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
 		must_wait = 1;
@@ -870,9 +804,9 @@ void synchronize_rcu_expedited(void)
 	 * expedited grace period for us, just leave.
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
-		if (trycount++ < 10)
+		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
-		else {
+		} else {
 			synchronize_rcu();
 			return;
 		}
@@ -917,51 +851,16 @@ mb_ret:
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
-/*
- * Check to see if there is any immediate preemptible-RCU-related work
- * to be done.
- */
-static int rcu_preempt_pending(int cpu)
-{
-	return __rcu_pending(&rcu_preempt_state,
-			     &per_cpu(rcu_preempt_data, cpu));
-}
-
-/*
- * Does preemptible RCU have callbacks on this CPU?
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
-}
-
 /**
  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
  */
 void rcu_barrier(void)
 {
-	_rcu_barrier(&rcu_preempt_state, call_rcu);
+	_rcu_barrier(&rcu_preempt_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Initialize preemptible RCU's per-CPU data.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
-}
-
-/*
- * Move preemptible RCU's callbacks from dying CPU to other online CPU
- * and record a quiescent state.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-	rcu_cleanup_dying_cpu(&rcu_preempt_state);
-}
-
-/*
  * Initialize preemptible RCU's state structures.
  */
 static void __init __rcu_init_preempt(void)
@@ -1046,14 +945,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 }
 
 /*
- * Because preemptible RCU does not exist, there is no need to suppress
- * its CPU stall warnings.
- */
-static void rcu_preempt_stall_reset(void)
-{
-}
-
-/*
  * Because there is no preemptible RCU, there can be no readers blocked,
  * so there is no need to check for blocked tasks.  So check only for
  * bogus qsmask values.
@@ -1081,14 +972,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptible RCU does not exist, it never needs CPU-offline
- * processing.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-}
-
-/*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
  */
@@ -1097,14 +980,6 @@ static void rcu_preempt_check_callbacks(int cpu)
 }
 
 /*
- * Because preemptible RCU does not exist, it never has any callbacks
- * to process.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-}
-
-/*
  * Queue an RCU callback for lazy invocation after a grace period.
  * This will likely be later named something like "call_rcu_lazy()",
  * but this change will require some way of tagging the lazy RCU
@@ -1145,22 +1020,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptible RCU does not exist, it never has any work to do.
- */
-static int rcu_preempt_pending(int cpu)
-{
-	return 0;
-}
-
-/*
- * Because preemptible RCU does not exist, it never has callbacks
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
-	return 0;
-}
-
-/*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
  */
@@ -1171,21 +1030,6 @@ void rcu_barrier(void)
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Because preemptible RCU does not exist, there is no per-CPU
- * data to initialize.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-}
-
-/*
- * Because there is no preemptible RCU, there is no cleanup to do.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-}
-
-/*
  * Because preemptible RCU does not exist, it need not be initialized.
  */
 static void __init __rcu_init_preempt(void)
@@ -1968,9 +1812,11 @@ static void rcu_idle_count_callbacks_posted(void)
  */
 #define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
 #define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
-#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
+#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
 
+extern int tick_nohz_enabled;
+
 /*
  * Does the specified flavor of RCU have non-lazy callbacks pending on
  * the specified CPU?  Both RCU flavor and CPU are specified by the
@@ -2047,10 +1893,13 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 		return 1;
 	}
 	/* Set up for the possibility that RCU will post a timer. */
-	if (rcu_cpu_has_nonlazy_callbacks(cpu))
-		*delta_jiffies = RCU_IDLE_GP_DELAY;
-	else
-		*delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+		*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
+					  RCU_IDLE_GP_DELAY) - jiffies;
+	} else {
+		*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
+		*delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
+	}
 	return 0;
 }
 
@@ -2109,6 +1958,7 @@ static void rcu_cleanup_after_idle(int cpu)
 
 	del_timer(&rdtp->idle_gp_timer);
 	trace_rcu_prep_idle("Cleanup after idle");
+	rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
 }
 
 /*
@@ -2134,6 +1984,18 @@ static void rcu_prepare_for_idle(int cpu)
 {
 	struct timer_list *tp;
 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	int tne;
+
+	/* Handle nohz enablement switches conservatively. */
+	tne = ACCESS_ONCE(tick_nohz_enabled);
+	if (tne != rdtp->tick_nohz_enabled_snap) {
+		if (rcu_cpu_has_callbacks(cpu))
+			invoke_rcu_core(); /* force nohz to see update. */
+		rdtp->tick_nohz_enabled_snap = tne;
+		return;
+	}
+	if (!tne)
+		return;
 
 	/*
 	 * If this is an idle re-entry, for example, due to use of
@@ -2187,10 +2049,11 @@ static void rcu_prepare_for_idle(int cpu)
 		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
 			trace_rcu_prep_idle("Dyntick with callbacks");
 			rdtp->idle_gp_timer_expires =
-					   jiffies + RCU_IDLE_GP_DELAY;
+				round_up(jiffies + RCU_IDLE_GP_DELAY,
+					 RCU_IDLE_GP_DELAY);
 		} else {
 			rdtp->idle_gp_timer_expires =
-					   jiffies + RCU_IDLE_LAZY_GP_DELAY;
+				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
 			trace_rcu_prep_idle("Dyntick with lazy callbacks");
 		}
 		tp = &rdtp->idle_gp_timer;
@@ -2231,8 +2094,9 @@ static void rcu_prepare_for_idle(int cpu)
 	if (rcu_cpu_has_callbacks(cpu)) {
 		trace_rcu_prep_idle("More callbacks");
 		invoke_rcu_core();
-	} else
+	} else {
 		trace_rcu_prep_idle("Callbacks drained");
+	}
 }
 
 /*
@@ -2269,6 +2133,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
+	*cp = '\0';
 }
 
 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
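The expiry computations in the rcu_needs_cpu() and rcu_prepare_for_idle() hunks above batch idle-timer wakeups onto shared boundaries: non-lazy expirations are aligned to multiples of RCU_IDLE_GP_DELAY via round_up(), and lazy ones to whole seconds via round_jiffies(), so idle CPUs tend to wake together rather than at scattered times. The following standalone userspace sketch (not kernel code; round_up_pow2() is a hypothetical stand-in for the kernel's round_up() on power-of-two alignments) illustrates the non-lazy arithmetic:

#include <stdio.h>

#define RCU_IDLE_GP_DELAY 4	/* Matches the value set by this patch. */

/* Stand-in for the kernel's round_up(); align must be a power of 2. */
static unsigned long round_up_pow2(unsigned long x, unsigned long align)
{
	return ((x - 1) | (align - 1)) + 1;
}

int main(void)
{
	unsigned long jiffies;

	/*
	 * Whatever the starting jiffies value, the computed expiry lands on
	 * a multiple of RCU_IDLE_GP_DELAY, so the returned delta stays
	 * between RCU_IDLE_GP_DELAY and 2 * RCU_IDLE_GP_DELAY - 1 and idle
	 * CPUs tend to wake on the same boundary.
	 */
	for (jiffies = 1000; jiffies < 1008; jiffies++) {
		unsigned long delta = round_up_pow2(jiffies + RCU_IDLE_GP_DELAY,
						    RCU_IDLE_GP_DELAY) - jiffies;
		printf("jiffies=%lu -> delta=%lu (expires at %lu)\n",
		       jiffies, delta, jiffies + delta);
	}
	return 0;
}

The aligned expiry trades a slightly longer worst-case delay (up to 2 * RCU_IDLE_GP_DELAY - 1 jiffies instead of a fixed RCU_IDLE_GP_DELAY) for fewer distinct wakeups across CPUs.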