diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-22 18:27:32 -0700 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-22 18:27:32 -0700 | 
| commit | d79ee93de909dfb252279b9a95978bbda9a814a9 (patch) | |
| tree | bfccca60fd36259ff4bcc5e78a2c272fbd680065 /include/linux/topology.h | |
| parent | 2ff2b289a695807e291e1ed9f639d8a3ba5f4254 (diff) | |
| parent | 1c2927f18576d65631d8e0ddd19e1d023183222e (diff) | |
| download | olio-linux-3.10-d79ee93de909dfb252279b9a95978bbda9a814a9.tar.xz olio-linux-3.10-d79ee93de909dfb252279b9a95978bbda9a814a9.zip  | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "The biggest change is the cleanup/simplification of the load-balancer:
  instead of the current practice of architectures twiddling scheduler
  internal data structures and providing the scheduler domains in
  colorfully inconsistent ways, we now have generic scheduler code in
  kernel/sched/core.c:sched_init_numa() that looks at the architecture's
  node_distance() parameters and (while not fully trusting it) deduces a
  NUMA topology from it.
  This inevitably changes balancing behavior - hopefully for the better.
  There are various smaller optimizations, cleanups and fixlets as well"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Taint kernel with TAINT_WARN after sleep-in-atomic bug
  sched: Remove stale power aware scheduling remnants and dysfunctional knobs
  sched/debug: Fix printing large integers on 32-bit platforms
  sched/fair: Improve the ->group_imb logic
  sched/nohz: Fix rq->cpu_load[] calculations
  sched/numa: Don't scale the imbalance
  sched/fair: Revert sched-domain iteration breakage
  sched/x86: Rewrite set_cpu_sibling_map()
  sched/numa: Fix the new NUMA topology bits
  sched/numa: Rewrite the CONFIG_NUMA sched domain support
  sched/fair: Propagate 'struct lb_env' usage into find_busiest_group
  sched/fair: Add some serialization to the sched_domain load-balance walk
  sched/fair: Let minimally loaded cpu balance the group
  sched: Change rq->nr_running to unsigned int
  x86/numa: Check for nonsensical topologies on real hw as well
  x86/numa: Hard partition cpu topology masks on node boundaries
  x86/numa: Allow specifying node_distance() for numa=fake
  x86/sched: Make mwait_usable() heed to "idle=" kernel parameters properly
  sched: Update documentation and comments
  sched_rt: Avoid unnecessary dequeue and enqueue of pushable tasks in set_cpus_allowed_rt()
Diffstat (limited to 'include/linux/topology.h')
| -rw-r--r-- | include/linux/topology.h | 42 | 
1 file changed, 0 insertions, 42 deletions
diff --git a/include/linux/topology.h b/include/linux/topology.h index 9dc427cdb6f..e91cd43394d 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -70,7 +70,6 @@ int arch_update_cpu_topology(void);   * Below are the 3 major initializers used in building sched_domains:   * SD_SIBLING_INIT, for SMT domains   * SD_CPU_INIT, for SMP domains - * SD_NODE_INIT, for NUMA domains   *   * Any architecture that cares to do any tuning to these values should do so   * by defining their own arch-specific initializer in include/asm/topology.h. @@ -99,7 +98,6 @@ int arch_update_cpu_topology(void);  				| 0*SD_BALANCE_WAKE			\  				| 1*SD_WAKE_AFFINE			\  				| 1*SD_SHARE_CPUPOWER			\ -				| 0*SD_POWERSAVINGS_BALANCE		\  				| 1*SD_SHARE_PKG_RESOURCES		\  				| 0*SD_SERIALIZE			\  				| 0*SD_PREFER_SIBLING			\ @@ -135,8 +133,6 @@ int arch_update_cpu_topology(void);  				| 0*SD_SHARE_CPUPOWER			\  				| 1*SD_SHARE_PKG_RESOURCES		\  				| 0*SD_SERIALIZE			\ -				| sd_balance_for_mc_power()		\ -				| sd_power_saving_flags()		\  				,					\  	.last_balance		= jiffies,				\  	.balance_interval	= 1,					\ @@ -168,56 +164,18 @@ int arch_update_cpu_topology(void);  				| 0*SD_SHARE_CPUPOWER			\  				| 0*SD_SHARE_PKG_RESOURCES		\  				| 0*SD_SERIALIZE			\ -				| sd_balance_for_package_power()	\ -				| sd_power_saving_flags()		\  				,					\  	.last_balance		= jiffies,				\  	.balance_interval	= 1,					\  }  #endif -/* sched_domains SD_ALLNODES_INIT for NUMA machines */ -#define SD_ALLNODES_INIT (struct sched_domain) {			\ -	.min_interval		= 64,					\ -	.max_interval		= 64*num_online_cpus(),			\ -	.busy_factor		= 128,					\ -	.imbalance_pct		= 133,					\ -	.cache_nice_tries	= 1,					\ -	.busy_idx		= 3,					\ -	.idle_idx		= 3,					\ -	.flags			= 1*SD_LOAD_BALANCE			\ -				| 1*SD_BALANCE_NEWIDLE			\ -				| 0*SD_BALANCE_EXEC			\ -				| 0*SD_BALANCE_FORK			\ -				| 0*SD_BALANCE_WAKE			\ -				| 0*SD_WAKE_AFFINE			\ -				| 0*SD_SHARE_CPUPOWER			\ -				| 
0*SD_POWERSAVINGS_BALANCE		\ -				| 0*SD_SHARE_PKG_RESOURCES		\ -				| 1*SD_SERIALIZE			\ -				| 0*SD_PREFER_SIBLING			\ -				,					\ -	.last_balance		= jiffies,				\ -	.balance_interval	= 64,					\ -} - -#ifndef SD_NODES_PER_DOMAIN -#define SD_NODES_PER_DOMAIN 16 -#endif -  #ifdef CONFIG_SCHED_BOOK  #ifndef SD_BOOK_INIT  #error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!!  #endif  #endif /* CONFIG_SCHED_BOOK */ -#ifdef CONFIG_NUMA -#ifndef SD_NODE_INIT -#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! -#endif - -#endif /* CONFIG_NUMA */ -  #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID  DECLARE_PER_CPU(int, numa_node);  |