 arch/i386/kernel/nmi.c |  2 +-
 block/ll_rw_blk.c      |  2 +-
 drivers/scsi/scsi.c    |  2 +-
 fs/file.c              |  3 +--
 kernel/sched.c         |  2 +-
 mm/page_alloc.c        | 10 ++++++----
 net/core/dev.c         |  2 +-
 net/core/utils.c       |  4 ++--
 net/ipv4/proc.c        |  2 +-
 net/ipv6/proc.c        |  2 +-
 net/socket.c           |  2 +-
 11 files changed, 17 insertions(+), 16 deletions(-)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d661703ac1c..63f39a7e2c9 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index f9fc07efd2d..e5aad831458 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3453,7 +3453,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 245ca99a641..c551bb84dbf 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1245,7 +1245,7 @@ static int __init init_scsi(void)
 	if (error)
 		goto cleanup_sysctl;
 
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
 
 	devfs_mk_dir("scsi");
diff --git a/fs/file.c b/fs/file.c
index fd066b261c7..cea7cbea11d 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -379,7 +379,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 void __init files_defer_init(void)
 {
 	int i;
-	/* Really early - can't use for_each_cpu */
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		fdtable_defer_list_init(i);
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index f77f23f8f47..839466fdfb4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6109,7 +6109,7 @@ void __init sched_init(void)
 	runqueue_t *rq;
 	int i, j, k;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		prio_array_t *array;
 
 		rq = cpu_rq(i);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44b4eb4202d..dde04ff4be3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
 	int cpu = 0;
 
-	memset(ret, 0, sizeof(*ret));
+	memset(ret, 0, nr * sizeof(unsigned long));
 	cpus_and(*cpumask, *cpumask, cpu_online_map);
 
 	cpu = first_cpu(*cpumask);
 	while (cpu < NR_CPUS) {
 		unsigned long *in, *out, off;
 
+		if (!cpu_isset(cpu, *cpumask))
+			continue;
+
 		in = (unsigned long *)&per_cpu(page_states, cpu);
 
 		cpu = next_cpu(cpu, *cpumask);
 
-		if (cpu < NR_CPUS)
+		if (likely(cpu < NR_CPUS))
 			prefetch(&per_cpu(page_states, cpu));
 
 		out = (unsigned long *)ret;
@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
  * not check if the processor is online before following the pageset pointer.
  * Other parts of the kernel may not check if the zone is available.
  */
-static struct per_cpu_pageset
-	boot_pageset[NR_CPUS];
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
 
 /*
  * Dynamically allocate memory for the
diff --git a/net/core/dev.c b/net/core/dev.c
index ffb82073056..2afb0de9532 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3237,7 +3237,7 @@ static int __init net_dev_init(void)
 	 *	Initialise the packet receive queues.
 	 */
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct softnet_data *queue;
 
 		queue = &per_cpu(softnet_data, i);
diff --git a/net/core/utils.c b/net/core/utils.c
index ac1d1fcf867..fdc4f38bc46 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, i+jiffies);
 	}
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
 	unsigned long seed[NR_CPUS];
 
 	get_random_bytes(seed, sizeof(seed));
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct nrnd_state *state = &per_cpu(net_rand_state,i);
 		__net_srandom(state, seed[i]);
 	}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 39d49dc333a..1b167c4bb3b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 50a13e75d70..4238b1ed886 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
 	int res = 0;
 	int cpu;
 
-	for (cpu=0; cpu<NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		res += proto->stats[cpu].inuse;
 
 	return res;
diff --git a/net/socket.c b/net/socket.c
index b38a263853c..a00851f981d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
 	int cpu;
 	int counter = 0;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
+	for_each_cpu(cpu)
 		counter += per_cpu(sockets_in_use, cpu);
 
 	/* It can be negative, by the way. 8) */
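
Every hunk above makes the same substitution: a raw for (i = 0; i < NR_CPUS; i++) walk over every per-CPU slot becomes for_each_cpu(i), which in kernels of this era iterated only over the CPUs present in the possible-CPU map (the helper was later renamed for_each_possible_cpu). The fs/file.c hunk also drops the comment that previously claimed the helper could not be used that early in boot. As a rough user-space illustration only, not the kernel's cpumask API, the sketch below shows the difference between the two loop styles; the possible_mask bitmask and the for_each_cpu_sketch macro are made up for this example.

#include <stdio.h>

#define NR_CPUS 8	/* compile-time maximum; often far larger than the real machine */

/* Hypothetical stand-in for the kernel's possible-CPU map (not a real kernel symbol). */
static unsigned long possible_mask = 0x3;	/* pretend only CPUs 0 and 1 can exist */

/* Rough stand-in for for_each_cpu(): skip CPU numbers not set in the mask. */
#define for_each_cpu_sketch(cpu) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (!(possible_mask & (1UL << (cpu)))) continue; else

int main(void)
{
	int cpu;

	/* Old style: touches all NR_CPUS slots, even CPUs that can never exist. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("NR_CPUS loop visits cpu %d\n", cpu);

	/* New style: only CPUs present in the possible mask are visited. */
	for_each_cpu_sketch(cpu)
		printf("for_each_cpu-style loop visits cpu %d\n", cpu);

	return 0;
}

The apparent payoff in the patch is the same idea at kernel scale: per-CPU initialisation and statistics folding no longer touch per_cpu() data for CPU numbers that can never come online, which matters when NR_CPUS is configured much larger than the actual machine.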