mirror of
https://github.com/torvalds/linux.git
synced 2024-11-25 05:32:00 +00:00
[PATCH] percpu data: only iterate over possible CPUs
percpu_data blindly allocates bootmem memory to store NR_CPUS instances of cpudata, instead of allocating memory only for possible cpus. As a preparation for changing that, we need to convert various 0 -> NR_CPUS loops to use for_each_cpu(). (The above only applies to users of asm-generic/percpu.h. powerpc has gone it alone and is presently only allocating memory for present CPUs, so it's currently corrupting memory). Signed-off-by: Eric Dumazet <dada1@cosmosbay.com> Cc: "David S. Miller" <davem@davemloft.net> Cc: James Bottomley <James.Bottomley@steeleye.com> Acked-by: Ingo Molnar <mingo@elte.hu> Cc: Jens Axboe <axboe@suse.de> Cc: Anton Blanchard <anton@samba.org> Acked-by: William Irwin <wli@holomorphy.com> Cc: Andi Kleen <ak@muc.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
cef5076987
commit
88a2a4ac6b
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
|
||||
if (nmi_watchdog == NMI_LOCAL_APIC)
|
||||
smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
|
||||
|
||||
for (cpu = 0; cpu < NR_CPUS; cpu++)
|
||||
for_each_cpu(cpu)
|
||||
prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
|
||||
local_irq_enable();
|
||||
mdelay((10*1000)/nmi_hz); // wait 10 ticks
|
||||
|
@@ -3453,7 +3453,7 @@ int __init blk_dev_init(void)
|
||||
iocontext_cachep = kmem_cache_create("blkdev_ioc",
|
||||
sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
|
||||
|
||||
for (i = 0; i < NR_CPUS; i++)
|
||||
for_each_cpu(i)
|
||||
INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
|
||||
|
||||
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
|
||||
|
@@ -1245,7 +1245,7 @@ static int __init init_scsi(void)
|
||||
if (error)
|
||||
goto cleanup_sysctl;
|
||||
|
||||
for (i = 0; i < NR_CPUS; i++)
|
||||
for_each_cpu(i)
|
||||
INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
|
||||
|
||||
devfs_mk_dir("scsi");
|
||||
|
@@ -379,7 +379,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
|
||||
void __init files_defer_init(void)
|
||||
{
|
||||
int i;
|
||||
/* Really early - can't use for_each_cpu */
|
||||
for (i = 0; i < NR_CPUS; i++)
|
||||
for_each_cpu(i)
|
||||
fdtable_defer_list_init(i);
|
||||
}
|
||||
|
@@ -6109,7 +6109,7 @@ void __init sched_init(void)
|
||||
runqueue_t *rq;
|
||||
int i, j, k;
|
||||
|
||||
for (i = 0; i < NR_CPUS; i++) {
|
||||
for_each_cpu(i) {
|
||||
prio_array_t *array;
|
||||
|
||||
rq = cpu_rq(i);
|
||||
|
@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
|
||||
{
|
||||
int cpu = 0;
|
||||
|
||||
memset(ret, 0, sizeof(*ret));
|
||||
memset(ret, 0, nr * sizeof(unsigned long));
|
||||
cpus_and(*cpumask, *cpumask, cpu_online_map);
|
||||
|
||||
cpu = first_cpu(*cpumask);
|
||||
while (cpu < NR_CPUS) {
|
||||
unsigned long *in, *out, off;
|
||||
|
||||
if (!cpu_isset(cpu, *cpumask))
|
||||
continue;
|
||||
|
||||
in = (unsigned long *)&per_cpu(page_states, cpu);
|
||||
|
||||
cpu = next_cpu(cpu, *cpumask);
|
||||
|
||||
if (cpu < NR_CPUS)
|
||||
if (likely(cpu < NR_CPUS))
|
||||
prefetch(&per_cpu(page_states, cpu));
|
||||
|
||||
out = (unsigned long *)ret;
|
||||
@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
|
||||
* not check if the processor is online before following the pageset pointer.
|
||||
* Other parts of the kernel may not check if the zone is available.
|
||||
*/
|
||||
static struct per_cpu_pageset
|
||||
boot_pageset[NR_CPUS];
|
||||
static struct per_cpu_pageset boot_pageset[NR_CPUS];
|
||||
|
||||
/*
|
||||
* Dynamically allocate memory for the
|
||||
|
@@ -3237,7 +3237,7 @@ static int __init net_dev_init(void)
|
||||
* Initialise the packet receive queues.
|
||||
*/
|
||||
|
||||
for (i = 0; i < NR_CPUS; i++) {
|
||||
for_each_cpu(i) {
|
||||
struct softnet_data *queue;
|
||||
|
||||
queue = &per_cpu(softnet_data, i);
|
||||
|
@@ -121,7 +121,7 @@ void __init net_random_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NR_CPUS; i++) {
|
||||
for_each_cpu(i) {
|
||||
struct nrnd_state *state = &per_cpu(net_rand_state,i);
|
||||
__net_srandom(state, i+jiffies);
|
||||
}
|
||||
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
|
||||
unsigned long seed[NR_CPUS];
|
||||
|
||||
get_random_bytes(seed, sizeof(seed));
|
||||
for (i = 0; i < NR_CPUS; i++) {
|
||||
for_each_cpu(i) {
|
||||
struct nrnd_state *state = &per_cpu(net_rand_state,i);
|
||||
__net_srandom(state, seed[i]);
|
||||
}
|
||||
|
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
|
||||
int res = 0;
|
||||
int cpu;
|
||||
|
||||
for (cpu = 0; cpu < NR_CPUS; cpu++)
|
||||
for_each_cpu(cpu)
|
||||
res += proto->stats[cpu].inuse;
|
||||
|
||||
return res;
|
||||
|
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
|
||||
int res = 0;
|
||||
int cpu;
|
||||
|
||||
for (cpu=0; cpu<NR_CPUS; cpu++)
|
||||
for_each_cpu(cpu)
|
||||
res += proto->stats[cpu].inuse;
|
||||
|
||||
return res;
|
||||
|
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
|
||||
int cpu;
|
||||
int counter = 0;
|
||||
|
||||
for (cpu = 0; cpu < NR_CPUS; cpu++)
|
||||
for_each_cpu(cpu)
|
||||
counter += per_cpu(sockets_in_use, cpu);
|
||||
|
||||
/* It can be negative, by the way. 8) */
|
||||
|
Loading…
Reference in New Issue
Block a user