Merge branch 'linus' into cpus4096
author		Ingo Molnar <mingo@elte.hu>
		Tue, 15 Jul 2008 22:29:07 +0000 (00:29 +0200)
committer	Ingo Molnar <mingo@elte.hu>
		Tue, 15 Jul 2008 22:29:07 +0000 (00:29 +0200)
Conflicts:

	arch/x86/xen/smp.c
	kernel/sched_rt.c
	net/iucv/iucv.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
34 files changed:
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
arch/x86/kernel/io_apic_64.c
arch/x86/kernel/smpboot.c
arch/x86/xen/smp.c
drivers/acpi/processor_throttling.c
drivers/base/cpu.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/infiniband/hw/ehca/ehca_irq.c
include/asm-x86/ipi.h
include/linux/cpumask.h
kernel/cpu.c
kernel/rcuclassic.c
kernel/rcupreempt.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_rt.c
kernel/taskstats.c
kernel/time/clocksource.c
kernel/time/tick-broadcast.c
kernel/workqueue.c
lib/cpumask.c
mm/allocpercpu.c
mm/vmstat.c
net/core/dev.c
net/iucv/iucv.c
net/sunrpc/svc.c

arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index b0c8208df9fa17363b5a3ed5ce35bdec8c52ad25..dd097b835839dd73d4139d807c23e8fca1648344 100644 (file)
@@ -202,7 +202,7 @@ static void drv_write(struct drv_cmd *cmd)
        cpumask_t saved_mask = current->cpus_allowed;
        unsigned int i;
 
-       for_each_cpu_mask(i, cmd->mask) {
+       for_each_cpu_mask_nr(i, cmd->mask) {
                set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
                do_drv_write(cmd);
        }
@@ -451,7 +451,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
        freqs.old = perf->states[perf->state].core_frequency * 1000;
        freqs.new = data->freq_table[next_state].frequency;
-       for_each_cpu_mask(i, cmd.mask) {
+       for_each_cpu_mask_nr(i, cmd.mask) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -466,7 +466,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                }
        }
 
-       for_each_cpu_mask(i, cmd.mask) {
+       for_each_cpu_mask_nr(i, cmd.mask) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 199e4e05e5dc1c7cb82cbe417864377cb399d2d8..f1685fb91fbd313058b92af93ee055af160a2447 100644 (file)
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
                return 0;
 
        /* notifiers */
-       for_each_cpu_mask(i, policy->cpus) {
+       for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
        /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
         * Developer's Manual, Volume 3
         */
-       for_each_cpu_mask(i, policy->cpus)
+       for_each_cpu_mask_nr(i, policy->cpus)
                cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
        /* notifiers */
-       for_each_cpu_mask(i, policy->cpus) {
+       for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 206791eb46e3da04f61f70c2af2eb55d384bd606..c45ca6d4dce101fbe7e5044420410edab5ad8b42 100644 (file)
@@ -966,7 +966,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
        freqs.old = find_khz_freq_from_fid(data->currfid);
        freqs.new = find_khz_freq_from_fid(fid);
 
-       for_each_cpu_mask(i, *(data->available_cores)) {
+       for_each_cpu_mask_nr(i, *(data->available_cores)) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -974,7 +974,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
        res = transition_fid_vid(data, fid, vid);
        freqs.new = find_khz_freq_from_fid(data->currfid);
 
-       for_each_cpu_mask(i, *(data->available_cores)) {
+       for_each_cpu_mask_nr(i, *(data->available_cores)) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
@@ -997,7 +997,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
        freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
        freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-       for_each_cpu_mask(i, *(data->available_cores)) {
+       for_each_cpu_mask_nr(i, *(data->available_cores)) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -1005,7 +1005,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
        res = transition_pstate(data, pstate);
        freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
 
-       for_each_cpu_mask(i, *(data->available_cores)) {
+       for_each_cpu_mask_nr(i, *(data->available_cores)) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 908dd347c67ec3dc29aa5ab8b3dcbae9a12e5e37..8b0dd6f2a1acd101aea9c9cca165ab67c3f0b19a 100644 (file)
@@ -476,7 +476,7 @@ static int centrino_target (struct cpufreq_policy *policy,
        saved_mask = current->cpus_allowed;
        first_cpu = 1;
        cpus_clear(covered_cpus);
-       for_each_cpu_mask(j, online_policy_cpus) {
+       for_each_cpu_mask_nr(j, online_policy_cpus) {
                /*
                 * Support for SMP systems.
                 * Make sure we are running on CPU that wants to change freq
@@ -517,7 +517,7 @@ static int centrino_target (struct cpufreq_policy *policy,
                        dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
                                target_freq, freqs.old, freqs.new, msr);
 
-                       for_each_cpu_mask(k, online_policy_cpus) {
+                       for_each_cpu_mask_nr(k, online_policy_cpus) {
                                freqs.cpu = k;
                                cpufreq_notify_transition(&freqs,
                                        CPUFREQ_PRECHANGE);
@@ -540,7 +540,7 @@ static int centrino_target (struct cpufreq_policy *policy,
                preempt_enable();
        }
 
-       for_each_cpu_mask(k, online_policy_cpus) {
+       for_each_cpu_mask_nr(k, online_policy_cpus) {
                freqs.cpu = k;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
@@ -554,7 +554,7 @@ static int centrino_target (struct cpufreq_policy *policy,
                 */
 
                if (!cpus_empty(covered_cpus)) {
-                       for_each_cpu_mask(j, covered_cpus) {
+                       for_each_cpu_mask_nr(j, covered_cpus) {
                                set_cpus_allowed_ptr(current,
                                                     &cpumask_of_cpu(j));
                                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
@@ -564,7 +564,7 @@ static int centrino_target (struct cpufreq_policy *policy,
                tmp = freqs.new;
                freqs.new = freqs.old;
                freqs.old = tmp;
-               for_each_cpu_mask(j, online_policy_cpus) {
+               for_each_cpu_mask_nr(j, online_policy_cpus) {
                        freqs.cpu = j;
                        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
                        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 1b50244b1fdfd892e016d015e922fc12130992e2..191f7263c61dce1b42864cd4709e249773339243 100644 (file)
@@ -279,7 +279,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 
        cpus_allowed = current->cpus_allowed;
 
-       for_each_cpu_mask(i, policy->cpus) {
+       for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }
@@ -292,7 +292,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
        /* allow to be run on all CPUs */
        set_cpus_allowed_ptr(current, &cpus_allowed);
 
-       for_each_cpu_mask(i, policy->cpus) {
+       for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
arch/x86/kernel/cpu/intel_cacheinfo.c
index 2c8afafa18e860ff98fafb243f2ba7964496c42d..a7b0f8f1736bd72e0ef73113d26adc2c46cdf6b1 100644 (file)
@@ -489,7 +489,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
        int sibling;
 
        this_leaf = CPUID4_INFO_IDX(cpu, index);
-       for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+       for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index); 
                cpu_clear(cpu, sibling_leaf->shared_cpu_map);
        }
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 7c9a813e11939b7e7fa583549d4c9affdde26476..88736cadbaa62d02e5e36671b06cfa512df7e683 100644 (file)
@@ -527,7 +527,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
        if (err)
                goto out_free;
 
-       for_each_cpu_mask(i, b->cpus) {
+       for_each_cpu_mask_nr(i, b->cpus) {
                if (i == cpu)
                        continue;
 
@@ -617,7 +617,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
        /* remove all sibling symlinks before unregistering */
-       for_each_cpu_mask(i, b->cpus) {
+       for_each_cpu_mask_nr(i, b->cpus) {
                if (i == cpu)
                        continue;
 
arch/x86/kernel/io_apic_64.c
index 6510cde36b3549149eabefa4aaf724c72e5a0959..bf27114773d5bea5042b1f04bcf530ff0e24eb92 100644 (file)
@@ -731,7 +731,7 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
                        return 0;
        }
 
-       for_each_cpu_mask(cpu, mask) {
+       for_each_cpu_mask_nr(cpu, mask) {
                cpumask_t domain, new_mask;
                int new_cpu;
                int vector, offset;
@@ -752,7 +752,7 @@ next:
                        continue;
                if (vector == IA32_SYSCALL_VECTOR)
                        goto next;
-               for_each_cpu_mask(new_cpu, new_mask)
+               for_each_cpu_mask_nr(new_cpu, new_mask)
                        if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                                goto next;
                /* Found one! */
@@ -762,7 +762,7 @@ next:
                        cfg->move_in_progress = 1;
                        cfg->old_domain = cfg->domain;
                }
-               for_each_cpu_mask(new_cpu, new_mask)
+               for_each_cpu_mask_nr(new_cpu, new_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
                cfg->vector = vector;
                cfg->domain = domain;
@@ -794,7 +794,7 @@ static void __clear_irq_vector(int irq)
 
        vector = cfg->vector;
        cpus_and(mask, cfg->domain, cpu_online_map);
-       for_each_cpu_mask(cpu, mask)
+       for_each_cpu_mask_nr(cpu, mask)
                per_cpu(vector_irq, cpu)[vector] = -1;
 
        cfg->vector = 0;
arch/x86/kernel/smpboot.c
index 687376ab07e82ece4ab1eeb609d738d76245d226..09b98cd6332cd8ef2a742de580eece1b2ba51952 100644 (file)
@@ -438,7 +438,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
        cpu_set(cpu, cpu_sibling_setup_map);
 
        if (smp_num_siblings > 1) {
-               for_each_cpu_mask(i, cpu_sibling_setup_map) {
+               for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
@@ -461,7 +461,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                return;
        }
 
-       for_each_cpu_mask(i, cpu_sibling_setup_map) {
+       for_each_cpu_mask_nr(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c->llc_shared_map);
@@ -1230,7 +1230,7 @@ static void remove_siblinginfo(int cpu)
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+       for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                 /*
                 * last thread sibling in this cpu core going down
@@ -1239,7 +1239,7 @@ static void remove_siblinginfo(int cpu)
                        cpu_data(sibling).booted_cores--;
        }
 
-       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+       for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
arch/x86/xen/smp.c
index 233156f39b7f39f605fdb2c0cbd266da60783e28..463adecc5cba4a26270dc0479b8068797dd716f4 100644 (file)
@@ -351,7 +351,7 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 
        cpus_and(mask, mask, cpu_online_map);
 
-       for_each_cpu_mask(cpu, mask)
+       for_each_cpu_mask_nr(cpu, mask)
                xen_send_IPI_one(cpu, vector);
 }
 
@@ -362,7 +362,7 @@ void xen_smp_send_call_function_ipi(cpumask_t mask)
        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
        /* Make sure other vcpus get a chance to run if they need to. */
-       for_each_cpu_mask(cpu, mask) {
+       for_each_cpu_mask_nr(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
                        break;
drivers/acpi/processor_throttling.c
index bb06738860c4113599ab185435834253d8af4afb..28509fbba6f9cc20a11af6ab0836f0a2213d2674 100644 (file)
@@ -1013,7 +1013,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
         * affected cpu in order to get one proper T-state.
         * The notifier event is THROTTLING_PRECHANGE.
         */
-       for_each_cpu_mask(i, online_throttling_cpus) {
+       for_each_cpu_mask_nr(i, online_throttling_cpus) {
                t_state.cpu = i;
                acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
                                                        &t_state);
@@ -1034,7 +1034,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                 * it is necessary to set T-state for every affected
                 * cpu.
                 */
-               for_each_cpu_mask(i, online_throttling_cpus) {
+               for_each_cpu_mask_nr(i, online_throttling_cpus) {
                        match_pr = processors[i];
                        /*
                         * If the pointer is invalid, we will report the
@@ -1068,7 +1068,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
         * affected cpu to update the T-states.
         * The notifier event is THROTTLING_POSTCHANGE
         */
-       for_each_cpu_mask(i, online_throttling_cpus) {
+       for_each_cpu_mask_nr(i, online_throttling_cpus) {
                t_state.cpu = i;
                acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
                                                        &t_state);
drivers/base/cpu.c
index e38dfed41d80b8e6dff45379ee1fdfb8e8d3c750..5000402ae09234267aa9112ef4fe36a7d36b0d4f 100644 (file)
@@ -119,14 +119,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf)   \
 {                                                                      \
        return print_cpus_map(buf, &cpu_##type##_map);                  \
 }                                                                      \
-struct sysdev_class_attribute attr_##type##_map =                      \
+static struct sysdev_class_attribute attr_##type##_map =               \
        _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
 
 print_cpus_func(online);
 print_cpus_func(possible);
 print_cpus_func(present);
 
-struct sysdev_class_attribute *cpu_state_attr[] = {
+static struct sysdev_class_attribute *cpu_state_attr[] = {
        &attr_online_map,
        &attr_possible_map,
        &attr_present_map,
drivers/cpufreq/cpufreq.c
index 1d41496ed2f83b648e1311ee8fecccbd56ad741d..d8f8b1e0edde129f6511b051ee2565b38d86bd75 100644 (file)
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
        ssize_t i = 0;
        unsigned int cpu;
 
-       for_each_cpu_mask(cpu, mask) {
+       for_each_cpu_mask_nr(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        }
 #endif
 
-       for_each_cpu_mask(j, policy->cpus) {
+       for_each_cpu_mask_nr(j, policy->cpus) {
                if (cpu == j)
                        continue;
 
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        }
 
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_cpu_mask(j, policy->cpus) {
+       for_each_cpu_mask_nr(j, policy->cpus) {
                cpufreq_cpu_data[j] = policy;
                per_cpu(policy_cpu, j) = policy->cpu;
        }
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        /* symlink affected CPUs */
-       for_each_cpu_mask(j, policy->cpus) {
+       for_each_cpu_mask_nr(j, policy->cpus) {
                if (j == cpu)
                        continue;
                if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 err_out_unregister:
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-       for_each_cpu_mask(j, policy->cpus)
+       for_each_cpu_mask_nr(j, policy->cpus)
                cpufreq_cpu_data[j] = NULL;
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
         * links afterwards.
         */
        if (unlikely(cpus_weight(data->cpus) > 1)) {
-               for_each_cpu_mask(j, data->cpus) {
+               for_each_cpu_mask_nr(j, data->cpus) {
                        if (j == cpu)
                                continue;
                        cpufreq_cpu_data[j] = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        if (unlikely(cpus_weight(data->cpus) > 1)) {
-               for_each_cpu_mask(j, data->cpus) {
+               for_each_cpu_mask_nr(j, data->cpus) {
                        if (j == cpu)
                                continue;
                        dprintk("removing link for cpu %u\n", j);
drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2c38aaf18d443b508511206bfac0c..fe565ee437575731124f3488a1962602bec51ee2 100644 (file)
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        return rc;
                }
 
-               for_each_cpu_mask(j, policy->cpus) {
+               for_each_cpu_mask_nr(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;
drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda3827919d218d9dc592aeb8ca0fd8f03..33855cb3cf16f1ecd30528f35343c2c029959b1e 100644 (file)
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
        /* Get Idle Time */
        idle_ticks = UINT_MAX;
-       for_each_cpu_mask(j, policy->cpus) {
+       for_each_cpu_mask_nr(j, policy->cpus) {
                cputime64_t total_idle_ticks;
                unsigned int tmp_idle_ticks;
                struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        return rc;
                }
 
-               for_each_cpu_mask(j, policy->cpus) {
+               for_each_cpu_mask_nr(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;
drivers/infiniband/hw/ehca/ehca_irq.c
index 0792d930c481d508fb4ea87e6161795bcdcb5cd4..7a64aa9b51b6e6bf3866ac72fba4899bb9fcd348 100644 (file)
@@ -646,8 +646,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
                ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
 
        spin_lock_irqsave(&pool->last_cpu_lock, flags);
-       cpu = next_cpu(pool->last_cpu, cpu_online_map);
-       if (cpu == NR_CPUS)
+       cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+       if (cpu >= nr_cpu_ids)
                cpu = first_cpu(cpu_online_map);
        pool->last_cpu = cpu;
        spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
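
Note that next_cpu_nr() returns nr_cpu_ids, not NR_CPUS, when it runs past the last set bit, so the end-of-mask test above compares against nr_cpu_ids. A minimal sketch of the same round-robin idiom (illustrative only, not part of this commit; next_online_rr() is a hypothetical helper):

	#include <linux/cpumask.h>

	/* Pick the online cpu after 'last', wrapping to the first one. */
	static int next_online_rr(int last)
	{
		int cpu = next_cpu_nr(last, cpu_online_map);

		if (cpu >= nr_cpu_ids)		/* ran past the last set bit */
			cpu = first_cpu(cpu_online_map);
		return cpu;
	}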
include/asm-x86/ipi.h
index 196d63c28aa44aa3365977da4b729d59444fcb69..bb1c09f7a76ced4cd028d631d79a13d0171e8c2c 100644 (file)
@@ -122,7 +122,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
         * - mbligh
         */
        local_irq_save(flags);
-       for_each_cpu_mask(query_cpu, mask) {
+       for_each_cpu_mask_nr(query_cpu, mask) {
                __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
                                      vector, APIC_DEST_PHYSICAL);
        }
include/linux/cpumask.h
index c24875bd9c5b0ef1b6347f61a87716c5e8673914..80226e776143242778708cfa682a2c0d58b38d3a 100644 (file)
  * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
  * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
  *
+ * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+ * Note: The alternate operations with the suffix "_nr" are used
+ *       to limit the range of the loop to nr_cpu_ids instead of
+ *       NR_CPUS when NR_CPUS > 64 for performance reasons.
+ *       If NR_CPUS is <= 64 then most assembler bitmask
+ *       operators execute faster with a constant range, so
+ *       the operator will continue to use NR_CPUS.
+ *
+ *       Another consideration is that nr_cpu_ids is initialized
+ *       to NR_CPUS and isn't lowered until the possible cpus are
+ *       discovered (including any disabled cpus).  So early uses
+ *       will span the entire range of NR_CPUS.
+ * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
+ *
  * The available cpumask operations are:
  *
  * void cpu_set(cpu, mask)             turn on bit 'cpu' in mask
 * int cpus_empty(mask)                        Is mask empty (no bits set)?
 * int cpus_full(mask)                 Is mask full (all bits set)?
 * int cpus_weight(mask)               Hamming weight - number of set bits
+ * int cpus_weight_nr(mask)            Same using nr_cpu_ids instead of NR_CPUS
  *
  * void cpus_shift_right(dst, src, n)  Shift right
  * void cpus_shift_left(dst, src, n)   Shift left
  *
  * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
  * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
+ * int next_cpu_nr(cpu, mask)          Next cpu past 'cpu', or nr_cpu_ids
  *
  * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
  * CPU_MASK_ALL                                Initializer - all bits set
@@ -59,7 +75,8 @@
  * void cpus_onto(dst, orig, relmap)   *dst = orig relative to relmap
  * void cpus_fold(dst, orig, sz)       dst bits = orig bits mod sz
  *
- * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
+ * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask using NR_CPUS
+ * for_each_cpu_mask_nr(cpu, mask)     for-loop cpu over mask using nr_cpu_ids
  *
  * int num_online_cpus()               Number of online CPUs
  * int num_possible_cpus()             Number of all possible CPUs
@@ -216,15 +233,6 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
        bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
 }
 
-#ifdef CONFIG_SMP
-int __first_cpu(const cpumask_t *srcp);
-#define first_cpu(src) __first_cpu(&(src))
-int __next_cpu(int n, const cpumask_t *srcp);
-#define next_cpu(n, src) __next_cpu((n), &(src))
-#else
-#define first_cpu(src)         ({ (void)(src); 0; })
-#define next_cpu(n, src)       ({ (void)(src); 1; })
-#endif
 
 #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
 extern cpumask_t *cpumask_of_cpu_map;
@@ -343,20 +351,49 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
        bitmap_fold(dstp->bits, origp->bits, sz, nbits);
 }
 
-#if NR_CPUS > 1
-#define for_each_cpu_mask(cpu, mask)           \
-       for ((cpu) = first_cpu(mask);           \
-               (cpu) < NR_CPUS;                \
-               (cpu) = next_cpu((cpu), (mask)))
-#else /* NR_CPUS == 1 */
-#define for_each_cpu_mask(cpu, mask)           \
+#if NR_CPUS == 1
+
+#define nr_cpu_ids             1
+#define first_cpu(src)         ({ (void)(src); 0; })
+#define next_cpu(n, src)       ({ (void)(src); 1; })
+#define any_online_cpu(mask)   0
+#define for_each_cpu_mask(cpu, mask)   \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#endif /* NR_CPUS */
+
+#else /* NR_CPUS > 1 */
+
+extern int nr_cpu_ids;
+int __first_cpu(const cpumask_t *srcp);
+int __next_cpu(int n, const cpumask_t *srcp);
+int __any_online_cpu(const cpumask_t *mask);
+
+#define first_cpu(src)         __first_cpu(&(src))
+#define next_cpu(n, src)       __next_cpu((n), &(src))
+#define any_online_cpu(mask) __any_online_cpu(&(mask))
+#define for_each_cpu_mask(cpu, mask)                   \
+       for ((cpu) = -1;                                \
+               (cpu) = next_cpu((cpu), (mask)),        \
+               (cpu) < NR_CPUS; )
+#endif
+
+#if NR_CPUS <= 64
 
 #define next_cpu_nr(n, src)            next_cpu(n, src)
 #define cpus_weight_nr(cpumask)                cpus_weight(cpumask)
 #define for_each_cpu_mask_nr(cpu, mask)        for_each_cpu_mask(cpu, mask)
 
+#else /* NR_CPUS > 64 */
+
+int __next_cpu_nr(int n, const cpumask_t *srcp);
+#define next_cpu_nr(n, src)    __next_cpu_nr((n), &(src))
+#define cpus_weight_nr(cpumask)        __cpus_weight(&(cpumask), nr_cpu_ids)
+#define for_each_cpu_mask_nr(cpu, mask)                        \
+       for ((cpu) = -1;                                \
+               (cpu) = next_cpu_nr((cpu), (mask)),     \
+               (cpu) < nr_cpu_ids; )
+
+#endif /* NR_CPUS > 64 */
+
 /*
  * The following particular system cpumasks and operations manage
  * possible, present and online cpus.  Each of them is a fixed size
@@ -418,9 +455,9 @@ extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_present_map;
 
 #if NR_CPUS > 1
-#define num_online_cpus()      cpus_weight(cpu_online_map)
-#define num_possible_cpus()    cpus_weight(cpu_possible_map)
-#define num_present_cpus()     cpus_weight(cpu_present_map)
+#define num_online_cpus()      cpus_weight_nr(cpu_online_map)
+#define num_possible_cpus()    cpus_weight_nr(cpu_possible_map)
+#define num_present_cpus()     cpus_weight_nr(cpu_present_map)
 #define cpu_online(cpu)                cpu_isset((cpu), cpu_online_map)
 #define cpu_possible(cpu)      cpu_isset((cpu), cpu_possible_map)
 #define cpu_present(cpu)       cpu_isset((cpu), cpu_present_map)
@@ -435,17 +472,8 @@ extern cpumask_t cpu_present_map;
 
 #define cpu_is_offline(cpu)    unlikely(!cpu_online(cpu))
 
-#ifdef CONFIG_SMP
-extern int nr_cpu_ids;
-#define any_online_cpu(mask) __any_online_cpu(&(mask))
-int __any_online_cpu(const cpumask_t *mask);
-#else
-#define nr_cpu_ids                     1
-#define any_online_cpu(mask)           0
-#endif
-
-#define for_each_possible_cpu(cpu)  for_each_cpu_mask((cpu), cpu_possible_map)
-#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
-#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
+#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map)
+#define for_each_online_cpu(cpu)   for_each_cpu_mask_nr((cpu), cpu_online_map)
+#define for_each_present_cpu(cpu)  for_each_cpu_mask_nr((cpu), cpu_present_map)
 
 #endif /* __LINUX_CPUMASK_H */
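
To make the new iterator concrete, here is a minimal usage sketch (illustrative only, not code from this commit; count_online_cpus_nr() is a hypothetical helper). With NR_CPUS=4096 but, say, only 8 possible cpus, for_each_cpu_mask_nr() stops scanning at nr_cpu_ids (8) instead of walking all 4096 bit positions:

	#include <linux/cpumask.h>

	/* Hypothetical helper: count online cpus by walking the mask. */
	static int count_online_cpus_nr(void)
	{
		int cpu, n = 0;

		/* The loop bound is nr_cpu_ids, not NR_CPUS. */
		for_each_cpu_mask_nr(cpu, cpu_online_map)
			n++;
		return n;
	}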
kernel/cpu.c
index b11f06dc149add3a29c072e2ec9356499dd9dbc1..b21ba7e9036dfcaa69594827973e3d5d2d2d7970 100644 (file)
@@ -412,7 +412,7 @@ void __ref enable_nonboot_cpus(void)
                goto out;
 
        printk("Enabling non-boot CPUs ...\n");
-       for_each_cpu_mask(cpu, frozen_cpus) {
+       for_each_cpu_mask_nr(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
kernel/rcuclassic.c
index 16eeeaa9d618c7ef4c7ef4f31499fb1195a396b3..6f8696c502f429ebcbaf65e537fdf47e3127d300 100644 (file)
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
                 */
                cpus_and(cpumask, rcp->cpumask, cpu_online_map);
                cpu_clear(rdp->cpu, cpumask);
-               for_each_cpu_mask(cpu, cpumask)
+               for_each_cpu_mask_nr(cpu, cpumask)
                        smp_send_reschedule(cpu);
        }
 }
kernel/rcupreempt.c
index 6f62b77d93c41978f030e28ef617b6ddfbe0c5e8..27827931ca0dd6ca905040955616c970b2e7539d 100644 (file)
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void)
 
        /* Now ask each CPU for acknowledgement of the flip. */
 
-       for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
                per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
                dyntick_save_progress_counter(cpu);
        }
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void)
        int cpu;
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
                if (rcu_try_flip_waitack_needed(cpu) &&
                    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
                        RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void)
        /* Check to see if the sum of the "last" counters is zero. */
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
                sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
        if (sum != 0) {
                RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void)
        smp_mb();  /*  ^^^^^^^^^^^^ */
 
        /* Call for a memory barrier from each CPU. */
-       for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
                per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
                dyntick_save_progress_counter(cpu);
        }
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void)
        int cpu;
 
        RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-       for_each_cpu_mask(cpu, rcu_cpu_online_map)
+       for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
                if (rcu_try_flip_waitmb_needed(cpu) &&
                    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
                        RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
kernel/sched.c
index 99e6d850ecab097e4d1c5cbda1e0419548093681..a6d0a7f3f7c3fd4afd6027ddeb849e4b06fbf8bb 100644 (file)
@@ -2108,7 +2108,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                /* Tally up the load of all CPUs in the group */
                avg_load = 0;
 
-               for_each_cpu_mask(i, group->cpumask) {
+               for_each_cpu_mask_nr(i, group->cpumask) {
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = source_load(i, load_idx);
@@ -2150,7 +2150,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
        /* Traverse only the allowed CPUs */
        cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-       for_each_cpu_mask(i, *tmp) {
+       for_each_cpu_mask_nr(i, *tmp) {
                load = weighted_cpuload(i);
 
                if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3168,7 +3168,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                max_cpu_load = 0;
                min_cpu_load = ~0UL;
 
-               for_each_cpu_mask(i, group->cpumask) {
+               for_each_cpu_mask_nr(i, group->cpumask) {
                        struct rq *rq;
 
                        if (!cpu_isset(i, *cpus))
@@ -3447,7 +3447,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
        unsigned long max_load = 0;
        int i;
 
-       for_each_cpu_mask(i, group->cpumask) {
+       for_each_cpu_mask_nr(i, group->cpumask) {
                unsigned long wl;
 
                if (!cpu_isset(i, *cpus))
@@ -3989,7 +3989,7 @@ static void run_rebalance_domains(struct softirq_action *h)
                int balance_cpu;
 
                cpu_clear(this_cpu, cpus);
-               for_each_cpu_mask(balance_cpu, cpus) {
+               for_each_cpu_mask_nr(balance_cpu, cpus) {
                        /*
                         * If this cpu gets work to do, stop the load balancing
                         * work being done for other cpus. Next load
@@ -6802,7 +6802,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
        cpus_clear(*covered);
 
-       for_each_cpu_mask(i, *span) {
+       for_each_cpu_mask_nr(i, *span) {
                struct sched_group *sg;
                int group = group_fn(i, cpu_map, &sg, tmpmask);
                int j;
@@ -6813,7 +6813,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
                cpus_clear(sg->cpumask);
                sg->__cpu_power = 0;
 
-               for_each_cpu_mask(j, *span) {
+               for_each_cpu_mask_nr(j, *span) {
                        if (group_fn(j, cpu_map, NULL, tmpmask) != group)
                                continue;
 
@@ -7013,7 +7013,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
        if (!sg)
                return;
        do {
-               for_each_cpu_mask(j, sg->cpumask) {
+               for_each_cpu_mask_nr(j, sg->cpumask) {
                        struct sched_domain *sd;
 
                        sd = &per_cpu(phys_domains, j);
@@ -7038,7 +7038,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
        int cpu, i;
 
-       for_each_cpu_mask(cpu, *cpu_map) {
+       for_each_cpu_mask_nr(cpu, *cpu_map) {
                struct sched_group **sched_group_nodes
                        = sched_group_nodes_bycpu[cpu];
 
@@ -7277,7 +7277,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
        /*
         * Set up domains for cpus specified by the cpu_map.
         */
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd = NULL, *p;
                SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7344,7 +7344,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
        /* Set up CPU (sibling) groups */
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
                SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7361,7 +7361,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
        /* Set up multi-core groups */
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                SCHED_CPUMASK_VAR(this_core_map, allmasks);
                SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7428,7 +7428,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
                        goto error;
                }
                sched_group_nodes[i] = sg;
-               for_each_cpu_mask(j, *nodemask) {
+               for_each_cpu_mask_nr(j, *nodemask) {
                        struct sched_domain *sd;
 
                        sd = &per_cpu(node_domains, j);
@@ -7474,21 +7474,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
        /* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
                init_sched_groups_power(i, sd);
        }
 #endif
 #ifdef CONFIG_SCHED_MC
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd = &per_cpu(core_domains, i);
 
                init_sched_groups_power(i, sd);
        }
 #endif
 
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd = &per_cpu(phys_domains, i);
 
                init_sched_groups_power(i, sd);
@@ -7508,7 +7508,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
        /* Attach the domains */
-       for_each_cpu_mask(i, *cpu_map) {
+       for_each_cpu_mask_nr(i, *cpu_map) {
                struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
                sd = &per_cpu(cpu_domains, i);
@@ -7603,7 +7603,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
        unregister_sched_domain_sysctl();
 
-       for_each_cpu_mask(i, *cpu_map)
+       for_each_cpu_mask_nr(i, *cpu_map)
                cpu_attach_domain(NULL, &def_root_domain, i);
        synchronize_sched();
        arch_destroy_sched_domains(cpu_map, &tmpmask);
kernel/sched_fair.c
index f2aa987027d695750f2ca4b8f917d02171eeb3b8..bb61fe26b62cc50bd0e60a84781ff658f29e22e2 100644 (file)
@@ -1031,7 +1031,7 @@ static int wake_idle(int cpu, struct task_struct *p)
                    || ((sd->flags & SD_WAKE_IDLE_FAR)
                        && !task_hot(p, task_rq(p)->clock, sd))) {
                        cpus_and(tmp, sd->span, p->cpus_allowed);
-                       for_each_cpu_mask(i, tmp) {
+                       for_each_cpu_mask_nr(i, tmp) {
                                if (idle_cpu(i)) {
                                        if (i != task_cpu(p)) {
                                                schedstat_inc(p,
kernel/sched_rt.c
index 47ceac9e8552f309930c9f9af4d4131fd18ca8f3..7c9614728c597433eb4f10200207994d8e3eee6b 100644 (file)
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
        spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
-       for_each_cpu_mask(i, rd->span) {
+       for_each_cpu_mask_nr(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;
 
@@ -1107,7 +1107,7 @@ static int pull_rt_task(struct rq *this_rq)
 
        next = pick_next_task_rt(this_rq);
 
-       for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
+       for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;
 
kernel/taskstats.c
index 4a23517169a6495cf1aa1bc689137e1c38698c62..06b17547f4e76869f16fedddc00f3a880fc6c364 100644 (file)
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
                return -EINVAL;
 
        if (isadd == REGISTER) {
-               for_each_cpu_mask(cpu, mask) {
+               for_each_cpu_mask_nr(cpu, mask) {
                        s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
                                         cpu_to_node(cpu));
                        if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
 
        /* Deregister or cleanup */
 cleanup:
-       for_each_cpu_mask(cpu, mask) {
+       for_each_cpu_mask_nr(cpu, mask) {
                listeners = &per_cpu(listener_array, cpu);
                down_write(&listeners->sem);
                list_for_each_entry_safe(s, tmp, &listeners->list, list) {
kernel/time/clocksource.c
index dadde5361f32df668877aaa8d5a2e56abb5bb9ff..60ceabd53f2ead02e3c04045efc72aa1eac58ba5 100644 (file)
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data)
                 * Cycle through CPUs to check if the CPUs stay
                 * synchronized to each other.
                 */
-               int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+               int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
 
-               if (next_cpu >= NR_CPUS)
+               if (next_cpu >= nr_cpu_ids)
                        next_cpu = first_cpu(cpu_online_map);
                watchdog_timer.expires += WATCHDOG_INTERVAL;
                add_timer_on(&watchdog_timer, next_cpu);
kernel/time/tick-broadcast.c
index f48d0f09d32f9c8763190a766f2f23a9fef9e923..31463d370b944ff37919d184a125e7efc1bf563a 100644 (file)
@@ -399,8 +399,7 @@ again:
        mask = CPU_MASK_NONE;
        now = ktime_get();
        /* Find all expired events */
-       for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
-            cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
+       for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64)
                        cpu_set(cpu, mask);
kernel/workqueue.c
index ce7799540c91d28d3109e0e31fc53e623859ed1e..a6d36346d10ad11efc2cf0461720b34e5392ba19 100644 (file)
@@ -397,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
        might_sleep();
        lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-       for_each_cpu_mask(cpu, *cpu_map)
+       for_each_cpu_mask_nr(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -477,7 +477,7 @@ static void wait_on_work(struct work_struct *work)
        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);
 
-       for_each_cpu_mask(cpu, *cpu_map)
+       for_each_cpu_mask_nr(cpu, *cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -813,7 +813,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);
 
-       for_each_cpu_mask(cpu, *cpu_map)
+       for_each_cpu_mask_nr(cpu, *cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        put_online_cpus();
 
lib/cpumask.c
index bb4f76d3c3e7cd327488006e4878296d58fe1167..5f97dc25ef9c925fb68b83fecec5a7c0114ab339 100644 (file)
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
 }
 EXPORT_SYMBOL(__next_cpu);
 
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+       return min_t(int, nr_cpu_ids,
+                               find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
 int __any_online_cpu(const cpumask_t *mask)
 {
        int cpu;
mm/allocpercpu.c
index 05f2b4009cccd4c249ccfbd50aa8bc3237d4cb59..843364594e23d1674669713b023cd6670bde8364 100644 (file)
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(percpu_depopulate);
 void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
 {
        int cpu;
-       for_each_cpu_mask(cpu, *mask)
+       for_each_cpu_mask_nr(cpu, *mask)
                percpu_depopulate(__pdata, cpu);
 }
 EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
@@ -86,7 +86,7 @@ int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
        int cpu;
 
        cpus_clear(populated);
-       for_each_cpu_mask(cpu, *mask)
+       for_each_cpu_mask_nr(cpu, *mask)
                if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
                        __percpu_depopulate_mask(__pdata, &populated);
                        return -ENOMEM;
mm/vmstat.c
index db9eabb2c5b3a992a04ac2396bf791b72bbdcf94..c3d4a781802f24c48a61864c9d40f1c94134cbc8 100644 (file)
@@ -26,7 +26,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 
        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
 
-       for_each_cpu_mask(cpu, *cpumask) {
+       for_each_cpu_mask_nr(cpu, *cpumask) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
 
                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
net/core/dev.c
index 821cb1628e5ee98acf431ba67feeb7158fdbe1ce..df5520a60b90cc33a753075643c8490fd4c0f705 100644 (file)
@@ -2261,7 +2261,7 @@ out:
         */
        if (!cpus_empty(net_dma.channel_mask)) {
                int chan_idx;
-               for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+               for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
                        struct dma_chan *chan = net_dma.channels[chan_idx];
                        if (chan)
                                dma_async_memcpy_issue_pending(chan);
@@ -4322,7 +4322,7 @@ static void net_dma_rebalance(struct net_dma *net_dma)
        i = 0;
        cpu = first_cpu(cpu_online_map);
 
-       for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+       for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
                chan = net_dma->channels[chan_idx];
 
                n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
net/iucv/iucv.c
index cc34ac769a3c9d3eb19bf6c1f52828d6e961725d..411b339a0c8a5edeedbc6822742c1da73514f884 100644 (file)
@@ -497,7 +497,7 @@ static void iucv_setmask_up(void)
        /* Disable all cpus but the first in cpu_irq_cpumask. */
        cpumask = iucv_irq_cpumask;
        cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
-       for_each_cpu_mask(cpu, cpumask)
+       for_each_cpu_mask_nr(cpu, cpumask)
                smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
net/sunrpc/svc.c
index 01c7e311b904597e826b1da925a03c888aa090f7..d43cf8ddff67c13bd0646c728b618f2b39863fc3 100644 (file)
@@ -603,7 +603,7 @@ __svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
        error = kernel_thread((int (*)(void *)) func, rqstp, 0);
 
        if (have_oldmask)
-               set_cpus_allowed(current, oldmask);
+               set_cpus_allowed_ptr(current, &oldmask);
 
        if (error < 0)
                goto out_thread;
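
Several hunks in this merge (drv_write() in acpi-cpufreq.c, centrino_target() in speedstep-centrino.c, and this one) rely on the same save/pin/restore affinity idiom around set_cpus_allowed_ptr(). A condensed sketch of the pattern (illustrative only, not part of this commit; do_percpu_work() is a hypothetical stand-in):

	cpumask_t saved_mask = current->cpus_allowed;

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));	/* pin to one cpu */
	do_percpu_work();					/* per-cpu work goes here */
	set_cpus_allowed_ptr(current, &saved_mask);		/* restore caller's affinity */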