Remove obsolete #include <linux/config.h>
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index c5517f3..6f5fea0 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -34,7 +34,6 @@
 *              Rusty Russell   :       Hacked into shape for new "hotplug" boot process. */
 
 #include <linux/module.h>
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 
@@ -42,7 +41,6 @@
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
 #include <linux/smp_lock.h>
-#include <linux/irq.h>
 #include <linux/bootmem.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
 #include <asm/arch_hooks.h>
+#include <asm/nmi.h>
 
 #include <mach_apic.h>
 #include <mach_wakecpu.h>
 #include <smpboot_hooks.h>
 
 /* Set if we find a B stepping CPU */
-static int __initdata smp_b_stepping;
+static int __devinitdata smp_b_stepping;
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 #ifdef CONFIG_X86_HT
 EXPORT_SYMBOL(smp_num_siblings);
 #endif
-int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
-EXPORT_SYMBOL(phys_proc_id);
-int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
-EXPORT_SYMBOL(cpu_core_id);
+
+/* Last level cache ID of each logical CPU */
+int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+
+/* representing HT siblings of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* representing HT and core siblings of each logical CPU */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_core_map);
 
 /* bitmap of online cpus */
-cpumask_t cpu_online_map;
+cpumask_t cpu_online_map __read_mostly;
 EXPORT_SYMBOL(cpu_online_map);
 
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
+cpumask_t cpu_possible_map;
+EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
+/* The TSC's upper 32 bits can't be written on earlier CPUs (before
+ * Prescott), so there is no way to resync one AP against the BP.
+ * TBD: for Prescott and above, we should use IA64's algorithm
+ */
+static int __devinitdata tsc_sync_disabled;
+
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 EXPORT_SYMBOL(cpu_data);
 
-u8 x86_cpu_to_apicid[NR_CPUS] =
+u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
                        { [0 ... NR_CPUS-1] = 0xff };
 EXPORT_SYMBOL(x86_cpu_to_apicid);
 
@@ -108,7 +122,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
  * has made sure it's suitably aligned.
  */
 
-static unsigned long __init setup_trampoline(void)
+static unsigned long __devinit setup_trampoline(void)
 {
        memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
@@ -138,7 +152,7 @@ void __init smp_alloc_memory(void)
  * a given CPU
  */
 
-static void __init smp_store_cpu_info(int id)
+static void __devinit smp_store_cpu_info(int id)
 {
        struct cpuinfo_x86 *c = cpu_data + id;
 
@@ -184,7 +198,7 @@ static void __init smp_store_cpu_info(int id)
                                goto valid_k7;
 
                /* If we get here, it's not a certified SMP capable AMD system. */
-               tainted |= TAINT_UNSAFE_SMP;
+               add_taint(TAINT_UNSAFE_SMP);
        }
 
 valid_k7:
@@ -237,7 +251,7 @@ static void __init synchronize_tsc_bp (void)
                 * all APs synchronize but they loop on '== num_cpus'
                 */
                while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
-                       mb();
+                       cpu_relax();
                atomic_set(&tsc_count_stop, 0);
                wmb();
                /*
@@ -256,7 +270,7 @@ static void __init synchronize_tsc_bp (void)
                 * Wait for all APs to leave the synchronization point:
                 */
                while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
-                       mb();
+                       cpu_relax();
                atomic_set(&tsc_count_start, 0);
                wmb();
                atomic_inc(&tsc_count_stop);
@@ -293,7 +307,9 @@ static void __init synchronize_tsc_bp (void)
                        if (tsc_values[i] < avg)
                                realdelta = -realdelta;
 
-                       printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
+                       if (realdelta > 0)
+                               printk(KERN_INFO "CPU#%d had %ld usecs TSC "
+                                       "skew, fixed it up.\n", i, realdelta);
                }
 
                sum += delta;
@@ -311,19 +327,21 @@ static void __init synchronize_tsc_ap (void)
         * this gets called, so we first wait for the BP to
         * finish SMP initialization:
         */
-       while (!atomic_read(&tsc_start_flag)) mb();
+       while (!atomic_read(&tsc_start_flag))
+               cpu_relax();
 
        for (i = 0; i < NR_LOOPS; i++) {
                atomic_inc(&tsc_count_start);
                while (atomic_read(&tsc_count_start) != num_booting_cpus())
-                       mb();
+                       cpu_relax();
 
                rdtscll(tsc_values[smp_processor_id()]);
                if (i == NR_LOOPS-1)
                        write_tsc(0, 0);
 
                atomic_inc(&tsc_count_stop);
-               while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
+               while (atomic_read(&tsc_count_stop) != num_booting_cpus())
+                       cpu_relax();
        }
 }
 #undef NR_LOOPS
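
The mb()-to-cpu_relax() conversions in the loops above are not just cosmetic: the polling only needs atomic_read(), and on i386 cpu_relax() expands to rep;nop (the PAUSE hint), telling the CPU this is a spin-wait loop. A minimal sketch of the two-phase counter barrier that both synchronize_tsc_bp() and synchronize_tsc_ap() are built on, using an illustrative helper name:

    static atomic_t rendezvous = ATOMIC_INIT(0);

    /* Spin until nr_cpus CPUs have checked in (illustrative helper). */
    static void rendezvous_wait(int nr_cpus)
    {
            atomic_inc(&rendezvous);
            while (atomic_read(&rendezvous) != nr_cpus)
                    cpu_relax();            /* rep;nop - PAUSE spin-wait hint */
    }
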
@@ -332,7 +350,7 @@ extern void calibrate_delay(void);
 
 static atomic_t init_deasserted;
 
-static void __init smp_callin(void)
+static void __devinit smp_callin(void)
 {
        int cpuid, phys_id;
        unsigned long timeout;
@@ -417,16 +435,96 @@ static void __init smp_callin(void)
        /*
         *      Synchronize the TSC with the BP
         */
-       if (cpu_has_tsc && cpu_khz)
+       if (cpu_has_tsc && cpu_khz && !tsc_sync_disabled)
                synchronize_tsc_ap();
 }
 
 static int cpucount;
 
+/* maps the cpu to the sched domain representing multi-core */
+cpumask_t cpu_coregroup_map(int cpu)
+{
+       struct cpuinfo_x86 *c = cpu_data + cpu;
+       /*
+        * For perf, we return the last level cache shared map;
+        * for power savings, we return cpu_core_map.
+        */
+       if (sched_mc_power_savings || sched_smt_power_savings)
+               return cpu_core_map[cpu];
+       else
+               return c->llc_shared_map;
+}
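
To make the perf/power split above concrete: on a hypothetical dual-core package whose cores have private last-level caches (a Pentium D style part), the two candidate masks differ:

    /* cpus 0 and 1, one package, two cores, private L2 per core:
     *
     *   cpu_core_map[0]            = { 0, 1 }   - same physical package
     *   cpu_data[0].llc_shared_map = { 0 }      - L2 not shared
     *
     * cpu_coregroup_map(0) returns { 0 } by default (keep tasks near
     * their cache) but { 0, 1 } once sched_mc_power_savings or
     * sched_smt_power_savings is set, so load can be consolidated
     * onto fewer packages.
     */
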
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+static inline void
+set_cpu_sibling_map(int cpu)
+{
+       int i;
+       struct cpuinfo_x86 *c = cpu_data;
+
+       cpu_set(cpu, cpu_sibling_setup_map);
+
+       if (smp_num_siblings > 1) {
+               for_each_cpu_mask(i, cpu_sibling_setup_map) {
+                       if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
+                           c[cpu].cpu_core_id == c[i].cpu_core_id) {
+                               cpu_set(i, cpu_sibling_map[cpu]);
+                               cpu_set(cpu, cpu_sibling_map[i]);
+                               cpu_set(i, cpu_core_map[cpu]);
+                               cpu_set(cpu, cpu_core_map[i]);
+                               cpu_set(i, c[cpu].llc_shared_map);
+                               cpu_set(cpu, c[i].llc_shared_map);
+                       }
+               }
+       } else {
+               cpu_set(cpu, cpu_sibling_map[cpu]);
+       }
+
+       cpu_set(cpu, c[cpu].llc_shared_map);
+
+       if (current_cpu_data.x86_max_cores == 1) {
+               cpu_core_map[cpu] = cpu_sibling_map[cpu];
+               c[cpu].booted_cores = 1;
+               return;
+       }
+
+       for_each_cpu_mask(i, cpu_sibling_setup_map) {
+               if (cpu_llc_id[cpu] != BAD_APICID &&
+                   cpu_llc_id[cpu] == cpu_llc_id[i]) {
+                       cpu_set(i, c[cpu].llc_shared_map);
+                       cpu_set(cpu, c[i].llc_shared_map);
+               }
+               if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
+                       cpu_set(i, cpu_core_map[cpu]);
+                       cpu_set(cpu, cpu_core_map[i]);
+                       /*
+                        *  Does this new cpu bring up a new core?
+                        */
+                       if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                               /*
+                                * for each core in package, increment
+                                * the booted_cores for this new cpu
+                                */
+                               if (first_cpu(cpu_sibling_map[i]) == i)
+                                       c[cpu].booted_cores++;
+                               /*
+                                * increment the core count for all
+                                * the other cpus in this package
+                                */
+                               if (i != cpu)
+                                       c[i].booted_cores++;
+                       } else if (i != cpu && !c[cpu].booted_cores)
+                               c[cpu].booted_cores = c[i].booted_cores;
+               }
+       }
+}
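
A worked example of what set_cpu_sibling_map() builds, assuming a hypothetical single-package box with two HT cores whose thread pairs are {0,1} and {2,3}, booted in that order:

    /*
     *   cpu_sibling_map[0] = cpu_sibling_map[1] = { 0, 1 }
     *   cpu_sibling_map[2] = cpu_sibling_map[3] = { 2, 3 }
     *   cpu_core_map[i]    = { 0, 1, 2, 3 }     - whole package
     *   c[i].booted_cores  = 2                  - for every i
     *
     * booted_cores is bumped only when the first thread of a core
     * arrives (cpus_weight(cpu_sibling_map[cpu]) == 1); later threads
     * inherit the count from an already-counted sibling.
     */
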
+
 /*
  * Activate a secondary processor.
  */
-static void __init start_secondary(void *unused)
+static void __devinit start_secondary(void *unused)
 {
        /*
         * Dont put anything before smp_callin(), SMP
@@ -434,6 +532,7 @@ static void __init start_secondary(void *unused)
         * things done here to the most necessary things.
         */
        cpu_init();
+       preempt_disable();
        smp_callin();
        while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
                rep_nop();
@@ -450,6 +549,10 @@ static void __init start_secondary(void *unused)
         */
        local_flush_tlb();
 
+       /* This must be done before setting cpu_online_map */
+       set_cpu_sibling_map(raw_smp_processor_id());
+       wmb();
+
        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines number of
@@ -461,6 +564,7 @@ static void __init start_secondary(void *unused)
        lock_ipi_call_lock();
        cpu_set(smp_processor_id(), cpu_online_map);
        unlock_ipi_call_lock();
+       per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();
@@ -475,7 +579,7 @@ static void __init start_secondary(void *unused)
  * from the task structure
  * This function must not return.
  */
-void __init initialize_secondary(void)
+void __devinit initialize_secondary(void)
 {
        /*
         * We don't actually need to load the full TSS,
@@ -497,10 +601,10 @@ extern struct {
 #ifdef CONFIG_NUMA
 
 /* which logical CPUs are on which nodes */
-cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
+cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
                                { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
 /* which node each logical CPU is on */
-int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
+int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
 EXPORT_SYMBOL(cpu_2_node);
 
 /* set up a mapping between cpu and node. */
@@ -528,7 +632,7 @@ static inline void unmap_cpu_to_node(int cpu)
 
 #endif /* CONFIG_NUMA */
 
-u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
 
 static void map_cpu_to_logical_apicid(void)
 {
@@ -554,7 +658,7 @@ static inline void __inquire_remote_apic(int apicid)
 
        printk("Inquiring remote APIC #%d...\n", apicid);
 
-       for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
+       for (i = 0; i < ARRAY_SIZE(regs); i++) {
                printk("... APIC #%d %s: ", apicid, names[i]);
 
                /*
@@ -589,7 +693,7 @@ static inline void __inquire_remote_apic(int apicid)
  * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
  * won't ... remember to clear down the APIC, etc later.
  */
-static int __init
+static int __devinit
 wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 {
        unsigned long send_status = 0, accept_status = 0;
@@ -635,7 +739,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 #endif /* WAKE_SECONDARY_VIA_NMI */
 
 #ifdef WAKE_SECONDARY_VIA_INIT
-static int __init
+static int __devinit
 wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 {
        unsigned long send_status = 0, accept_status = 0;
@@ -770,8 +874,42 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 #endif /* WAKE_SECONDARY_VIA_INIT */
 
 extern cpumask_t cpu_initialized;
+static inline int alloc_cpu_id(void)
+{
+       cpumask_t       tmp_map;
+       int cpu;
+
+       cpus_complement(tmp_map, cpu_present_map);
+       cpu = first_cpu(tmp_map);
+       if (cpu >= NR_CPUS)
+               return -ENODEV;
+       return cpu;
+}
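
For illustration: cpu_present_map always contains the boot CPU, so the first AP is handed logical id 1, the next id 2, and so on. Note that cpu_exit_clear() below never clears cpu_present_map, so a hot-removed CPU keeps its id and is warm-booted via __smp_prepare_cpu() instead of passing through this allocator again:

    /* With CPUs 0 and 1 present:
     *   cpus_complement(tmp_map, cpu_present_map)  ->  { 2, 3, ... }
     *   first_cpu(tmp_map)                         ->  2
     */
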
+
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS];
+static inline struct task_struct * alloc_idle_task(int cpu)
+{
+       struct task_struct *idle;
+
+       if ((idle = cpu_idle_tasks[cpu]) != NULL) {
+               /* initialize thread_struct.  we really want to avoid
+                * destroying the idle thread
+                */
+               idle->thread.esp = (unsigned long)task_pt_regs(idle);
+               init_idle(idle, cpu);
+               return idle;
+       }
+       idle = fork_idle(cpu);
+
+       if (!IS_ERR(idle))
+               cpu_idle_tasks[cpu] = idle;
+       return idle;
+}
+#else
+#define alloc_idle_task(cpu) fork_idle(cpu)
+#endif
 
-static int __init do_boot_cpu(int apicid)
+static int __devinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -780,16 +918,18 @@ static int __init do_boot_cpu(int apicid)
 {
        struct task_struct *idle;
        unsigned long boot_error;
-       int timeout, cpu;
+       int timeout;
        unsigned long start_eip;
        unsigned short nmi_high = 0, nmi_low = 0;
 
-       cpu = ++cpucount;
+       ++cpucount;
+       alternatives_smp_switch(1);
+
        /*
         * We can't use kernel_thread since we must avoid to
         * reschedule the child.
         */
-       idle = fork_idle(cpu);
+       idle = alloc_idle_task(cpu);
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpu);
        idle->thread.eip = (unsigned long) start_secondary;
@@ -856,13 +996,16 @@ static int __init do_boot_cpu(int apicid)
                        inquire_remote_apic(apicid);
                }
        }
-       x86_cpu_to_apicid[cpu] = apicid;
+
        if (boot_error) {
                /* Try to put things back the way they were before ... */
                unmap_cpu_to_logical_apicid(cpu);
                cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
                cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
                cpucount--;
+       } else {
+               x86_cpu_to_apicid[cpu] = apicid;
+               cpu_set(cpu, cpu_present_map);
        }
 
        /* mark "stuck" area as not stuck */
@@ -871,6 +1014,85 @@ static int __init do_boot_cpu(int apicid)
        return boot_error;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void cpu_exit_clear(void)
+{
+       int cpu = raw_smp_processor_id();
+
+       idle_task_exit();
+
+       cpucount--;
+       cpu_uninit();
+       irq_ctx_exit(cpu);
+
+       cpu_clear(cpu, cpu_callout_map);
+       cpu_clear(cpu, cpu_callin_map);
+
+       cpu_clear(cpu, smp_commenced_mask);
+       unmap_cpu_to_logical_apicid(cpu);
+}
+
+struct warm_boot_cpu_info {
+       struct completion *complete;
+       int apicid;
+       int cpu;
+};
+
+static void __cpuinit do_warm_boot_cpu(void *p)
+{
+       struct warm_boot_cpu_info *info = p;
+       do_boot_cpu(info->apicid, info->cpu);
+       complete(info->complete);
+}
+
+static int __cpuinit __smp_prepare_cpu(int cpu)
+{
+       DECLARE_COMPLETION(done);
+       struct warm_boot_cpu_info info;
+       struct work_struct task;
+       int     apicid, ret;
+       struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+
+       apicid = x86_cpu_to_apicid[cpu];
+       if (apicid == BAD_APICID) {
+               ret = -ENODEV;
+               goto exit;
+       }
+
+       /*
+        * the CPU isn't initialized at boot time, allocate gdt table here.
+        * cpu_init will initialize it
+        */
+       if (!cpu_gdt_descr->address) {
+               cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL);
+               if (!cpu_gdt_descr->address) {
+                       printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
+                       ret = -ENOMEM;
+                       goto exit;
+               }
+       }
+
+       info.complete = &done;
+       info.apicid = apicid;
+       info.cpu = cpu;
+       INIT_WORK(&task, do_warm_boot_cpu, &info);
+
+       tsc_sync_disabled = 1;
+
+       /* init low mem mapping */
+       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+                       KERNEL_PGD_PTRS);
+       flush_tlb_all();
+       schedule_work(&task);
+       wait_for_completion(&done);
+
+       tsc_sync_disabled = 0;
+       zap_low_mappings();
+       ret = 0;
+exit:
+       return ret;
+}
+#endif
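
__smp_prepare_cpu() relies on the pre-2.6.20 three-argument INIT_WORK() plus an on-stack completion, so the warm boot runs in keventd's context while the caller sleeps. A self-contained sketch of that handshake, with illustrative names:

    #include <linux/workqueue.h>
    #include <linux/completion.h>

    struct demo_info {                      /* illustrative */
            struct completion *complete;
            int payload;
    };

    static void demo_work_fn(void *p)       /* runs in keventd */
    {
            struct demo_info *info = p;
            /* ... the deferred job (here: do_boot_cpu()) ... */
            complete(info->complete);       /* release the waiter */
    }

    static int demo_run(int payload)
    {
            DECLARE_COMPLETION(done);
            struct demo_info info = { .complete = &done, .payload = payload };
            struct work_struct task;

            INIT_WORK(&task, demo_work_fn, &info);
            schedule_work(&task);
            wait_for_completion(&done);     /* info lives on our stack */
            return 0;
    }
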
+
 static void smp_tune_scheduling (void)
 {
        unsigned long cachesize;       /* kB   */
@@ -898,6 +1120,7 @@ static void smp_tune_scheduling (void)
                        cachesize = 16; /* Pentiums, 2x8kB cache */
                        bandwidth = 100;
                }
+               max_cache_size = cachesize * 1024;
        }
 }
 
@@ -912,13 +1135,6 @@ void *xquad_portio;
 EXPORT_SYMBOL(xquad_portio);
 #endif
 
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-#ifdef CONFIG_X86_HT
-EXPORT_SYMBOL(cpu_sibling_map);
-#endif
-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_core_map);
-
 static void __init smp_boot_cpus(unsigned int max_cpus)
 {
        int apicid, cpu, bit, kicked;
@@ -937,11 +1153,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 
        current_thread_info()->cpu = 0;
        smp_tune_scheduling();
-       cpus_clear(cpu_sibling_map[0]);
-       cpu_set(0, cpu_sibling_map[0]);
 
-       cpus_clear(cpu_core_map[0]);
-       cpu_set(0, cpu_core_map[0]);
+       set_cpu_sibling_map(0);
 
        /*
         * If we couldn't find an SMP configuration at boot time,
@@ -1030,7 +1243,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                if (max_cpus <= cpucount+1)
                        continue;
 
-               if (do_boot_cpu(apicid))
+               if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
                        printk("CPU #%d not responding - cannot use it.\n",
                                                                apicid);
                else
@@ -1082,44 +1295,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                cpus_clear(cpu_core_map[cpu]);
        }
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               struct cpuinfo_x86 *c = cpu_data + cpu;
-               int siblings = 0;
-               int i;
-               if (!cpu_isset(cpu, cpu_callout_map))
-                       continue;
-
-               if (smp_num_siblings > 1) {
-                       for (i = 0; i < NR_CPUS; i++) {
-                               if (!cpu_isset(i, cpu_callout_map))
-                                       continue;
-                               if (cpu_core_id[cpu] == cpu_core_id[i]) {
-                                       siblings++;
-                                       cpu_set(i, cpu_sibling_map[cpu]);
-                               }
-                       }
-               } else {
-                       siblings++;
-                       cpu_set(cpu, cpu_sibling_map[cpu]);
-               }
-
-               if (siblings != smp_num_siblings) {
-                       printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
-                       smp_num_siblings = siblings;
-               }
-
-               if (c->x86_num_cores > 1) {
-                       for (i = 0; i < NR_CPUS; i++) {
-                               if (!cpu_isset(i, cpu_callout_map))
-                                       continue;
-                               if (phys_proc_id[cpu] == phys_proc_id[i]) {
-                                       cpu_set(i, cpu_core_map[cpu]);
-                               }
-                       }
-               } else {
-                       cpu_core_map[cpu] = cpu_sibling_map[cpu];
-               }
-       }
+       cpu_set(0, cpu_sibling_map[0]);
+       cpu_set(0, cpu_core_map[0]);
 
        smpboot_setup_io_apic();
 
@@ -1146,25 +1323,34 @@ void __devinit smp_prepare_boot_cpu(void)
 {
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_callout_map);
+       cpu_set(smp_processor_id(), cpu_present_map);
+       cpu_set(smp_processor_id(), cpu_possible_map);
+       per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-
-/* must be called with the cpucontrol mutex held */
-static int __devinit cpu_enable(unsigned int cpu)
+static void
+remove_siblinginfo(int cpu)
 {
-       /* get the target out of its holding state */
-       per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-       wmb();
-
-       /* wait for the processor to ack it. timeout? */
-       while (!cpu_online(cpu))
-               cpu_relax();
+       int sibling;
+       struct cpuinfo_x86 *c = cpu_data;
 
-       fixup_irqs(cpu_online_map);
-       /* counter the disable in fixup_irqs() */
-       local_irq_enable();
-       return 0;
+       for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
+               cpu_clear(cpu, cpu_core_map[sibling]);
+               /*
+                * last thread sibling in this cpu core going down
+                */
+               if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+                       c[sibling].booted_cores--;
+       }
+
+       for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
+               cpu_clear(cpu, cpu_sibling_map[sibling]);
+       cpus_clear(cpu_sibling_map[cpu]);
+       cpus_clear(cpu_core_map[cpu]);
+       c[cpu].phys_proc_id = 0;
+       c[cpu].cpu_core_id = 0;
+       cpu_clear(cpu, cpu_sibling_setup_map);
 }
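
Continuing the worked example from set_cpu_sibling_map(): the booted_cores accounting unwinds symmetrically on the way down:

    /*
     *   remove_siblinginfo(3): cpus_weight(cpu_sibling_map[3]) == 2,
     *         so the core survives and booted_cores stays 2 everywhere;
     *   remove_siblinginfo(2): weight is now 1 - last thread of its
     *         core - so c[0] and c[1] drop to booted_cores == 1.
     */
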
 
 int __cpu_disable(void)
@@ -1183,13 +1369,14 @@ int __cpu_disable(void)
        if (cpu == 0)
                return -EBUSY;
 
-       /* We enable the timer again on the exit path of the death loop */
-       disable_APIC_timer();
+       clear_local_APIC();
        /* Allow any queued timer interrupts to get serviced */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
 
+       remove_siblinginfo(cpu);
+
        cpu_clear(cpu, map);
        fixup_irqs(map);
        /* It's now safe to remove this processor from the online map */
@@ -1204,10 +1391,13 @@ void __cpu_die(unsigned int cpu)
 
        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
-               if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+               if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+                       printk(KERN_INFO "CPU %d is now offline\n", cpu);
+                       if (num_online_cpus() == 1)
+                               alternatives_smp_switch(0);
                        return;
-               current->state = TASK_UNINTERRUPTIBLE;
-               schedule_timeout(HZ/10);
+               }
+               msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
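
The other half of this handshake is the one named in the comment above ("They ack this in play_dead"): the dying CPU's idle loop ends up in play_dead() in arch/i386/kernel/process.c. A hedged sketch of that side, assuming the era's helpers:

    static inline void play_dead_sketch(void)
    {
            cpu_exit_clear();       /* tear down the maps before the ack */
            wbinvd();               /* flush dirty cache lines to memory */
            mb();
            __get_cpu_var(cpu_state) = CPU_DEAD;    /* ack __cpu_die()'s poll */
            local_irq_disable();
            while (1)
                    halt();
    }
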
@@ -1226,6 +1416,22 @@ void __cpu_die(unsigned int cpu)
 
 int __devinit __cpu_up(unsigned int cpu)
 {
+#ifdef CONFIG_HOTPLUG_CPU
+       int ret = 0;
+
+       /*
+        * We do warm boot only on cpus that had booted earlier.
+        * Otherwise cold boot is all handled from smp_boot_cpus().
+        * cpu_callin_map is set during the AP kickstart process. It's reset
+        * when a cpu is taken offline in cpu_exit_clear().
+        */
+       if (!cpu_isset(cpu, cpu_callin_map))
+               ret = __smp_prepare_cpu(cpu);
+
+       if (ret)
+               return -EIO;
+#endif
+
        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map)) {
                printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
@@ -1233,19 +1439,12 @@ int __devinit __cpu_up(unsigned int cpu)
                return -EIO;
        }
 
-#ifdef CONFIG_HOTPLUG_CPU
-       /* Already up, and in cpu_quiescent now? */
-       if (cpu_isset(cpu, smp_commenced_mask)) {
-               cpu_enable(cpu);
-               return 0;
-       }
-#endif
-
        local_irq_enable();
+       per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Unleash the CPU! */
        cpu_set(cpu, smp_commenced_mask);
        while (!cpu_isset(cpu, cpu_online_map))
-               mb();
+               cpu_relax();
        return 0;
 }
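
For context, __cpu_up() is the architecture hook called from the generic cpu_up() in kernel/cpu.c (reached, for example, from the sysfs "online" attribute). A simplified sketch of the caller, with locking and notifier details elided:

    int cpu_up(unsigned int cpu)            /* simplified sketch */
    {
            int ret;

            /* ... take the cpucontrol lock, CPU_UP_PREPARE notifiers ... */
            ret = __cpu_up(cpu);
            /* ... CPU_ONLINE notifiers on success, then unlock ... */
            return ret;
    }
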
 
@@ -1255,10 +1454,12 @@ void __init smp_cpus_done(unsigned int max_cpus)
        setup_ioapic_dest();
 #endif
        zap_low_mappings();
+#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Disable executability of the SMP trampoline:
         */
        set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
+#endif
 }
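
The #ifndef above is deliberate: with CONFIG_HOTPLUG_CPU the trampoline page has to stay executable, because __smp_prepare_cpu() warm-boots an offlined CPU through setup_trampoline() again long after smp_cpus_done() has run.
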
 
 void __init smp_intr_init(void)