blackfin: bf60x: make clock changeable in kernel menuconfig
[linux-3.10.git] / arch / blackfin / mach-common / smp.c
index 930608d..ac8f8a4 100644
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/cache.h>
+#include <linux/clockchips.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
@@ -23,8 +24,9 @@
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/cacheflush.h>
+#include <asm/irq_handler.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
  */
 struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
 
-void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
-       *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
-       *init_saved_dcplb_fault_addr_coreb;
+#ifdef CONFIG_ICACHE_FLUSH_L1
+unsigned long blackfin_iflush_l1_entry[NR_CPUS];
+#endif
+
+struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_RESCHEDULE   0
-#define BFIN_IPI_CALL_FUNC    1
-#define BFIN_IPI_CPU_STOP     2
+#define BFIN_IPI_TIMER       0
+#define BFIN_IPI_RESCHEDULE   1
+#define BFIN_IPI_CALL_FUNC    2
+#define BFIN_IPI_CPU_STOP     3
 
 struct blackfin_flush_data {
        unsigned long start;
@@ -60,8 +65,7 @@ struct smp_call_struct {
        void (*func)(void *info);
        void *info;
        int wait;
-       cpumask_t pending;
-       cpumask_t waitmask;
+       cpumask_t *waitmask;
 };
 
 static struct blackfin_flush_data smp_flush_data;
@@ -69,15 +73,19 @@ static struct blackfin_flush_data smp_flush_data;
 static DEFINE_SPINLOCK(stop_lock);
 
 struct ipi_message {
-       struct list_head list;
        unsigned long type;
        struct smp_call_struct call_struct;
 };
 
+/* A magic number - stress testing shows this is safe for common cases */
+#define BFIN_IPI_MSGQ_LEN 5
+
+/* Simple FIFO buffer; overflow leads to a panic */
 struct ipi_message_queue {
-       struct list_head head;
        spinlock_t lock;
        unsigned long count;
+       unsigned long head; /* head of the queue */
+       struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
 };
 
 static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
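
For reference: the per-cpu queue above is a fixed-length head/count ring buffer rather than the old kmalloc'd list. A minimal user-space sketch of the same index arithmetic follows; the names (struct queue, enqueue, dequeue, QUEUE_LEN) are illustrative only and do not appear in the patch.

/* Standalone sketch, not kernel code: the head/count ring-buffer
 * arithmetic used by the per-cpu IPI message queue. */
#include <stdio.h>

#define QUEUE_LEN 5                       /* mirrors BFIN_IPI_MSGQ_LEN */

struct queue {
	unsigned long head;               /* index of the oldest entry */
	unsigned long count;              /* number of queued entries */
	unsigned long msg[QUEUE_LEN];
};

/* Enqueue at (head + count) % LEN; the patch panics on overflow instead. */
static int enqueue(struct queue *q, unsigned long type)
{
	if (q->count >= QUEUE_LEN)
		return -1;                /* overflow */
	q->msg[(q->head + q->count) % QUEUE_LEN] = type;
	q->count++;
	return 0;
}

/* Dequeue from head, advancing it modulo the buffer length. */
static int dequeue(struct queue *q, unsigned long *type)
{
	if (!q->count)
		return -1;                /* empty */
	*type = q->msg[q->head];
	q->head = (q->head + 1) % QUEUE_LEN;
	q->count--;
	return 0;
}

int main(void)
{
	struct queue q = { 0, 0, { 0 } };
	unsigned long t;

	enqueue(&q, 1);
	enqueue(&q, 2);
	while (!dequeue(&q, &t))
		printf("got %lu\n", t);
	return 0;
}
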
@@ -89,7 +97,7 @@ static void ipi_cpu_stop(unsigned int cpu)
        dump_stack();
        spin_unlock(&stop_lock);
 
-       cpu_clear(cpu, cpu_online_map);
+       set_cpu_online(cpu, false);
 
        local_irq_disable();
 
@@ -105,6 +113,19 @@ static void ipi_flush_icache(void *info)
        blackfin_dcache_invalidate_range((unsigned long)fdata,
                                         (unsigned long)fdata + sizeof(*fdata));
 
+       /* Make sure all write buffers in the data side of the core
+        * are flushed before trying to invalidate the icache.  This
+        * needs to be after the data flush and before the icache
+        * flush so that the SSYNC does the right thing in preventing
+        * the instruction prefetcher from hitting things in cached
+        * memory at the wrong time -- it runs much further ahead than
+        * the pipeline.
+        */
+       SSYNC();
+
+       /* ipi_flush_icache is invoked by the generic flush_icache_range,
+        * so call the blackfin arch icache flush directly here.
+        */
        blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
@@ -116,7 +137,6 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
        func = msg->call_struct.func;
        info = msg->call_struct.info;
        wait = msg->call_struct.wait;
-       cpu_clear(cpu, msg->call_struct.pending);
        func(info);
        if (wait) {
 #ifdef __ARCH_SYNC_CORE_DCACHE
@@ -127,51 +147,67 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
                 */
                resync_core_dcache();
 #endif
-               cpu_clear(cpu, msg->call_struct.waitmask);
-       } else
-               kfree(msg);
+               cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
+       }
 }
 
-static irqreturn_t ipi_handler(int irq, void *dev_instance)
+/* Use IRQ_SUPPLE_0 to request reschedule.
+ * When returning from interrupt to user space,
+ * there is a chance to reschedule */
+static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
+{
+       unsigned int cpu = smp_processor_id();
+
+       platform_clear_ipi(cpu, IRQ_SUPPLE_0);
+       return IRQ_HANDLED;
+}
+
+DECLARE_PER_CPU(struct clock_event_device, coretmr_events);
+void ipi_timer(void)
+{
+       int cpu = smp_processor_id();
+       struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+       evt->event_handler(evt);
+}
+
+static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
        struct ipi_message *msg;
        struct ipi_message_queue *msg_queue;
        unsigned int cpu = smp_processor_id();
+       unsigned long flags;
 
-       platform_clear_ipi(cpu);
+       platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
        msg_queue = &__get_cpu_var(ipi_msg_queue);
-       msg_queue->count++;
 
-       spin_lock(&msg_queue->lock);
-       while (!list_empty(&msg_queue->head)) {
-               msg = list_entry(msg_queue->head.next, typeof(*msg), list);
-               list_del(&msg->list);
+       spin_lock_irqsave(&msg_queue->lock, flags);
+
+       while (msg_queue->count) {
+               msg = &msg_queue->ipi_message[msg_queue->head];
                switch (msg->type) {
+               case BFIN_IPI_TIMER:
+                       ipi_timer();
+                       break;
                case BFIN_IPI_RESCHEDULE:
-                       /* That's the easiest one; leave it to
-                        * return_from_int. */
-                       kfree(msg);
+                       scheduler_ipi();
                        break;
                case BFIN_IPI_CALL_FUNC:
-                       spin_unlock(&msg_queue->lock);
                        ipi_call_function(cpu, msg);
-                       spin_lock(&msg_queue->lock);
                        break;
                case BFIN_IPI_CPU_STOP:
-                       spin_unlock(&msg_queue->lock);
                        ipi_cpu_stop(cpu);
-                       spin_lock(&msg_queue->lock);
-                       kfree(msg);
                        break;
                default:
                        printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
                               cpu, msg->type);
-                       kfree(msg);
                        break;
                }
+               msg_queue->head++;
+               msg_queue->head %= BFIN_IPI_MSGQ_LEN;
+               msg_queue->count--;
        }
-       spin_unlock(&msg_queue->lock);
+       spin_unlock_irqrestore(&msg_queue->lock, flags);
        return IRQ_HANDLED;
 }
 
@@ -181,48 +217,48 @@ static void ipi_queue_init(void)
        struct ipi_message_queue *msg_queue;
        for_each_possible_cpu(cpu) {
                msg_queue = &per_cpu(ipi_msg_queue, cpu);
-               INIT_LIST_HEAD(&msg_queue->head);
                spin_lock_init(&msg_queue->lock);
                msg_queue->count = 0;
+               msg_queue->head = 0;
        }
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+static inline void smp_send_message(cpumask_t callmap, unsigned long type,
+                                       void (*func) (void *info), void *info, int wait)
 {
        unsigned int cpu;
-       cpumask_t callmap;
-       unsigned long flags;
        struct ipi_message_queue *msg_queue;
        struct ipi_message *msg;
+       unsigned long flags, next_msg;
+       cpumask_t waitmask; /* waitmask is shared by all cpus */
 
-       callmap = cpu_online_map;
-       cpu_clear(smp_processor_id(), callmap);
-       if (cpus_empty(callmap))
-               return 0;
-
-       msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
-       if (!msg)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&msg->list);
-       msg->call_struct.func = func;
-       msg->call_struct.info = info;
-       msg->call_struct.wait = wait;
-       msg->call_struct.pending = callmap;
-       msg->call_struct.waitmask = callmap;
-       msg->type = BFIN_IPI_CALL_FUNC;
-
-       for_each_cpu_mask(cpu, callmap) {
+       cpumask_copy(&waitmask, &callmap);
+       for_each_cpu(cpu, &callmap) {
                msg_queue = &per_cpu(ipi_msg_queue, cpu);
                spin_lock_irqsave(&msg_queue->lock, flags);
-               list_add_tail(&msg->list, &msg_queue->head);
+               if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
+                       next_msg = (msg_queue->head + msg_queue->count)
+                                       % BFIN_IPI_MSGQ_LEN;
+                       msg = &msg_queue->ipi_message[next_msg];
+                       msg->type = type;
+                       if (type == BFIN_IPI_CALL_FUNC) {
+                               msg->call_struct.func = func;
+                               msg->call_struct.info = info;
+                               msg->call_struct.wait = wait;
+                               msg->call_struct.waitmask = &waitmask;
+                       }
+                       msg_queue->count++;
+               } else
+                       panic("IPI message queue overflow\n");
                spin_unlock_irqrestore(&msg_queue->lock, flags);
-               platform_send_ipi_cpu(cpu);
+               platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
        }
+
        if (wait) {
-               while (!cpus_empty(msg->call_struct.waitmask))
+               while (!cpumask_empty(&waitmask))
                        blackfin_dcache_invalidate_range(
-                               (unsigned long)(&msg->call_struct.waitmask),
-                               (unsigned long)(&msg->call_struct.waitmask));
+                               (unsigned long)(&waitmask),
+                               (unsigned long)(&waitmask));
 #ifdef __ARCH_SYNC_CORE_DCACHE
                /*
                 * Invalidate D cache in case shared data was changed by
@@ -230,8 +266,21 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
                 */
                resync_core_dcache();
 #endif
-               kfree(msg);
        }
+}
+
+int smp_call_function(void (*func)(void *info), void *info, int wait)
+{
+       cpumask_t callmap;
+
+       preempt_disable();
+       cpumask_copy(&callmap, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), &callmap);
+       if (!cpumask_empty(&callmap))
+               smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
+
+       preempt_enable();
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(smp_call_function);
@@ -241,117 +290,77 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
 {
        unsigned int cpu = cpuid;
        cpumask_t callmap;
-       unsigned long flags;
-       struct ipi_message_queue *msg_queue;
-       struct ipi_message *msg;
 
        if (cpu_is_offline(cpu))
                return 0;
-       cpus_clear(callmap);
-       cpu_set(cpu, callmap);
-
-       msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
-       if (!msg)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&msg->list);
-       msg->call_struct.func = func;
-       msg->call_struct.info = info;
-       msg->call_struct.wait = wait;
-       msg->call_struct.pending = callmap;
-       msg->call_struct.waitmask = callmap;
-       msg->type = BFIN_IPI_CALL_FUNC;
-
-       msg_queue = &per_cpu(ipi_msg_queue, cpu);
-       spin_lock_irqsave(&msg_queue->lock, flags);
-       list_add_tail(&msg->list, &msg_queue->head);
-       spin_unlock_irqrestore(&msg_queue->lock, flags);
-       platform_send_ipi_cpu(cpu);
+       cpumask_clear(&callmap);
+       cpumask_set_cpu(cpu, &callmap);
+
+       smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
 
-       if (wait) {
-               while (!cpus_empty(msg->call_struct.waitmask))
-                       blackfin_dcache_invalidate_range(
-                               (unsigned long)(&msg->call_struct.waitmask),
-                               (unsigned long)(&msg->call_struct.waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-               /*
-                * Invalidate D cache in case shared data was changed by
-                * other processors to ensure cache coherence.
-                */
-               resync_core_dcache();
-#endif
-               kfree(msg);
-       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-       unsigned long flags;
-       struct ipi_message_queue *msg_queue;
-       struct ipi_message *msg;
-
-       if (cpu_is_offline(cpu))
-               return;
+       cpumask_t callmap;
+       /* simply trigger an ipi */
 
-       msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
-       if (!msg)
-               return;
-       INIT_LIST_HEAD(&msg->list);
-       msg->type = BFIN_IPI_RESCHEDULE;
+       cpumask_clear(&callmap);
+       cpumask_set_cpu(cpu, &callmap);
 
-       msg_queue = &per_cpu(ipi_msg_queue, cpu);
-       spin_lock_irqsave(&msg_queue->lock, flags);
-       list_add_tail(&msg->list, &msg_queue->head);
-       spin_unlock_irqrestore(&msg_queue->lock, flags);
-       platform_send_ipi_cpu(cpu);
+       smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
 
        return;
 }
 
+void smp_send_msg(const struct cpumask *mask, unsigned long type)
+{
+       smp_send_message(*mask, type, NULL, NULL, 0);
+}
+
+void smp_timer_broadcast(const struct cpumask *mask)
+{
+       smp_send_msg(mask, BFIN_IPI_TIMER);
+}
+
 void smp_send_stop(void)
 {
-       unsigned int cpu;
        cpumask_t callmap;
-       unsigned long flags;
-       struct ipi_message_queue *msg_queue;
-       struct ipi_message *msg;
 
-       callmap = cpu_online_map;
-       cpu_clear(smp_processor_id(), callmap);
-       if (cpus_empty(callmap))
-               return;
+       preempt_disable();
+       cpumask_copy(&callmap, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), &callmap);
+       if (!cpumask_empty(&callmap))
+               smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
 
-       msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
-       if (!msg)
-               return;
-       INIT_LIST_HEAD(&msg->list);
-       msg->type = BFIN_IPI_CPU_STOP;
+       preempt_enable();
 
-       for_each_cpu_mask(cpu, callmap) {
-               msg_queue = &per_cpu(ipi_msg_queue, cpu);
-               spin_lock_irqsave(&msg_queue->lock, flags);
-               list_add_tail(&msg->list, &msg_queue->head);
-               spin_unlock_irqrestore(&msg_queue->lock, flags);
-               platform_send_ipi_cpu(cpu);
-       }
        return;
 }
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
        int ret;
-       static struct task_struct *idle;
+       struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu);
+       struct task_struct *idle = ci->idle;
 
-       if (idle)
+       if (idle) {
                free_task(idle);
-
-       idle = fork_idle(cpu);
-       if (IS_ERR(idle)) {
-               printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
-               return PTR_ERR(idle);
+               idle = NULL;
        }
 
+       if (!idle) {
+               idle = fork_idle(cpu);
+               if (IS_ERR(idle)) {
+                       printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+                       return PTR_ERR(idle);
+               }
+               ci->idle = idle;
+       } else {
+               init_idle(idle, cpu);
+       }
        secondary_stack = task_stack_page(idle) + THREAD_SIZE;
 
        ret = platform_boot_secondary(cpu, idle);
@@ -387,13 +396,16 @@ void __cpuinit secondary_start_kernel(void)
        if (_bfin_swrst & SWRST_DBL_FAULT_B) {
                printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
 #ifdef CONFIG_DEBUG_DOUBLEFAULT
-               printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
-                       (int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
-               printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
-               printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
+               printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
+                       initial_pda_coreb.seqstat_doublefault & SEQSTAT_EXCAUSE,
+                       initial_pda_coreb.retx_doublefault);
+               printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
+                       initial_pda_coreb.dcplb_doublefault_addr);
+               printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
+                       initial_pda_coreb.icplb_doublefault_addr);
 #endif
                printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
-                       init_retx_coreb);
+                       initial_pda_coreb.retx);
        }
 
        /*
@@ -403,8 +415,6 @@ void __cpuinit secondary_start_kernel(void)
         */
        init_exception_vectors();
 
-       bfin_setup_caches(cpu);
-
        local_irq_disable();
 
        /* Attach the new idle task to the global mm. */
@@ -423,6 +433,9 @@ void __cpuinit secondary_start_kernel(void)
 
        local_irq_enable();
 
+       bfin_setup_caches(cpu);
+
+       notify_cpu_starting(cpu);
        /*
         * Calibrate loops per jiffy value.
         * IRQs need to be enabled here - D-cache can be invalidated
@@ -441,7 +454,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        platform_prepare_cpus(max_cpus);
        ipi_queue_init();
-       platform_request_ipi(ipi_handler);
+       platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
+       platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -464,8 +478,10 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
        smp_flush_data.start = start;
        smp_flush_data.end = end;
 
-       if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
+       preempt_disable();
+       if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
                printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
+       preempt_enable();
 }
 EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
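
The wait path of smp_send_message() keeps the completion cpumask on the sender's stack and hands each queued message a pointer to it; every target clears its own bit and the sender spins, invalidating its cached copy of the mask, until the mask is empty. A rough user-space analogue of that hand-shake, using C11 atomics and pthreads purely for illustration (none of these names are in the patch), compile with cc -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_WORKERS 3

static atomic_ulong waitmask;             /* one bit per worker */

static void *worker(void *arg)
{
	unsigned long bit = 1UL << (long)arg;

	/* ... do the requested work here ... */
	atomic_fetch_and(&waitmask, ~bit);   /* like cpumask_clear_cpu(cpu, waitmask) */
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];
	long i;

	atomic_store(&waitmask, (1UL << NR_WORKERS) - 1);
	for (i = 0; i < NR_WORKERS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);

	/* Sender side of smp_send_message(..., wait): spin until every
	 * target has acknowledged by clearing its bit. */
	while (atomic_load(&waitmask))
		;

	printf("all workers done\n");
	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}
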