#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
+#include <asm/time.h>
#include <linux/err.h>
+/*
+ * Anomaly notes:
+ * 05000120 - we always define corelock as a 32-bit integer in L2
+ */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
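+/*
+ * A reference sketch, assuming the <asm/cpu.h> definition: the slot is
+ * nothing more than a naturally aligned 32-bit word, presumably so that
+ * TESTSET-based locking in L2 always operates on a full 32-bit location
+ * and stays clear of anomaly 05000120:
+ *
+ *	struct corelock_slot {
+ *		int lock;
+ *	};
+ */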
void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
- struct ipi_message *msg, *mg;
+ struct ipi_message *msg;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id();
	msg_queue = &__get_cpu_var(ipi_msg_queue);
	msg_queue->count++;
spin_lock(&msg_queue->lock);
- list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
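+	/*
+	 * The handlers below drop the queue lock, so a cached "next"
+	 * pointer from list_for_each_entry_safe() could go stale while
+	 * the lock is released; re-read the head on every iteration
+	 * instead.
+	 */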
+ while (!list_empty(&msg_queue->head)) {
+ msg = list_entry(msg_queue->head.next, typeof(*msg), list);
list_del(&msg->list);
switch (msg->type) {
case BFIN_IPI_RESCHEDULE:
kfree(msg);
break;
case BFIN_IPI_CALL_FUNC:
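+			/*
+			 * ipi_call_function() runs the requested callback
+			 * (and, since no kfree() follows here, disposes of
+			 * msg itself), so it must not be entered with the
+			 * queue lock held.
+			 */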
+ spin_unlock(&msg_queue->lock);
ipi_call_function(cpu, msg);
+ spin_lock(&msg_queue->lock);
break;
case BFIN_IPI_CPU_STOP:
+ spin_unlock(&msg_queue->lock);
ipi_cpu_stop(cpu);
+ spin_lock(&msg_queue->lock);
kfree(msg);
break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			kfree(msg);
			break;
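+/*
+ * Not shown in this hunk: once the queue drains, ipi_handler() releases
+ * the lock and acknowledges the interrupt.  A minimal sketch of the tail,
+ * assuming the usual irqreturn_t convention:
+ *
+ *		}
+ *	}
+ *	spin_unlock(&msg_queue->lock);
+ *	return IRQ_HANDLED;
+ * }
+ */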
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
INIT_LIST_HEAD(&msg->list);
msg->call_struct.func = func;
msg->call_struct.info = info;
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
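+		/* add at the tail so receivers see messages in FIFO order */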
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
cpu_set(cpu, callmap);
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
INIT_LIST_HEAD(&msg->list);
msg->call_struct.func = func;
msg->call_struct.info = info;
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
return;
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
	memset(msg, 0, sizeof(*msg));	/* zero the struct, not the pointer */
INIT_LIST_HEAD(&msg->list);
msg->type = BFIN_IPI_RESCHEDULE;
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
return;
msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
	memset(msg, 0, sizeof(*msg));
INIT_LIST_HEAD(&msg->list);
msg->type = BFIN_IPI_CPU_STOP;
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
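+/*
+ * For context, a hedged sketch of the receiving side, ipi_cpu_stop(),
+ * as it is commonly implemented on this port (names assumed): log the
+ * stop, take the CPU out of the online map, and spin with interrupts
+ * off:
+ *
+ *	static void ipi_cpu_stop(unsigned int cpu)
+ *	{
+ *		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
+ *		dump_stack();
+ *		cpu_clear(cpu, cpu_online_map);
+ *		local_irq_disable();
+ *		while (1)
+ *			SSYNC();
+ *	}
+ */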
static void __cpuinit setup_secondary(unsigned int cpu)
{
-#ifndef CONFIG_TICK_SOURCE_SYSTMR0
+#if !defined(CONFIG_TICKSOURCE_GPTMR0)
struct irq_desc *timer_desc;
#endif
unsigned long ilat;
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
CSYNC();
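+	/* ILAT bits are write-one-to-clear: writing back the value just
+	 * read clears any interrupts latched while this core was coming up */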
- /* Reserve the PDA space for the secondary CPU. */
- reserve_pda();
-
	/* Enable interrupt levels IVG7-15. IARs have already been
	 * programmed by the boot CPU. */
- irq_flags |= IMASK_IVG15 |
+ bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICKSOURCE_GPTMR0)
	/* Power down the core timer, just to play it safe. */
bfin_write_TCNTL(0);
unsigned int cpu;
for_each_online_cpu(cpu)
- bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+ bogosum += loops_per_jiffy;
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
smp_flush_data.start = start;
smp_flush_data.end = end;
- if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
+ if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
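+/*
+ * A hedged usage sketch (hypothetical caller): after patching code in
+ * [start, end), flush the local i-cache, then ask the other core(s) to
+ * invalidate theirs:
+ *
+ *	blackfin_icache_flush_range(start, end);
+ *	smp_icache_flush_range_others(start, end);
+ */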
+#ifdef __ARCH_SYNC_CORE_ICACHE
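+/*
+ * Throw away this core's entire i-cache: another core may have modified
+ * code without this core observing it.  The per-cpu counter records how
+ * often that happens.
+ */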
+void resync_core_icache(void)
+{
+ unsigned int cpu = get_cpu();
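+	/* get_cpu() pins us to this core while we touch its per-cpu stats */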
+ blackfin_invalidate_entire_icache();
+ ++per_cpu(cpu_data, cpu).icache_invld_count;
+ put_cpu();
+}
+EXPORT_SYMBOL(resync_core_icache);
+#endif
+
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
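+/* like corelock, barrier_mask sits in L2 so that both cores observe a
+ * single coherent copy of the d-cache barrier state */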