* Mask with 0xf so similar TLB entries aren't written in the same 4-way
* entry group.
*/
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
for (mmu = 1; mmu <= 2; mmu++) {
SUPP_BANK_SEL(mmu); /* Select the MMU */
return;
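
The open-coded save-then-disable pair is folded into local_irq_save() throughout this file. A minimal sketch of the before/after shape; local_irq_restore(flags), elided from the hunks here, is the matching counterpart in both cases:

	unsigned long flags;

	/* Before: two calls, snapshot then mask. */
	local_save_flags(flags);	/* record current IRQ state */
	local_irq_disable();		/* mask interrupts */

	/* After: one call that does both. */
	local_irq_save(flags);

	/* Either way, the critical section ends with the same restore. */
	local_irq_restore(flags);
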
/* Mark the TLB entries that match the page_id as invalid. */
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
for (mmu = 1; mmu <= 2; mmu++) {
SUPP_BANK_SEL(mmu);
* Invalidate those TLB entries that match both the mm context and the
* requested virtual address.
*/
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
for (mmu = 1; mmu <= 2; mmu++) {
SUPP_BANK_SEL(mmu);
return 0;
}
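
The per-entry loop bodies are elided in the hunks above; all three flush routines share roughly the shape sketched below. NUM_TLB_ENTRIES, entry_matches(), and invalidate_entry() are hypothetical stand-ins for the support-register reads and writes the real code performs; SUPP_BANK_SEL() is taken from the hunks themselves:

	unsigned long flags;
	int mmu, i;

	local_irq_save(flags);
	for (mmu = 1; mmu <= 2; mmu++) {
		SUPP_BANK_SEL(mmu);	/* select this MMU's register bank */
		for (i = 0; i < NUM_TLB_ENTRIES; i++) {	/* hypothetical bound */
			/* hypothetical helpers standing in for the TLB
			 * register accesses done by the real code */
			if (entry_matches(i, page_id, addr))
				invalidate_entry(i);
		}
	}
	local_irq_restore(flags);
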
+static DEFINE_SPINLOCK(mmu_context_lock);
+
/* Called in schedule() just before actually doing the switch_to. */
void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
int cpu = smp_processor_id();
/* Make sure there is a MMU context. */
- spin_lock(&next->page_table_lock);
+ spin_lock(&mmu_context_lock);
get_mmu_context(next);
cpu_set(cpu, next->cpu_vm_mask);
- spin_unlock(&next->page_table_lock);
+ spin_unlock(&mmu_context_lock);
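
switch_mm() now serializes context allocation on a dedicated global lock rather than the per-mm page_table_lock, which suggests the state being protected (the page_id allocator) is global rather than per-mm. A minimal sketch of the pattern, with example_grab_context() as a hypothetical wrapper:

	#include <linux/spinlock.h>

	/* Compile-time initialization; the runtime equivalent is
	 * spin_lock_init() on a plain spinlock_t. */
	static DEFINE_SPINLOCK(mmu_context_lock);

	static void example_grab_context(struct mm_struct *next)
	{
		spin_lock(&mmu_context_lock);
		get_mmu_context(next);	/* allocator state shared by all mms */
		spin_unlock(&mmu_context_lock);
	}
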
/*
 * Remember the pgd for the fault handlers. Keep a separate copy of it
per_cpu(current_pgd, cpu) = next->pgd;
/* Switch context in the MMU. */
- if (tsk && tsk->thread_info)
+ if (tsk && task_thread_info(tsk))
{
- SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | tsk->thread_info->tls);
+ SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | task_thread_info(tsk)->tls);
}
else
{
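
The thread_info conversion above swaps direct tsk->thread_info dereferences for the task_thread_info() accessor, which hides where a task's thread_info is actually stored (its definition has varied across trees and architectures). A hedged sketch of the call-site pattern, where example_get_tls() is a hypothetical helper and the tls member comes from this arch's struct thread_info, per the hunk above:

	#include <linux/sched.h>

	static unsigned long example_get_tls(struct task_struct *tsk)
	{
		/* Go through the accessor rather than the raw field so the
		 * call site keeps working if the storage layout changes. */
		return task_thread_info(tsk)->tls;
	}
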