/* [SPARC64]: Move away from virtual page tables, part 1.
 * (linux-2.6.git: arch/sparc64/mm/tlb.c)
 */
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

/* Per-CPU batch of pending TLB flushes: filled by tlb_batch_add()
 * and drained by flush_tlb_pending().
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };

/* Drain this CPU's pending TLB-flush batch: tear down the user TSB
 * entries for the batched addresses, then flush the hardware TLB
 * (cross-CPU on SMP) if the mm still owns a valid context.  Finally
 * resets the batch so it can be refilled.
 */
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		/* TSB teardown happens before the hardware TLB flush.
		 * NOTE(review): ordering appears deliberate -- a TLB
		 * miss taken after the flush below must not refill
		 * from a stale TSB entry; confirm in flush_tsb_user().
		 */
		flush_tsb_user(mp);

		/* Skip the hardware flush when the mm no longer holds
		 * a valid context; presumably a fresh context will be
		 * allocated on next activation, making stale entries
		 * unreachable -- verify against mmu_context handling.
		 */
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		/* Batch drained; mark it empty for reuse. */
		mp->tlb_nr = 0;
	}
}

/* Queue a TLB flush for @vaddr in @mm after its PTE was changed from
 * @orig, performing any D-cache maintenance the old mapping requires.
 *
 * @mm:    address space the mapping belongs to
 * @vaddr: user virtual address being unmapped/changed
 * @ptep:  pointer to the PTE (unused here; kept for the common
 *         batch-hook interface)
 * @orig:  previous PTE value, inspected for exec/dirty state
 *
 * May drain the per-cpu batch (flush_tlb_pending) when it fills up or
 * when an entry for a different mm arrives.
 */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	/* Page-align the address, then encode the executable bit in
	 * bit 0 of the batched value; the low-level flush code
	 * presumably uses it to also invalidate the I-TLB entry --
	 * confirm in __flush_tlb_pending.
	 */
	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	/* Dirty file-backed pages may need a D-cache flush before the
	 * translation goes away; invalid, reserved, or mapping-less
	 * (anonymous/swap-cache) pages are skipped.
	 */
	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		/* NOTE(review): bit 13 is taken to be the D-cache
		 * alias-color bit (cache larger than the 8K page
		 * size); flush only when the kernel and user virtual
		 * addresses would land in different colors -- confirm
		 * against the CPU's cache geometry.
		 */
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	/* A full-mm teardown flushes the whole context at once, so
	 * batching individual pages would be redundant work.
	 */
	if (mp->fullmm)
		return;

	nr = mp->tlb_nr;

	/* The batch holds addresses for exactly one mm; drain it
	 * before starting to collect entries for a different one.
	 */
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	/* Drain eagerly once the fixed-size address array is full. */
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}