[PATCH] mm: pagefault_{disable,enable}()
arch/mips/mm/highmem.c
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}

void __kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

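/*
 * Usage sketch (illustrative, not part of the original patch): kmap() may
 * sleep while waiting for a free slot in the kmap pool, so it is valid in
 * process context only.  copy_from_highmem_page() is a hypothetical helper;
 * memcpy() is assumed available via <linux/string.h>.
 */
static inline void copy_from_highmem_page(void *dst, struct page *page,
					  size_t len)
{
	void *src = kmap(page);		/* may sleep */

	memcpy(dst, src, len);
	kunmap(page);
}
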
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed, and because kmap() must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
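
/*
 * Usage sketch (illustrative, not part of the original patch): a short,
 * non-sleeping critical section between kmap_atomic() and kunmap_atomic().
 * zero_highmem_page() is a hypothetical helper; KM_USER0 is one of the
 * per-CPU kmap slots indexed by 'type' above.
 */
static inline void zero_highmem_page(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);	/* must not sleep before the unmap */
	kunmap_atomic(vaddr, KM_USER0);
}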

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
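
/*
 * Sketch (illustrative, not part of the original patch):
 * pagefault_disable()/pagefault_enable() nest via the preempt count, so two
 * atomic kmaps may be held at once, provided they use distinct slots and are
 * released in reverse order.  copy_highmem_page() is a hypothetical helper.
 */
static inline void copy_highmem_page(struct page *dst, struct page *src)
{
	void *d = kmap_atomic(dst, KM_USER0);
	void *s = kmap_atomic(src, KM_USER1);

	memcpy(d, s, PAGE_SIZE);
	kunmap_atomic(s, KM_USER1);
	kunmap_atomic(d, KM_USER0);
}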

#ifndef CONFIG_LIMITED_DMA
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
#endif /* CONFIG_LIMITED_DMA */
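
/*
 * Usage sketch (illustrative, not part of the original patch): peek at a
 * word of physical memory that has no struct page, e.g. a firmware-owned
 * frame.  read_phys_word() is a hypothetical helper.
 */
static inline unsigned long read_phys_word(unsigned long phys)
{
	void *vaddr = kmap_atomic_pfn(phys >> PAGE_SHIFT, KM_USER0);
	unsigned long val = *(unsigned long *)(vaddr + (phys & ~PAGE_MASK));

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}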

struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
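
/*
 * Usage sketch (illustrative, not part of the original patch): recover the
 * struct page behind an atomic kmap, e.g. to dirty it after writing through
 * the mapping.  dirty_mapped_page() is a hypothetical helper; it assumes
 * kmap_atomic_to_page() resolves to __kmap_atomic_to_page() above.
 */
static inline void dirty_mapped_page(void *kvaddr)
{
	struct page *page = kmap_atomic_to_page(kvaddr);

	set_page_dirty(page);
}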

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);