Linux-2.6.12-rc2
[linux-3.10.git] / arch / i386 / mm / highmem.c
1 #include <linux/highmem.h>
2
/*
 * Map a page into the kernel's permanent kmap area. May sleep (kmap_high
 * can block waiting for a free slot), so callers must not be atomic.
 * Lowmem pages are already permanently mapped and are returned directly.
 */
void *kmap(struct page *page)
{
	might_sleep();
	return PageHighMem(page) ? kmap_high(page) : page_address(page);
}
10
/*
 * Undo a kmap(). Must not be called from interrupt context; lowmem pages
 * have nothing to undo since their mapping is permanent.
 */
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (PageHighMem(page))
		kunmap_high(page);
}
19
20 /*
21  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
22  * no global lock is needed and because the kmap code must perform a global TLB
23  * invalidation when the kmap pool wraps.
24  *
25  * However when holding an atomic kmap it is not legal to sleep, so atomic
26  * kmaps are appropriate for short, tight code paths only.
27  */
/*
 * Map a page into a per-CPU fixmap slot selected by @type. Disables
 * preemption for the lifetime of the mapping; the matching
 * kunmap_atomic() drops the preempt count again, which is why the
 * lowmem early-return below still increments it first.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	/* Each CPU owns a private bank of KM_TYPE_NR fixmap slots. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Catch reuse of a slot that was never kunmap_atomic()'d. */
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	/*
	 * Install the mapping, then flush only this CPU's TLB entry --
	 * the slot is private to this CPU, so no cross-CPU shootdown
	 * is needed.
	 */
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}
49
/*
 * Tear down a kmap_atomic() mapping and re-enable preemption.
 * Without CONFIG_DEBUG_HIGHMEM the stale pte is simply left in place
 * (it will be overwritten on the slot's next use); with it, the pte is
 * cleared so stray accesses fault immediately.
 */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Addresses below the fixmap area came from the lowmem
	 * early-return in kmap_atomic(): no pte to clear, just drop
	 * the preempt count taken there.
	 */
	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	/* The address must match the slot implied by @type on this CPU. */
	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	__flush_tlb_one(vaddr);
#endif

	/* Pairs with the inc_preempt_count() in kmap_atomic(). */
	dec_preempt_count();
	preempt_check_resched();
}
76
77 struct page *kmap_atomic_to_page(void *ptr)
78 {
79         unsigned long idx, vaddr = (unsigned long)ptr;
80         pte_t *pte;
81
82         if (vaddr < FIXADDR_START)
83                 return virt_to_page(ptr);
84
85         idx = virt_to_fix(vaddr);
86         pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
87         return pte_page(*pte);
88 }
89