arch/x86/mm/highmem_32.c
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

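/*
 * Illustrative sketch, not part of the original file: a typical
 * sleepable-context caller of kmap()/kunmap().  The helper name and
 * its arguments are hypothetical; only the kmap API usage is the point.
 * Assumes <linux/string.h> for memcpy() and len <= PAGE_SIZE.
 */
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap(page);	/* may sleep, so process context only */

	memcpy(dst, src, len);
	kunmap(page);
}
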
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

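/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that uses kmap_atomic_prot() to map a page with a non-default
 * protection (PAGE_KERNEL_NOCACHE is x86's uncached kernel mapping),
 * e.g. for framebuffer-style pages.  Assumes <linux/string.h>.
 */
static void example_fill_uncached(struct page *page, int pattern)
{
	void *kaddr = kmap_atomic_prot(page, KM_USER0, PAGE_KERNEL_NOCACHE);

	memset(kaddr, pattern, PAGE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
}
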
void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is a bad
	 * idea also, in case the page changes cacheability attributes or
	 * becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}

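/*
 * Illustrative sketch, not part of the original file: the classic
 * fixed-slot usage pattern for this era's atomic kmap API.  The helper
 * name is hypothetical; KM_USER0 is one of the standard km_type slots.
 * No sleeping is allowed between kmap_atomic() and kunmap_atomic().
 */
static void example_zero_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	memset(kaddr, 0, PAGE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
}
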
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */

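/*
 * Illustrative sketch, not part of the original file: reading one word
 * through kmap_atomic_pfn() from memory that has no struct page (the
 * use case i915 GEM relied on).  The helper is hypothetical; the
 * caller must keep offset word-aligned and within the page.
 */
static u32 example_read_pfn_word(unsigned long pfn, unsigned int offset)
{
	void *kaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val;

	val = *(u32 *)(kaddr + offset);
	kunmap_atomic(kaddr, KM_USER0);
	return val;
}
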
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

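/*
 * Illustrative sketch, not part of the original file: for any address
 * returned by kmap_atomic(), kmap_atomic_to_page() recovers the
 * original struct page, whether the page was highmem (fixmap pte
 * lookup) or lowmem (plain virt_to_page()).  The helper is
 * hypothetical and returns nonzero on a successful round trip.
 */
static int example_mapping_roundtrip_ok(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	int ok = (kmap_atomic_to_page(kaddr) == page);

	kunmap_atomic(kaddr, KM_USER0);
	return ok;
}
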
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				 zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}