mm: bugfix: set current->reclaim_state to NULL while returning from kswapd()
[linux-2.6.git] mm/highmem.c
index 7da4a7b..57d82c6 100644
@@ -17,7 +17,7 @@
  */
 
 #include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/swap.h>
 #include <linux/bio.h>
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
-#include <linux/blktrace_api.h>
+#include <linux/kgdb.h>
 #include <asm/tlbflush.h>
 
+
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+#endif
+
 /*
  * Virtual_count is not a pure "count".
  *  0 means that it is not mapped, and has not been mapped
 #ifdef CONFIG_HIGHMEM
 
 unsigned long totalhigh_pages __read_mostly;
+EXPORT_SYMBOL(totalhigh_pages);
+
+
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
 
 unsigned int nr_free_highpages (void)
 {
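
For context, __kmap_atomic_idx is the per-CPU stack index used by the generic
kmap_atomic() slot management, which is why it is defined here and exported for
modules.  A simplified sketch of the helpers in <linux/highmem.h> that consume
it, assuming the interface of that kernel era (debug-only checks omitted, exact
details may differ by version):

static inline int kmap_atomic_idx_push(void)
{
	/* claim the next per-CPU fixmap slot for this nesting level */
	return __this_cpu_inc_return(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
	/* release the slot claimed by the matching push */
	__this_cpu_dec(__kmap_atomic_idx);
}
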
@@ -66,9 +75,29 @@ pte_t * pkmap_page_table;
 
 static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
 
+/*
+ * Most architectures have no use for kmap_high_get(), so let's abstract
+ * the disabling of IRQ out of the locking in that case to save on a
+ * potential useless overhead.
+ */
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+#define lock_kmap()             spin_lock_irq(&kmap_lock)
+#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
+#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
+#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
+#else
+#define lock_kmap()             spin_lock(&kmap_lock)
+#define unlock_kmap()           spin_unlock(&kmap_lock)
+#define lock_kmap_any(flags)    \
+               do { spin_lock(&kmap_lock); (void)(flags); } while (0)
+#define unlock_kmap_any(flags)  \
+               do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+#endif
+
 static void flush_all_zero_pkmaps(void)
 {
        int i;
+       int need_flush = 0;
 
        flush_cache_kmaps();
 
@@ -100,8 +129,10 @@ static void flush_all_zero_pkmaps(void)
                          &pkmap_page_table[i]);
 
                set_page_address(page, NULL);
+               need_flush = 1;
        }
-       flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+       if (need_flush)
+               flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
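
With need_flush in place, the global TLB flush is skipped entirely when no
pkmap slot was actually torn down, which is the common case when nothing is
left to reclaim.  A simplified sketch of the resulting loop (the sanity checks
and comments of the real function are elided):

static void flush_all_zero_pkmaps(void)	/* simplified sketch */
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/* only a count of exactly 1 means "unused but still mapped" */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* tear down the mapping and forget the cached address */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
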
 
 /**
@@ -109,9 +140,9 @@ static void flush_all_zero_pkmaps(void)
  */
 void kmap_flush_unused(void)
 {
-       spin_lock(&kmap_lock);
+       lock_kmap();
        flush_all_zero_pkmaps();
-       spin_unlock(&kmap_lock);
+       unlock_kmap();
 }
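
The lock_kmap*() helpers introduced above let the common code stay oblivious to
whether interrupts must be disabled.  For reference, an architecture opts into
the IRQ-safe variants by defining the hook before this file is built; ARM is
the in-tree user of kmap_high_get().  A purely illustrative sketch (the macro
name is real, the file path and placement are assumptions):

/* arch/<arch>/include/asm/highmem.h -- illustrative only */
#define ARCH_NEEDS_KMAP_HIGH_GET

/*
 * With the hook defined, lock_kmap_any(flags) expands to
 * spin_lock_irqsave(&kmap_lock, flags), making kunmap_high() and
 * kmap_high_get() safe to call with interrupts disabled.  Without it,
 * a plain spin_lock() is used and (void)(flags) only silences the
 * "unused variable" warning.
 */
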
 
 static inline unsigned long map_new_virtual(struct page *page)
@@ -141,10 +172,10 @@ start:
 
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&pkmap_map_wait, &wait);
-                       spin_unlock(&kmap_lock);
+                       unlock_kmap();
                        schedule();
                        remove_wait_queue(&pkmap_map_wait, &wait);
-                       spin_lock(&kmap_lock);
+                       lock_kmap();
 
                        /* Somebody else might have mapped it while we slept */
                        if (page_address(page))
@@ -180,29 +211,59 @@ void *kmap_high(struct page *page)
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         */
-       spin_lock(&kmap_lock);
+       lock_kmap();
        vaddr = (unsigned long)page_address(page);
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
-       spin_unlock(&kmap_lock);
+       unlock_kmap();
        return (void*) vaddr;
 }
 
 EXPORT_SYMBOL(kmap_high);
 
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
 /**
- * kunmap_high - map a highmem page into memory
+ * kmap_high_get - pin a highmem page into memory
+ * @page: &struct page to pin
+ *
+ * Returns the page's current virtual memory address, or NULL if no mapping
+ * exists.  If and only if a non null address is returned then a
+ * matching call to kunmap_high() is necessary.
+ *
+ * This can be called from any context.
+ */
+void *kmap_high_get(struct page *page)
+{
+       unsigned long vaddr, flags;
+
+       lock_kmap_any(flags);
+       vaddr = (unsigned long)page_address(page);
+       if (vaddr) {
+               BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
+               pkmap_count[PKMAP_NR(vaddr)]++;
+       }
+       unlock_kmap_any(flags);
+       return (void*) vaddr;
+}
+#endif
+
+/**
+ * kunmap_high - unmap a highmem page into memory
  * @page: &struct page to unmap
+ *
+ * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
+ * only from user context.
  */
 void kunmap_high(struct page *page)
 {
        unsigned long vaddr;
        unsigned long nr;
+       unsigned long flags;
        int need_wakeup;
 
-       spin_lock(&kmap_lock);
+       lock_kmap_any(flags);
        vaddr = (unsigned long)page_address(page);
        BUG_ON(!vaddr);
        nr = PKMAP_NR(vaddr);
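
Taken together, kmap_high_get() only ever returns an address for a page that
already has a pkmap entry, and pins that entry by bumping pkmap_count; the
caller then owes exactly one kunmap_high().  A hypothetical caller (the
function below is illustrative, not part of the patch):

/* illustrative caller, e.g. cache maintenance that may run with IRQs off */
static void touch_existing_kmap(struct page *page)
{
	void *vaddr = kmap_high_get(page);	/* NULL if the page isn't kmapped */

	if (vaddr) {
		/* ... operate on the existing mapping ... */
		kunmap_high(page);		/* drop the reference taken above */
	}
}
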
@@ -228,7 +289,7 @@ void kunmap_high(struct page *page)
                 */
                need_wakeup = waitqueue_active(&pkmap_map_wait);
        }
-       spin_unlock(&kmap_lock);
+       unlock_kmap_any(flags);
 
        /* do wake-up, if needed, race-free outside of the spin lock */
        if (need_wakeup)
@@ -265,7 +326,7 @@ static struct page_address_slot {
        spinlock_t lock;                        /* Protect this bucket's list */
 } ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
 
-static struct page_address_slot *page_slot(struct page *page)
+static struct page_address_slot *page_slot(const struct page *page)
 {
        return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
 }
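
This hunk, and the one that follows for page_address() itself, only add const
to the page pointer: the lookup never modifies the struct page, so callers
holding a const pointer no longer need a cast.  A trivial illustrative caller:

/* illustrative only: constness now flows through the lookup */
static bool page_has_virtual_mapping(const struct page *page)
{
	return page_address(page) != NULL;
}
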
@@ -276,7 +337,7 @@ static struct page_address_slot *page_slot(struct page *page)
  *
  * Returns the page's virtual address.
  */
-void *page_address(struct page *page)
+void *page_address(const struct page *page)
 {
        unsigned long flags;
        void *ret;