/* asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/config.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much: no other CPU can hold a stale TLB
 * entry, so pages may be freed as soon as they are unmapped.
 */
#ifdef CONFIG_SMP
  /* The batch size may be overridden by the architecture. */
  #ifdef ARCH_FREE_PTR_NR
    #define FREE_PTE_NR	ARCH_FREE_PTR_NR
  #else
    #define FREE_PTE_NR	506
  #endif
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR	1
  #define tlb_fast_mode(tlb) 1
#endif

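/*
 * A sketch of what tlb_fast_mode() means for callers (illustrative
 * only, mirroring the logic in tlb_remove_page() below):
 *
 *	if (tlb_fast_mode(tlb))
 *		free_page_and_swap_cache(page);	<- freed immediately
 *	else
 *		tlb->pages[tlb->nr++] = page;	<- held until tlb_flush()
 *
 * On UP the macro is constant 1, so the batching path compiles away.
 */
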
/* struct mmu_gather is an opaque type used by the mm code for passing
 * around any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;	/* set to ~0U means fast mode */
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned int		fullmm;	/* non-zero means full mm flush */
	unsigned long		freed;
	struct page		*pages[FREE_PTE_NR];
};

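/*
 * A sketch of the expected calling sequence, as driven by the unmap
 * paths in mm/memory.c (variable names illustrative only):
 *
 *	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
 *	...
 *	tlb_remove_tlb_entry(tlb, ptep, address);
 *	tlb_remove_page(tlb, page);
 *	...
 *	tlb_finish_mmu(tlb, start, end);
 */
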
/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/* tlb_gather_mmu
 *	Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;

	/* Use fast mode if only one CPU is online */
	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

	tlb->fullmm = full_mm_flush;
	tlb->freed = 0;

	return tlb;
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	int freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	int rss = get_mm_counter(mm, rss);

	/* Don't let rss go negative: clamp freed to the current count. */
	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

static inline unsigned int
tlb_is_full_mm(struct mmu_gather *tlb)
{
	return tlb->fullmm;
}

/* tlb_remove_page
 *	Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		tlb_flush_mmu(tlb, 0, 0);
}

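/*
 * A sketch of the ordering this provides on SMP (illustrative trace,
 * not code from this file): in the batched path a gathered page is
 * only freed after the TLB has been invalidated,
 *
 *	tlb_remove_page(tlb, page);	<- page queued, may still be in TLBs
 *	...
 *	tlb_flush_mmu(tlb, start, end);	<- tlb_flush(), then
 *					   free_pages_and_swap_cache()
 *
 * so a remote CPU holding a stale TLB entry can never touch a page
 * after it has been reused.
 */
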
/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define pte_free_tlb(tlb, ptep)			\
	do {					\
		tlb->need_flush = 1;		\
		__pte_free_tlb(tlb, ptep);	\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp)			\
	do {					\
		tlb->need_flush = 1;		\
		__pud_free_tlb(tlb, pudp);	\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp)			\
	do {					\
		tlb->need_flush = 1;		\
		__pmd_free_tlb(tlb, pmdp);	\
	} while (0)

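/*
 * The __tlb_remove_tlb_entry() and __p??_free_tlb() hooks used above are
 * supplied by the architecture.  As an illustrative sketch (not mandated
 * by this header), an architecture with no special per-entry work might
 * provide:
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 *	#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
 *
 * the latter routing page-table pages through the same batched freeing
 * as ordinary pages.
 */
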
#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */