ksm: cleanup some function arguments
[linux-2.6.git] / mm / ksm.c
1 /*
2  * Memory merging support.
3  *
4  * This code enables dynamic sharing of identical pages found in different
5  * memory areas, even if they are not shared by fork()
6  *
7  * Copyright (C) 2008-2009 Red Hat, Inc.
8  * Authors:
9  *      Izik Eidus
10  *      Andrea Arcangeli
11  *      Chris Wright
12  *      Hugh Dickins
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.
15  */
16
17 #include <linux/errno.h>
18 #include <linux/mm.h>
19 #include <linux/fs.h>
20 #include <linux/mman.h>
21 #include <linux/sched.h>
22 #include <linux/rwsem.h>
23 #include <linux/pagemap.h>
24 #include <linux/rmap.h>
25 #include <linux/spinlock.h>
26 #include <linux/jhash.h>
27 #include <linux/delay.h>
28 #include <linux/kthread.h>
29 #include <linux/wait.h>
30 #include <linux/slab.h>
31 #include <linux/rbtree.h>
32 #include <linux/mmu_notifier.h>
33 #include <linux/swap.h>
34 #include <linux/ksm.h>
35
36 #include <asm/tlbflush.h>
37
38 /*
39  * A few notes about the KSM scanning process,
40  * to make it easier to understand the data structures below:
41  *
42  * In order to reduce excessive scanning, KSM sorts the memory pages by their
43  * contents into a data structure that holds pointers to the pages' locations.
44  *
45  * Since the contents of the pages may change at any moment, KSM cannot just
46  * insert the pages into a normal sorted tree and expect it to find anything.
47  * Therefore KSM uses two data structures - the stable and the unstable tree.
48  *
49  * The stable tree holds pointers to all the merged pages (ksm pages), sorted
50  * by their contents.  Because each such page is write-protected, searching on
51  * this tree is fully assured to be working (except when pages are unmapped),
52  * and therefore this tree is called the stable tree.
53  *
54  * In addition to the stable tree, KSM uses a second data structure called the
55  * unstable tree: this tree holds pointers to pages which have been found to
56  * be "unchanged for a period of time".  The unstable tree sorts these pages
57  * by their contents, but since they are not write-protected, KSM cannot rely
58  * upon the unstable tree to work correctly - the unstable tree is liable to
59  * be corrupted as its contents are modified, and so it is called unstable.
60  *
61  * KSM solves this problem by several techniques:
62  *
63  * 1) The unstable tree is flushed every time KSM completes scanning all
64  *    memory areas, and then the tree is rebuilt again from the beginning.
65  * 2) KSM will only insert into the unstable tree those pages whose hash value
66  *    has not changed since the previous scan of all memory areas.
67  * 3) The unstable tree is a red-black tree - so its balancing is based on the
68  *    colors of the nodes and not on their contents, assuring that even when
69  *    the tree gets "corrupted" it won't get out of balance, so scanning time
70  *    remains the same (also, searching and inserting nodes in an rbtree uses
71  *    the same algorithm, so we have no overhead when we flush and rebuild).
72  * 4) KSM never flushes the stable tree, which means that even if it were to
73  *    take 10 attempts to find a page in the unstable tree, once it is found,
74  *    it is secured in the stable tree.  (When we scan a new page, we first
75  *    compare it against the stable tree, and then against the unstable tree.)
76  */
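
/*
 * Putting those notes together, cmp_and_merge_page() handles each scanned
 * page roughly as follows:
 *
 *	search the stable tree; if an identical ksm page is found, merge
 *		into it and append this rmap_item to that node's list;
 *	else if the page's checksum changed since the last scan, only
 *		record the new checksum (the page is too volatile);
 *	else search/insert in the unstable tree; if an identical page is
 *		found there, merge the two into a new ksm page and move
 *		its rmap_item from the unstable to the stable tree.
 */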
77
78 /**
79  * struct mm_slot - ksm information per mm that is being scanned
80  * @link: link to the mm_slots hash list
81  * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
82  * @rmap_list: head for this mm_slot's list of rmap_items
83  * @mm: the mm that this information is valid for
84  */
85 struct mm_slot {
86         struct hlist_node link;
87         struct list_head mm_list;
88         struct list_head rmap_list;
89         struct mm_struct *mm;
90 };
91
92 /**
93  * struct ksm_scan - cursor for scanning
94  * @mm_slot: the current mm_slot we are scanning
95  * @address: the next address inside that mm_slot to be scanned
96  * @rmap_item: the current rmap that we are scanning inside the rmap_list
97  * @seqnr: count of completed full scans (needed when removing unstable node)
98  *
99  * There is only the one ksm_scan instance of this cursor structure.
100  */
101 struct ksm_scan {
102         struct mm_slot *mm_slot;
103         unsigned long address;
104         struct rmap_item *rmap_item;
105         unsigned long seqnr;
106 };
107
108 /**
109  * struct rmap_item - reverse mapping item for virtual addresses
110  * @link: link into mm_slot's rmap_list (rmap_list is per mm)
111  * @mm: the memory structure this rmap_item is pointing into
112  * @address: the virtual address this rmap_item tracks (+ flags in low bits)
113  * @oldchecksum: previous checksum of the page at that virtual address
114  * @node: rb_node of this rmap_item in either unstable or stable tree
115  * @next: next rmap_item hanging off the same node of the stable tree
116  * @prev: previous rmap_item hanging off the same node of the stable tree
117  */
118 struct rmap_item {
119         struct list_head link;
120         struct mm_struct *mm;
121         unsigned long address;          /* + low bits used for flags below */
122         union {
123                 unsigned int oldchecksum;               /* when unstable */
124                 struct rmap_item *next;                 /* when stable */
125         };
126         union {
127                 struct rb_node node;                    /* when tree node */
128                 struct rmap_item *prev;                 /* in stable list */
129         };
130 };
131
132 #define SEQNR_MASK      0x0ff   /* low bits of unstable tree seqnr */
133 #define NODE_FLAG       0x100   /* is a node of unstable or stable tree */
134 #define STABLE_FLAG     0x200   /* is a node or list item of stable tree */
135
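/*
 * rmap_item->address is always page-aligned, so its low bits are free to
 * carry state: SEQNR_MASK holds the unstable-tree scan seqnr the item was
 * inserted under, NODE_FLAG marks an item that is an rb-tree node, and
 * STABLE_FLAG marks membership of the stable tree.  The virtual address
 * itself is recovered with (address & PAGE_MASK).
 */
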
136 /* The stable and unstable tree heads */
137 static struct rb_root root_stable_tree = RB_ROOT;
138 static struct rb_root root_unstable_tree = RB_ROOT;
139
140 #define MM_SLOTS_HASH_HEADS 1024
141 static struct hlist_head *mm_slots_hash;
142
143 static struct mm_slot ksm_mm_head = {
144         .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
145 };
146 static struct ksm_scan ksm_scan = {
147         .mm_slot = &ksm_mm_head,
148 };
149
150 static struct kmem_cache *rmap_item_cache;
151 static struct kmem_cache *mm_slot_cache;
152
153 /* The number of nodes in the stable tree */
154 static unsigned long ksm_pages_shared;
155
156 /* The number of page slots additionally sharing those nodes */
157 static unsigned long ksm_pages_sharing;
158
159 /* The number of nodes in the unstable tree */
160 static unsigned long ksm_pages_unshared;
161
162 /* The number of rmap_items in use: to calculate pages_volatile */
163 static unsigned long ksm_rmap_items;
164
165 /* Limit on the number of unswappable pages used */
166 static unsigned long ksm_max_kernel_pages;
167
168 /* Number of pages ksmd should scan in one batch */
169 static unsigned int ksm_thread_pages_to_scan = 100;
170
171 /* Milliseconds ksmd should sleep between batches */
172 static unsigned int ksm_thread_sleep_millisecs = 20;
173
174 #define KSM_RUN_STOP    0
175 #define KSM_RUN_MERGE   1
176 #define KSM_RUN_UNMERGE 2
177 static unsigned int ksm_run = KSM_RUN_STOP;
178
179 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
180 static DEFINE_MUTEX(ksm_thread_mutex);
181 static DEFINE_SPINLOCK(ksm_mmlist_lock);
182
183 #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
184                 sizeof(struct __struct), __alignof__(struct __struct),\
185                 (__flags), NULL)
186
187 static int __init ksm_slab_init(void)
188 {
189         rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
190         if (!rmap_item_cache)
191                 goto out;
192
193         mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
194         if (!mm_slot_cache)
195                 goto out_free;
196
197         return 0;
198
199 out_free:
200         kmem_cache_destroy(rmap_item_cache);
201 out:
202         return -ENOMEM;
203 }
204
205 static void __init ksm_slab_free(void)
206 {
207         kmem_cache_destroy(mm_slot_cache);
208         kmem_cache_destroy(rmap_item_cache);
209         mm_slot_cache = NULL;
210 }
211
212 static inline struct rmap_item *alloc_rmap_item(void)
213 {
214         struct rmap_item *rmap_item;
215
216         rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
217         if (rmap_item)
218                 ksm_rmap_items++;
219         return rmap_item;
220 }
221
222 static inline void free_rmap_item(struct rmap_item *rmap_item)
223 {
224         ksm_rmap_items--;
225         rmap_item->mm = NULL;   /* debug safety */
226         kmem_cache_free(rmap_item_cache, rmap_item);
227 }
228
229 static inline struct mm_slot *alloc_mm_slot(void)
230 {
231         if (!mm_slot_cache)     /* initialization failed */
232                 return NULL;
233         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
234 }
235
236 static inline void free_mm_slot(struct mm_slot *mm_slot)
237 {
238         kmem_cache_free(mm_slot_cache, mm_slot);
239 }
240
241 static int __init mm_slots_hash_init(void)
242 {
243         mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
244                                 GFP_KERNEL);
245         if (!mm_slots_hash)
246                 return -ENOMEM;
247         return 0;
248 }
249
250 static void __init mm_slots_hash_free(void)
251 {
252         kfree(mm_slots_hash);
253 }
254
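/*
 * mm_structs are allocated from a slab cache, so their addresses are spaced
 * roughly sizeof(struct mm_struct) apart: dividing the pointer by that size
 * before taking it modulo MM_SLOTS_HASH_HEADS (as get_mm_slot() and
 * insert_to_mm_slots_hash() below do) spreads them over the hash buckets.
 */
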
255 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
256 {
257         struct mm_slot *mm_slot;
258         struct hlist_head *bucket;
259         struct hlist_node *node;
260
261         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
262                                 % MM_SLOTS_HASH_HEADS];
263         hlist_for_each_entry(mm_slot, node, bucket, link) {
264                 if (mm == mm_slot->mm)
265                         return mm_slot;
266         }
267         return NULL;
268 }
269
270 static void insert_to_mm_slots_hash(struct mm_struct *mm,
271                                     struct mm_slot *mm_slot)
272 {
273         struct hlist_head *bucket;
274
275         bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
276                                 % MM_SLOTS_HASH_HEADS];
277         mm_slot->mm = mm;
278         INIT_LIST_HEAD(&mm_slot->rmap_list);
279         hlist_add_head(&mm_slot->link, bucket);
280 }
281
282 static inline int in_stable_tree(struct rmap_item *rmap_item)
283 {
284         return rmap_item->address & STABLE_FLAG;
285 }
286
287 /*
288  * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
289  * page tables after it has passed through ksm_exit() - which, if necessary,
290  * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
291  * a special flag: they can just back out as soon as mm_users goes to zero.
292  * ksm_test_exit() is used throughout to make this test for exit: in some
293  * places for correctness, in some places just to avoid unnecessary work.
294  */
295 static inline bool ksm_test_exit(struct mm_struct *mm)
296 {
297         return atomic_read(&mm->mm_users) == 0;
298 }
299
300 /*
301  * We use break_ksm to break COW on a ksm page: it's a stripped down
302  *
303  *      if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
304  *              put_page(page);
305  *
306  * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
307  * in case the application has unmapped and remapped mm,addr meanwhile.
308  * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
309  * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
310  */
311 static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
312 {
313         struct page *page;
314         int ret = 0;
315
316         do {
317                 cond_resched();
318                 page = follow_page(vma, addr, FOLL_GET);
319                 if (!page)
320                         break;
321                 if (PageKsm(page))
322                         ret = handle_mm_fault(vma->vm_mm, vma, addr,
323                                                         FAULT_FLAG_WRITE);
324                 else
325                         ret = VM_FAULT_WRITE;
326                 put_page(page);
327         } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
328         /*
329          * We must loop because handle_mm_fault() may back out if there's
330          * any difficulty e.g. if pte accessed bit gets updated concurrently.
331          *
332          * VM_FAULT_WRITE is what we have been hoping for: it indicates that
333          * COW has been broken, even if the vma does not permit VM_WRITE;
334          * but note that a concurrent fault might break PageKsm for us.
335          *
336          * VM_FAULT_SIGBUS could occur if we race with truncation of the
337          * backing file, which also invalidates anonymous pages: that's
338          * okay, that truncation will have unmapped the PageKsm for us.
339          *
340          * VM_FAULT_OOM: at the time of writing (late July 2009), setting
341          * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
342          * current task has TIF_MEMDIE set, and will be OOM killed on return
343          * to user; and ksmd, having no mm, would never be chosen for that.
344          *
345          * But if the mm is in a limited mem_cgroup, then the fault may fail
346          * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
347          * even ksmd can fail in this way - though it's usually breaking ksm
348          * just to undo a merge it made a moment before, so unlikely to oom.
349          *
350          * That's a pity: we might therefore have more kernel pages allocated
351          * than we're counting as nodes in the stable tree; but ksm_do_scan
352          * will retry to break_cow on each pass, so should recover the page
353          * in due course.  The important thing is to not let VM_MERGEABLE
354          * be cleared while any such pages might remain in the area.
355          */
356         return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
357 }
358
359 static void break_cow(struct rmap_item *rmap_item)
360 {
361         struct mm_struct *mm = rmap_item->mm;
362         unsigned long addr = rmap_item->address;
363         struct vm_area_struct *vma;
364
365         down_read(&mm->mmap_sem);
366         if (ksm_test_exit(mm))
367                 goto out;
368         vma = find_vma(mm, addr);
369         if (!vma || vma->vm_start > addr)
370                 goto out;
371         if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
372                 goto out;
373         break_ksm(vma, addr);
374 out:
375         up_read(&mm->mmap_sem);
376 }
377
378 static struct page *get_mergeable_page(struct rmap_item *rmap_item)
379 {
380         struct mm_struct *mm = rmap_item->mm;
381         unsigned long addr = rmap_item->address;
382         struct vm_area_struct *vma;
383         struct page *page;
384
385         down_read(&mm->mmap_sem);
386         if (ksm_test_exit(mm))
387                 goto out;
388         vma = find_vma(mm, addr);
389         if (!vma || vma->vm_start > addr)
390                 goto out;
391         if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
392                 goto out;
393
394         page = follow_page(vma, addr, FOLL_GET);
395         if (!page)
396                 goto out;
397         if (PageAnon(page)) {
398                 flush_anon_page(vma, page, addr);
399                 flush_dcache_page(page);
400         } else {
401                 put_page(page);
402 out:            page = NULL;
403         }
404         up_read(&mm->mmap_sem);
405         return page;
406 }
407
408 /*
409  * get_ksm_page: checks if the page at the virtual address in rmap_item
410  * is still PageKsm, in which case we can trust the content of the page,
412  * and if so returns that page; but NULL if the page has been zapped.
412  */
413 static struct page *get_ksm_page(struct rmap_item *rmap_item)
414 {
415         struct page *page;
416
417         page = get_mergeable_page(rmap_item);
418         if (page && !PageKsm(page)) {
419                 put_page(page);
420                 page = NULL;
421         }
422         return page;
423 }
424
425 /*
426  * Removing rmap_item from stable or unstable tree.
427  * This function will clean the information from the stable/unstable tree.
428  */
429 static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
430 {
431         if (in_stable_tree(rmap_item)) {
432                 struct rmap_item *next_item = rmap_item->next;
433
434                 if (rmap_item->address & NODE_FLAG) {
435                         if (next_item) {
436                                 rb_replace_node(&rmap_item->node,
437                                                 &next_item->node,
438                                                 &root_stable_tree);
439                                 next_item->address |= NODE_FLAG;
440                                 ksm_pages_sharing--;
441                         } else {
442                                 rb_erase(&rmap_item->node, &root_stable_tree);
443                                 ksm_pages_shared--;
444                         }
445                 } else {
446                         struct rmap_item *prev_item = rmap_item->prev;
447
448                         BUG_ON(prev_item->next != rmap_item);
449                         prev_item->next = next_item;
450                         if (next_item) {
451                                 BUG_ON(next_item->prev != rmap_item);
452                                 next_item->prev = rmap_item->prev;
453                         }
454                         ksm_pages_sharing--;
455                 }
456
457                 rmap_item->next = NULL;
458                 rmap_item->address &= PAGE_MASK;
459
460         } else if (rmap_item->address & NODE_FLAG) {
461                 unsigned char age;
462                 /*
463                  * Usually ksmd can and must skip the rb_erase, because
464                  * root_unstable_tree was already reset to RB_ROOT.
465                  * But be careful when an mm is exiting: do the rb_erase
466                  * if this rmap_item was inserted by this scan, rather
467                  * than left over from before.
468                  */
469                 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
470                 BUG_ON(age > 1);
471                 if (!age)
472                         rb_erase(&rmap_item->node, &root_unstable_tree);
473
474                 ksm_pages_unshared--;
475                 rmap_item->address &= PAGE_MASK;
476         }
477
478         cond_resched();         /* we're called from many long loops */
479 }
480
481 static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
482                                        struct list_head *cur)
483 {
484         struct rmap_item *rmap_item;
485
486         while (cur != &mm_slot->rmap_list) {
487                 rmap_item = list_entry(cur, struct rmap_item, link);
488                 cur = cur->next;
489                 remove_rmap_item_from_tree(rmap_item);
490                 list_del(&rmap_item->link);
491                 free_rmap_item(rmap_item);
492         }
493 }
494
495 /*
496  * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
497  * than check every pte of a given vma, the locking doesn't quite work for
498  * that - an rmap_item is assigned to the stable tree after inserting ksm
499  * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
500  * rmap_items from parent to child at fork time (so as not to waste time
501  * if exit comes before the next scan reaches it).
502  *
503  * Similarly, although we'd like to remove rmap_items (so updating counts
504  * and freeing memory) when unmerging an area, it's easier to leave that
505  * to the next pass of ksmd - consider, for example, how ksmd might be
506  * in cmp_and_merge_page on one of the rmap_items we would be removing.
507  */
508 static int unmerge_ksm_pages(struct vm_area_struct *vma,
509                              unsigned long start, unsigned long end)
510 {
511         unsigned long addr;
512         int err = 0;
513
514         for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
515                 if (ksm_test_exit(vma->vm_mm))
516                         break;
517                 if (signal_pending(current))
518                         err = -ERESTARTSYS;
519                 else
520                         err = break_ksm(vma, addr);
521         }
522         return err;
523 }
524
525 #ifdef CONFIG_SYSFS
526 /*
527  * Only called through the sysfs control interface:
528  */
529 static int unmerge_and_remove_all_rmap_items(void)
530 {
531         struct mm_slot *mm_slot;
532         struct mm_struct *mm;
533         struct vm_area_struct *vma;
534         int err = 0;
535
536         spin_lock(&ksm_mmlist_lock);
537         ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
538                                                 struct mm_slot, mm_list);
539         spin_unlock(&ksm_mmlist_lock);
540
541         for (mm_slot = ksm_scan.mm_slot;
542                         mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
543                 mm = mm_slot->mm;
544                 down_read(&mm->mmap_sem);
545                 for (vma = mm->mmap; vma; vma = vma->vm_next) {
546                         if (ksm_test_exit(mm))
547                                 break;
548                         if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
549                                 continue;
550                         err = unmerge_ksm_pages(vma,
551                                                 vma->vm_start, vma->vm_end);
552                         if (err)
553                                 goto error;
554                 }
555
556                 remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
557
558                 spin_lock(&ksm_mmlist_lock);
559                 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
560                                                 struct mm_slot, mm_list);
561                 if (ksm_test_exit(mm)) {
562                         hlist_del(&mm_slot->link);
563                         list_del(&mm_slot->mm_list);
564                         spin_unlock(&ksm_mmlist_lock);
565
566                         free_mm_slot(mm_slot);
567                         clear_bit(MMF_VM_MERGEABLE, &mm->flags);
568                         up_read(&mm->mmap_sem);
569                         mmdrop(mm);
570                 } else {
571                         spin_unlock(&ksm_mmlist_lock);
572                         up_read(&mm->mmap_sem);
573                 }
574         }
575
576         ksm_scan.seqnr = 0;
577         return 0;
578
579 error:
580         up_read(&mm->mmap_sem);
581         spin_lock(&ksm_mmlist_lock);
582         ksm_scan.mm_slot = &ksm_mm_head;
583         spin_unlock(&ksm_mmlist_lock);
584         return err;
585 }
586 #endif /* CONFIG_SYSFS */
587
588 static u32 calc_checksum(struct page *page)
589 {
590         u32 checksum;
591         void *addr = kmap_atomic(page, KM_USER0);
592         checksum = jhash2(addr, PAGE_SIZE / 4, 17);
593         kunmap_atomic(addr, KM_USER0);
594         return checksum;
595 }
596
597 static int memcmp_pages(struct page *page1, struct page *page2)
598 {
599         char *addr1, *addr2;
600         int ret;
601
602         addr1 = kmap_atomic(page1, KM_USER0);
603         addr2 = kmap_atomic(page2, KM_USER1);
604         ret = memcmp(addr1, addr2, PAGE_SIZE);
605         kunmap_atomic(addr2, KM_USER1);
606         kunmap_atomic(addr1, KM_USER0);
607         return ret;
608 }
609
610 static inline int pages_identical(struct page *page1, struct page *page2)
611 {
612         return !memcmp_pages(page1, page2);
613 }
614
615 static int write_protect_page(struct vm_area_struct *vma, struct page *page,
616                               pte_t *orig_pte)
617 {
618         struct mm_struct *mm = vma->vm_mm;
619         unsigned long addr;
620         pte_t *ptep;
621         spinlock_t *ptl;
622         int swapped;
623         int err = -EFAULT;
624
625         addr = page_address_in_vma(page, vma);
626         if (addr == -EFAULT)
627                 goto out;
628
629         ptep = page_check_address(page, mm, addr, &ptl, 0);
630         if (!ptep)
631                 goto out;
632
633         if (pte_write(*ptep)) {
634                 pte_t entry;
635
636                 swapped = PageSwapCache(page);
637                 flush_cache_page(vma, addr, page_to_pfn(page));
638                 /*
639                  * Ok this is tricky: when get_user_pages_fast() runs it doesn't
640                  * take any lock, therefore the check that we are going to make
641                  * with the pagecount against the mapcount is racy, and
642                  * O_DIRECT can happen right after the check.
643                  * So we clear the pte and flush the tlb before the check;
644                  * this assures us that no O_DIRECT can happen after the check
645                  * or in the middle of the check.
646                  */
647                 entry = ptep_clear_flush(vma, addr, ptep);
648                 /*
649                  * Check that no O_DIRECT or similar I/O is in progress on the
650                  * page
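                 * (which our caller holds a reference on): apart from that
                 * reference, an anonymous page is normally held only by its
                 * pte mappings and, if PageSwapCache, by the swap cache -
                 * hence the expected count of page_mapcount() + 1 + swapped
                 * below.  Anything higher suggests some other user (such as
                 * get_user_pages) still has the page pinned.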
651                  */
652                 if (page_mapcount(page) + 1 + swapped != page_count(page)) {
653                         set_pte_at_notify(mm, addr, ptep, entry);
654                         goto out_unlock;
655                 }
656                 entry = pte_wrprotect(entry);
657                 set_pte_at_notify(mm, addr, ptep, entry);
658         }
659         *orig_pte = *ptep;
660         err = 0;
661
662 out_unlock:
663         pte_unmap_unlock(ptep, ptl);
664 out:
665         return err;
666 }
667
668 /**
669  * replace_page - replace page in vma by new ksm page
670  * @vma:      vma that holds the pte pointing to page
671  * @page:     the page we are replacing by kpage
672  * @kpage:    the ksm page we replace page by
673  * @orig_pte: the original value of the pte
674  *
675  * Returns 0 on success, -EFAULT on failure.
676  */
677 static int replace_page(struct vm_area_struct *vma, struct page *page,
678                         struct page *kpage, pte_t orig_pte)
679 {
680         struct mm_struct *mm = vma->vm_mm;
681         pgd_t *pgd;
682         pud_t *pud;
683         pmd_t *pmd;
684         pte_t *ptep;
685         spinlock_t *ptl;
686         unsigned long addr;
687         int err = -EFAULT;
688
689         addr = page_address_in_vma(page, vma);
690         if (addr == -EFAULT)
691                 goto out;
692
693         pgd = pgd_offset(mm, addr);
694         if (!pgd_present(*pgd))
695                 goto out;
696
697         pud = pud_offset(pgd, addr);
698         if (!pud_present(*pud))
699                 goto out;
700
701         pmd = pmd_offset(pud, addr);
702         if (!pmd_present(*pmd))
703                 goto out;
704
705         ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
706         if (!pte_same(*ptep, orig_pte)) {
707                 pte_unmap_unlock(ptep, ptl);
708                 goto out;
709         }
710
711         get_page(kpage);
712         page_add_ksm_rmap(kpage);
713
714         flush_cache_page(vma, addr, pte_pfn(*ptep));
715         ptep_clear_flush(vma, addr, ptep);
716         set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
717
718         page_remove_rmap(page);
719         put_page(page);
720
721         pte_unmap_unlock(ptep, ptl);
722         err = 0;
723 out:
724         return err;
725 }
726
727 /*
728  * try_to_merge_one_page - take two pages and merge them into one
729  * @vma: the vma that holds the pte pointing to page
730  * @page: the PageAnon page that we want to replace with kpage
731  * @kpage: the PageKsm page (or newly allocated page which page_add_ksm_rmap
732  *         will make PageKsm) that we want to map instead of page
733  *
734  * This function returns 0 if the pages were merged, -EFAULT otherwise.
735  */
736 static int try_to_merge_one_page(struct vm_area_struct *vma,
737                                  struct page *page, struct page *kpage)
738 {
739         pte_t orig_pte = __pte(0);
740         int err = -EFAULT;
741
742         if (!(vma->vm_flags & VM_MERGEABLE))
743                 goto out;
744         if (!PageAnon(page))
745                 goto out;
746
747         /*
748          * We need the page lock to read a stable PageSwapCache in
749          * write_protect_page().  We use trylock_page() instead of
750          * lock_page() because we don't want to wait here - we
751          * prefer to continue scanning and merging different pages,
752          * then come back to this page when it is unlocked.
753          */
754         if (!trylock_page(page))
755                 goto out;
756         /*
757          * If this anonymous page is mapped only here, its pte may need
758          * to be write-protected.  If it's mapped elsewhere, all of its
759          * ptes are necessarily already write-protected.  But in either
760          * case, we need to lock and check page_count is not raised.
761          */
762         if (write_protect_page(vma, page, &orig_pte) == 0 &&
763             pages_identical(page, kpage))
764                 err = replace_page(vma, page, kpage, orig_pte);
765
766         unlock_page(page);
767 out:
768         return err;
769 }
770
771 /*
772  * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
773  * but no new kernel page is allocated: kpage must already be a ksm page.
774  *
775  * This function returns 0 if the pages were merged, -EFAULT otherwise.
776  */
777 static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
778                                       struct page *page, struct page *kpage)
779 {
780         struct mm_struct *mm = rmap_item->mm;
781         struct vm_area_struct *vma;
782         int err = -EFAULT;
783
784         down_read(&mm->mmap_sem);
785         if (ksm_test_exit(mm))
786                 goto out;
787         vma = find_vma(mm, rmap_item->address);
788         if (!vma || vma->vm_start > rmap_item->address)
789                 goto out;
790
791         err = try_to_merge_one_page(vma, page, kpage);
792 out:
793         up_read(&mm->mmap_sem);
794         return err;
795 }
796
797 /*
798  * try_to_merge_two_pages - take two identical pages and prepare them
799  * to be merged into one page.
800  *
801  * This function returns the kpage if we successfully merged two identical
802  * pages into one ksm page, NULL otherwise.
803  *
804  * Note that this function allocates a new kernel page: if one of the pages
805  * is already a ksm page, try_to_merge_with_ksm_page should be used.
806  */
807 static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
808                                            struct page *page,
809                                            struct rmap_item *tree_rmap_item,
810                                            struct page *tree_page)
811 {
812         struct mm_struct *mm = rmap_item->mm;
813         struct vm_area_struct *vma;
814         struct page *kpage;
815         int err = -EFAULT;
816
817         /*
818          * The number of nodes in the stable tree
819          * is the number of kernel pages that we hold.
820          */
821         if (ksm_max_kernel_pages &&
822             ksm_max_kernel_pages <= ksm_pages_shared)
823                 return NULL;
824
825         kpage = alloc_page(GFP_HIGHUSER);
826         if (!kpage)
827                 return NULL;
828
829         down_read(&mm->mmap_sem);
830         if (ksm_test_exit(mm))
831                 goto up;
832         vma = find_vma(mm, rmap_item->address);
833         if (!vma || vma->vm_start > rmap_item->address)
834                 goto up;
835
836         copy_user_highpage(kpage, page, rmap_item->address, vma);
837         err = try_to_merge_one_page(vma, page, kpage);
838 up:
839         up_read(&mm->mmap_sem);
840
841         if (!err) {
842                 err = try_to_merge_with_ksm_page(tree_rmap_item,
843                                                         tree_page, kpage);
844                 /*
845                  * If that fails, we have a ksm page with only one pte
846                  * pointing to it: so break it.
847                  */
848                 if (err)
849                         break_cow(rmap_item);
850         }
851         if (err) {
852                 put_page(kpage);
853                 kpage = NULL;
854         }
855         return kpage;
856 }
857
858 /*
859  * stable_tree_search - search for page inside the stable tree
860  *
861  * This function checks if there is a page inside the stable tree
862  * with identical content to the page that we are scanning right now.
863  *
864  * This function returns a pointer to the identical rmap_item if found,
865  * NULL otherwise.
866  */
867 static struct rmap_item *stable_tree_search(struct page *page,
868                                             struct page **tree_pagep)
869 {
870         struct rb_node *node = root_stable_tree.rb_node;
871
872         while (node) {
873                 struct rmap_item *tree_rmap_item, *next_rmap_item;
874                 struct page *tree_page;
875                 int ret;
876
877                 tree_rmap_item = rb_entry(node, struct rmap_item, node);
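                /*
                 * A node's ksm page may meanwhile have been zapped at some
                 * of its addresses: walk the rmap_items hanging off this
                 * node until one still supplies the page, dropping any
                 * stale items from the tree as we go.
                 */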
878                 while (tree_rmap_item) {
879                         BUG_ON(!in_stable_tree(tree_rmap_item));
880                         cond_resched();
881                         tree_page = get_ksm_page(tree_rmap_item);
882                         if (tree_page)
883                                 break;
884                         next_rmap_item = tree_rmap_item->next;
885                         remove_rmap_item_from_tree(tree_rmap_item);
886                         tree_rmap_item = next_rmap_item;
887                 }
888                 if (!tree_rmap_item)
889                         return NULL;
890
891                 ret = memcmp_pages(page, tree_page);
892
893                 if (ret < 0) {
894                         put_page(tree_page);
895                         node = node->rb_left;
896                 } else if (ret > 0) {
897                         put_page(tree_page);
898                         node = node->rb_right;
899                 } else {
900                         *tree_pagep = tree_page;
901                         return tree_rmap_item;
902                 }
903         }
904
905         return NULL;
906 }
907
908 /*
909  * stable_tree_insert - insert rmap_item pointing to new ksm page
910  * into the stable tree.
911  *
912  * This function returns the rmap_item on success, NULL otherwise.
913  */
914 static struct rmap_item *stable_tree_insert(struct page *kpage,
915                                             struct rmap_item *rmap_item)
916 {
917         struct rb_node **new = &root_stable_tree.rb_node;
918         struct rb_node *parent = NULL;
919
920         while (*new) {
921                 struct rmap_item *tree_rmap_item, *next_rmap_item;
922                 struct page *tree_page;
923                 int ret;
924
925                 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
926                 while (tree_rmap_item) {
927                         BUG_ON(!in_stable_tree(tree_rmap_item));
928                         cond_resched();
929                         tree_page = get_ksm_page(tree_rmap_item);
930                         if (tree_page)
931                                 break;
932                         next_rmap_item = tree_rmap_item->next;
933                         remove_rmap_item_from_tree(tree_rmap_item);
934                         tree_rmap_item = next_rmap_item;
935                 }
936                 if (!tree_rmap_item)
937                         return NULL;
938
939                 ret = memcmp_pages(kpage, tree_page);
940                 put_page(tree_page);
941
942                 parent = *new;
943                 if (ret < 0)
944                         new = &parent->rb_left;
945                 else if (ret > 0)
946                         new = &parent->rb_right;
947                 else {
948                         /*
949                          * It is not a bug that stable_tree_search() didn't
950                          * find this node, because at that time our page was
951                          * not yet write-protected, so it may have changed since.
952                          */
953                         return NULL;
954                 }
955         }
956
957         rmap_item->address |= NODE_FLAG | STABLE_FLAG;
958         rmap_item->next = NULL;
959         rb_link_node(&rmap_item->node, parent, new);
960         rb_insert_color(&rmap_item->node, &root_stable_tree);
961
962         ksm_pages_shared++;
963         return rmap_item;
964 }
965
966 /*
967  * unstable_tree_search_insert - search for identical page,
968  * else insert rmap_item into the unstable tree.
969  *
970  * This function searches for a page in the unstable tree identical to the
971  * page currently being scanned; and if no identical page is found in the
972  * tree, we insert rmap_item as a new object into the unstable tree.
973  *
974  * This function returns a pointer to the rmap_item found to be identical
975  * to the currently scanned page, NULL otherwise.
976  *
977  * This function does both searching and inserting, because they share
978  * the same walking algorithm in an rbtree.
979  */
980 static
981 struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
982                                               struct page *page,
983                                               struct page **tree_pagep)
984
985 {
986         struct rb_node **new = &root_unstable_tree.rb_node;
987         struct rb_node *parent = NULL;
988
989         while (*new) {
990                 struct rmap_item *tree_rmap_item;
991                 struct page *tree_page;
992                 int ret;
993
994                 cond_resched();
995                 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
996                 tree_page = get_mergeable_page(tree_rmap_item);
997                 if (!tree_page)
998                         return NULL;
999
1000                 /*
1001                  * Don't substitute a ksm page for a forked page.
1002                  */
1003                 if (page == tree_page) {
1004                         put_page(tree_page);
1005                         return NULL;
1006                 }
1007
1008                 ret = memcmp_pages(page, tree_page);
1009
1010                 parent = *new;
1011                 if (ret < 0) {
1012                         put_page(tree_page);
1013                         new = &parent->rb_left;
1014                 } else if (ret > 0) {
1015                         put_page(tree_page);
1016                         new = &parent->rb_right;
1017                 } else {
1018                         *tree_pagep = tree_page;
1019                         return tree_rmap_item;
1020                 }
1021         }
1022
1023         rmap_item->address |= NODE_FLAG;
1024         rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
1025         rb_link_node(&rmap_item->node, parent, new);
1026         rb_insert_color(&rmap_item->node, &root_unstable_tree);
1027
1028         ksm_pages_unshared++;
1029         return NULL;
1030 }
1031
1032 /*
1033  * stable_tree_append - add another rmap_item to the linked list of
1034  * rmap_items hanging off a given node of the stable tree, all sharing
1035  * the same ksm page.
1036  */
1037 static void stable_tree_append(struct rmap_item *rmap_item,
1038                                struct rmap_item *tree_rmap_item)
1039 {
1040         rmap_item->next = tree_rmap_item->next;
1041         rmap_item->prev = tree_rmap_item;
1042
1043         if (tree_rmap_item->next)
1044                 tree_rmap_item->next->prev = rmap_item;
1045
1046         tree_rmap_item->next = rmap_item;
1047         rmap_item->address |= STABLE_FLAG;
1048
1049         ksm_pages_sharing++;
1050 }
1051
1052 /*
1053  * cmp_and_merge_page - first see if page can be merged into the stable tree;
1054  * if not, compare checksum to previous and if it's the same, see if page can
1055  * be inserted into the unstable tree, or merged with a page already there and
1056  * both transferred to the stable tree.
1057  *
1058  * @page: the page that we are searching an identical page for.
1059  * @rmap_item: the reverse mapping into the virtual address of this page
1060  */
1061 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1062 {
1063         struct rmap_item *tree_rmap_item;
1064         struct page *tree_page = NULL;
1065         struct page *kpage;
1066         unsigned int checksum;
1067         int err;
1068
1069         remove_rmap_item_from_tree(rmap_item);
1070
1071         /* We first start with searching the page inside the stable tree */
1072         tree_rmap_item = stable_tree_search(page, &tree_page);
1073         if (tree_rmap_item) {
1074                 kpage = tree_page;
1075                 if (page == kpage)                      /* forked */
1076                         err = 0;
1077                 else
1078                         err = try_to_merge_with_ksm_page(rmap_item,
1079                                                          page, kpage);
1080                 if (!err) {
1081                         /*
1082                          * The page was successfully merged:
1083                          * add its rmap_item to the stable tree.
1084                          */
1085                         stable_tree_append(rmap_item, tree_rmap_item);
1086                 }
1087                 put_page(kpage);
1088                 return;
1089         }
1090
1091         /*
1092          * A ksm page might have got here by fork, but its other
1093          * references have already been removed from the stable tree.
1094          * Or it might be left over from a break_ksm which failed
1095          * when the mem_cgroup had reached its limit: try again now.
1096          */
1097         if (PageKsm(page))
1098                 break_cow(rmap_item);
1099
1100         /*
1101          * If the hash value of the page has changed since the last time we
1102          * calculated it, this page is changing frequently: therefore we
1103          * don't want to insert it into the unstable tree, and we don't want
1104          * to waste our time searching for something identical to it there.
1105          */
1106         checksum = calc_checksum(page);
1107         if (rmap_item->oldchecksum != checksum) {
1108                 rmap_item->oldchecksum = checksum;
1109                 return;
1110         }
1111
1112         tree_rmap_item =
1113                 unstable_tree_search_insert(rmap_item, page, &tree_page);
1114         if (tree_rmap_item) {
1115                 kpage = try_to_merge_two_pages(rmap_item, page,
1116                                                 tree_rmap_item, tree_page);
1117                 put_page(tree_page);
1118                 /*
1119                  * As soon as we merge this page, we want to remove the
1120                  * rmap_item of the page we have merged with from the unstable
1121          * tree, and insert it instead as a new node in the stable tree.
1122                  */
1123                 if (kpage) {
1124                         remove_rmap_item_from_tree(tree_rmap_item);
1125
1126                         /*
1127                          * If we fail to insert the page into the stable tree,
1128                          * we will have 2 virtual addresses that are pointing
1129                          * to a ksm page left outside the stable tree,
1130                          * in which case we need to break_cow on both.
1131                          */
1132                         if (stable_tree_insert(kpage, tree_rmap_item))
1133                                 stable_tree_append(rmap_item, tree_rmap_item);
1134                         else {
1135                                 break_cow(tree_rmap_item);
1136                                 break_cow(rmap_item);
1137                         }
1138                         put_page(kpage);
1139                 }
1140         }
1141 }
1142
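/*
 * Each mm_slot's rmap_list mirrors the scan order, so it stays sorted by
 * address: get_next_rmap_item() reuses the existing entry for addr if there
 * is one, frees any stale entries below addr (their areas are no longer
 * VM_MERGEABLE or have been unmapped), and otherwise allocates a fresh
 * rmap_item just before the scan cursor.
 */
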
1143 static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
1144                                             struct list_head *cur,
1145                                             unsigned long addr)
1146 {
1147         struct rmap_item *rmap_item;
1148
1149         while (cur != &mm_slot->rmap_list) {
1150                 rmap_item = list_entry(cur, struct rmap_item, link);
1151                 if ((rmap_item->address & PAGE_MASK) == addr)
1152                         return rmap_item;
1153                 if (rmap_item->address > addr)
1154                         break;
1155                 cur = cur->next;
1156                 remove_rmap_item_from_tree(rmap_item);
1157                 list_del(&rmap_item->link);
1158                 free_rmap_item(rmap_item);
1159         }
1160
1161         rmap_item = alloc_rmap_item();
1162         if (rmap_item) {
1163                 /* It has already been zeroed */
1164                 rmap_item->mm = mm_slot->mm;
1165                 rmap_item->address = addr;
1166                 list_add_tail(&rmap_item->link, cur);
1167         }
1168         return rmap_item;
1169 }
1170
1171 static struct rmap_item *scan_get_next_rmap_item(struct page **page)
1172 {
1173         struct mm_struct *mm;
1174         struct mm_slot *slot;
1175         struct vm_area_struct *vma;
1176         struct rmap_item *rmap_item;
1177
1178         if (list_empty(&ksm_mm_head.mm_list))
1179                 return NULL;
1180
1181         slot = ksm_scan.mm_slot;
1182         if (slot == &ksm_mm_head) {
1183                 root_unstable_tree = RB_ROOT;
1184
1185                 spin_lock(&ksm_mmlist_lock);
1186                 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
1187                 ksm_scan.mm_slot = slot;
1188                 spin_unlock(&ksm_mmlist_lock);
1189 next_mm:
1190                 ksm_scan.address = 0;
1191                 ksm_scan.rmap_item = list_entry(&slot->rmap_list,
1192                                                 struct rmap_item, link);
1193         }
1194
1195         mm = slot->mm;
1196         down_read(&mm->mmap_sem);
1197         if (ksm_test_exit(mm))
1198                 vma = NULL;
1199         else
1200                 vma = find_vma(mm, ksm_scan.address);
1201
1202         for (; vma; vma = vma->vm_next) {
1203                 if (!(vma->vm_flags & VM_MERGEABLE))
1204                         continue;
1205                 if (ksm_scan.address < vma->vm_start)
1206                         ksm_scan.address = vma->vm_start;
1207                 if (!vma->anon_vma)
1208                         ksm_scan.address = vma->vm_end;
1209
1210                 while (ksm_scan.address < vma->vm_end) {
1211                         if (ksm_test_exit(mm))
1212                                 break;
1213                         *page = follow_page(vma, ksm_scan.address, FOLL_GET);
1214                         if (*page && PageAnon(*page)) {
1215                                 flush_anon_page(vma, *page, ksm_scan.address);
1216                                 flush_dcache_page(*page);
1217                                 rmap_item = get_next_rmap_item(slot,
1218                                         ksm_scan.rmap_item->link.next,
1219                                         ksm_scan.address);
1220                                 if (rmap_item) {
1221                                         ksm_scan.rmap_item = rmap_item;
1222                                         ksm_scan.address += PAGE_SIZE;
1223                                 } else
1224                                         put_page(*page);
1225                                 up_read(&mm->mmap_sem);
1226                                 return rmap_item;
1227                         }
1228                         if (*page)
1229                                 put_page(*page);
1230                         ksm_scan.address += PAGE_SIZE;
1231                         cond_resched();
1232                 }
1233         }
1234
1235         if (ksm_test_exit(mm)) {
1236                 ksm_scan.address = 0;
1237                 ksm_scan.rmap_item = list_entry(&slot->rmap_list,
1238                                                 struct rmap_item, link);
1239         }
1240         /*
1241          * Nuke all the rmap_items that are above this current rmap:
1242          * because there were no VM_MERGEABLE vmas with such addresses.
1243          */
1244         remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
1245
1246         spin_lock(&ksm_mmlist_lock);
1247         ksm_scan.mm_slot = list_entry(slot->mm_list.next,
1248                                                 struct mm_slot, mm_list);
1249         if (ksm_scan.address == 0) {
1250                 /*
1251                  * We've completed a full scan of all vmas, holding mmap_sem
1252                  * throughout, and found no VM_MERGEABLE: so do the same as
1253                  * __ksm_exit does to remove this mm from all our lists now.
1254                  * This applies either when cleaning up after __ksm_exit
1255                  * (but beware: we can reach here even before __ksm_exit),
1256                  * or when all VM_MERGEABLE areas have been unmapped (and
1257                  * mmap_sem then protects against race with MADV_MERGEABLE).
1258                  */
1259                 hlist_del(&slot->link);
1260                 list_del(&slot->mm_list);
1261                 spin_unlock(&ksm_mmlist_lock);
1262
1263                 free_mm_slot(slot);
1264                 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1265                 up_read(&mm->mmap_sem);
1266                 mmdrop(mm);
1267         } else {
1268                 spin_unlock(&ksm_mmlist_lock);
1269                 up_read(&mm->mmap_sem);
1270         }
1271
1272         /* Repeat until we've completed scanning the whole list */
1273         slot = ksm_scan.mm_slot;
1274         if (slot != &ksm_mm_head)
1275                 goto next_mm;
1276
1277         ksm_scan.seqnr++;
1278         return NULL;
1279 }
1280
1281 /**
1282  * ksm_do_scan - the ksm scanner main worker function.
1283  * @scan_npages: number of pages we want to scan before we return.
1284  */
1285 static void ksm_do_scan(unsigned int scan_npages)
1286 {
1287         struct rmap_item *rmap_item;
1288         struct page *page;
1289
1290         while (scan_npages--) {
1291                 cond_resched();
1292                 rmap_item = scan_get_next_rmap_item(&page);
1293                 if (!rmap_item)
1294                         return;
1295                 if (!PageKsm(page) || !in_stable_tree(rmap_item))
1296                         cmp_and_merge_page(page, rmap_item);
1297                 else if (page_mapcount(page) == 1) {
1298                         /*
1299                          * Replace now-unshared ksm page by ordinary page.
1300                          */
1301                         break_cow(rmap_item);
1302                         remove_rmap_item_from_tree(rmap_item);
1303                         rmap_item->oldchecksum = calc_checksum(page);
1304                 }
1305                 put_page(page);
1306         }
1307 }
1308
1309 static int ksmd_should_run(void)
1310 {
1311         return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
1312 }
1313
1314 static int ksm_scan_thread(void *nothing)
1315 {
1316         set_user_nice(current, 5);
1317
1318         while (!kthread_should_stop()) {
1319                 mutex_lock(&ksm_thread_mutex);
1320                 if (ksmd_should_run())
1321                         ksm_do_scan(ksm_thread_pages_to_scan);
1322                 mutex_unlock(&ksm_thread_mutex);
1323
1324                 if (ksmd_should_run()) {
1325                         schedule_timeout_interruptible(
1326                                 msecs_to_jiffies(ksm_thread_sleep_millisecs));
1327                 } else {
1328                         wait_event_interruptible(ksm_thread_wait,
1329                                 ksmd_should_run() || kthread_should_stop());
1330                 }
1331         }
1332         return 0;
1333 }
1334
1335 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
1336                 unsigned long end, int advice, unsigned long *vm_flags)
1337 {
1338         struct mm_struct *mm = vma->vm_mm;
1339         int err;
1340
1341         switch (advice) {
1342         case MADV_MERGEABLE:
1343                 /*
1344                  * Be somewhat over-protective for now!
1345                  */
1346                 if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
1347                                  VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
1348                                  VM_RESERVED  | VM_HUGETLB | VM_INSERTPAGE |
1349                                  VM_MIXEDMAP  | VM_SAO))
1350                         return 0;               /* just ignore the advice */
1351
1352                 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
1353                         err = __ksm_enter(mm);
1354                         if (err)
1355                                 return err;
1356                 }
1357
1358                 *vm_flags |= VM_MERGEABLE;
1359                 break;
1360
1361         case MADV_UNMERGEABLE:
1362                 if (!(*vm_flags & VM_MERGEABLE))
1363                         return 0;               /* just ignore the advice */
1364
1365                 if (vma->anon_vma) {
1366                         err = unmerge_ksm_pages(vma, start, end);
1367                         if (err)
1368                                 return err;
1369                 }
1370
1371                 *vm_flags &= ~VM_MERGEABLE;
1372                 break;
1373         }
1374
1375         return 0;
1376 }
1377
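/*
 * For reference, a minimal sketch of the userspace side: an application
 * opts an anonymous mapping into merging with madvise(2), which reaches
 * ksm_madvise() above as MADV_MERGEABLE:
 *
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 *
 * MADV_UNMERGEABLE undoes this, breaking COW on any already-merged pages
 * in the range via unmerge_ksm_pages().
 */
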
1378 int __ksm_enter(struct mm_struct *mm)
1379 {
1380         struct mm_slot *mm_slot;
1381         int needs_wakeup;
1382
1383         mm_slot = alloc_mm_slot();
1384         if (!mm_slot)
1385                 return -ENOMEM;
1386
1387         /* Check ksm_run too?  Would need tighter locking */
1388         needs_wakeup = list_empty(&ksm_mm_head.mm_list);
1389
1390         spin_lock(&ksm_mmlist_lock);
1391         insert_to_mm_slots_hash(mm, mm_slot);
1392         /*
1393          * Insert just behind the scanning cursor, to let the area settle
1394          * down a little; when fork is followed by immediate exec, we don't
1395          * want ksmd to waste time setting up and tearing down an rmap_list.
1396          */
1397         list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
1398         spin_unlock(&ksm_mmlist_lock);
1399
1400         set_bit(MMF_VM_MERGEABLE, &mm->flags);
1401         atomic_inc(&mm->mm_count);
1402
1403         if (needs_wakeup)
1404                 wake_up_interruptible(&ksm_thread_wait);
1405
1406         return 0;
1407 }
1408
1409 void __ksm_exit(struct mm_struct *mm)
1410 {
1411         struct mm_slot *mm_slot;
1412         int easy_to_free = 0;
1413
1414         /*
1415          * This process is exiting: if it's straightforward (as is the
1416          * case when ksmd was never running), free mm_slot immediately.
1417          * But if it's at the cursor or has rmap_items linked to it, use
1418          * mmap_sem to synchronize with any break_cows before pagetables
1419          * are freed, and leave the mm_slot on the list for ksmd to free.
1420          * Beware: ksm may already have noticed it exiting and freed the slot.
1421          */
1422
1423         spin_lock(&ksm_mmlist_lock);
1424         mm_slot = get_mm_slot(mm);
1425         if (mm_slot && ksm_scan.mm_slot != mm_slot) {
1426                 if (list_empty(&mm_slot->rmap_list)) {
1427                         hlist_del(&mm_slot->link);
1428                         list_del(&mm_slot->mm_list);
1429                         easy_to_free = 1;
1430                 } else {
1431                         list_move(&mm_slot->mm_list,
1432                                   &ksm_scan.mm_slot->mm_list);
1433                 }
1434         }
1435         spin_unlock(&ksm_mmlist_lock);
1436
1437         if (easy_to_free) {
1438                 free_mm_slot(mm_slot);
1439                 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1440                 mmdrop(mm);
1441         } else if (mm_slot) {
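		/*
		 * mm_slot is still at the scan cursor or has rmap_items:
		 * taking mmap_sem for write here waits out any break_cow()
		 * that ksmd is performing under mmap_sem read, so COW
		 * breaking is complete before exit_mmap() frees the page
		 * tables; ksmd will notice the exit and free the slot.
		 */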
1442                 down_write(&mm->mmap_sem);
1443                 up_write(&mm->mmap_sem);
1444         }
1445 }
1446
1447 #ifdef CONFIG_SYSFS
1448 /*
1449  * This all compiles without CONFIG_SYSFS, but is a waste of space.
1450  */
1451
1452 #define KSM_ATTR_RO(_name) \
1453         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1454 #define KSM_ATTR(_name) \
1455         static struct kobj_attribute _name##_attr = \
1456                 __ATTR(_name, 0644, _name##_show, _name##_store)
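
/*
 * Attributes defined with these macros are collected in ksm_attr_group
 * below and registered on mm_kobj by ksm_init(), so each one appears to
 * the user as a file under /sys/kernel/mm/ksm/.
 */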
1457
1458 static ssize_t sleep_millisecs_show(struct kobject *kobj,
1459                                     struct kobj_attribute *attr, char *buf)
1460 {
1461         return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
1462 }
1463
1464 static ssize_t sleep_millisecs_store(struct kobject *kobj,
1465                                      struct kobj_attribute *attr,
1466                                      const char *buf, size_t count)
1467 {
1468         unsigned long msecs;
1469         int err;
1470
1471         err = strict_strtoul(buf, 10, &msecs);
1472         if (err || msecs > UINT_MAX)
1473                 return -EINVAL;
1474
1475         ksm_thread_sleep_millisecs = msecs;
1476
1477         return count;
1478 }
1479 KSM_ATTR(sleep_millisecs);
1480
1481 static ssize_t pages_to_scan_show(struct kobject *kobj,
1482                                   struct kobj_attribute *attr, char *buf)
1483 {
1484         return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
1485 }
1486
1487 static ssize_t pages_to_scan_store(struct kobject *kobj,
1488                                    struct kobj_attribute *attr,
1489                                    const char *buf, size_t count)
1490 {
1491         int err;
1492         unsigned long nr_pages;
1493
1494         err = strict_strtoul(buf, 10, &nr_pages);
1495         if (err || nr_pages > UINT_MAX)
1496                 return -EINVAL;
1497
1498         ksm_thread_pages_to_scan = nr_pages;
1499
1500         return count;
1501 }
1502 KSM_ATTR(pages_to_scan);
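
/*
 * Together, sleep_millisecs and pages_to_scan set ksmd's scan rate: on
 * each wakeup the daemon scans up to pages_to_scan pages and then sleeps
 * for sleep_millisecs (see ksm_scan_thread() earlier in this file).
 */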
1503
1504 static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
1505                         char *buf)
1506 {
1507         return sprintf(buf, "%u\n", ksm_run);
1508 }
1509
1510 static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
1511                          const char *buf, size_t count)
1512 {
1513         int err;
1514         unsigned long flags;
1515
1516         err = strict_strtoul(buf, 10, &flags);
1517         if (err || flags > UINT_MAX)
1518                 return -EINVAL;
1519         if (flags > KSM_RUN_UNMERGE)
1520                 return -EINVAL;
1521
1522         /*
1523          * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
1524          * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
1525          * breaking COW to free the unswappable pages_shared (but leaves
1526          * mm_slots on the list for when ksmd may be set running again).
1527          */
1528
1529         mutex_lock(&ksm_thread_mutex);
1530         if (ksm_run != flags) {
1531                 ksm_run = flags;
1532                 if (flags & KSM_RUN_UNMERGE) {
1533                         current->flags |= PF_OOM_ORIGIN;
1534                         err = unmerge_and_remove_all_rmap_items();
1535                         current->flags &= ~PF_OOM_ORIGIN;
1536                         if (err) {
1537                                 ksm_run = KSM_RUN_STOP;
1538                                 count = err;
1539                         }
1540                 }
1541         }
1542         mutex_unlock(&ksm_thread_mutex);
1543
1544         if (flags & KSM_RUN_MERGE)
1545                 wake_up_interruptible(&ksm_thread_wait);
1546
1547         return count;
1548 }
1549 KSM_ATTR(run);
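
/*
 * Usage sketch, assuming KSM_RUN_STOP/MERGE/UNMERGE keep the values
 * 0/1/2 given to them earlier in this file:
 *
 *	echo 1 >/sys/kernel/mm/ksm/run		start ksmd merging
 *	echo 0 >/sys/kernel/mm/ksm/run		stop ksmd, keep merged pages
 *	echo 2 >/sys/kernel/mm/ksm/run		stop ksmd and unmerge everything
 */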
1550
1551 static ssize_t max_kernel_pages_store(struct kobject *kobj,
1552                                       struct kobj_attribute *attr,
1553                                       const char *buf, size_t count)
1554 {
1555         int err;
1556         unsigned long nr_pages;
1557
1558         err = strict_strtoul(buf, 10, &nr_pages);
1559         if (err)
1560                 return -EINVAL;
1561
1562         ksm_max_kernel_pages = nr_pages;
1563
1564         return count;
1565 }
1566
1567 static ssize_t max_kernel_pages_show(struct kobject *kobj,
1568                                      struct kobj_attribute *attr, char *buf)
1569 {
1570         return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
1571 }
1572 KSM_ATTR(max_kernel_pages);
1573
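/*
 * The read-only counters below show how well merging is going:
 * pages_shared is the number of ksm pages in the stable tree,
 * pages_sharing counts the further sites mapping those pages (a high
 * sharing/shared ratio means effective sharing), pages_unshared counts
 * pages that are unique but repeatedly checked for merging, and
 * pages_volatile estimates pages changing too fast to be worth merging.
 */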
1574 static ssize_t pages_shared_show(struct kobject *kobj,
1575                                  struct kobj_attribute *attr, char *buf)
1576 {
1577         return sprintf(buf, "%lu\n", ksm_pages_shared);
1578 }
1579 KSM_ATTR_RO(pages_shared);
1580
1581 static ssize_t pages_sharing_show(struct kobject *kobj,
1582                                   struct kobj_attribute *attr, char *buf)
1583 {
1584         return sprintf(buf, "%lu\n", ksm_pages_sharing);
1585 }
1586 KSM_ATTR_RO(pages_sharing);
1587
1588 static ssize_t pages_unshared_show(struct kobject *kobj,
1589                                    struct kobj_attribute *attr, char *buf)
1590 {
1591         return sprintf(buf, "%lu\n", ksm_pages_unshared);
1592 }
1593 KSM_ATTR_RO(pages_unshared);
1594
1595 static ssize_t pages_volatile_show(struct kobject *kobj,
1596                                    struct kobj_attribute *attr, char *buf)
1597 {
1598         long ksm_pages_volatile;
1599
1600         ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
1601                                 - ksm_pages_sharing - ksm_pages_unshared;
1602         /*
1603          * That statistic was not worth any locking to calculate, but
1604          * as a result it can occasionally go negative: conceal that.
1605          */
1606         if (ksm_pages_volatile < 0)
1607                 ksm_pages_volatile = 0;
1608         return sprintf(buf, "%ld\n", ksm_pages_volatile);
1609 }
1610 KSM_ATTR_RO(pages_volatile);
1611
1612 static ssize_t full_scans_show(struct kobject *kobj,
1613                                struct kobj_attribute *attr, char *buf)
1614 {
1615         return sprintf(buf, "%lu\n", ksm_scan.seqnr);
1616 }
1617 KSM_ATTR_RO(full_scans);
1618
1619 static struct attribute *ksm_attrs[] = {
1620         &sleep_millisecs_attr.attr,
1621         &pages_to_scan_attr.attr,
1622         &run_attr.attr,
1623         &max_kernel_pages_attr.attr,
1624         &pages_shared_attr.attr,
1625         &pages_sharing_attr.attr,
1626         &pages_unshared_attr.attr,
1627         &pages_volatile_attr.attr,
1628         &full_scans_attr.attr,
1629         NULL,
1630 };
1631
1632 static struct attribute_group ksm_attr_group = {
1633         .attrs = ksm_attrs,
1634         .name = "ksm",
1635 };
1636 #endif /* CONFIG_SYSFS */
1637
1638 static int __init ksm_init(void)
1639 {
1640         struct task_struct *ksm_thread;
1641         int err;
1642
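	/*
	 * Merged ksm pages cannot be swapped out here, so default to
	 * capping them at a quarter of total RAM; the limit is tunable
	 * via the max_kernel_pages attribute above.
	 */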
1643         ksm_max_kernel_pages = totalram_pages / 4;
1644
1645         err = ksm_slab_init();
1646         if (err)
1647                 goto out;
1648
1649         err = mm_slots_hash_init();
1650         if (err)
1651                 goto out_free1;
1652
1653         ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
1654         if (IS_ERR(ksm_thread)) {
1655                 printk(KERN_ERR "ksm: creating kthread failed\n");
1656                 err = PTR_ERR(ksm_thread);
1657                 goto out_free2;
1658         }
1659
1660 #ifdef CONFIG_SYSFS
1661         err = sysfs_create_group(mm_kobj, &ksm_attr_group);
1662         if (err) {
1663                 printk(KERN_ERR "ksm: register sysfs failed\n");
1664                 kthread_stop(ksm_thread);
1665                 goto out_free2;
1666         }
1667 #else
1668         ksm_run = KSM_RUN_MERGE;        /* no way for user to start it */
1669
1670 #endif /* CONFIG_SYSFS */
1671
1672         return 0;
1673
1674 out_free2:
1675         mm_slots_hash_free();
1676 out_free1:
1677         ksm_slab_free();
1678 out:
1679         return err;
1680 }
1681 module_init(ksm_init)