1 /*
2  *  linux/mm/swapfile.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  *  Swap reorganised 29.12.95, Stephen Tweedie
6  */
7
8 #include <linux/mm.h>
9 #include <linux/hugetlb.h>
10 #include <linux/mman.h>
11 #include <linux/slab.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/swap.h>
14 #include <linux/vmalloc.h>
15 #include <linux/pagemap.h>
16 #include <linux/namei.h>
17 #include <linux/shm.h>
18 #include <linux/blkdev.h>
19 #include <linux/random.h>
20 #include <linux/writeback.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/init.h>
24 #include <linux/module.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/security.h>
28 #include <linux/backing-dev.h>
29 #include <linux/mutex.h>
30 #include <linux/capability.h>
31 #include <linux/syscalls.h>
32 #include <linux/memcontrol.h>
33
34 #include <asm/pgtable.h>
35 #include <asm/tlbflush.h>
36 #include <linux/swapops.h>
37 #include <linux/page_cgroup.h>
38
39 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
40                                  unsigned char);
41 static void free_swap_count_continuations(struct swap_info_struct *);
42 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
43
44 static DEFINE_SPINLOCK(swap_lock);
45 static unsigned int nr_swapfiles;
46 long nr_swap_pages;
47 long total_swap_pages;
48 static int least_priority;
49
50 static const char Bad_file[] = "Bad swap file entry ";
51 static const char Unused_file[] = "Unused swap file entry ";
52 static const char Bad_offset[] = "Bad swap offset entry ";
53 static const char Unused_offset[] = "Unused swap offset entry ";
54
55 static struct swap_list_t swap_list = {-1, -1};
56
57 static struct swap_info_struct *swap_info[MAX_SWAPFILES];
58
59 static DEFINE_MUTEX(swapon_mutex);
60
61 static inline unsigned char swap_count(unsigned char ent)
62 {
63         return ent & ~SWAP_HAS_CACHE;   /* may include SWAP_HAS_CONT flag */
64 }
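/*
 * A worked example of the swap_map byte this helper decodes (a sketch;
 * the flag bits are defined in <linux/swap.h> and sit above the count
 * field):
 *
 *	SWAP_HAS_CACHE | 3  ->  swap_count() returns 3
 *	                        (three pte references, plus a swapcache page)
 *	3                   ->  swap_count() returns 3, no swapcache page
 */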
65
66 /* returns 1 if swap entry is freed */
67 static int
68 __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
69 {
70         swp_entry_t entry = swp_entry(si->type, offset);
71         struct page *page;
72         int ret = 0;
73
74         page = find_get_page(&swapper_space, entry.val);
75         if (!page)
76                 return 0;
77         /*
 78          * This function is called from scan_swap_map(), which is called
 79          * by vmscan.c while reclaiming pages, so we take the page lock
 80          * here with trylock to avoid deadlock.  This is a special case;
 81          * in usual operations, use try_to_free_swap() with an explicit
 82          * lock_page().
83          */
84         if (trylock_page(page)) {
85                 ret = try_to_free_swap(page);
86                 unlock_page(page);
87         }
88         page_cache_release(page);
89         return ret;
90 }
91
92 /*
93  * We need this because the bdev->unplug_fn can sleep and we cannot
94  * hold swap_lock while calling the unplug_fn. And swap_lock
95  * cannot be turned into a mutex.
96  */
97 static DECLARE_RWSEM(swap_unplug_sem);
98
99 void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
100 {
101         swp_entry_t entry;
102
103         down_read(&swap_unplug_sem);
104         entry.val = page_private(page);
105         if (PageSwapCache(page)) {
106                 struct block_device *bdev = swap_info[swp_type(entry)]->bdev;
107                 struct backing_dev_info *bdi;
108
109                 /*
110                  * If the page is removed from swapcache from under us (with a
111                  * racy try_to_unuse/swapoff) we need an additional reference
112                  * count to avoid reading garbage from page_private(page) above.
113                  * If the WARN_ON triggers during a swapoff it may be this race
114                  * condition, which is harmless.  However, if it triggers without
115                  * a swapoff in progress it signals a problem.
116                  */
117                 WARN_ON(page_count(page) <= 1);
118
119                 bdi = bdev->bd_inode->i_mapping->backing_dev_info;
120                 blk_run_backing_dev(bdi, page);
121         }
122         up_read(&swap_unplug_sem);
123 }
124
125 /*
126  * swapon tells the device that all the old swap contents can be discarded,
127  * to allow the swap device to optimize its wear-levelling.
128  */
129 static int discard_swap(struct swap_info_struct *si)
130 {
131         struct swap_extent *se;
132         sector_t start_block;
133         sector_t nr_blocks;
134         int err = 0;
135
136         /* Do not discard the swap header page! */
137         se = &si->first_swap_extent;
138         start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
139         nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
140         if (nr_blocks) {
141                 err = blkdev_issue_discard(si->bdev, start_block,
142                                 nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
143                 if (err)
144                         return err;
145                 cond_resched();
146         }
147
148         list_for_each_entry(se, &si->first_swap_extent.list, list) {
149                 start_block = se->start_block << (PAGE_SHIFT - 9);
150                 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
151
152                 err = blkdev_issue_discard(si->bdev, start_block,
153                                 nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
154                 if (err)
155                         break;
156
157                 cond_resched();
158         }
159         return err;             /* That will often be -EOPNOTSUPP */
160 }
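/*
 * The shifts above convert page-sized swap units into the 512-byte
 * sectors that blkdev_issue_discard() takes.  A minimal sketch of the
 * arithmetic, assuming a 4K PAGE_SIZE (so PAGE_SHIFT - 9 == 3); the
 * helper name is made up for illustration:
 */
#if 0	/* illustrative sketch only, not built */
static sector_t swap_page_to_sector(pgoff_t page_no)
{
	/* a 4096-byte page N starts at 512-byte sector N * 8 */
	return (sector_t)page_no << (PAGE_SHIFT - 9);
}
#endif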
161
162 /*
163  * swap allocation tells the device that a cluster of swap can now be discarded,
164  * to allow the swap device to optimize its wear-levelling.
165  */
166 static void discard_swap_cluster(struct swap_info_struct *si,
167                                  pgoff_t start_page, pgoff_t nr_pages)
168 {
169         struct swap_extent *se = si->curr_swap_extent;
170         int found_extent = 0;
171
172         while (nr_pages) {
173                 struct list_head *lh;
174
175                 if (se->start_page <= start_page &&
176                     start_page < se->start_page + se->nr_pages) {
177                         pgoff_t offset = start_page - se->start_page;
178                         sector_t start_block = se->start_block + offset;
179                         sector_t nr_blocks = se->nr_pages - offset;
180
181                         if (nr_blocks > nr_pages)
182                                 nr_blocks = nr_pages;
183                         start_page += nr_blocks;
184                         nr_pages -= nr_blocks;
185
186                         if (!found_extent++)
187                                 si->curr_swap_extent = se;
188
189                         start_block <<= PAGE_SHIFT - 9;
190                         nr_blocks <<= PAGE_SHIFT - 9;
191                         if (blkdev_issue_discard(si->bdev, start_block,
192                                     nr_blocks, GFP_NOIO, DISCARD_FL_BARRIER))
193                                 break;
194                 }
195
196                 lh = se->list.next;
197                 se = list_entry(lh, struct swap_extent, list);
198         }
199 }
200
201 static int wait_for_discard(void *word)
202 {
203         schedule();
204         return 0;
205 }
206
207 #define SWAPFILE_CLUSTER        256
208 #define LATENCY_LIMIT           256
209
210 static inline unsigned long scan_swap_map(struct swap_info_struct *si,
211                                           unsigned char usage)
212 {
213         unsigned long offset;
214         unsigned long scan_base;
215         unsigned long last_in_cluster = 0;
216         int latency_ration = LATENCY_LIMIT;
217         int found_free_cluster = 0;
218
219         /*
220          * We try to cluster swap pages by allocating them sequentially
221          * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
222          * way, however, we resort to first-free allocation, starting
223          * a new cluster.  This prevents us from scattering swap pages
224          * all over the entire swap partition, so that we reduce
225          * overall disk seek times between swap pages.  -- sct
226          * But we do now try to find an empty cluster.  -Andrea
227          * And we let swap pages go all over an SSD partition.  Hugh
228          */
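	/*
	 * Concretely (a sketch, assuming 4K pages): SWAPFILE_CLUSTER is
	 * 256, so each cluster covers 1MB of swap; cluster_nr counts the
	 * allocations remaining before the next free-cluster search, and
	 * LATENCY_LIMIT bounds how many map entries are scanned between
	 * cond_resched() calls.
	 */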
229
230         si->flags += SWP_SCANNING;
231         scan_base = offset = si->cluster_next;
232
233         if (unlikely(!si->cluster_nr--)) {
234                 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
235                         si->cluster_nr = SWAPFILE_CLUSTER - 1;
236                         goto checks;
237                 }
238                 if (si->flags & SWP_DISCARDABLE) {
239                         /*
240                          * Start range check on racing allocations, in case
241                          * they overlap the cluster we eventually decide on
242                          * (we scan without swap_lock to allow preemption).
243                          * It's hardly conceivable that cluster_nr could be
244                          * wrapped during our scan, but don't depend on it.
245                          */
246                         if (si->lowest_alloc)
247                                 goto checks;
248                         si->lowest_alloc = si->max;
249                         si->highest_alloc = 0;
250                 }
251                 spin_unlock(&swap_lock);
252
253                 /*
254                  * If seek is expensive, start searching for new cluster from
255                  * start of partition, to minimize the span of allocated swap.
256                  * But if seek is cheap, search from our current position, so
257                  * that swap is allocated from all over the partition: if the
258                  * Flash Translation Layer only remaps within limited zones,
259                  * we don't want to wear out the first zone too quickly.
260                  */
261                 if (!(si->flags & SWP_SOLIDSTATE))
262                         scan_base = offset = si->lowest_bit;
263                 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
264
265                 /* Locate the first empty (unaligned) cluster */
266                 for (; last_in_cluster <= si->highest_bit; offset++) {
267                         if (si->swap_map[offset])
268                                 last_in_cluster = offset + SWAPFILE_CLUSTER;
269                         else if (offset == last_in_cluster) {
270                                 spin_lock(&swap_lock);
271                                 offset -= SWAPFILE_CLUSTER - 1;
272                                 si->cluster_next = offset;
273                                 si->cluster_nr = SWAPFILE_CLUSTER - 1;
274                                 found_free_cluster = 1;
275                                 goto checks;
276                         }
277                         if (unlikely(--latency_ration < 0)) {
278                                 cond_resched();
279                                 latency_ration = LATENCY_LIMIT;
280                         }
281                 }
282
283                 offset = si->lowest_bit;
284                 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
285
286                 /* Locate the first empty (unaligned) cluster */
287                 for (; last_in_cluster < scan_base; offset++) {
288                         if (si->swap_map[offset])
289                                 last_in_cluster = offset + SWAPFILE_CLUSTER;
290                         else if (offset == last_in_cluster) {
291                                 spin_lock(&swap_lock);
292                                 offset -= SWAPFILE_CLUSTER - 1;
293                                 si->cluster_next = offset;
294                                 si->cluster_nr = SWAPFILE_CLUSTER - 1;
295                                 found_free_cluster = 1;
296                                 goto checks;
297                         }
298                         if (unlikely(--latency_ration < 0)) {
299                                 cond_resched();
300                                 latency_ration = LATENCY_LIMIT;
301                         }
302                 }
303
304                 offset = scan_base;
305                 spin_lock(&swap_lock);
306                 si->cluster_nr = SWAPFILE_CLUSTER - 1;
307                 si->lowest_alloc = 0;
308         }
309
310 checks:
311         if (!(si->flags & SWP_WRITEOK))
312                 goto no_page;
313         if (!si->highest_bit)
314                 goto no_page;
315         if (offset > si->highest_bit)
316                 scan_base = offset = si->lowest_bit;
317
318         /* reuse swap entry of cache-only swap if not busy. */
319         if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
320                 int swap_was_freed;
321                 spin_unlock(&swap_lock);
322                 swap_was_freed = __try_to_reclaim_swap(si, offset);
323                 spin_lock(&swap_lock);
324                 /* entry was freed successfully, try to use this again */
325                 if (swap_was_freed)
326                         goto checks;
327                 goto scan; /* check next one */
328         }
329
330         if (si->swap_map[offset])
331                 goto scan;
332
333         if (offset == si->lowest_bit)
334                 si->lowest_bit++;
335         if (offset == si->highest_bit)
336                 si->highest_bit--;
337         si->inuse_pages++;
338         if (si->inuse_pages == si->pages) {
339                 si->lowest_bit = si->max;
340                 si->highest_bit = 0;
341         }
342         si->swap_map[offset] = usage;
343         si->cluster_next = offset + 1;
344         si->flags -= SWP_SCANNING;
345
346         if (si->lowest_alloc) {
347                 /*
348                  * Only set when SWP_DISCARDABLE, and there's a scan
349                  * for a free cluster in progress or just completed.
350                  */
351                 if (found_free_cluster) {
352                         /*
353                          * To optimize wear-levelling, discard the
354                          * old data of the cluster, taking care not to
355                          * discard any of its pages that have already
356                          * been allocated by racing tasks (offset has
357                          * already stepped over any at the beginning).
358                          */
359                         if (offset < si->highest_alloc &&
360                             si->lowest_alloc <= last_in_cluster)
361                                 last_in_cluster = si->lowest_alloc - 1;
362                         si->flags |= SWP_DISCARDING;
363                         spin_unlock(&swap_lock);
364
365                         if (offset < last_in_cluster)
366                                 discard_swap_cluster(si, offset,
367                                         last_in_cluster - offset + 1);
368
369                         spin_lock(&swap_lock);
370                         si->lowest_alloc = 0;
371                         si->flags &= ~SWP_DISCARDING;
372
373                         smp_mb();       /* wake_up_bit advises this */
374                         wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
375
376                 } else if (si->flags & SWP_DISCARDING) {
377                         /*
378                          * Delay using pages allocated by racing tasks
379                          * until the whole discard has been issued. We
380                          * could defer that delay until swap_writepage,
381                          * but it's easier to keep this self-contained.
382                          */
383                         spin_unlock(&swap_lock);
384                         wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
385                                 wait_for_discard, TASK_UNINTERRUPTIBLE);
386                         spin_lock(&swap_lock);
387                 } else {
388                         /*
389                          * Note pages allocated by racing tasks while
390                          * scan for a free cluster is in progress, so
391                          * that its final discard can exclude them.
392                          */
393                         if (offset < si->lowest_alloc)
394                                 si->lowest_alloc = offset;
395                         if (offset > si->highest_alloc)
396                                 si->highest_alloc = offset;
397                 }
398         }
399         return offset;
400
401 scan:
402         spin_unlock(&swap_lock);
403         while (++offset <= si->highest_bit) {
404                 if (!si->swap_map[offset]) {
405                         spin_lock(&swap_lock);
406                         goto checks;
407                 }
408                 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
409                         spin_lock(&swap_lock);
410                         goto checks;
411                 }
412                 if (unlikely(--latency_ration < 0)) {
413                         cond_resched();
414                         latency_ration = LATENCY_LIMIT;
415                 }
416         }
417         offset = si->lowest_bit;
418         while (++offset < scan_base) {
419                 if (!si->swap_map[offset]) {
420                         spin_lock(&swap_lock);
421                         goto checks;
422                 }
423                 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
424                         spin_lock(&swap_lock);
425                         goto checks;
426                 }
427                 if (unlikely(--latency_ration < 0)) {
428                         cond_resched();
429                         latency_ration = LATENCY_LIMIT;
430                 }
431         }
432         spin_lock(&swap_lock);
433
434 no_page:
435         si->flags -= SWP_SCANNING;
436         return 0;
437 }
438
439 swp_entry_t get_swap_page(void)
440 {
441         struct swap_info_struct *si;
442         pgoff_t offset;
443         int type, next;
444         int wrapped = 0;
445
446         spin_lock(&swap_lock);
447         if (nr_swap_pages <= 0)
448                 goto noswap;
449         nr_swap_pages--;
450
451         for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
452                 si = swap_info[type];
453                 next = si->next;
454                 if (next < 0 ||
455                     (!wrapped && si->prio != swap_info[next]->prio)) {
456                         next = swap_list.head;
457                         wrapped++;
458                 }
459
460                 if (!si->highest_bit)
461                         continue;
462                 if (!(si->flags & SWP_WRITEOK))
463                         continue;
464
465                 swap_list.next = next;
466                 /* This is called to allocate a swap entry for the swap cache */
467                 offset = scan_swap_map(si, SWAP_HAS_CACHE);
468                 if (offset) {
469                         spin_unlock(&swap_lock);
470                         return swp_entry(type, offset);
471                 }
472                 next = swap_list.next;
473         }
474
475         nr_swap_pages++;
476 noswap:
477         spin_unlock(&swap_lock);
478         return (swp_entry_t) {0};
479 }
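/*
 * A sketch of the rotation above, with made-up priorities: given two
 * swap areas at the same priority and a third at a lower priority, the
 * equal-priority areas are used alternately (swap_list.next advances as
 * entries are allocated), and the lower-priority area is only tried
 * once the higher-priority ones fail to yield a free offset.
 */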
480
481 /* The only caller of this function is now the suspend routine */
482 swp_entry_t get_swap_page_of_type(int type)
483 {
484         struct swap_info_struct *si;
485         pgoff_t offset;
486
487         spin_lock(&swap_lock);
488         si = swap_info[type];
489         if (si && (si->flags & SWP_WRITEOK)) {
490                 nr_swap_pages--;
491                 /* This is called to allocate a swap entry, not for the swap cache */
492                 offset = scan_swap_map(si, 1);
493                 if (offset) {
494                         spin_unlock(&swap_lock);
495                         return swp_entry(type, offset);
496                 }
497                 nr_swap_pages++;
498         }
499         spin_unlock(&swap_lock);
500         return (swp_entry_t) {0};
501 }
502
503 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
504 {
505         struct swap_info_struct *p;
506         unsigned long offset, type;
507
508         if (!entry.val)
509                 goto out;
510         type = swp_type(entry);
511         if (type >= nr_swapfiles)
512                 goto bad_nofile;
513         p = swap_info[type];
514         if (!(p->flags & SWP_USED))
515                 goto bad_device;
516         offset = swp_offset(entry);
517         if (offset >= p->max)
518                 goto bad_offset;
519         if (!p->swap_map[offset])
520                 goto bad_free;
521         spin_lock(&swap_lock);
522         return p;
523
524 bad_free:
525         printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
526         goto out;
527 bad_offset:
528         printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
529         goto out;
530 bad_device:
531         printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
532         goto out;
533 bad_nofile:
534         printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
535 out:
536         return NULL;
537 }
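/*
 * The checks above rely on entry.val packing a (type, offset) pair.
 * A minimal sketch of that round trip via the swapops helpers (the
 * function name is made up for illustration):
 */
#if 0	/* illustrative sketch only, not built */
static void swp_entry_pack_example(void)
{
	swp_entry_t entry = swp_entry(1, 12345);	/* type 1, offset 12345 */

	BUG_ON(swp_type(entry) != 1);
	BUG_ON(swp_offset(entry) != 12345);
}
#endif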
538
539 static unsigned char swap_entry_free(struct swap_info_struct *p,
540                                      swp_entry_t entry, unsigned char usage)
541 {
542         unsigned long offset = swp_offset(entry);
543         unsigned char count;
544         unsigned char has_cache;
545
546         count = p->swap_map[offset];
547         has_cache = count & SWAP_HAS_CACHE;
548         count &= ~SWAP_HAS_CACHE;
549
550         if (usage == SWAP_HAS_CACHE) {
551                 VM_BUG_ON(!has_cache);
552                 has_cache = 0;
553         } else if (count == SWAP_MAP_SHMEM) {
554                 /*
555                  * Or we could insist on shmem.c using a special
556                  * swap_shmem_free() and free_shmem_swap_and_cache()...
557                  */
558                 count = 0;
559         } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
560                 if (count == COUNT_CONTINUED) {
561                         if (swap_count_continued(p, offset, count))
562                                 count = SWAP_MAP_MAX | COUNT_CONTINUED;
563                         else
564                                 count = SWAP_MAP_MAX;
565                 } else
566                         count--;
567         }
568
569         if (!count)
570                 mem_cgroup_uncharge_swap(entry);
571
572         usage = count | has_cache;
573         p->swap_map[offset] = usage;
574
575         /* free if no reference */
576         if (!usage) {
577                 if (offset < p->lowest_bit)
578                         p->lowest_bit = offset;
579                 if (offset > p->highest_bit)
580                         p->highest_bit = offset;
581                 if (swap_list.next >= 0 &&
582                     p->prio > swap_info[swap_list.next]->prio)
583                         swap_list.next = p->type;
584                 nr_swap_pages++;
585                 p->inuse_pages--;
586         }
587
588         return usage;
589 }
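/*
 * Two common paths through swap_entry_free(), as a sketch: swap_free()
 * passes usage == 1, so one pte reference is dropped and the slot
 * survives while SWAP_HAS_CACHE remains set; swapcache_free() passes
 * usage == SWAP_HAS_CACHE, clearing the cache bit, and the slot is
 * only recycled once the reference count has also reached zero.
 */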
590
591 /*
592  * Caller has made sure that the swapdevice corresponding to entry
593  * is still around or has not been recycled.
594  */
595 void swap_free(swp_entry_t entry)
596 {
597         struct swap_info_struct *p;
598
599         p = swap_info_get(entry);
600         if (p) {
601                 swap_entry_free(p, entry, 1);
602                 spin_unlock(&swap_lock);
603         }
604 }
605
606 /*
607  * Called after dropping swapcache to decrease the refcount on swap entries.
608  */
609 void swapcache_free(swp_entry_t entry, struct page *page)
610 {
611         struct swap_info_struct *p;
612         unsigned char count;
613
614         p = swap_info_get(entry);
615         if (p) {
616                 count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
617                 if (page)
618                         mem_cgroup_uncharge_swapcache(page, entry, count != 0);
619                 spin_unlock(&swap_lock);
620         }
621 }
622
623 /*
624  * How many references to page are currently swapped out?
625  * This does not give an exact answer when swap count is continued,
626  * but does include the high COUNT_CONTINUED flag to allow for that.
627  */
628 static inline int page_swapcount(struct page *page)
629 {
630         int count = 0;
631         struct swap_info_struct *p;
632         swp_entry_t entry;
633
634         entry.val = page_private(page);
635         p = swap_info_get(entry);
636         if (p) {
637                 count = swap_count(p->swap_map[swp_offset(entry)]);
638                 spin_unlock(&swap_lock);
639         }
640         return count;
641 }
642
643 /*
644  * We can write to an anon page without COW if there are no other references
645  * to it.  And as a side-effect, free up its swap: because the old content
646  * on disk will never be read, and seeking back there to write new content
647  * later would only waste time away from clustering.
648  */
649 int reuse_swap_page(struct page *page)
650 {
651         int count;
652
653         VM_BUG_ON(!PageLocked(page));
654         if (unlikely(PageKsm(page)))
655                 return 0;
656         count = page_mapcount(page);
657         if (count <= 1 && PageSwapCache(page)) {
658                 count += page_swapcount(page);
659                 if (count == 1 && !PageWriteback(page)) {
660                         delete_from_swap_cache(page);
661                         SetPageDirty(page);
662                 }
663         }
664         return count <= 1;
665 }
666
667 /*
668  * If swap is getting full, or if there are no more mappings of this page,
669  * then try_to_free_swap is called to free its swap space.
670  */
671 int try_to_free_swap(struct page *page)
672 {
673         VM_BUG_ON(!PageLocked(page));
674
675         if (!PageSwapCache(page))
676                 return 0;
677         if (PageWriteback(page))
678                 return 0;
679         if (page_swapcount(page))
680                 return 0;
681
682         delete_from_swap_cache(page);
683         SetPageDirty(page);
684         return 1;
685 }
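/*
 * Typical usage, per the comment in __try_to_reclaim_swap(): in normal
 * paths the caller takes the page lock explicitly.  A minimal sketch
 * (the helper name is made up for illustration):
 */
#if 0	/* illustrative sketch only, not built */
static int free_swap_under_lock(struct page *page)
{
	int freed;

	lock_page(page);
	freed = try_to_free_swap(page);	/* 1 if the swap slot was released */
	unlock_page(page);
	return freed;
}
#endif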
686
687 /*
688  * Free the swap entry like above, but also try to
689  * free the page cache entry if it is the last user.
690  */
691 int free_swap_and_cache(swp_entry_t entry)
692 {
693         struct swap_info_struct *p;
694         struct page *page = NULL;
695
696         if (non_swap_entry(entry))
697                 return 1;
698
699         p = swap_info_get(entry);
700         if (p) {
701                 if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
702                         page = find_get_page(&swapper_space, entry.val);
703                         if (page && !trylock_page(page)) {
704                                 page_cache_release(page);
705                                 page = NULL;
706                         }
707                 }
708                 spin_unlock(&swap_lock);
709         }
710         if (page) {
711                 /*
712                  * Not mapped elsewhere, or swap space full? Free it!
713                  * Also recheck PageSwapCache now page is locked (above).
714                  */
715                 if (PageSwapCache(page) && !PageWriteback(page) &&
716                                 (!page_mapped(page) || vm_swap_full())) {
717                         delete_from_swap_cache(page);
718                         SetPageDirty(page);
719                 }
720                 unlock_page(page);
721                 page_cache_release(page);
722         }
723         return p != NULL;
724 }
725
726 #ifdef CONFIG_HIBERNATION
727 /*
728  * Find the swap type that corresponds to given device (if any).
729  *
730  * @offset - number of the PAGE_SIZE-sized block of the device, starting
731  * from 0, in which the swap header is expected to be located.
732  *
733  * This is needed for the suspend to disk (aka swsusp).
734  */
735 int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
736 {
737         struct block_device *bdev = NULL;
738         int type;
739
740         if (device)
741                 bdev = bdget(device);
742
743         spin_lock(&swap_lock);
744         for (type = 0; type < nr_swapfiles; type++) {
745                 struct swap_info_struct *sis = swap_info[type];
746
747                 if (!(sis->flags & SWP_WRITEOK))
748                         continue;
749
750                 if (!bdev) {
751                         if (bdev_p)
752                                 *bdev_p = bdgrab(sis->bdev);
753
754                         spin_unlock(&swap_lock);
755                         return type;
756                 }
757                 if (bdev == sis->bdev) {
758                         struct swap_extent *se = &sis->first_swap_extent;
759
760                         if (se->start_block == offset) {
761                                 if (bdev_p)
762                                         *bdev_p = bdgrab(sis->bdev);
763
764                                 spin_unlock(&swap_lock);
765                                 bdput(bdev);
766                                 return type;
767                         }
768                 }
769         }
770         spin_unlock(&swap_lock);
771         if (bdev)
772                 bdput(bdev);
773
774         return -ENODEV;
775 }
776
777 /*
778  * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
779  * corresponding to given index in swap_info (swap type).
780  */
781 sector_t swapdev_block(int type, pgoff_t offset)
782 {
783         struct block_device *bdev;
784
785         if ((unsigned int)type >= nr_swapfiles)
786                 return 0;
787         if (!(swap_info[type]->flags & SWP_WRITEOK))
788                 return 0;
789         return map_swap_entry(swp_entry(type, offset), &bdev);
790 }
791
792 /*
793  * Return either the total number of swap pages of given type, or the number
794  * of free pages of that type (depending on @free)
795  *
796  * This is needed for software suspend
797  */
798 unsigned int count_swap_pages(int type, int free)
799 {
800         unsigned int n = 0;
801
802         spin_lock(&swap_lock);
803         if ((unsigned int)type < nr_swapfiles) {
804                 struct swap_info_struct *sis = swap_info[type];
805
806                 if (sis->flags & SWP_WRITEOK) {
807                         n = sis->pages;
808                         if (free)
809                                 n -= sis->inuse_pages;
810                 }
811         }
812         spin_unlock(&swap_lock);
813         return n;
814 }
815 #endif /* CONFIG_HIBERNATION */
816
817 /*
818  * No need to decide whether this PTE shares the swap entry with others,
819  * just let do_wp_page work it out if a write is requested later - to
820  * force COW, vm_page_prot omits write permission from any private vma.
821  */
822 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
823                 unsigned long addr, swp_entry_t entry, struct page *page)
824 {
825         struct mem_cgroup *ptr = NULL;
826         spinlock_t *ptl;
827         pte_t *pte;
828         int ret = 1;
829
830         if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) {
831                 ret = -ENOMEM;
832                 goto out_nolock;
833         }
834
835         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
836         if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
837                 if (ret > 0)
838                         mem_cgroup_cancel_charge_swapin(ptr);
839                 ret = 0;
840                 goto out;
841         }
842
843         inc_mm_counter(vma->vm_mm, anon_rss);
844         get_page(page);
845         set_pte_at(vma->vm_mm, addr, pte,
846                    pte_mkold(mk_pte(page, vma->vm_page_prot)));
847         page_add_anon_rmap(page, vma, addr);
848         mem_cgroup_commit_charge_swapin(page, ptr);
849         swap_free(entry);
850         /*
851          * Move the page to the active list so it is not
852          * immediately swapped out again after swapon.
853          */
854         activate_page(page);
855 out:
856         pte_unmap_unlock(pte, ptl);
857 out_nolock:
858         return ret;
859 }
860
861 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
862                                 unsigned long addr, unsigned long end,
863                                 swp_entry_t entry, struct page *page)
864 {
865         pte_t swp_pte = swp_entry_to_pte(entry);
866         pte_t *pte;
867         int ret = 0;
868
869         /*
870          * We don't actually need pte lock while scanning for swp_pte: since
871          * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
872          * page table while we're scanning; though it could get zapped, and on
873          * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
874          * of unmatched parts which look like swp_pte, so unuse_pte must
875          * recheck under pte lock.  Scanning without pte lock lets it be
876          * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
877          */
878         pte = pte_offset_map(pmd, addr);
879         do {
880                 /*
881                  * swapoff spends a _lot_ of time in this loop!
882                  * Test inline before going to call unuse_pte.
883                  */
884                 if (unlikely(pte_same(*pte, swp_pte))) {
885                         pte_unmap(pte);
886                         ret = unuse_pte(vma, pmd, addr, entry, page);
887                         if (ret)
888                                 goto out;
889                         pte = pte_offset_map(pmd, addr);
890                 }
891         } while (pte++, addr += PAGE_SIZE, addr != end);
892         pte_unmap(pte - 1);
893 out:
894         return ret;
895 }
896
897 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
898                                 unsigned long addr, unsigned long end,
899                                 swp_entry_t entry, struct page *page)
900 {
901         pmd_t *pmd;
902         unsigned long next;
903         int ret;
904
905         pmd = pmd_offset(pud, addr);
906         do {
907                 next = pmd_addr_end(addr, end);
908                 if (pmd_none_or_clear_bad(pmd))
909                         continue;
910                 ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
911                 if (ret)
912                         return ret;
913         } while (pmd++, addr = next, addr != end);
914         return 0;
915 }
916
917 static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
918                                 unsigned long addr, unsigned long end,
919                                 swp_entry_t entry, struct page *page)
920 {
921         pud_t *pud;
922         unsigned long next;
923         int ret;
924
925         pud = pud_offset(pgd, addr);
926         do {
927                 next = pud_addr_end(addr, end);
928                 if (pud_none_or_clear_bad(pud))
929                         continue;
930                 ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
931                 if (ret)
932                         return ret;
933         } while (pud++, addr = next, addr != end);
934         return 0;
935 }
936
937 static int unuse_vma(struct vm_area_struct *vma,
938                                 swp_entry_t entry, struct page *page)
939 {
940         pgd_t *pgd;
941         unsigned long addr, end, next;
942         int ret;
943
944         if (page_anon_vma(page)) {
945                 addr = page_address_in_vma(page, vma);
946                 if (addr == -EFAULT)
947                         return 0;
948                 else
949                         end = addr + PAGE_SIZE;
950         } else {
951                 addr = vma->vm_start;
952                 end = vma->vm_end;
953         }
954
955         pgd = pgd_offset(vma->vm_mm, addr);
956         do {
957                 next = pgd_addr_end(addr, end);
958                 if (pgd_none_or_clear_bad(pgd))
959                         continue;
960                 ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
961                 if (ret)
962                         return ret;
963         } while (pgd++, addr = next, addr != end);
964         return 0;
965 }
966
967 static int unuse_mm(struct mm_struct *mm,
968                                 swp_entry_t entry, struct page *page)
969 {
970         struct vm_area_struct *vma;
971         int ret = 0;
972
973         if (!down_read_trylock(&mm->mmap_sem)) {
974                 /*
975                  * Activate page so shrink_inactive_list is unlikely to unmap
976                  * its ptes while lock is dropped, so swapoff can make progress.
977                  */
978                 activate_page(page);
979                 unlock_page(page);
980                 down_read(&mm->mmap_sem);
981                 lock_page(page);
982         }
983         for (vma = mm->mmap; vma; vma = vma->vm_next) {
984                 if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
985                         break;
986         }
987         up_read(&mm->mmap_sem);
988         return (ret < 0)? ret: 0;
989 }
990
991 /*
992  * Scan swap_map from current position to next entry still in use.
993  * Recycle to start on reaching the end, returning 0 when empty.
994  */
995 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
996                                         unsigned int prev)
997 {
998         unsigned int max = si->max;
999         unsigned int i = prev;
1000         unsigned char count;
1001
1002         /*
1003          * No need for swap_lock here: we're just looking
1004          * for whether an entry is in use, not modifying it; false
1005          * hits are okay, and sys_swapoff() has already prevented new
1006          * allocations from this area (while holding swap_lock).
1007          */
1008         for (;;) {
1009                 if (++i >= max) {
1010                         if (!prev) {
1011                                 i = 0;
1012                                 break;
1013                         }
1014                         /*
1015                          * No entries in use at top of swap_map,
1016                          * loop back to start and recheck there.
1017                          */
1018                         max = prev + 1;
1019                         prev = 0;
1020                         i = 1;
1021                 }
1022                 count = si->swap_map[i];
1023                 if (count && swap_count(count) != SWAP_MAP_BAD)
1024                         break;
1025         }
1026         return i;
1027 }
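/*
 * Example of the wrap-around above, with made-up map contents: if
 * max == 8 and only entries 3 and 6 are in use, successive calls
 * starting from prev == 0 return 3, then 6, then wrap and return 3
 * again, and return 0 once both entries have finally been freed.
 */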
1028
1029 /*
1030  * We completely avoid races by reading each swap page in advance,
1031  * and then search for the process using it.  All the necessary
1032  * page table adjustments can then be made atomically.
1033  */
1034 static int try_to_unuse(unsigned int type)
1035 {
1036         struct swap_info_struct *si = swap_info[type];
1037         struct mm_struct *start_mm;
1038         unsigned char *swap_map;
1039         unsigned char swcount;
1040         struct page *page;
1041         swp_entry_t entry;
1042         unsigned int i = 0;
1043         int retval = 0;
1044
1045         /*
1046          * When searching mms for an entry, a good strategy is to
1047          * start at the first mm we freed the previous entry from
1048          * (though actually we don't notice whether we or coincidence
1049          * freed the entry).  Initialize this start_mm with a hold.
1050          *
1051          * A simpler strategy would be to start at the last mm we
1052          * freed the previous entry from; but that would take less
1053          * advantage of mmlist ordering, which clusters forked mms
1054          * together, child after parent.  If we race with dup_mmap(), we
1055          * prefer to resolve parent before child, lest we miss entries
1056          * duplicated after we scanned child: using last mm would invert
1057          * that.
1058          */
1059         start_mm = &init_mm;
1060         atomic_inc(&init_mm.mm_users);
1061
1062         /*
1063          * Keep on scanning until all entries have gone.  Usually,
1064          * one pass through swap_map is enough, but not necessarily:
1065          * there are races when an instance of an entry might be missed.
1066          */
1067         while ((i = find_next_to_unuse(si, i)) != 0) {
1068                 if (signal_pending(current)) {
1069                         retval = -EINTR;
1070                         break;
1071                 }
1072
1073                 /*
1074                  * Get a page for the entry, using the existing swap
1075                  * cache page if there is one.  Otherwise, get a clean
1076                  * page and read the swap into it.
1077                  */
1078                 swap_map = &si->swap_map[i];
1079                 entry = swp_entry(type, i);
1080                 page = read_swap_cache_async(entry,
1081                                         GFP_HIGHUSER_MOVABLE, NULL, 0);
1082                 if (!page) {
1083                         /*
1084                          * Either swap_duplicate() failed because entry
1085                          * has been freed independently, and will not be
1086                          * reused since sys_swapoff() already disabled
1087                          * allocation from here, or alloc_page() failed.
1088                          */
1089                         if (!*swap_map)
1090                                 continue;
1091                         retval = -ENOMEM;
1092                         break;
1093                 }
1094
1095                 /*
1096                  * Don't hold on to start_mm if it looks like exiting.
1097                  */
1098                 if (atomic_read(&start_mm->mm_users) == 1) {
1099                         mmput(start_mm);
1100                         start_mm = &init_mm;
1101                         atomic_inc(&init_mm.mm_users);
1102                 }
1103
1104                 /*
1105                  * Wait for and lock page.  When do_swap_page races with
1106                  * try_to_unuse, do_swap_page can handle the fault much
1107                  * faster than try_to_unuse can locate the entry.  This
1108                  * apparently redundant "wait_on_page_locked" lets try_to_unuse
1109                  * defer to do_swap_page in such a case - in some tests,
1110                  * do_swap_page and try_to_unuse repeatedly compete.
1111                  */
1112                 wait_on_page_locked(page);
1113                 wait_on_page_writeback(page);
1114                 lock_page(page);
1115                 wait_on_page_writeback(page);
1116
1117                 /*
1118                  * Remove all references to entry.
1119                  */
1120                 swcount = *swap_map;
1121                 if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1122                         retval = shmem_unuse(entry, page);
1123                         /* page has already been unlocked and released */
1124                         if (retval < 0)
1125                                 break;
1126                         continue;
1127                 }
1128                 if (swap_count(swcount) && start_mm != &init_mm)
1129                         retval = unuse_mm(start_mm, entry, page);
1130
1131                 if (swap_count(*swap_map)) {
1132                         int set_start_mm = (*swap_map >= swcount);
1133                         struct list_head *p = &start_mm->mmlist;
1134                         struct mm_struct *new_start_mm = start_mm;
1135                         struct mm_struct *prev_mm = start_mm;
1136                         struct mm_struct *mm;
1137
1138                         atomic_inc(&new_start_mm->mm_users);
1139                         atomic_inc(&prev_mm->mm_users);
1140                         spin_lock(&mmlist_lock);
1141                         while (swap_count(*swap_map) && !retval &&
1142                                         (p = p->next) != &start_mm->mmlist) {
1143                                 mm = list_entry(p, struct mm_struct, mmlist);
1144                                 if (!atomic_inc_not_zero(&mm->mm_users))
1145                                         continue;
1146                                 spin_unlock(&mmlist_lock);
1147                                 mmput(prev_mm);
1148                                 prev_mm = mm;
1149
1150                                 cond_resched();
1151
1152                                 swcount = *swap_map;
1153                                 if (!swap_count(swcount)) /* any usage ? */
1154                                         ;
1155                                 else if (mm == &init_mm)
1156                                         set_start_mm = 1;
1157                                 else
1158                                         retval = unuse_mm(mm, entry, page);
1159
1160                                 if (set_start_mm && *swap_map < swcount) {
1161                                         mmput(new_start_mm);
1162                                         atomic_inc(&mm->mm_users);
1163                                         new_start_mm = mm;
1164                                         set_start_mm = 0;
1165                                 }
1166                                 spin_lock(&mmlist_lock);
1167                         }
1168                         spin_unlock(&mmlist_lock);
1169                         mmput(prev_mm);
1170                         mmput(start_mm);
1171                         start_mm = new_start_mm;
1172                 }
1173                 if (retval) {
1174                         unlock_page(page);
1175                         page_cache_release(page);
1176                         break;
1177                 }
1178
1179                 /*
1180                  * If a reference remains (rare), we would like to leave
1181                  * the page in the swap cache; but try_to_unmap could
1182                  * then re-duplicate the entry once we drop page lock,
1183                  * so we might loop indefinitely; also, that page could
1184                  * not be swapped out to other storage meanwhile.  So:
1185                  * delete from cache even if there's another reference,
1186                  * after ensuring that the data has been saved to disk -
1187                  * since if the reference remains (rarer), it will be
1188                  * read from disk into another page.  Splitting into two
1189                  * pages would be incorrect if swap supported "shared
1190                  * private" pages, but they are handled by tmpfs files.
1191                  *
1192                  * Given how unuse_vma() targets one particular offset
1193                  * in an anon_vma, once the anon_vma has been determined,
1194                  * this splitting happens to be just what is needed to
1195                  * handle where KSM pages have been swapped out: re-reading
1196                  * is unnecessarily slow, but we can fix that later on.
1197                  */
1198                 if (swap_count(*swap_map) &&
1199                      PageDirty(page) && PageSwapCache(page)) {
1200                         struct writeback_control wbc = {
1201                                 .sync_mode = WB_SYNC_NONE,
1202                         };
1203
1204                         swap_writepage(page, &wbc);
1205                         lock_page(page);
1206                         wait_on_page_writeback(page);
1207                 }
1208
1209                 /*
1210                  * It is conceivable that a racing task removed this page from
1211                  * swap cache just before we acquired the page lock at the top,
1212                  * or while we dropped it in unuse_mm().  The page might even
1213                  * be back in swap cache on another swap area: that we must not
1214                  * delete, since it may not have been written out to swap yet.
1215                  */
1216                 if (PageSwapCache(page) &&
1217                     likely(page_private(page) == entry.val))
1218                         delete_from_swap_cache(page);
1219
1220                 /*
1221                  * So that we could skip searching mms once the swap count went
1222                  * to 1, we did not mark any present ptes as dirty: we must
1223                  * mark the page dirty so shrink_page_list will preserve it.
1224                  */
1225                 SetPageDirty(page);
1226                 unlock_page(page);
1227                 page_cache_release(page);
1228
1229                 /*
1230                  * Make sure that we aren't completely killing
1231                  * interactive performance.
1232                  */
1233                 cond_resched();
1234         }
1235
1236         mmput(start_mm);
1237         return retval;
1238 }
1239
1240 /*
1241  * After a successful try_to_unuse, if no swap is now in use, we know
1242  * we can empty the mmlist.  swap_lock must be held on entry and exit.
1243  * Note that mmlist_lock nests inside swap_lock, and an mm must be
1244  * added to the mmlist just after page_duplicate - before would be racy.
1245  */
1246 static void drain_mmlist(void)
1247 {
1248         struct list_head *p, *next;
1249         unsigned int type;
1250
1251         for (type = 0; type < nr_swapfiles; type++)
1252                 if (swap_info[type]->inuse_pages)
1253                         return;
1254         spin_lock(&mmlist_lock);
1255         list_for_each_safe(p, next, &init_mm.mmlist)
1256                 list_del_init(p);
1257         spin_unlock(&mmlist_lock);
1258 }
1259
1260 /*
1261  * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1262  * corresponds to page offset for the specified swap entry.
1263  * Note that the type of this function is sector_t, but it returns page offset
1264  * into the bdev, not sector offset.
1265  */
1266 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1267 {
1268         struct swap_info_struct *sis;
1269         struct swap_extent *start_se;
1270         struct swap_extent *se;
1271         pgoff_t offset;
1272
1273         sis = swap_info[swp_type(entry)];
1274         *bdev = sis->bdev;
1275
1276         offset = swp_offset(entry);
1277         start_se = sis->curr_swap_extent;
1278         se = start_se;
1279
1280         for ( ; ; ) {
1281                 struct list_head *lh;
1282
1283                 if (se->start_page <= offset &&
1284                                 offset < (se->start_page + se->nr_pages)) {
1285                         return se->start_block + (offset - se->start_page);
1286                 }
1287                 lh = se->list.next;
1288                 se = list_entry(lh, struct swap_extent, list);
1289                 sis->curr_swap_extent = se;
1290                 BUG_ON(se == start_se);         /* It *must* be present */
1291         }
1292 }
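/*
 * Worked example of the extent arithmetic above, with made-up numbers:
 * an extent with start_page == 64, nr_pages == 128 and
 * start_block == 2048 maps swap offset 100 to disk block
 * 2048 + (100 - 64) == 2084.
 */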
1293
1294 /*
1295  * Returns the page offset into bdev for the specified page's swap entry.
1296  */
1297 sector_t map_swap_page(struct page *page, struct block_device **bdev)
1298 {
1299         swp_entry_t entry;
1300         entry.val = page_private(page);
1301         return map_swap_entry(entry, bdev);
1302 }
1303
1304 /*
1305  * Free all of a swapdev's extent information
1306  */
1307 static void destroy_swap_extents(struct swap_info_struct *sis)
1308 {
1309         while (!list_empty(&sis->first_swap_extent.list)) {
1310                 struct swap_extent *se;
1311
1312                 se = list_entry(sis->first_swap_extent.list.next,
1313                                 struct swap_extent, list);
1314                 list_del(&se->list);
1315                 kfree(se);
1316         }
1317 }
1318
1319 /*
1320  * Add a block range (and the corresponding page range) into this swapdev's
1321  * extent list.  The extent list is kept sorted in page order.
1322  *
1323  * This function rather assumes that it is called in ascending page order.
1324  */
1325 static int
1326 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1327                 unsigned long nr_pages, sector_t start_block)
1328 {
1329         struct swap_extent *se;
1330         struct swap_extent *new_se;
1331         struct list_head *lh;
1332
1333         if (start_page == 0) {
1334                 se = &sis->first_swap_extent;
1335                 sis->curr_swap_extent = se;
1336                 se->start_page = 0;
1337                 se->nr_pages = nr_pages;
1338                 se->start_block = start_block;
1339                 return 1;
1340         } else {
1341                 lh = sis->first_swap_extent.list.prev;  /* Highest extent */
1342                 se = list_entry(lh, struct swap_extent, list);
1343                 BUG_ON(se->start_page + se->nr_pages != start_page);
1344                 if (se->start_block + se->nr_pages == start_block) {
1345                         /* Merge it */
1346                         se->nr_pages += nr_pages;
1347                         return 0;
1348                 }
1349         }
1350
1351         /*
1352          * No merge.  Insert a new extent, preserving ordering.
1353          */
1354         new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1355         if (new_se == NULL)
1356                 return -ENOMEM;
1357         new_se->start_page = start_page;
1358         new_se->nr_pages = nr_pages;
1359         new_se->start_block = start_block;
1360
1361         list_add_tail(&new_se->list, &sis->first_swap_extent.list);
1362         return 1;
1363 }
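/*
 * A minimal sketch of the merge behaviour above, with made-up block
 * numbers: the first call installs the initial extent and returns 1,
 * the second continues it exactly, so it is merged and returns 0.
 */
#if 0	/* illustrative sketch only, not built */
static void add_swap_extent_example(struct swap_info_struct *sis)
{
	add_swap_extent(sis, 0, 8, 1000);	/* pages 0..7  -> blocks 1000..1007 */
	add_swap_extent(sis, 8, 8, 1008);	/* merged: the extent now spans 16 pages */
}
#endif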
1364
1365 /*
1366  * A `swap extent' is a simple thing which maps a contiguous range of pages
1367  * onto a contiguous range of disk blocks.  An ordered list of swap extents
1368  * is built at swapon time and is then used at swap_writepage/swap_readpage
1369  * time for locating where on disk a page belongs.
1370  *
1371  * If the swapfile is an S_ISBLK block device, a single extent is installed.
1372  * This is done so that the main operating code can treat S_ISBLK and S_ISREG
1373  * swap files identically.
1374  *
1375  * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
1376  * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
1377  * swapfiles are handled *identically* after swapon time.
1378  *
1379  * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
1380  * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
1381  * some stray blocks are found which do not fall within the PAGE_SIZE alignment
1382  * requirements, they are simply tossed out - we will never use those blocks
1383  * for swapping.
1384  *
1385  * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
1386  * prevents root from shooting her foot off by ftruncating an in-use swapfile,
1387  * which will scribble on the fs.
1388  *
1389  * The amount of disk space which a single swap extent represents varies.
1390  * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
1391  * extents in the list.  To avoid much list walking, we cache the previous
1392  * search location in `curr_swap_extent', and start new searches from there.
1393  * This is extremely effective.  The average number of iterations in
1394  * map_swap_page() has been measured at about 0.3 per page.  - akpm.
1395  */
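/*
 * For example (made-up numbers), a lightly fragmented S_ISREG swapfile
 * might end up with an extent list like:
 *
 *	start_page	nr_pages	start_block
 *	0		512		10000
 *	512		256		20480
 *	768		512		4096
 *
 * so swap page 600 lives at disk block 20480 + (600 - 512) == 20568.
 */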
1396 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1397 {
1398         struct inode *inode;
1399         unsigned blocks_per_page;
1400         unsigned long page_no;
1401         unsigned blkbits;
1402         sector_t probe_block;
1403         sector_t last_block;
1404         sector_t lowest_block = -1;
1405         sector_t highest_block = 0;
1406         int nr_extents = 0;
1407         int ret;
1408
1409         inode = sis->swap_file->f_mapping->host;
1410         if (S_ISBLK(inode->i_mode)) {
1411                 ret = add_swap_extent(sis, 0, sis->max, 0);
1412                 *span = sis->pages;
1413                 goto out;
1414         }
1415
1416         blkbits = inode->i_blkbits;
1417         blocks_per_page = PAGE_SIZE >> blkbits;
1418
1419         /*
1420          * Map all the blocks into the extent list.  This code doesn't try
1421          * to be very smart.
1422          */
1423         probe_block = 0;
1424         page_no = 0;
1425         last_block = i_size_read(inode) >> blkbits;
1426         while ((probe_block + blocks_per_page) <= last_block &&
1427                         page_no < sis->max) {
1428                 unsigned block_in_page;
1429                 sector_t first_block;
1430
1431                 first_block = bmap(inode, probe_block);
1432                 if (first_block == 0)
1433                         goto bad_bmap;
1434
1435                 /*
1436                  * It must be PAGE_SIZE aligned on-disk
1437                  */
1438                 if (first_block & (blocks_per_page - 1)) {
1439                         probe_block++;
1440                         goto reprobe;
1441                 }
1442
1443                 for (block_in_page = 1; block_in_page < blocks_per_page;
1444                                         block_in_page++) {
1445                         sector_t block;
1446
1447                         block = bmap(inode, probe_block + block_in_page);
1448                         if (block == 0)
1449                                 goto bad_bmap;
1450                         if (block != first_block + block_in_page) {
1451                                 /* Discontiguity */
1452                                 probe_block++;
1453                                 goto reprobe;
1454                         }
1455                 }
1456
1457                 first_block >>= (PAGE_SHIFT - blkbits);
1458                 if (page_no) {  /* exclude the header page */
1459                         if (first_block < lowest_block)
1460                                 lowest_block = first_block;
1461                         if (first_block > highest_block)
1462                                 highest_block = first_block;
1463                 }
1464
1465                 /*
1466                  * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
1467                  */
1468                 ret = add_swap_extent(sis, page_no, 1, first_block);
1469                 if (ret < 0)
1470                         goto out;
1471                 nr_extents += ret;
1472                 page_no++;
1473                 probe_block += blocks_per_page;
1474 reprobe:
1475                 continue;
1476         }
1477         ret = nr_extents;
1478         *span = 1 + highest_block - lowest_block;
1479         if (page_no == 0)
1480                 page_no = 1;    /* force Empty message */
1481         sis->max = page_no;
1482         sis->pages = page_no - 1;
1483         sis->highest_bit = page_no - 1;
1484 out:
1485         return ret;
1486 bad_bmap:
1487         printk(KERN_ERR "swapon: swapfile has holes\n");
1488         ret = -EINVAL;
1489         goto out;
1490 }
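
/*
 * For an S_ISREG swapfile the extent list built above might look like this
 * (illustrative values only) for a 1024-page file laid out in three
 * contiguous runs of blocks:
 *
 *	{ start_page =   0, nr_pages = 400, start_block = 11200 }
 *	{ start_page = 400, nr_pages = 500, start_block = 20480 }
 *	{ start_page = 900, nr_pages = 124, start_block = 43008 }
 *
 * Swap page N is then mapped by finding the extent with
 * start_page <= N < start_page + nr_pages and using disk block
 * start_block + (N - start_page).  For an S_ISBLK device the list is just
 * the single extent { 0, sis->max, 0 } installed at the top of this
 * function.
 */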
1491
1492 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1493 {
1494         struct swap_info_struct *p = NULL;
1495         unsigned char *swap_map;
1496         struct file *swap_file, *victim;
1497         struct address_space *mapping;
1498         struct inode *inode;
1499         char *pathname;
1500         int i, type, prev;
1501         int err;
1502
1503         if (!capable(CAP_SYS_ADMIN))
1504                 return -EPERM;
1505
1506         pathname = getname(specialfile);
1507         err = PTR_ERR(pathname);
1508         if (IS_ERR(pathname))
1509                 goto out;
1510
1511         victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0);
1512         putname(pathname);
1513         err = PTR_ERR(victim);
1514         if (IS_ERR(victim))
1515                 goto out;
1516
1517         mapping = victim->f_mapping;
1518         prev = -1;
1519         spin_lock(&swap_lock);
1520         for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
1521                 p = swap_info[type];
1522                 if (p->flags & SWP_WRITEOK) {
1523                         if (p->swap_file->f_mapping == mapping)
1524                                 break;
1525                 }
1526                 prev = type;
1527         }
1528         if (type < 0) {
1529                 err = -EINVAL;
1530                 spin_unlock(&swap_lock);
1531                 goto out_dput;
1532         }
1533         if (!security_vm_enough_memory(p->pages))
1534                 vm_unacct_memory(p->pages);
1535         else {
1536                 err = -ENOMEM;
1537                 spin_unlock(&swap_lock);
1538                 goto out_dput;
1539         }
1540         if (prev < 0)
1541                 swap_list.head = p->next;
1542         else
1543                 swap_info[prev]->next = p->next;
1544         if (type == swap_list.next) {
1545                 /* just pick something that's safe... */
1546                 swap_list.next = swap_list.head;
1547         }
1548         if (p->prio < 0) {
1549                 for (i = p->next; i >= 0; i = swap_info[i]->next)
1550                         swap_info[i]->prio = p->prio--;
1551                 least_priority++;
1552         }
1553         nr_swap_pages -= p->pages;
1554         total_swap_pages -= p->pages;
1555         p->flags &= ~SWP_WRITEOK;
1556         spin_unlock(&swap_lock);
1557
1558         current->flags |= PF_OOM_ORIGIN;
1559         err = try_to_unuse(type);
1560         current->flags &= ~PF_OOM_ORIGIN;
1561
1562         if (err) {
1563                 /* re-insert swap space into swap_list */
1564                 spin_lock(&swap_lock);
1565                 if (p->prio < 0)
1566                         p->prio = --least_priority;
1567                 prev = -1;
1568                 for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1569                         if (p->prio >= swap_info[i]->prio)
1570                                 break;
1571                         prev = i;
1572                 }
1573                 p->next = i;
1574                 if (prev < 0)
1575                         swap_list.head = swap_list.next = type;
1576                 else
1577                         swap_info[prev]->next = type;
1578                 nr_swap_pages += p->pages;
1579                 total_swap_pages += p->pages;
1580                 p->flags |= SWP_WRITEOK;
1581                 spin_unlock(&swap_lock);
1582                 goto out_dput;
1583         }
1584
1585         /* wait for any unplug function to finish */
1586         down_write(&swap_unplug_sem);
1587         up_write(&swap_unplug_sem);
1588
1589         destroy_swap_extents(p);
1590         if (p->flags & SWP_CONTINUED)
1591                 free_swap_count_continuations(p);
1592
1593         mutex_lock(&swapon_mutex);
1594         spin_lock(&swap_lock);
1595         drain_mmlist();
1596
1597         /* wait for anyone still in scan_swap_map */
1598         p->highest_bit = 0;             /* cuts scans short */
1599         while (p->flags >= SWP_SCANNING) {
1600                 spin_unlock(&swap_lock);
1601                 schedule_timeout_uninterruptible(1);
1602                 spin_lock(&swap_lock);
1603         }
1604
1605         swap_file = p->swap_file;
1606         p->swap_file = NULL;
1607         p->max = 0;
1608         swap_map = p->swap_map;
1609         p->swap_map = NULL;
1610         p->flags = 0;
1611         spin_unlock(&swap_lock);
1612         mutex_unlock(&swapon_mutex);
1613         vfree(swap_map);
1614         /* Destroy swap account information */
1615         swap_cgroup_swapoff(type);
1616
1617         inode = mapping->host;
1618         if (S_ISBLK(inode->i_mode)) {
1619                 struct block_device *bdev = I_BDEV(inode);
1620                 set_blocksize(bdev, p->old_block_size);
1621                 bd_release(bdev);
1622         } else {
1623                 mutex_lock(&inode->i_mutex);
1624                 inode->i_flags &= ~S_SWAPFILE;
1625                 mutex_unlock(&inode->i_mutex);
1626         }
1627         filp_close(swap_file, NULL);
1628         err = 0;
1629
1630 out_dput:
1631         filp_close(victim, NULL);
1632 out:
1633         return err;
1634 }
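
/*
 * From userspace, swapoff(2) on the path above fails with -EINVAL if the
 * file is not an active swap area, and with -ENOMEM if the
 * security_vm_enough_memory() check decides the remaining memory plus swap
 * cannot absorb the pages that try_to_unuse() would have to bring back in.
 * On the -ENOMEM and try_to_unuse() failure paths the area is left (or put
 * back) on swap_list, fully usable.
 */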
1635
1636 #ifdef CONFIG_PROC_FS
1637 /* iterator */
1638 static void *swap_start(struct seq_file *swap, loff_t *pos)
1639 {
1640         struct swap_info_struct *si;
1641         int type;
1642         loff_t l = *pos;
1643
1644         mutex_lock(&swapon_mutex);
1645
1646         if (!l)
1647                 return SEQ_START_TOKEN;
1648
1649         for (type = 0; type < nr_swapfiles; type++) {
1650                 smp_rmb();      /* read nr_swapfiles before swap_info[type] */
1651                 si = swap_info[type];
1652                 if (!(si->flags & SWP_USED) || !si->swap_map)
1653                         continue;
1654                 if (!--l)
1655                         return si;
1656         }
1657
1658         return NULL;
1659 }
1660
1661 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
1662 {
1663         struct swap_info_struct *si = v;
1664         int type;
1665
1666         if (v == SEQ_START_TOKEN)
1667                 type = 0;
1668         else
1669                 type = si->type + 1;
1670
1671         for (; type < nr_swapfiles; type++) {
1672                 smp_rmb();      /* read nr_swapfiles before swap_info[type] */
1673                 si = swap_info[type];
1674                 if (!(si->flags & SWP_USED) || !si->swap_map)
1675                         continue;
1676                 ++*pos;
1677                 return si;
1678         }
1679
1680         return NULL;
1681 }
1682
1683 static void swap_stop(struct seq_file *swap, void *v)
1684 {
1685         mutex_unlock(&swapon_mutex);
1686 }
1687
1688 static int swap_show(struct seq_file *swap, void *v)
1689 {
1690         struct swap_info_struct *si = v;
1691         struct file *file;
1692         int len;
1693
1694         if (si == SEQ_START_TOKEN) {
1695                 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
1696                 return 0;
1697         }
1698
1699         file = si->swap_file;
1700         len = seq_path(swap, &file->f_path, " \t\n\\");
1701         seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
1702                         len < 40 ? 40 - len : 1, " ",
1703                         S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
1704                                 "partition" : "file\t",
1705                         si->pages << (PAGE_SHIFT - 10),
1706                         si->inuse_pages << (PAGE_SHIFT - 10),
1707                         si->prio);
1708         return 0;
1709 }
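
/*
 * Sample /proc/swaps output produced by the above (values illustrative):
 *
 *	Filename				Type		Size	Used	Priority
 *	/dev/sda2				partition	2097148	0	-1
 *	/var/swapfile				file		524284	1024	-2
 *
 * Size and Used are reported in 1K units, i.e. pages << (PAGE_SHIFT - 10).
 */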
1710
1711 static const struct seq_operations swaps_op = {
1712         .start =        swap_start,
1713         .next =         swap_next,
1714         .stop =         swap_stop,
1715         .show =         swap_show
1716 };
1717
1718 static int swaps_open(struct inode *inode, struct file *file)
1719 {
1720         return seq_open(file, &swaps_op);
1721 }
1722
1723 static const struct file_operations proc_swaps_operations = {
1724         .open           = swaps_open,
1725         .read           = seq_read,
1726         .llseek         = seq_lseek,
1727         .release        = seq_release,
1728 };
1729
1730 static int __init procswaps_init(void)
1731 {
1732         proc_create("swaps", 0, NULL, &proc_swaps_operations);
1733         return 0;
1734 }
1735 __initcall(procswaps_init);
1736 #endif /* CONFIG_PROC_FS */
1737
1738 #ifdef MAX_SWAPFILES_CHECK
1739 static int __init max_swapfiles_check(void)
1740 {
1741         MAX_SWAPFILES_CHECK();
1742         return 0;
1743 }
1744 late_initcall(max_swapfiles_check);
1745 #endif
1746
1747 /*
1748  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
1749  *
1750  * The swapon system call
1751  */
1752 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1753 {
1754         struct swap_info_struct *p;
1755         char *name = NULL;
1756         struct block_device *bdev = NULL;
1757         struct file *swap_file = NULL;
1758         struct address_space *mapping;
1759         unsigned int type;
1760         int i, prev;
1761         int error;
1762         union swap_header *swap_header = NULL;
1763         unsigned int nr_good_pages = 0;
1764         int nr_extents = 0;
1765         sector_t span;
1766         unsigned long maxpages = 1;
1767         unsigned long swapfilepages;
1768         unsigned char *swap_map = NULL;
1769         struct page *page = NULL;
1770         struct inode *inode = NULL;
1771         int did_down = 0;
1772
1773         if (!capable(CAP_SYS_ADMIN))
1774                 return -EPERM;
1775
1776         p = kzalloc(sizeof(*p), GFP_KERNEL);
1777         if (!p)
1778                 return -ENOMEM;
1779
1780         spin_lock(&swap_lock);
1781         for (type = 0; type < nr_swapfiles; type++) {
1782                 if (!(swap_info[type]->flags & SWP_USED))
1783                         break;
1784         }
1785         error = -EPERM;
1786         if (type >= MAX_SWAPFILES) {
1787                 spin_unlock(&swap_lock);
1788                 kfree(p);
1789                 goto out;
1790         }
1791         if (type >= nr_swapfiles) {
1792                 p->type = type;
1793                 swap_info[type] = p;
1794                 /*
1795                  * Write swap_info[type] before nr_swapfiles, in case a
1796                  * racing procfs swap_start() or swap_next() is reading them.
1797                  * (We never shrink nr_swapfiles, we never free this entry.)
1798                  */
1799                 smp_wmb();
1800                 nr_swapfiles++;
1801         } else {
1802                 kfree(p);
1803                 p = swap_info[type];
1804                 /*
1805                  * Do not memset this entry: a racing procfs swap_next()
1806                  * would be relying on p->type to remain valid.
1807                  */
1808         }
1809         INIT_LIST_HEAD(&p->first_swap_extent.list);
1810         p->flags = SWP_USED;
1811         p->next = -1;
1812         spin_unlock(&swap_lock);
1813
1814         name = getname(specialfile);
1815         error = PTR_ERR(name);
1816         if (IS_ERR(name)) {
1817                 name = NULL;
1818                 goto bad_swap_2;
1819         }
1820         swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
1821         error = PTR_ERR(swap_file);
1822         if (IS_ERR(swap_file)) {
1823                 swap_file = NULL;
1824                 goto bad_swap_2;
1825         }
1826
1827         p->swap_file = swap_file;
1828         mapping = swap_file->f_mapping;
1829         inode = mapping->host;
1830
1831         error = -EBUSY;
1832         for (i = 0; i < nr_swapfiles; i++) {
1833                 struct swap_info_struct *q = swap_info[i];
1834
1835                 if (i == type || !q->swap_file)
1836                         continue;
1837                 if (mapping == q->swap_file->f_mapping)
1838                         goto bad_swap;
1839         }
1840
1841         error = -EINVAL;
1842         if (S_ISBLK(inode->i_mode)) {
1843                 bdev = I_BDEV(inode);
1844                 error = bd_claim(bdev, sys_swapon);
1845                 if (error < 0) {
1846                         bdev = NULL;
1847                         error = -EINVAL;
1848                         goto bad_swap;
1849                 }
1850                 p->old_block_size = block_size(bdev);
1851                 error = set_blocksize(bdev, PAGE_SIZE);
1852                 if (error < 0)
1853                         goto bad_swap;
1854                 p->bdev = bdev;
1855         } else if (S_ISREG(inode->i_mode)) {
1856                 p->bdev = inode->i_sb->s_bdev;
1857                 mutex_lock(&inode->i_mutex);
1858                 did_down = 1;
1859                 if (IS_SWAPFILE(inode)) {
1860                         error = -EBUSY;
1861                         goto bad_swap;
1862                 }
1863         } else {
1864                 goto bad_swap;
1865         }
1866
1867         swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
1868
1869         /*
1870          * Read the swap header.
1871          */
1872         if (!mapping->a_ops->readpage) {
1873                 error = -EINVAL;
1874                 goto bad_swap;
1875         }
1876         page = read_mapping_page(mapping, 0, swap_file);
1877         if (IS_ERR(page)) {
1878                 error = PTR_ERR(page);
1879                 goto bad_swap;
1880         }
1881         swap_header = kmap(page);
1882
1883         if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
1884                 printk(KERN_ERR "Unable to find swap-space signature\n");
1885                 error = -EINVAL;
1886                 goto bad_swap;
1887         }
1888
1889         /* swap partition endianness hack... */
1890         if (swab32(swap_header->info.version) == 1) {
1891                 swab32s(&swap_header->info.version);
1892                 swab32s(&swap_header->info.last_page);
1893                 swab32s(&swap_header->info.nr_badpages);
1894                 for (i = 0; i < swap_header->info.nr_badpages; i++)
1895                         swab32s(&swap_header->info.badpages[i]);
1896         }
1897         /* Check the swap header's sub-version */
1898         if (swap_header->info.version != 1) {
1899                 printk(KERN_WARNING
1900                        "Unable to handle swap header version %d\n",
1901                        swap_header->info.version);
1902                 error = -EINVAL;
1903                 goto bad_swap;
1904         }
1905
1906         p->lowest_bit  = 1;
1907         p->cluster_next = 1;
1908         p->cluster_nr = 0;
1909
1910         /*
1911          * Find out how many pages are allowed for a single swap
1912          * device. There are two limiting factors: 1) the number of
1913          * bits for the swap offset in the swp_entry_t type and
1914          * 2) the number of bits in a swap pte as defined by
1915          * the different architectures. In order to find the
1916          * largest possible bit mask a swap entry with swap type 0
1917          * and swap offset ~0UL is created, encoded to a swap pte,
1918          * decoded to a swp_entry_t again and finally the swap
1919          * offset is extracted. This will mask all the bits from
1920          * the initial ~0UL mask that can't be encoded in either
1921          * the swp_entry_t or the architecture definition of a
1922          * swap pte.
1923          */
1924         maxpages = swp_offset(pte_to_swp_entry(
1925                         swp_entry_to_pte(swp_entry(0, ~0UL)))) - 1;
1926         if (maxpages > swap_header->info.last_page)
1927                 maxpages = swap_header->info.last_page;
1928         p->highest_bit = maxpages - 1;
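        /*
         * For example (purely illustrative): on an architecture whose swap
         * pte leaves 27 usable offset bits, the encode/decode round trip
         * above yields a maxpages just under 2^27, i.e. roughly 512GB of
         * swap with 4K pages; a smaller last_page in the on-disk header
         * lowers the limit further.
         */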
1929
1930         error = -EINVAL;
1931         if (!maxpages)
1932                 goto bad_swap;
1933         if (swapfilepages && maxpages > swapfilepages) {
1934                 printk(KERN_WARNING
1935                        "Swap area shorter than signature indicates\n");
1936                 goto bad_swap;
1937         }
1938         if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
1939                 goto bad_swap;
1940         if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
1941                 goto bad_swap;
1942
1943         /* OK, set up the swap map and apply the bad block list */
1944         swap_map = vmalloc(maxpages);
1945         if (!swap_map) {
1946                 error = -ENOMEM;
1947                 goto bad_swap;
1948         }
1949
1950         memset(swap_map, 0, maxpages);
1951         for (i = 0; i < swap_header->info.nr_badpages; i++) {
1952                 int page_nr = swap_header->info.badpages[i];
1953                 if (page_nr <= 0 || page_nr >= swap_header->info.last_page) {
1954                         error = -EINVAL;
1955                         goto bad_swap;
1956                 }
1957                 swap_map[page_nr] = SWAP_MAP_BAD;
1958         }
1959
1960         error = swap_cgroup_swapon(type, maxpages);
1961         if (error)
1962                 goto bad_swap;
1963
1964         nr_good_pages = swap_header->info.last_page -
1965                         swap_header->info.nr_badpages -
1966                         1 /* header page */;
1967
1968         if (nr_good_pages) {
1969                 swap_map[0] = SWAP_MAP_BAD;
1970                 p->max = maxpages;
1971                 p->pages = nr_good_pages;
1972                 nr_extents = setup_swap_extents(p, &span);
1973                 if (nr_extents < 0) {
1974                         error = nr_extents;
1975                         goto bad_swap;
1976                 }
1977                 nr_good_pages = p->pages;
1978         }
1979         if (!nr_good_pages) {
1980                 printk(KERN_WARNING "Empty swap-file\n");
1981                 error = -EINVAL;
1982                 goto bad_swap;
1983         }
1984
1985         if (p->bdev) {
1986                 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
1987                         p->flags |= SWP_SOLIDSTATE;
1988                         p->cluster_next = 1 + (random32() % p->highest_bit);
1989                 }
1990                 if (discard_swap(p) == 0)
1991                         p->flags |= SWP_DISCARDABLE;
1992         }
1993
1994         mutex_lock(&swapon_mutex);
1995         spin_lock(&swap_lock);
1996         if (swap_flags & SWAP_FLAG_PREFER)
1997                 p->prio =
1998                   (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
1999         else
2000                 p->prio = --least_priority;
2001         p->swap_map = swap_map;
2002         p->flags |= SWP_WRITEOK;
2003         nr_swap_pages += nr_good_pages;
2004         total_swap_pages += nr_good_pages;
2005
2006         printk(KERN_INFO "Adding %uk swap on %s.  "
2007                         "Priority:%d extents:%d across:%lluk %s%s\n",
2008                 nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
2009                 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2010                 (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2011                 (p->flags & SWP_DISCARDABLE) ? "D" : "");
2012
2013         /* insert swap space into swap_list: */
2014         prev = -1;
2015         for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
2016                 if (p->prio >= swap_info[i]->prio)
2017                         break;
2018                 prev = i;
2019         }
2020         p->next = i;
2021         if (prev < 0)
2022                 swap_list.head = swap_list.next = type;
2023         else
2024                 swap_info[prev]->next = type;
2025         spin_unlock(&swap_lock);
2026         mutex_unlock(&swapon_mutex);
2027         error = 0;
2028         goto out;
2029 bad_swap:
2030         if (bdev) {
2031                 set_blocksize(bdev, p->old_block_size);
2032                 bd_release(bdev);
2033         }
2034         destroy_swap_extents(p);
2035         swap_cgroup_swapoff(type);
2036 bad_swap_2:
2037         spin_lock(&swap_lock);
2038         p->swap_file = NULL;
2039         p->flags = 0;
2040         spin_unlock(&swap_lock);
2041         vfree(swap_map);
2042         if (swap_file)
2043                 filp_close(swap_file, NULL);
2044 out:
2045         if (page && !IS_ERR(page)) {
2046                 kunmap(page);
2047                 page_cache_release(page);
2048         }
2049         if (name)
2050                 putname(name);
2051         if (did_down) {
2052                 if (!error)
2053                         inode->i_flags |= S_SWAPFILE;
2054                 mutex_unlock(&inode->i_mutex);
2055         }
2056         return error;
2057 }
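
/*
 * Priority handling in sys_swapon(), by example (values illustrative):
 * swapon(path, SWAP_FLAG_PREFER | (5 << SWAP_FLAG_PRIO_SHIFT)) requests
 * priority 5 explicitly, while plain swapon(path, 0) gets the next default
 * priority from --least_priority (-1, then -2, ...).  The insertion loop
 * above keeps swap_list sorted by descending priority, so higher-priority
 * areas are offered to the allocator first.
 */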
2058
2059 void si_swapinfo(struct sysinfo *val)
2060 {
2061         unsigned int type;
2062         unsigned long nr_to_be_unused = 0;
2063
2064         spin_lock(&swap_lock);
2065         for (type = 0; type < nr_swapfiles; type++) {
2066                 struct swap_info_struct *si = swap_info[type];
2067
2068                 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2069                         nr_to_be_unused += si->inuse_pages;
2070         }
2071         val->freeswap = nr_swap_pages + nr_to_be_unused;
2072         val->totalswap = total_swap_pages + nr_to_be_unused;
2073         spin_unlock(&swap_lock);
2074 }
2075
2076 /*
2077  * Verify that a swap entry is valid and increment its swap map count.
2078  *
2079  * Returns an error code in the following cases:
2080  * - success -> 0
2081  * - swp_entry is invalid -> EINVAL
2082  * - swp_entry is migration entry -> EINVAL
2083  * - swap-cache reference is requested but there is already one. -> EEXIST
2084  * - swap-cache reference is requested but the entry is not used. -> ENOENT
2085  * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2086  */
2087 static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2088 {
2089         struct swap_info_struct *p;
2090         unsigned long offset, type;
2091         unsigned char count;
2092         unsigned char has_cache;
2093         int err = -EINVAL;
2094
2095         if (non_swap_entry(entry))
2096                 goto out;
2097
2098         type = swp_type(entry);
2099         if (type >= nr_swapfiles)
2100                 goto bad_file;
2101         p = swap_info[type];
2102         offset = swp_offset(entry);
2103
2104         spin_lock(&swap_lock);
2105         if (unlikely(offset >= p->max))
2106                 goto unlock_out;
2107
2108         count = p->swap_map[offset];
2109         has_cache = count & SWAP_HAS_CACHE;
2110         count &= ~SWAP_HAS_CACHE;
2111         err = 0;
2112
2113         if (usage == SWAP_HAS_CACHE) {
2114
2115                 /* set SWAP_HAS_CACHE if there is no cache and entry is used */
2116                 if (!has_cache && count)
2117                         has_cache = SWAP_HAS_CACHE;
2118                 else if (has_cache)             /* someone else added cache */
2119                         err = -EEXIST;
2120                 else                            /* no users remaining */
2121                         err = -ENOENT;
2122
2123         } else if (count || has_cache) {
2124
2125                 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2126                         count += usage;
2127                 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2128                         err = -EINVAL;
2129                 else if (swap_count_continued(p, offset, count))
2130                         count = COUNT_CONTINUED;
2131                 else
2132                         err = -ENOMEM;
2133         } else
2134                 err = -ENOENT;                  /* unused swap entry */
2135
2136         p->swap_map[offset] = count | has_cache;
2137
2138 unlock_out:
2139         spin_unlock(&swap_lock);
2140 out:
2141         return err;
2142
2143 bad_file:
2144         printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
2145         goto out;
2146 }
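
/*
 * A few concrete cases of the __swap_duplicate() logic above (usage is one
 * of SWAP_HAS_CACHE, SWAP_MAP_SHMEM, or 1 for an ordinary map reference):
 *
 *	swap_map[offset] == 0,			usage == 1		-> -ENOENT
 *	swap_map[offset] == 1,			usage == SWAP_HAS_CACHE	-> cache bit set
 *	swap_map[offset] == 1|SWAP_HAS_CACHE,	usage == SWAP_HAS_CACHE	-> -EEXIST
 *	swap_map[offset] == SWAP_MAP_MAX,	usage == 1		-> carries into a
 *			continuation page if one has room, else -ENOMEM
 */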
2147
2148 /*
2149  * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2150  * (in which case its reference count is never incremented).
2151  */
2152 void swap_shmem_alloc(swp_entry_t entry)
2153 {
2154         __swap_duplicate(entry, SWAP_MAP_SHMEM);
2155 }
2156
2157 /*
2158  * increase reference count of swap entry by 1.
2159  */
2160 int swap_duplicate(swp_entry_t entry)
2161 {
2162         int err = 0;
2163
2164         while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2165                 err = add_swap_count_continuation(entry, GFP_ATOMIC);
2166         return err;
2167 }
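
/*
 * A caller that holds page table locks (the fork path, for instance)
 * typically handles the -ENOMEM case by dropping its locks and retrying
 * with a sleeping allocation, roughly (hypothetical sketch only):
 *
 *	if (swap_duplicate(entry) < 0) {
 *		spin_unlock(ptl);
 *		ret = add_swap_count_continuation(entry, GFP_KERNEL);
 *		... retake locks and retry the copy ...
 *	}
 *
 * matching the GFP_ATOMIC/GFP_KERNEL protocol described in the comment
 * above add_swap_count_continuation() below.
 */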
2168
2169 /*
2170  * @entry: swap entry for which we allocate swap cache.
2171  *
2172  * Called when allocating swap cache for an existing swap entry.
2173  * This can return error codes; it returns 0 on success.
2174  * -EEXIST means there is already a swap cache for this entry.
2175  * Note: return code is different from swap_duplicate().
2176  */
2177 int swapcache_prepare(swp_entry_t entry)
2178 {
2179         return __swap_duplicate(entry, SWAP_HAS_CACHE);
2180 }
2181
2182 /*
2183  * swap_lock prevents swap_map from being freed. Don't grab an extra
2184  * reference on the swaphandle; it doesn't matter if it becomes unused.
2185  */
2186 int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
2187 {
2188         struct swap_info_struct *si;
2189         int our_page_cluster = page_cluster;
2190         pgoff_t target, toff;
2191         pgoff_t base, end;
2192         int nr_pages = 0;
2193
2194         if (!our_page_cluster)  /* no readahead */
2195                 return 0;
2196
2197         si = swap_info[swp_type(entry)];
2198         target = swp_offset(entry);
2199         base = (target >> our_page_cluster) << our_page_cluster;
2200         end = base + (1 << our_page_cluster);
2201         if (!base)              /* first page is swap header */
2202                 base++;
2203
2204         spin_lock(&swap_lock);
2205         if (end > si->max)      /* don't go beyond end of map */
2206                 end = si->max;
2207
2208         /* Count contiguous allocated slots above our target */
2209         for (toff = target; ++toff < end; nr_pages++) {
2210                 /* Don't read in free or bad pages */
2211                 if (!si->swap_map[toff])
2212                         break;
2213                 if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2214                         break;
2215         }
2216         /* Count contiguous allocated slots below our target */
2217         for (toff = target; --toff >= base; nr_pages++) {
2218                 /* Don't read in free or bad pages */
2219                 if (!si->swap_map[toff])
2220                         break;
2221                 if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
2222                         break;
2223         }
2224         spin_unlock(&swap_lock);
2225
2226         /*
2227          * Indicate starting offset, and return number of pages to get:
2228          * if only 1, say 0, since there's then no readahead to be done.
2229          */
2230         *offset = ++toff;
2231         return nr_pages ? ++nr_pages : 0;
2232 }
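
/*
 * Readahead window example: with page_cluster == 3 and a target offset of
 * 13, base = (13 >> 3) << 3 = 8 and end = 16, so slots 8..15 are eligible.
 * Scanning stops at the first free or SWAP_MAP_BAD slot on either side of
 * the target, *offset is set to the lowest slot of the run that survives,
 * and the return value counts the target plus its contiguous neighbours
 * (or 0 when the target stands alone and no readahead is worthwhile).
 */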
2233
2234 /*
2235  * add_swap_count_continuation - called when a swap count is duplicated
2236  * beyond SWAP_MAP_MAX: it allocates a new page and links it to the entry's
2237  * page of the original vmalloc'ed swap_map, to hold the continuation count
2238  * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
2239  * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2240  *
2241  * These continuation pages are seldom referenced: the common paths all work
2242  * on the original swap_map, only referring to a continuation page when the
2243  * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2244  *
2245  * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2246  * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2247  * can be called after dropping locks.
2248  */
2249 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2250 {
2251         struct swap_info_struct *si;
2252         struct page *head;
2253         struct page *page;
2254         struct page *list_page;
2255         pgoff_t offset;
2256         unsigned char count;
2257
2258         /*
2259          * When debugging, it's easier to use __GFP_ZERO here; but it's better
2260          * for latency not to zero a page under GFP_ATOMIC while holding locks.
2261          */
2262         page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2263
2264         si = swap_info_get(entry);
2265         if (!si) {
2266                 /*
2267                  * An acceptable race has occurred since the failing
2268                  * __swap_duplicate(): the swap entry has been freed,
2269                  * perhaps even the whole swap_map cleared for swapoff.
2270                  */
2271                 goto outer;
2272         }
2273
2274         offset = swp_offset(entry);
2275         count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2276
2277         if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2278                 /*
2279                  * The higher the swap count, the more likely it is that tasks
2280                  * will race to add swap count continuation: we need to avoid
2281                  * over-provisioning.
2282                  */
2283                 goto out;
2284         }
2285
2286         if (!page) {
2287                 spin_unlock(&swap_lock);
2288                 return -ENOMEM;
2289         }
2290
2291         /*
2292          * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2293          * no architecture is using highmem pages for kernel pagetables: so it
2294          * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
2295          */
2296         head = vmalloc_to_page(si->swap_map + offset);
2297         offset &= ~PAGE_MASK;
2298
2299         /*
2300          * Page allocation does not initialize the page's lru field,
2301          * but it does always reset its private field.
2302          */
2303         if (!page_private(head)) {
2304                 BUG_ON(count & COUNT_CONTINUED);
2305                 INIT_LIST_HEAD(&head->lru);
2306                 set_page_private(head, SWP_CONTINUED);
2307                 si->flags |= SWP_CONTINUED;
2308         }
2309
2310         list_for_each_entry(list_page, &head->lru, lru) {
2311                 unsigned char *map;
2312
2313                 /*
2314                  * If the previous map said no continuation, but we've found
2315                  * a continuation page, free our allocation and use this one.
2316                  */
2317                 if (!(count & COUNT_CONTINUED))
2318                         goto out;
2319
2320                 map = kmap_atomic(list_page, KM_USER0) + offset;
2321                 count = *map;
2322                 kunmap_atomic(map, KM_USER0);
2323
2324                 /*
2325                  * If this continuation count now has some space in it,
2326                  * free our allocation and use this one.
2327                  */
2328                 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2329                         goto out;
2330         }
2331
2332         list_add_tail(&page->lru, &head->lru);
2333         page = NULL;                    /* now it's attached, don't free it */
2334 out:
2335         spin_unlock(&swap_lock);
2336 outer:
2337         if (page)
2338                 __free_page(page);
2339         return 0;
2340 }
2341
2342 /*
2343  * swap_count_continued - when the original swap_map count is incremented
2344  * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2345  * into, carry if so, or else fail until a new continuation page is allocated;
2346  * when the original swap_map count is decremented from 0 with continuation,
2347  * borrow from the continuation and report whether it still holds more.
2348  * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
2349  */
2350 static bool swap_count_continued(struct swap_info_struct *si,
2351                                  pgoff_t offset, unsigned char count)
2352 {
2353         struct page *head;
2354         struct page *page;
2355         unsigned char *map;
2356
2357         head = vmalloc_to_page(si->swap_map + offset);
2358         if (page_private(head) != SWP_CONTINUED) {
2359                 BUG_ON(count & COUNT_CONTINUED);
2360                 return false;           /* need to add count continuation */
2361         }
2362
2363         offset &= ~PAGE_MASK;
2364         page = list_entry(head->lru.next, struct page, lru);
2365         map = kmap_atomic(page, KM_USER0) + offset;
2366
2367         if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
2368                 goto init_map;          /* jump over SWAP_CONT_MAX checks */
2369
2370         if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2371                 /*
2372                  * Think of how you add 1 to 999
2373                  */
2374                 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2375                         kunmap_atomic(map, KM_USER0);
2376                         page = list_entry(page->lru.next, struct page, lru);
2377                         BUG_ON(page == head);
2378                         map = kmap_atomic(page, KM_USER0) + offset;
2379                 }
2380                 if (*map == SWAP_CONT_MAX) {
2381                         kunmap_atomic(map, KM_USER0);
2382                         page = list_entry(page->lru.next, struct page, lru);
2383                         if (page == head)
2384                                 return false;   /* add count continuation */
2385                         map = kmap_atomic(page, KM_USER0) + offset;
2386 init_map:               *map = 0;               /* we didn't zero the page */
2387                 }
2388                 *map += 1;
2389                 kunmap_atomic(map, KM_USER0);
2390                 page = list_entry(page->lru.prev, struct page, lru);
2391                 while (page != head) {
2392                         map = kmap_atomic(page, KM_USER0) + offset;
2393                         *map = COUNT_CONTINUED;
2394                         kunmap_atomic(map, KM_USER0);
2395                         page = list_entry(page->lru.prev, struct page, lru);
2396                 }
2397                 return true;                    /* incremented */
2398
2399         } else {                                /* decrementing */
2400                 /*
2401                  * Think of how you subtract 1 from 1000
2402                  */
2403                 BUG_ON(count != COUNT_CONTINUED);
2404                 while (*map == COUNT_CONTINUED) {
2405                         kunmap_atomic(map, KM_USER0);
2406                         page = list_entry(page->lru.next, struct page, lru);
2407                         BUG_ON(page == head);
2408                         map = kmap_atomic(page, KM_USER0) + offset;
2409                 }
2410                 BUG_ON(*map == 0);
2411                 *map -= 1;
2412                 if (*map == 0)
2413                         count = 0;
2414                 kunmap_atomic(map, KM_USER0);
2415                 page = list_entry(page->lru.prev, struct page, lru);
2416                 while (page != head) {
2417                         map = kmap_atomic(page, KM_USER0) + offset;
2418                         *map = SWAP_CONT_MAX | count;
2419                         count = COUNT_CONTINUED;
2420                         kunmap_atomic(map, KM_USER0);
2421                         page = list_entry(page->lru.prev, struct page, lru);
2422                 }
2423                 return count == COUNT_CONTINUED;
2424         }
2425 }
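
/*
 * In other words, the full count for a slot behaves like a multi-digit
 * number: the low digit lives in swap_map[offset], and each continuation
 * page on head->lru holds one higher digit at the same offset within the
 * page.  Incrementing past SWAP_MAP_MAX carries one into the first
 * continuation byte; decrementing through zero borrows from it, just as
 * the "add 1 to 999" / "subtract 1 from 1000" comments above suggest.
 */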
2426
2427 /*
2428  * free_swap_count_continuations - swapoff frees all the continuation pages
2429  * appended to the swap_map, once it is quiesced and before vfree'ing it.
2430  */
2431 static void free_swap_count_continuations(struct swap_info_struct *si)
2432 {
2433         pgoff_t offset;
2434
2435         for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2436                 struct page *head;
2437                 head = vmalloc_to_page(si->swap_map + offset);
2438                 if (page_private(head)) {
2439                         struct list_head *this, *next;
2440                         list_for_each_safe(this, next, &head->lru) {
2441                                 struct page *page;
2442                                 page = list_entry(this, struct page, lru);
2443                                 list_del(this);
2444                                 __free_page(page);
2445                         }
2446                 }
2447         }
2448 }
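
/*
 * Note on the loop above: swap_map holds one byte per swap slot, so each
 * PAGE_SIZE worth of offsets shares a single vmalloc'ed backing page.
 * Stepping offset by PAGE_SIZE therefore visits every backing page's head
 * exactly once, and page_private(head) is non-zero (SWP_CONTINUED) only if
 * continuation pages were ever chained onto it by
 * add_swap_count_continuation().
 */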