arch/s390/mm/page-states.c (from linux-2.6.git) — guest page hinting for unused pages
1 /*
2  * Copyright IBM Corp. 2008
3  *
4  * Guest page hinting for unused pages.
5  *
6  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7  */
8
9 #include <linux/kernel.h>
10 #include <linux/errno.h>
11 #include <linux/types.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14
15 #define ESSA_SET_STABLE         1
16 #define ESSA_SET_UNUSED         2
17
18 static int cmma_flag = 1;
19
20 static int __init cmma(char *str)
21 {
22         char *parm;
23
24         parm = strstrip(str);
25         if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
26                 cmma_flag = 1;
27                 return 1;
28         }
29         cmma_flag = 0;
30         if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
31                 return 1;
32         return 0;
33 }
34 __setup("cmma=", cmma);
35
/*
 * Probe for the ESSA (Extract and Set Storage Attributes) instruction.
 *
 * rc starts at -EOPNOTSUPP; if ESSA executes, the following "la" clears
 * it to 0.  If ESSA is not available the instruction generates a program
 * check and the EX_TABLE fixup branches past the "la", leaving rc
 * non-zero, in which case CMMA is disabled for the rest of the boot.
 * Registers 0/1 are pinned because the extable fixup must not disturb
 * the compiler's register assignment.
 */
void __init cmma_init(void)
{
        register unsigned long tmp asm("0") = 0;
        register int rc asm("1") = -EOPNOTSUPP;

        /* nothing to probe if CMMA was disabled on the command line */
        if (!cmma_flag)
                return;
        asm volatile(
                "       .insn rrf,0xb9ab0000,%1,%1,0,0\n"
                "0:     la      %0,0\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+&d" (rc), "+&d" (tmp));
        if (rc)
                cmma_flag = 0;
}
52
/*
 * Mark each 4K page of an order-sized block as "unused" via ESSA,
 * hinting to the hypervisor that the contents may be discarded.
 * rc receives the previous page state and is intentionally ignored.
 */
static inline void set_page_unstable(struct page *page, int order)
{
        int i, rc;

        for (i = 0; i < (1 << order); i++)
                asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
                             : "=&d" (rc)
                             : "a" (page_to_phys(page + i)),
                               "i" (ESSA_SET_UNUSED));
}
63
64 void arch_free_page(struct page *page, int order)
65 {
66         if (!cmma_flag)
67                 return;
68         set_page_unstable(page, order);
69 }
70
/*
 * Mark each 4K page of an order-sized block as "stable" via ESSA,
 * so the hypervisor must preserve its contents again.
 * rc receives the previous page state and is intentionally ignored.
 */
static inline void set_page_stable(struct page *page, int order)
{
        int i, rc;

        for (i = 0; i < (1 << order); i++)
                asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
                             : "=&d" (rc)
                             : "a" (page_to_phys(page + i)),
                               "i" (ESSA_SET_STABLE));
}
81
82 void arch_alloc_page(struct page *page, int order)
83 {
84         if (!cmma_flag)
85                 return;
86         set_page_stable(page, order);
87 }
88
89 void arch_set_page_states(int make_stable)
90 {
91         unsigned long flags, order, t;
92         struct list_head *l;
93         struct page *page;
94         struct zone *zone;
95
96         if (!cmma_flag)
97                 return;
98         if (make_stable)
99                 drain_local_pages(NULL);
100         for_each_populated_zone(zone) {
101                 spin_lock_irqsave(&zone->lock, flags);
102                 for_each_migratetype_order(order, t) {
103                         list_for_each(l, &zone->free_area[order].free_list[t]) {
104                                 page = list_entry(l, struct page, lru);
105                                 if (make_stable)
106                                         set_page_stable(page, order);
107                                 else
108                                         set_page_unstable(page, order);
109                         }
110                 }
111                 spin_unlock_irqrestore(&zone->lock, flags);
112         }
113 }