block: split tag and sysfs handling from blk-core.c
block/blk-sysfs.c
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

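/*
 * Helpers shared by the attribute handlers below: queue_var_show() formats a
 * single numeric value for sysfs, queue_var_store() parses one back from the
 * user-supplied buffer.
 */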
static ssize_t
queue_var_show(unsigned int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}

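/*
 * Writing nr_requests resizes the request lists: the new value is clamped to
 * BLKDEV_MIN_RQ, the congestion thresholds are recomputed, and the congested
 * and "queue full" state is re-evaluated for both the READ and WRITE lists,
 * waking any tasks that were waiting for a free request.
 */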
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        struct request_list *rl = &q->rq;
        unsigned long nr;
        int ret = queue_var_store(&nr, page, count);
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        spin_lock_irq(q->queue_lock);
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);

        if (rl->count[READ] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, READ);
        else if (rl->count[READ] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, READ);

        if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, WRITE);
        else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, WRITE);

        if (rl->count[READ] >= q->nr_requests) {
                blk_set_queue_full(q, READ);
        } else if (rl->count[READ]+1 <= q->nr_requests) {
                blk_clear_queue_full(q, READ);
                wake_up(&rl->wait[READ]);
        }

        if (rl->count[WRITE] >= q->nr_requests) {
                blk_set_queue_full(q, WRITE);
        } else if (rl->count[WRITE]+1 <= q->nr_requests) {
                blk_clear_queue_full(q, WRITE);
                wake_up(&rl->wait[WRITE]);
        }
        spin_unlock_irq(q->queue_lock);
        return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);

        return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        spin_lock_irq(q->queue_lock);
        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

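/*
 * max_sectors_kb and max_hw_sectors_kb are exposed in kilobytes; since a
 * sector is 512 bytes, the conversion to and from sectors is a shift by one.
 * Writes are bounded below by the page size and above by the hardware limit.
 */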
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = q->max_sectors >> 1;

        return queue_var_show(max_sectors_kb, (page));
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                        max_hw_sectors_kb = q->max_hw_sectors >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;
        /*
         * Take the queue lock to update the readahead and max_sectors
         * values synchronously:
         */
        spin_lock_irq(q->queue_lock);
        q->max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = q->max_hw_sectors >> 1;

        return queue_var_show(max_hw_sectors_kb, (page));
}

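/*
 * Each queue_sysfs_entry below binds an attribute name and permission mode to
 * its show/store handlers.  Writable attributes take S_IWUSR in addition to
 * S_IRUGO; max_hw_sectors_kb is read-only.  The entries are exposed by
 * listing them in default_attrs[], which blk_queue_ktype installs on the
 * queue's kobject.
 */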
static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_iosched_entry.attr,
        NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

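/*
 * Generic show/store dispatch: recover the queue_sysfs_entry from the
 * attribute and the request_queue from the kobject, then call the entry's
 * handler under q->sysfs_lock.  A queue that has been marked dead returns
 * -ENOENT instead.
 */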
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->store)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It is invoked, via the queue's kobject
 *     release handler, when a request queue is being released; typically
 *     when a block device is being de-registered.  Currently, its primary
 *     task is to free all the &struct request structures that were
 *     allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;

        blk_sync_queue(q);

        if (rl->rq_pool)
                mempool_destroy(rl->rq_pool);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        blk_trace_shutdown(q);

        bdi_destroy(&q->backing_dev_info);
        kmem_cache_free(blk_requestq_cachep, q);
}

static struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = blk_release_queue,
};

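/*
 * Register the "queue" directory for a disk under its device kobject and add
 * the elevator's sysfs entries.  Only request-based queues (those with a
 * ->request_fn) are registered here.  On failure, the queue kobject and the
 * reference taken on the disk's kobject are released again.
 */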
int blk_register_queue(struct gendisk *disk)
{
        int ret;

        struct request_queue *q = disk->queue;

        if (!q || !q->request_fn)
                return -ENXIO;

        ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
                          "%s", "queue");
        if (ret < 0) {
                kobject_put(&disk->dev.kobj);
                return ret;
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        ret = elv_register_queue(q);
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                kobject_put(&disk->dev.kobj);
                return ret;
        }

        return 0;
}

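/*
 * Tear down in the reverse order of registration: the elevator entries first,
 * then the queue kobject itself, finally dropping the reference on the disk's
 * kobject that was taken in blk_register_queue().
 */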
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (q && q->request_fn) {
                elv_unregister_queue(q);

                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                kobject_put(&disk->dev.kobj);
        }
}