[PATCH] oprofile: Use vmalloc_node() in alloc_cpu_buffers()
[linux-2.6.git] / drivers / oprofile / event_buffer.c
1 /**
2  * @file event_buffer.c
3  *
4  * @remark Copyright 2002 OProfile authors
5  * @remark Read the file COPYING
6  *
7  * @author John Levon <levon@movementarian.org>
8  *
9  * This is the global event buffer that the user-space
10  * daemon reads from. The event buffer is an untyped array
11  * of unsigned longs. Entries are prefixed by the
12  * escape value ESCAPE_CODE followed by an identifying code.
13  */
14
15 #include <linux/vmalloc.h>
16 #include <linux/oprofile.h>
17 #include <linux/sched.h>
18 #include <linux/dcookies.h>
19 #include <linux/fs.h>
20 #include <asm/uaccess.h>
21  
22 #include "oprof.h"
23 #include "event_buffer.h"
24 #include "oprofile_stats.h"
25
/* Serializes access to the event buffer and the bookkeeping below.
 * DECLARE_MUTEX creates a semaphore used as a mutex (pre-2.6.26
 * kernel idiom); also taken by readers in event_buffer_read(). */
DECLARE_MUTEX(buffer_sem);
 
static unsigned long buffer_opened;	/* bit 0 set while the event file is open */
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);	/* readers sleep here until buffer_ready */
static unsigned long * event_buffer;	/* vmalloc'd array of buffer_size entries */
static unsigned long buffer_size;	/* capacity, in unsigned longs */
static unsigned long buffer_watershed;	/* wake readers once this much headroom is left */
static size_t buffer_pos;		/* next free slot in event_buffer */
/* atomic_t because wait_event checks it outside of buffer_sem */
static atomic_t buffer_ready = ATOMIC_INIT(0);
36
37 /* Add an entry to the event buffer. When we
38  * get near to the end we wake up the process
39  * sleeping on the read() of the file.
40  */
41 void add_event_entry(unsigned long value)
42 {
43         if (buffer_pos == buffer_size) {
44                 atomic_inc(&oprofile_stats.event_lost_overflow);
45                 return;
46         }
47
48         event_buffer[buffer_pos] = value;
49         if (++buffer_pos == buffer_size - buffer_watershed) {
50                 atomic_set(&buffer_ready, 1);
51                 wake_up(&buffer_wait);
52         }
53 }
54
55
/* Wake up the waiting process if any. This happens
 * on "echo 0 >/dev/oprofile/enable" so the daemon
 * processes the data remaining in the event buffer.
 */
void wake_up_buffer_waiter(void)
{
	/* buffer_sem is held across the flag/wakeup — presumably to
	 * serialize with event_buffer_read(), which also takes it
	 * before consuming buffer_ready. */
	down(&buffer_sem);
	atomic_set(&buffer_ready, 1);
	wake_up(&buffer_wait);
	up(&buffer_sem);
}
67
68  
69 int alloc_event_buffer(void)
70 {
71         int err = -ENOMEM;
72
73         spin_lock(&oprofilefs_lock);
74         buffer_size = fs_buffer_size;
75         buffer_watershed = fs_buffer_watershed;
76         spin_unlock(&oprofilefs_lock);
77  
78         if (buffer_watershed >= buffer_size)
79                 return -EINVAL;
80  
81         event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
82         if (!event_buffer)
83                 goto out; 
84
85         err = 0;
86 out:
87         return err;
88 }
89
90
91 void free_event_buffer(void)
92 {
93         vfree(event_buffer);
94 }
95
96  
/* open() handler for the event file: root-only, single opener.
 * On failure every acquired resource is unwound in reverse order. */
static int event_buffer_open(struct inode * inode, struct file * file)
{
	int err = -EPERM;
 
	/* The buffer exposes kernel-wide profiling data: root only. */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
 
	/* Bit 0 of buffer_opened is the "in use" flag; it is cleared
	 * again in event_buffer_release() or on the failure paths. */
	if (test_and_set_bit(0, &buffer_opened))
		return -EBUSY;
 
	/* Register as a user of dcookies
	 * to ensure they persist for the lifetime of
	 * the open event file
	 */
	err = -EINVAL;
	file->private_data = dcookie_register();
	if (!file->private_data)
		goto out;
 
	if ((err = oprofile_setup()))
		goto fail;
 
	/* NB: the actual start happens from userspace
	 * echo 1 >/dev/oprofile/enable
	 */
 
	return 0;
 
fail:
	dcookie_unregister(file->private_data);
out:
	clear_bit(0, &buffer_opened);
	return err;
}
131
132
/* release() handler: mirror of event_buffer_open().  Call order
 * matters — stop profiling before shutting oprofile down, then drop
 * the dcookie registration taken at open time. */
static int event_buffer_release(struct inode * inode, struct file * file)
{
	oprofile_stop();
	oprofile_shutdown();
	dcookie_unregister(file->private_data);
	/* Reset buffer state so the next opener starts empty. */
	buffer_pos = 0;
	atomic_set(&buffer_ready, 0);
	/* Allow another opener. */
	clear_bit(0, &buffer_opened);
	return 0;
}
143
144
/* read() handler: blocks until the buffer is ready, then drains the
 * whole buffer in one shot.  Callers must always ask for exactly the
 * full buffer size at offset 0. */
static ssize_t event_buffer_read(struct file * file, char __user * buf,
                                 size_t count, loff_t * offset)
{
	int retval = -EINVAL;
	size_t const max = buffer_size * sizeof(unsigned long);
 
	/* handling partial reads is more trouble than it's worth */
	if (count != max || *offset)
		return -EINVAL;
 
	/* buffer_ready is an atomic_t because this check runs outside
	 * buffer_sem (see its declaration). */
	wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));
 
	if (signal_pending(current))
		return -EINTR;
 
	/* can't currently happen */
	if (!atomic_read(&buffer_ready))
		return -EAGAIN;
 
	down(&buffer_sem);
 
	atomic_set(&buffer_ready, 0);
 
	retval = -EFAULT;
 
	/* Copy out only the words actually buffered, not max. */
	count = buffer_pos * sizeof(unsigned long);
 
	/* NOTE(review): event_buffer is dereferenced under buffer_sem
	 * here, but free_event_buffer() does not take buffer_sem —
	 * verify teardown ordering guarantees the buffer outlives any
	 * in-flight read. */
	if (copy_to_user(buf, event_buffer, count))
		goto out;
 
	retval = count;
	buffer_pos = 0;
 
out:
	up(&buffer_sem);
	return retval;
}
182  
/* File operations for the event file.  No llseek/write: the daemon
 * only open()s, read()s and close()s the device. */
struct file_operations event_buffer_fops = {
	.open           = event_buffer_open,
	.release        = event_buffer_release,
	.read           = event_buffer_read,
};