/*
 * Input layer to RF Kill interface connector
 *
 * Copyright (c) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * If you ever run into a situation in which you have a SW_ type rfkill
 * input device, then you can revive code that was removed in the patch
 * "rfkill-input: remove unused code".
 */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>

#include "rfkill.h"

enum rfkill_input_master_mode {
        RFKILL_INPUT_MASTER_UNLOCK = 0,
        RFKILL_INPUT_MASTER_RESTORE = 1,
        RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
        NUM_RFKILL_INPUT_MASTER_MODES
};

/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200

static enum rfkill_input_master_mode rfkill_master_switch_mode =
                                        RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
        "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");

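/*
 * rfkill_op_lock protects all of the state below.  rfkill_sw_pending
 * marks rfkill types with a queued per-type operation; the matching
 * bit in rfkill_sw_state tells the worker whether to toggle the
 * current global state or merely re-assert it.
 */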
static spinlock_t rfkill_op_lock;
static bool rfkill_op_pending;
static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];

enum rfkill_sched_op {
        RFKILL_GLOBAL_OP_EPO = 0,
        RFKILL_GLOBAL_OP_RESTORE,
        RFKILL_GLOBAL_OP_UNLOCK,
        RFKILL_GLOBAL_OP_UNBLOCK,
};

static enum rfkill_sched_op rfkill_master_switch_op;
static enum rfkill_sched_op rfkill_op;

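/*
 * Execute a queued global operation.  Runs from the workqueue with
 * rfkill_op_lock dropped; an unknown op is treated as a bug and fails
 * safe by forcing an emergency power off.
 */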
static void __rfkill_handle_global_op(enum rfkill_sched_op op)
{
        unsigned int i;

        switch (op) {
        case RFKILL_GLOBAL_OP_EPO:
                rfkill_epo();
                break;
        case RFKILL_GLOBAL_OP_RESTORE:
                rfkill_restore_states();
                break;
        case RFKILL_GLOBAL_OP_UNLOCK:
                rfkill_remove_epo_lock();
                break;
        case RFKILL_GLOBAL_OP_UNBLOCK:
                rfkill_remove_epo_lock();
                for (i = 0; i < NUM_RFKILL_TYPES; i++)
                        rfkill_switch_all(i, false);
                break;
        default:
                /* memory corruption or bug, fail safely */
                rfkill_epo();
                WARN(1, "Unknown requested operation %d! "
                        "rfkill Emergency Power Off activated\n",
                        op);
        }
}

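/*
 * Apply a per-type operation: switch all devices of @type to the
 * current global soft-block state, or to its complement when
 * @complement is set.
 */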
static void __rfkill_handle_normal_op(const enum rfkill_type type,
                                      const bool complement)
{
        bool blocked;

        blocked = rfkill_get_global_sw_state(type);
        if (complement)
                blocked = !blocked;

        rfkill_switch_all(type, blocked);
}

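/*
 * Delayed-work handler.  Global operations are serviced first and
 * re-checked after the lock is re-taken, since a new one may have been
 * queued while the lock was dropped; per-type toggles are then applied
 * unless an EPO lock is still active.
 */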
static void rfkill_op_handler(struct work_struct *work)
{
        unsigned int i;
        bool c;

        spin_lock_irq(&rfkill_op_lock);
        do {
                if (rfkill_op_pending) {
                        enum rfkill_sched_op op = rfkill_op;
                        rfkill_op_pending = false;
                        memset(rfkill_sw_pending, 0,
                                sizeof(rfkill_sw_pending));
                        spin_unlock_irq(&rfkill_op_lock);

                        __rfkill_handle_global_op(op);

                        spin_lock_irq(&rfkill_op_lock);

                        /*
                         * handle global ops first -- during unlocked period
                         * we might have gotten a new global op.
                         */
                        if (rfkill_op_pending)
                                continue;
                }

                if (rfkill_is_epo_lock_active())
                        continue;

                for (i = 0; i < NUM_RFKILL_TYPES; i++) {
                        if (__test_and_clear_bit(i, rfkill_sw_pending)) {
                                c = __test_and_clear_bit(i, rfkill_sw_state);
                                spin_unlock_irq(&rfkill_op_lock);

                                __rfkill_handle_normal_op(i, c);

                                spin_lock_irq(&rfkill_op_lock);
                        }
                }
        } while (rfkill_op_pending);
        spin_unlock_irq(&rfkill_op_lock);
}

static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
static unsigned long rfkill_last_scheduled;

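/*
 * Rate-limit helper: returns 0 if at least RFKILL_OPS_DELAY ms have
 * passed since @last, otherwise a full RFKILL_OPS_DELAY in jiffies.
 */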
static unsigned long rfkill_ratelimit(const unsigned long last)
{
        const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
        return time_after(jiffies, last + delay) ? 0 : delay;
}

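/* Schedule the worker, rate-limited.  Called with rfkill_op_lock held. */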
static void rfkill_schedule_ratelimited(void)
{
        if (delayed_work_pending(&rfkill_op_work))
                return;
        schedule_delayed_work(&rfkill_op_work,
                              rfkill_ratelimit(rfkill_last_scheduled));
        rfkill_last_scheduled = jiffies;
}

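/*
 * Queue a global operation.  EPO bypasses the rate limiter unless an
 * EPO lock is already active; everything else is rate-limited.
 */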
static void rfkill_schedule_global_op(enum rfkill_sched_op op)
{
        unsigned long flags;

        spin_lock_irqsave(&rfkill_op_lock, flags);
        rfkill_op = op;
        rfkill_op_pending = true;
        if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
                /* bypass the limiter for EPO */
                cancel_delayed_work(&rfkill_op_work);
                schedule_delayed_work(&rfkill_op_work, 0);
                rfkill_last_scheduled = jiffies;
        } else
                rfkill_schedule_ratelimited();
        spin_unlock_irqrestore(&rfkill_op_lock, flags);
}

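/*
 * Queue a soft-block toggle for @type.  Ignored while an EPO lock is
 * active or a global operation is pending, since those take precedence.
 */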
static void rfkill_schedule_toggle(enum rfkill_type type)
{
        unsigned long flags;

        if (rfkill_is_epo_lock_active())
                return;

        spin_lock_irqsave(&rfkill_op_lock, flags);
        if (!rfkill_op_pending) {
                __set_bit(type, rfkill_sw_pending);
                __change_bit(type, rfkill_sw_state);
                rfkill_schedule_ratelimited();
        }
        spin_unlock_irqrestore(&rfkill_op_lock, flags);
}

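/*
 * SW_RFKILL_ALL "radios on" maps to the configured master switch
 * operation, "radios off" to an emergency power off.
 */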
static void rfkill_schedule_evsw_rfkillall(int state)
{
        if (state)
                rfkill_schedule_global_op(rfkill_master_switch_op);
        else
                rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}

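/*
 * Input event callback: translate rfkill hotkey presses and
 * SW_RFKILL_ALL changes into scheduled operations.
 */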
static void rfkill_event(struct input_handle *handle, unsigned int type,
                        unsigned int code, int data)
{
        if (type == EV_KEY && data == 1) {
                switch (code) {
                case KEY_WLAN:
                        rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
                        break;
                case KEY_BLUETOOTH:
                        rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
                        break;
                case KEY_UWB:
                        rfkill_schedule_toggle(RFKILL_TYPE_UWB);
                        break;
                case KEY_WIMAX:
                        rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
                        break;
                case KEY_RFKILL:
                        rfkill_schedule_toggle(RFKILL_TYPE_ALL);
                        break;
                }
        } else if (type == EV_SW && code == SW_RFKILL_ALL)
                rfkill_schedule_evsw_rfkillall(data);
}

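/* Attach to a matched input device and start receiving its events. */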
static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
                          const struct input_device_id *id)
{
        struct input_handle *handle;
        int error;

        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "rfkill";

        /* causes rfkill_start() to be called */
        error = input_register_handle(handle);
        if (error)
                goto err_free_handle;

        error = input_open_device(handle);
        if (error)
                goto err_unregister_handle;

        return 0;

 err_unregister_handle:
        input_unregister_handle(handle);
 err_free_handle:
        kfree(handle);
        return error;
}

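/*
 * Called by the input core when the handle is registered; sync with
 * the device's current SW_RFKILL_ALL position, if it has one.
 */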
static void rfkill_start(struct input_handle *handle)
{
        /*
         * Take event_lock to guard against configuration changes.
         * Concurrency with rfkill_event() would be fine anyway, and
         * holding event_lock rules it out here.
         */
        spin_lock_irq(&handle->dev->event_lock);

        if (test_bit(EV_SW, handle->dev->evbit) &&
            test_bit(SW_RFKILL_ALL, handle->dev->swbit))
                rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
                                                        handle->dev->sw));

        spin_unlock_irq(&handle->dev->event_lock);
}

static void rfkill_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}

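/*
 * Match any device that reports one of the rfkill hotkeys or the
 * SW_RFKILL_ALL switch.
 */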
static const struct input_device_id rfkill_ids[] = {
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
                .evbit = { BIT(EV_SW) },
                .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
        },
        { }
};

static struct input_handler rfkill_handler = {
        .name = "rfkill",
        .event = rfkill_event,
        .connect = rfkill_connect,
        .start = rfkill_start,
        .disconnect = rfkill_disconnect,
        .id_table = rfkill_ids,
};

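/*
 * Translate the master_switch_mode parameter into the operation run
 * when SW_RFKILL_ALL switches to "radios on", then register the
 * input handler.
 */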
int __init rfkill_handler_init(void)
{
        switch (rfkill_master_switch_mode) {
        case RFKILL_INPUT_MASTER_UNBLOCKALL:
                rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
                break;
        case RFKILL_INPUT_MASTER_RESTORE:
                rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
                break;
        case RFKILL_INPUT_MASTER_UNLOCK:
                rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
                break;
        default:
                return -EINVAL;
        }

        spin_lock_init(&rfkill_op_lock);

        /* Avoid delay at first schedule */
        rfkill_last_scheduled =
                        jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
        return input_register_handler(&rfkill_handler);
}

void __exit rfkill_handler_exit(void)
{
        input_unregister_handler(&rfkill_handler);
        cancel_delayed_work_sync(&rfkill_op_work);
}