net/rfkill/input.c
/*
 * Input layer to RF Kill interface connector
 *
 * Copyright (c) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * If you ever run into a situation in which you have a SW_ type rfkill
 * input device, then you can revive code that was removed in the patch
 * "rfkill-input: remove unused code".
 */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>

#include "rfkill.h"

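/*
 * Policy for what the SW_RFKILL_ALL master switch should do when turned on,
 * selectable through the master_switch_mode module parameter. Turning the
 * switch off always triggers an emergency power off (EPO).
 */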
enum rfkill_input_master_mode {
        RFKILL_INPUT_MASTER_UNLOCK = 0,
        RFKILL_INPUT_MASTER_RESTORE = 1,
        RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
        NUM_RFKILL_INPUT_MASTER_MODES
};

/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200

static enum rfkill_input_master_mode rfkill_master_switch_mode =
                                        RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
        "SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");
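/*
 * State shared between the input event path and the deferred work, all
 * protected by rfkill_op_lock: rfkill_op/rfkill_op_pending describe a queued
 * global operation, rfkill_sw_pending marks rfkill types with a queued
 * per-type request, and rfkill_sw_state records whether that request should
 * complement the current global soft-block state (an odd number of presses).
 */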
static spinlock_t rfkill_op_lock;
static bool rfkill_op_pending;
static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];

enum rfkill_sched_op {
        RFKILL_GLOBAL_OP_EPO = 0,
        RFKILL_GLOBAL_OP_RESTORE,
        RFKILL_GLOBAL_OP_UNLOCK,
        RFKILL_GLOBAL_OP_UNBLOCK,
};

static enum rfkill_sched_op rfkill_master_switch_op;
static enum rfkill_sched_op rfkill_op;

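/*
 * Execute a scheduled global operation; unknown values fall back to EPO as
 * the fail-safe. Runs from the work handler with rfkill_op_lock dropped.
 */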
static void __rfkill_handle_global_op(enum rfkill_sched_op op)
{
        unsigned int i;

        switch (op) {
        case RFKILL_GLOBAL_OP_EPO:
                rfkill_epo();
                break;
        case RFKILL_GLOBAL_OP_RESTORE:
                rfkill_restore_states();
                break;
        case RFKILL_GLOBAL_OP_UNLOCK:
                rfkill_remove_epo_lock();
                break;
        case RFKILL_GLOBAL_OP_UNBLOCK:
                rfkill_remove_epo_lock();
                for (i = 0; i < NUM_RFKILL_TYPES; i++)
                        rfkill_switch_all(i, false);
                break;
        default:
                /* memory corruption or bug, fail safely */
                rfkill_epo();
                WARN(1, "Unknown requested operation %d! "
                        "rfkill Emergency Power Off activated\n",
                        op);
        }
}

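/*
 * Switch all devices of @type to the current global soft-block state, or to
 * its complement when a toggle was requested.
 */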
static void __rfkill_handle_normal_op(const enum rfkill_type type,
                                      const bool complement)
{
        bool blocked;

        blocked = rfkill_get_global_sw_state(type);
        if (complement)
                blocked = !blocked;

        rfkill_switch_all(type, blocked);
}

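/*
 * Deferred work: a pending global operation is handled first and clears any
 * per-type requests, then the per-type toggle bitmaps are processed. The
 * lock is dropped around the actual switch operations, so requests that
 * arrive in the meantime are picked up before the loop exits.
 */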
static void rfkill_op_handler(struct work_struct *work)
{
        unsigned int i;
        bool c;

        spin_lock_irq(&rfkill_op_lock);
        do {
                if (rfkill_op_pending) {
                        enum rfkill_sched_op op = rfkill_op;
                        rfkill_op_pending = false;
                        memset(rfkill_sw_pending, 0,
                                sizeof(rfkill_sw_pending));
                        spin_unlock_irq(&rfkill_op_lock);

                        __rfkill_handle_global_op(op);

                        spin_lock_irq(&rfkill_op_lock);

                        /*
                         * handle global ops first -- during unlocked period
                         * we might have gotten a new global op.
                         */
                        if (rfkill_op_pending)
                                continue;
                }

                if (rfkill_is_epo_lock_active())
                        continue;

                for (i = 0; i < NUM_RFKILL_TYPES; i++) {
                        if (__test_and_clear_bit(i, rfkill_sw_pending)) {
                                c = __test_and_clear_bit(i, rfkill_sw_state);
                                spin_unlock_irq(&rfkill_op_lock);

                                __rfkill_handle_normal_op(i, c);

                                spin_lock_irq(&rfkill_op_lock);
                        }
                }
        } while (rfkill_op_pending);
        spin_unlock_irq(&rfkill_op_lock);
}

static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
static unsigned long rfkill_last_scheduled;

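/*
 * Enforce RFKILL_OPS_DELAY between consecutive operations: returns 0 if the
 * delay since @last has already elapsed, otherwise the full delay.
 */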
static unsigned long rfkill_ratelimit(const unsigned long last)
{
        const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
        return (time_after(jiffies, last + delay)) ? 0 : delay;
}

static void rfkill_schedule_ratelimited(void)
{
        if (delayed_work_pending(&rfkill_op_work))
                return;
        schedule_delayed_work(&rfkill_op_work,
                              rfkill_ratelimit(rfkill_last_scheduled));
        rfkill_last_scheduled = jiffies;
}

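/*
 * Queue a global operation. EPO is scheduled immediately, bypassing the rate
 * limiter, unless an EPO lock is already in effect.
 */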
static void rfkill_schedule_global_op(enum rfkill_sched_op op)
{
        unsigned long flags;

        spin_lock_irqsave(&rfkill_op_lock, flags);
        rfkill_op = op;
        rfkill_op_pending = true;
        if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
                /* bypass the limiter for EPO */
                cancel_delayed_work(&rfkill_op_work);
                schedule_delayed_work(&rfkill_op_work, 0);
                rfkill_last_scheduled = jiffies;
        } else
                rfkill_schedule_ratelimited();
        spin_unlock_irqrestore(&rfkill_op_lock, flags);
}

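/*
 * Queue a soft-block toggle for one rfkill type. Requests are ignored while
 * the EPO lock is active or a global operation is pending; each request
 * flips the recorded target state, so two quick presses cancel out.
 */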
static void rfkill_schedule_toggle(enum rfkill_type type)
{
        unsigned long flags;

        if (rfkill_is_epo_lock_active())
                return;

        spin_lock_irqsave(&rfkill_op_lock, flags);
        if (!rfkill_op_pending) {
                __set_bit(type, rfkill_sw_pending);
                __change_bit(type, rfkill_sw_state);
                rfkill_schedule_ratelimited();
        }
        spin_unlock_irqrestore(&rfkill_op_lock, flags);
}

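/*
 * SW_RFKILL_ALL: "radios on" schedules the configured master switch
 * operation, "radios off" schedules an emergency power off.
 */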
static void rfkill_schedule_evsw_rfkillall(int state)
{
        if (state)
                rfkill_schedule_global_op(rfkill_master_switch_op);
        else
                rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}

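/*
 * Input event callback: rfkill hotkeys toggle the soft-block state of their
 * type, while the SW_RFKILL_ALL switch drives the master switch handling.
 */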
static void rfkill_event(struct input_handle *handle, unsigned int type,
                        unsigned int code, int data)
{
        if (type == EV_KEY && data == 1) {
                switch (code) {
                case KEY_WLAN:
                        rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
                        break;
                case KEY_BLUETOOTH:
                        rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
                        break;
                case KEY_UWB:
                        rfkill_schedule_toggle(RFKILL_TYPE_UWB);
                        break;
                case KEY_WIMAX:
                        rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
                        break;
                }
        } else if (type == EV_SW && code == SW_RFKILL_ALL)
                rfkill_schedule_evsw_rfkillall(data);
}

static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
                          const struct input_device_id *id)
{
        struct input_handle *handle;
        int error;

        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "rfkill";

        /* causes rfkill_start() to be called */
        error = input_register_handle(handle);
        if (error)
                goto err_free_handle;

        error = input_open_device(handle);
        if (error)
                goto err_unregister_handle;

        return 0;

 err_unregister_handle:
        input_unregister_handle(handle);
 err_free_handle:
        kfree(handle);
        return error;
}

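/*
 * Called when a handle is registered (see rfkill_connect()): if the device
 * provides a SW_RFKILL_ALL switch, apply its current position right away so
 * the rfkill state matches the hardware switch.
 */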
static void rfkill_start(struct input_handle *handle)
{
        /*
         * Take event_lock to guard against configuration changes, we
         * should be able to deal with concurrency with rfkill_event()
         * just fine (which event_lock will also avoid).
         */
        spin_lock_irq(&handle->dev->event_lock);

        if (test_bit(EV_SW, handle->dev->evbit) &&
            test_bit(SW_RFKILL_ALL, handle->dev->swbit))
                rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
                                                        handle->dev->sw));

        spin_unlock_irq(&handle->dev->event_lock);
}

static void rfkill_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}

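/*
 * Match any input device that advertises one of the rfkill hotkeys
 * (KEY_WLAN, KEY_BLUETOOTH, KEY_UWB, KEY_WIMAX) or the SW_RFKILL_ALL switch.
 */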
static const struct input_device_id rfkill_ids[] = {
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
                .evbit = { BIT(EV_SW) },
                .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
        },
        { }
};

static struct input_handler rfkill_handler = {
        .name = "rfkill",
        .event = rfkill_event,
        .connect = rfkill_connect,
        .start = rfkill_start,
        .disconnect = rfkill_disconnect,
        .id_table = rfkill_ids,
};

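/*
 * Map the module parameter onto a global operation, initialize the lock and
 * pre-age the rate-limiter timestamp so the first operation is not delayed,
 * then register the input handler. An unknown mode rejects the module load.
 */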
int __init rfkill_handler_init(void)
{
        switch (rfkill_master_switch_mode) {
        case RFKILL_INPUT_MASTER_UNBLOCKALL:
                rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
                break;
        case RFKILL_INPUT_MASTER_RESTORE:
                rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
                break;
        case RFKILL_INPUT_MASTER_UNLOCK:
                rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
                break;
        default:
                return -EINVAL;
        }

        spin_lock_init(&rfkill_op_lock);

        /* Avoid delay at first schedule */
        rfkill_last_scheduled =
                        jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
        return input_register_handler(&rfkill_handler);
}

void __exit rfkill_handler_exit(void)
{
        input_unregister_handler(&rfkill_handler);
        cancel_delayed_work_sync(&rfkill_op_work);
}