/*
 * Input layer to RF Kill interface connector
 *
 * Copyright (c) 2007 Dmitry Torokhov
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>

#include "rfkill-input.h"

MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
MODULE_DESCRIPTION("Input layer to RF switch connector");
MODULE_LICENSE("GPL");

enum rfkill_input_master_mode {
        RFKILL_INPUT_MASTER_DONOTHING = 0,
        RFKILL_INPUT_MASTER_RESTORE = 1,
        RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
        RFKILL_INPUT_MASTER_MAX,        /* marker */
};

/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200

static enum rfkill_input_master_mode rfkill_master_switch_mode =
                RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
        "SW_RFKILL_ALL ON should: 0=do nothing; 1=restore; 2=unblock all");
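
/*
 * Usage example (the exact module name depends on the kernel build;
 * upstream builds this code as the rfkill-input module):
 *
 *   modprobe rfkill-input master_switch_mode=1
 *
 * makes SW_RFKILL_ALL ON restore the per-switch states saved before the
 * last emergency power off, instead of unblocking every switch.
 */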

enum rfkill_global_sched_op {
        RFKILL_GLOBAL_OP_EPO = 0,
        RFKILL_GLOBAL_OP_RESTORE,
        RFKILL_GLOBAL_OP_UNLOCK,
        RFKILL_GLOBAL_OP_UNBLOCK,
};

/*
 * Currently, the code marked with RFKILL_NEED_SWSET is inactive.
 * If handling of EV_SW SW_WLAN/WWAN/BLUETOOTH/etc is needed in the
 * future, when such events are added, that code will be necessary.
 */

struct rfkill_task {
        struct delayed_work dwork;

        /* ensures that task is serialized */
        struct mutex mutex;

        /* protects everything below */
        spinlock_t lock;

        /* pending regular switch operations (1=pending) */
        unsigned long sw_pending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];

#ifdef RFKILL_NEED_SWSET
        /* set operation pending (1=pending) */
        unsigned long sw_setpending[BITS_TO_LONGS(RFKILL_TYPE_MAX)];

        /* desired state for pending set operation (1=unblock) */
        unsigned long sw_newstate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
#endif

        /* should the state be complemented (1=yes) */
        unsigned long sw_togglestate[BITS_TO_LONGS(RFKILL_TYPE_MAX)];

        bool global_op_pending;
        enum rfkill_global_sched_op op;

        /* last time it was scheduled */
        unsigned long last_scheduled;
};

static void __rfkill_handle_global_op(enum rfkill_global_sched_op op)
{
        unsigned int i;

        switch (op) {
        case RFKILL_GLOBAL_OP_EPO:
                rfkill_epo();
                break;
        case RFKILL_GLOBAL_OP_RESTORE:
                rfkill_restore_states();
                break;
        case RFKILL_GLOBAL_OP_UNLOCK:
                rfkill_remove_epo_lock();
                break;
        case RFKILL_GLOBAL_OP_UNBLOCK:
                rfkill_remove_epo_lock();
                for (i = 0; i < RFKILL_TYPE_MAX; i++)
                        rfkill_switch_all(i, RFKILL_STATE_UNBLOCKED);
                break;
        default:
                /* memory corruption or bug, fail safely */
                rfkill_epo();
                WARN(1, "Unknown requested operation %d! "
                        "rfkill Emergency Power Off activated\n", op);
        }
}

#ifdef RFKILL_NEED_SWSET
static void __rfkill_handle_normal_op(const enum rfkill_type type,
                                      const bool sp, const bool s, const bool c)
{
        enum rfkill_state state;

        if (sp)
                state = (s) ? RFKILL_STATE_UNBLOCKED :
                              RFKILL_STATE_SOFT_BLOCKED;
        else
                state = rfkill_get_global_state(type);

        if (c)
                state = rfkill_state_complement(state);

        rfkill_switch_all(type, state);
}
#else
static void __rfkill_handle_normal_op(const enum rfkill_type type,
                                      const bool c)
{
        enum rfkill_state state;

        state = rfkill_get_global_state(type);
        if (c)
                state = rfkill_state_complement(state);

        rfkill_switch_all(type, state);
}
#endif
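
/*
 * Worker that drains all pending rfkill operations.  A pending global
 * operation (EPO, restore, unlock, unblock-all) is handled first and
 * discards any queued per-type toggles; otherwise, unless the EPO lock
 * is active, each rfkill type with a pending bit has its global state
 * (optionally complemented) pushed out via rfkill_switch_all().  The
 * loop repeats until no global operation was scheduled while we ran.
 */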

static void rfkill_task_handler(struct work_struct *work)
{
        struct rfkill_task *task = container_of(work,
                                        struct rfkill_task, dwork.work);
        bool doit = true;

        mutex_lock(&task->mutex);

        spin_lock_irq(&task->lock);
        while (doit) {
                if (task->global_op_pending) {
                        enum rfkill_global_sched_op op = task->op;

                        task->global_op_pending = false;
                        memset(task->sw_pending, 0, sizeof(task->sw_pending));
                        spin_unlock_irq(&task->lock);

                        __rfkill_handle_global_op(op);

                        /* make sure we do at least one pass with
                         * !task->global_op_pending */
                        spin_lock_irq(&task->lock);
                        continue;
                } else if (!rfkill_is_epo_lock_active()) {
                        unsigned int i = 0;

                        while (!task->global_op_pending &&
                               i < RFKILL_TYPE_MAX) {
                                if (test_and_clear_bit(i, task->sw_pending)) {
                                        bool c;
#ifdef RFKILL_NEED_SWSET
                                        bool sp, s;

                                        sp = test_and_clear_bit(i,
                                                        task->sw_setpending);
                                        s = test_bit(i, task->sw_newstate);
#endif
                                        c = test_and_clear_bit(i,
                                                        task->sw_togglestate);
                                        spin_unlock_irq(&task->lock);

#ifdef RFKILL_NEED_SWSET
                                        __rfkill_handle_normal_op(i, sp, s, c);
#else
                                        __rfkill_handle_normal_op(i, c);
#endif

                                        spin_lock_irq(&task->lock);
                                }
                                i++;
                        }
                }
                doit = task->global_op_pending;
        }
        spin_unlock_irq(&task->lock);

        mutex_unlock(&task->mutex);
}

static struct rfkill_task rfkill_task = {
        .dwork = __DELAYED_WORK_INITIALIZER(rfkill_task.dwork,
                                            rfkill_task_handler),
        .mutex = __MUTEX_INITIALIZER(rfkill_task.mutex),
        .lock  = __SPIN_LOCK_UNLOCKED(rfkill_task.lock),
};
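
/*
 * Rate limiting: returns 0 (schedule immediately) if at least
 * RFKILL_OPS_DELAY ms have passed since @last, otherwise a full
 * RFKILL_OPS_DELAY worth of jiffies so back-to-back requests are
 * spread out.
 */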

static unsigned long rfkill_ratelimit(const unsigned long last)
{
        const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);

        return (time_after(jiffies, last + delay)) ? 0 : delay;
}

static void rfkill_schedule_ratelimited(void)
{
        if (!delayed_work_pending(&rfkill_task.dwork)) {
                schedule_delayed_work(&rfkill_task.dwork,
                                rfkill_ratelimit(rfkill_task.last_scheduled));
                rfkill_task.last_scheduled = jiffies;
        }
}

static void rfkill_schedule_global_op(enum rfkill_global_sched_op op)
{
        unsigned long flags;

        spin_lock_irqsave(&rfkill_task.lock, flags);
        rfkill_task.op = op;
        rfkill_task.global_op_pending = true;
        if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
                /* bypass the limiter for EPO */
                cancel_delayed_work(&rfkill_task.dwork);
                schedule_delayed_work(&rfkill_task.dwork, 0);
                rfkill_task.last_scheduled = jiffies;
        } else
                rfkill_schedule_ratelimited();
        spin_unlock_irqrestore(&rfkill_task.lock, flags);
}

#ifdef RFKILL_NEED_SWSET
/* Use this if you need to add EV_SW SW_WLAN/WWAN/BLUETOOTH/etc handling */
static void rfkill_schedule_set(enum rfkill_type type,
                                enum rfkill_state desired_state)
{
        unsigned long flags;

        if (rfkill_is_epo_lock_active())
                return;

        spin_lock_irqsave(&rfkill_task.lock, flags);
        if (!rfkill_task.global_op_pending) {
                set_bit(type, rfkill_task.sw_pending);
                set_bit(type, rfkill_task.sw_setpending);
                clear_bit(type, rfkill_task.sw_togglestate);
                if (desired_state)
                        set_bit(type, rfkill_task.sw_newstate);
                else
                        clear_bit(type, rfkill_task.sw_newstate);
                rfkill_schedule_ratelimited();
        }
        spin_unlock_irqrestore(&rfkill_task.lock, flags);
}
#endif
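
/*
 * Queue a "toggle" for one rfkill type.  The toggle bit is flipped with
 * change_bit(), so two key presses landing in the same rate-limit window
 * cancel out and only the net effect is applied by the worker.  Requests
 * are ignored while the EPO lock is active or a global op is pending.
 */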

static void rfkill_schedule_toggle(enum rfkill_type type)
{
        unsigned long flags;

        if (rfkill_is_epo_lock_active())
                return;

        spin_lock_irqsave(&rfkill_task.lock, flags);
        if (!rfkill_task.global_op_pending) {
                set_bit(type, rfkill_task.sw_pending);
                change_bit(type, rfkill_task.sw_togglestate);
                rfkill_schedule_ratelimited();
        }
        spin_unlock_irqrestore(&rfkill_task.lock, flags);
}

static void rfkill_schedule_evsw_rfkillall(int state)
{
        if (state) {
                switch (rfkill_master_switch_mode) {
                case RFKILL_INPUT_MASTER_UNBLOCKALL:
                        rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNBLOCK);
                        break;
                case RFKILL_INPUT_MASTER_RESTORE:
                        rfkill_schedule_global_op(RFKILL_GLOBAL_OP_RESTORE);
                        break;
                case RFKILL_INPUT_MASTER_DONOTHING:
                        rfkill_schedule_global_op(RFKILL_GLOBAL_OP_UNLOCK);
                        break;
                default:
                        /* memory corruption or driver bug! fail safely */
                        rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
                        WARN(1, "Unknown rfkill_master_switch_mode (%d), "
                                "driver bug or memory corruption detected!\n",
                                rfkill_master_switch_mode);
                        break;
                }
        } else
                rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}
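
/*
 * Input event callback: key-down events (data == 1) for the per-radio
 * rfkill keys queue a toggle for the matching rfkill type, while
 * EV_SW/SW_RFKILL_ALL events drive the master-switch handling above.
 */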

static void rfkill_event(struct input_handle *handle, unsigned int type,
                         unsigned int code, int data)
{
        if (type == EV_KEY && data == 1) {
                enum rfkill_type t;

                switch (code) {
                case KEY_WLAN:
                        t = RFKILL_TYPE_WLAN;
                        break;
                case KEY_BLUETOOTH:
                        t = RFKILL_TYPE_BLUETOOTH;
                        break;
                case KEY_UWB:
                        t = RFKILL_TYPE_UWB;
                        break;
                case KEY_WIMAX:
                        t = RFKILL_TYPE_WIMAX;
                        break;
                default:
                        return;
                }
                rfkill_schedule_toggle(t);
                return;
        } else if (type == EV_SW) {
                switch (code) {
                case SW_RFKILL_ALL:
                        rfkill_schedule_evsw_rfkillall(data);
                        return;
                default:
                        return;
                }
        }
}

static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
                          const struct input_device_id *id)
{
        struct input_handle *handle;
        int error;

        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "rfkill";

        /* causes rfkill_start() to be called */
        error = input_register_handle(handle);
        if (error)
                goto err_free_handle;

        error = input_open_device(handle);
        if (error)
                goto err_unregister_handle;

        return 0;

 err_unregister_handle:
        input_unregister_handle(handle);
 err_free_handle:
        kfree(handle);
        return error;
}
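
/*
 * Called by the input core once the handle is registered: resync the
 * master switch from the device's current SW_RFKILL_ALL state so rfkill
 * starts out consistent with the hardware switch position.
 */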

static void rfkill_start(struct input_handle *handle)
{
        /* Take event_lock to guard against configuration changes, we
         * should be able to deal with concurrency with rfkill_event()
         * just fine (which event_lock will also avoid). */

        spin_lock_irq(&handle->dev->event_lock);

        if (test_bit(EV_SW, handle->dev->evbit)) {
                if (test_bit(SW_RFKILL_ALL, handle->dev->swbit))
                        rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
                                                        handle->dev->sw));
                /* add resync for further EV_SW events here */
        }

        spin_unlock_irq(&handle->dev->event_lock);
}

static void rfkill_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}
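
/*
 * Match any input device that advertises one of the per-radio rfkill
 * keys (WLAN, Bluetooth, UWB, WiMAX) or the SW_RFKILL_ALL switch.
 */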

static const struct input_device_id rfkill_ids[] = {
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
                .evbit = { BIT_MASK(EV_KEY) },
                .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
        },
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
                .evbit = { BIT(EV_SW) },
                .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
        },
        { }
};

static struct input_handler rfkill_handler = {
        .event = rfkill_event,
        .connect = rfkill_connect,
        .disconnect = rfkill_disconnect,
        .start = rfkill_start,
        .name = "rfkill",
        .id_table = rfkill_ids,
};

static int __init rfkill_handler_init(void)
{
        if (rfkill_master_switch_mode >= RFKILL_INPUT_MASTER_MAX)
                return -EINVAL;

        /*
         * The penalty to not doing this is a possible RFKILL_OPS_DELAY delay
         * at the first use.  Acceptable, but if we can avoid it, why not?
         */
        rfkill_task.last_scheduled =
                        jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
        return input_register_handler(&rfkill_handler);
}

static void __exit rfkill_handler_exit(void)
{
        input_unregister_handler(&rfkill_handler);
        cancel_delayed_work_sync(&rfkill_task.dwork);
        rfkill_remove_epo_lock();
}

module_init(rfkill_handler_init);
module_exit(rfkill_handler_exit);