dfs_pri_detector.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425
  1. /*
  2. * Copyright (c) 2012 Neratec Solutions AG
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/slab.h>
  17. #include <linux/spinlock.h>
  18. #include "ath9k.h"
  19. #include "dfs_pattern_detector.h"
  20. #include "dfs_pri_detector.h"
  21. #include "dfs_debug.h"
/**
 * struct pulse_elem - elements in pulse queue
 * @head: list membership; linked into a pri_detector's pulse queue or
 *        into the singleton pulse_pool when recycled
 * @ts: time stamp in usecs
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};
  30. /**
  31. * pde_get_multiple() - get number of multiples considering a given tolerance
  32. * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
  33. */
  34. static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
  35. {
  36. u32 remainder;
  37. u32 factor;
  38. u32 delta;
  39. if (fraction == 0)
  40. return 0;
  41. delta = (val < fraction) ? (fraction - val) : (val - fraction);
  42. if (delta <= tolerance)
  43. /* val and fraction are within tolerance */
  44. return 1;
  45. factor = val / fraction;
  46. remainder = val % fraction;
  47. if (remainder > tolerance) {
  48. /* no exact match */
  49. if ((fraction - remainder) <= tolerance)
  50. /* remainder is within tolerance */
  51. factor++;
  52. else
  53. factor = 0;
  54. }
  55. return factor;
  56. }
/**
 * DOC: Singleton Pulse and Sequence Pools
 *
 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
 * reduce the number of dynamic allocations. They are shared between all
 * instances and grow up to the peak number of simultaneously used objects.
 *
 * Memory is freed after all references to the pools are released.
 */
/* number of pri_detector instances currently holding a pool reference */
static u32 singleton_pool_references;
/* recycled pulse_elem objects, available for reuse */
static LIST_HEAD(pulse_pool);
/* recycled pri_sequence objects, available for reuse */
static LIST_HEAD(pseq_pool);
/* protects both pools and the reference counter (BH-safe lock) */
static DEFINE_SPINLOCK(pool_lock);
/* take a reference on the singleton pools; paired with pool_deregister_ref() */
static void pool_register_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references++;
	DFS_POOL_STAT_INC(pool_reference);
	spin_unlock_bh(&pool_lock);
}
  77. static void pool_deregister_ref(void)
  78. {
  79. spin_lock_bh(&pool_lock);
  80. singleton_pool_references--;
  81. DFS_POOL_STAT_DEC(pool_reference);
  82. if (singleton_pool_references == 0) {
  83. /* free singleton pools with no references left */
  84. struct pri_sequence *ps, *ps0;
  85. struct pulse_elem *p, *p0;
  86. list_for_each_entry_safe(p, p0, &pulse_pool, head) {
  87. list_del(&p->head);
  88. DFS_POOL_STAT_DEC(pulse_allocated);
  89. kfree(p);
  90. }
  91. list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
  92. list_del(&ps->head);
  93. DFS_POOL_STAT_DEC(pseq_allocated);
  94. kfree(ps);
  95. }
  96. }
  97. spin_unlock_bh(&pool_lock);
  98. }
/* return a pulse element to the shared pool for later reuse */
static void pool_put_pulse_elem(struct pulse_elem *pe)
{
	spin_lock_bh(&pool_lock);
	list_add(&pe->head, &pulse_pool);
	DFS_POOL_STAT_DEC(pulse_used);
	spin_unlock_bh(&pool_lock);
}
/* return a sequence element to the shared pool for later reuse */
static void pool_put_pseq_elem(struct pri_sequence *pse)
{
	spin_lock_bh(&pool_lock);
	list_add(&pse->head, &pseq_pool);
	DFS_POOL_STAT_DEC(pseq_used);
	spin_unlock_bh(&pool_lock);
}
  113. static struct pri_sequence *pool_get_pseq_elem(void)
  114. {
  115. struct pri_sequence *pse = NULL;
  116. spin_lock_bh(&pool_lock);
  117. if (!list_empty(&pseq_pool)) {
  118. pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
  119. list_del(&pse->head);
  120. DFS_POOL_STAT_INC(pseq_used);
  121. }
  122. spin_unlock_bh(&pool_lock);
  123. return pse;
  124. }
  125. static struct pulse_elem *pool_get_pulse_elem(void)
  126. {
  127. struct pulse_elem *pe = NULL;
  128. spin_lock_bh(&pool_lock);
  129. if (!list_empty(&pulse_pool)) {
  130. pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
  131. list_del(&pe->head);
  132. DFS_POOL_STAT_INC(pulse_used);
  133. }
  134. spin_unlock_bh(&pool_lock);
  135. return pe;
  136. }
  137. static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
  138. {
  139. struct list_head *l = &pde->pulses;
  140. if (list_empty(l))
  141. return NULL;
  142. return list_entry(l->prev, struct pulse_elem, head);
  143. }
  144. static bool pulse_queue_dequeue(struct pri_detector *pde)
  145. {
  146. struct pulse_elem *p = pulse_queue_get_tail(pde);
  147. if (p != NULL) {
  148. list_del_init(&p->head);
  149. pde->count--;
  150. /* give it back to pool */
  151. pool_put_pulse_elem(p);
  152. }
  153. return (pde->count > 0);
  154. }
  155. /* remove pulses older than window */
  156. static void pulse_queue_check_window(struct pri_detector *pde)
  157. {
  158. u64 min_valid_ts;
  159. struct pulse_elem *p;
  160. /* there is no delta time with less than 2 pulses */
  161. if (pde->count < 2)
  162. return;
  163. if (pde->last_ts <= pde->window_size)
  164. return;
  165. min_valid_ts = pde->last_ts - pde->window_size;
  166. while ((p = pulse_queue_get_tail(pde)) != NULL) {
  167. if (p->ts >= min_valid_ts)
  168. return;
  169. pulse_queue_dequeue(pde);
  170. }
  171. }
  172. static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
  173. {
  174. struct pulse_elem *p = pool_get_pulse_elem();
  175. if (p == NULL) {
  176. p = kmalloc(sizeof(*p), GFP_ATOMIC);
  177. if (p == NULL) {
  178. DFS_POOL_STAT_INC(pulse_alloc_error);
  179. return false;
  180. }
  181. DFS_POOL_STAT_INC(pulse_allocated);
  182. DFS_POOL_STAT_INC(pulse_used);
  183. }
  184. INIT_LIST_HEAD(&p->head);
  185. p->ts = ts;
  186. list_add(&p->head, &pde->pulses);
  187. pde->count++;
  188. pde->last_ts = ts;
  189. pulse_queue_check_window(pde);
  190. if (pde->count >= pde->max_count)
  191. pulse_queue_dequeue(pde);
  192. return true;
  193. }
/*
 * For each queued pulse whose distance to @ts lies within [pri_min, pri_max],
 * build a candidate PRI sequence and count how many other queued pulses fall
 * on its (multi-)PRI raster. Candidates reaching @min_count matches are added
 * to pde->sequences.
 *
 * Returns false only on allocation failure; caller is expected to reset.
 */
static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;

	/* pulses are kept newest-first, so delta_ts grows per iteration */
	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* ignore too small pri */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* stop on too large pri (sorted list) */
			break;

		/* build a new sequence with new potential pri */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		ps.pri = ts - p->ts;
		/* window length: ppb-1 intervals plus tolerance on both ends */
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;
		/* check which past pulses are candidates for new sequence */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;
			if (p2->ts < min_valid_ts)
				/* stop on crossing window border */
				break;
			/* check if pulse match (multi)PRI */
			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/*
				 * on match, add the intermediate falses
				 * and reset counter
				 */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this is a potential false one */
				tmp_false_count++;
			}
		}
		if (ps.count < min_count)
			/* did not reach minimum count, drop sequence */
			continue;

		/* this is a valid one, add it */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}
/* check new ts and add to all matching existing sequences */
static u32
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
{
	u32 max_count = 0;
	struct pri_sequence *ps, *ps2;

	/* _safe variant: expired sequences are unlinked while iterating */
	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
		u32 delta_ts;
		u32 factor;

		/* first ensure that sequence is within window */
		if (ts > ps->deadline_ts) {
			/* expired: recycle the sequence object */
			list_del_init(&ps->head);
			pool_put_pseq_elem(ps);
			continue;
		}

		delta_ts = ts - ps->last_ts;
		factor = pde_get_multiple(delta_ts, ps->pri,
					  pde->rs->max_pri_tolerance);
		if (factor > 0) {
			/* ts lies on this sequence's (multi-)PRI raster */
			ps->last_ts = ts;
			ps->count++;

			if (max_count < ps->count)
				max_count = ps->count;
		} else {
			/* no raster match: count as a false pulse */
			ps->count_falses++;
		}
	}
	/* highest match count over all surviving sequences */
	return max_count;
}
  294. static struct pri_sequence *
  295. pseq_handler_check_detection(struct pri_detector *pde)
  296. {
  297. struct pri_sequence *ps;
  298. if (list_empty(&pde->sequences))
  299. return NULL;
  300. list_for_each_entry(ps, &pde->sequences, head) {
  301. /*
  302. * we assume to have enough matching confidence if we
  303. * 1) have enough pulses
  304. * 2) have more matching than false pulses
  305. */
  306. if ((ps->count >= pde->rs->ppb_thresh) &&
  307. (ps->count * pde->rs->num_pri >= ps->count_falses))
  308. return ps;
  309. }
  310. return NULL;
  311. }
  312. /* free pulse queue and sequences list and give objects back to pools */
  313. static void pri_detector_reset(struct pri_detector *pde, u64 ts)
  314. {
  315. struct pri_sequence *ps, *ps0;
  316. struct pulse_elem *p, *p0;
  317. list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
  318. list_del_init(&ps->head);
  319. pool_put_pseq_elem(ps);
  320. }
  321. list_for_each_entry_safe(p, p0, &pde->pulses, head) {
  322. list_del_init(&p->head);
  323. pool_put_pulse_elem(p);
  324. }
  325. pde->count = 0;
  326. pde->last_ts = ts;
  327. }
/* destructor: return all objects to the pools, drop pool reference, free */
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
  334. static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
  335. struct pulse_event *event)
  336. {
  337. u32 max_updated_seq;
  338. struct pri_sequence *ps;
  339. u64 ts = event->ts;
  340. const struct radar_detector_specs *rs = de->rs;
  341. /* ignore pulses not within width range */
  342. if ((rs->width_min > event->width) || (rs->width_max < event->width))
  343. return NULL;
  344. if ((ts - de->last_ts) < rs->max_pri_tolerance)
  345. /* if delta to last pulse is too short, don't use this pulse */
  346. return NULL;
  347. de->last_ts = ts;
  348. max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
  349. if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
  350. pri_detector_reset(de, ts);
  351. return false;
  352. }
  353. ps = pseq_handler_check_detection(de);
  354. if (ps == NULL)
  355. pulse_queue_enqueue(de, ts);
  356. return ps;
  357. }
  358. struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
  359. {
  360. struct pri_detector *de;
  361. de = kzalloc(sizeof(*de), GFP_ATOMIC);
  362. if (de == NULL)
  363. return NULL;
  364. de->exit = pri_detector_exit;
  365. de->add_pulse = pri_detector_add_pulse;
  366. de->reset = pri_detector_reset;
  367. INIT_LIST_HEAD(&de->sequences);
  368. INIT_LIST_HEAD(&de->pulses);
  369. de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
  370. de->max_count = rs->ppb * 2;
  371. de->rs = rs;
  372. pool_register_ref();
  373. return de;
  374. }