mesh_pathtbl.c

/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
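
/*
 * Example (hypothetical helper, not part of the original file): a whole-table
 * walk keeps the dereferenced table in a plain variable and never passes an
 * rcu_dereference() expression to for_each_mesh_entry() directly, as the
 * comment above requires.
 */
static inline int mesh_paths_count_example(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mpath_node *node;
	struct hlist_node *p;
	int i, n = 0;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i)
		if (node->mpath->sdata == sdata)
			n++;
	rcu_read_unlock();

	return n;
}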

static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
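
/*
 * Example (hypothetical, for illustration only): both the lookup and any use
 * of the returned mesh_path must stay inside a single rcu_read_lock()
 * section, since a deleted entry is freed via call_rcu().
 */
static inline bool mesh_path_is_active_example(u8 *dst,
					       struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	bool active = false;

	rcu_read_lock();
	mpath = mesh_path_lookup(dst, sdata);
	if (mpath)
		active = !!(mpath->flags & MESH_PATH_ACTIVE);
	rcu_read_unlock();

	return active;
}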

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath to our list of known mesh gates
 * @mpath: gate path to add to the table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg(mpath->sdata,
				  "Mesh path: Deleted gate: %pM. %d known gates\n",
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
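
/*
 * Example (hypothetical helper, not part of the original file): callers
 * typically create a path only when a lookup finds nothing, then look it up
 * again to continue working with the RCU-protected entry.
 */
static inline struct mesh_path *
mesh_path_find_or_add_example(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	/* the caller is expected to hold rcu_read_lock() */
	mpath = mesh_path_lookup(dst, sdata);
	if (!mpath) {
		if (mesh_path_add(dst, sdata))
			return NULL;	/* -ENOSPC, -ENOMEM, -EEXIST, ... */
		mpath = mesh_path_lookup(dst, sdata);
	}
	return mpath;
}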

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, cpu_to_le32(mpath->sn),
					   reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal (MPP) paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
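
/*
 * Example (hypothetical caller, for illustration only): when path discovery
 * gives up on a destination, the queued frames can be offered to the known
 * gates and only flushed if no gate accepted them.
 */
static inline void mesh_path_give_up_example(struct mesh_path *mpath)
{
	if (mesh_path_send_to_gates(mpath))
		/* no usable gate on this interface: drop what is still queued */
		mesh_path_flush_pending(mpath);
}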

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must not be called with mpath->state_lock held;
 * it takes that lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);


	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}