mesh_pathtbl.c

/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)        printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)        do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER        2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN                2
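
/*
 * Growth rule (derived from mesh_table_grow() and mesh_path_add() below):
 * a table is grown once it holds mean_chain_len * nbuckets entries.  For
 * example, the initial table has 1 << INIT_PATHS_SIZE_ORDER = 4 buckets,
 * so it is doubled to 8 buckets when the 2 * 4 = 8th entry is added.
 */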

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
                                time_after(jiffies, mpath->exp_time) && \
                                !(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table.  The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
        return rcu_dereference_protected(mesh_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
        return rcu_dereference_protected(mpp_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
                hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
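
/*
 * Illustrative usage sketch (not taken verbatim from a caller; use_path()
 * stands in for whatever per-path work the caller does): the table pointer
 * must be dereferenced once into a local and only then passed to
 * for_each_mesh_entry():
 *
 *        struct mesh_table *tbl;
 *        struct mpath_node *node;
 *        struct hlist_node *p;
 *        int i;
 *
 *        rcu_read_lock();
 *        tbl = rcu_dereference(mesh_paths);
 *        for_each_mesh_entry(tbl, p, node, i)
 *                use_path(node->mpath);
 *        rcu_read_unlock();
 */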

static struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries, 0);
        get_random_bytes(&newtbl->hash_rnd,
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);
        spin_lock_init(&newtbl->gates_lock);

        return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        struct mpath_node *gate;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock_bh(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
        if (free_leafs) {
                spin_lock_bh(&tbl->gates_lock);
                hlist_for_each_entry_safe(gate, p, q,
                                          tbl->known_gates, list) {
                        hlist_del(&gate->list);
                        kfree(gate);
                }
                kfree(tbl->known_gates);
                spin_unlock_bh(&tbl->gates_lock);
        }

        __mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
                           struct mesh_table *newtbl)
{
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&oldtbl->entries)
                        < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
                return -EAGAIN;

        newtbl->free_node = oldtbl->free_node;
        newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

        oldhash = oldtbl->hash_buckets;
        for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return 0;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        oldtbl->free_node(p, 0);
        }
        return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
                & tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head tmpq;
        unsigned long flags;

        rcu_assign_pointer(mpath->next_hop, sta);

        __skb_queue_head_init(&tmpq);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);

        while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
                __skb_queue_tail(&tmpq, skb);
        }

        skb_queue_splice(&tmpq, &mpath->frame_queue);
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
                             struct mesh_path *gate_mpath)
{
        struct ieee80211_hdr *hdr;
        struct ieee80211s_hdr *mshdr;
        int mesh_hdrlen, hdrlen;
        char *next_hop;

        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

        if (!(mshdr->flags & MESH_FLAGS_AE)) {
                /* size of the fixed part of the mesh header */
                mesh_hdrlen = 6;

                /* make room for the two extended addresses */
                skb_push(skb, 2 * ETH_ALEN);
                memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

                hdr = (struct ieee80211_hdr *) skb->data;

                /* we preserve the previous mesh header and only add
                 * the new addresses */
                mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
                mshdr->flags = MESH_FLAGS_AE_A5_A6;
                memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
                memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
        }

        /* update next hop */
        hdr = (struct ieee80211_hdr *) skb->data;
        rcu_read_lock();
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
        memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
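
/*
 * Resulting header layout (illustrative sketch): when called from
 * mesh_path_move_to_queue() below, dst_addr is the gate's address, so a
 * frame that lacked the AE subfield leaves this function with:
 *
 *        addr1  = gate's next hop        addr2  = our vif address
 *        addr3  = dst_addr (the gate)    addr4  = unchanged
 *        eaddr1 = original addr3         eaddr2 = original addr4
 */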

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
{
        struct sk_buff *skb, *cp_skb = NULL;
        struct sk_buff_head gateq, failq;
        unsigned long flags;
        int num_skbs;

        BUG_ON(gate_mpath == from_mpath);
        BUG_ON(!gate_mpath->next_hop);

        __skb_queue_head_init(&gateq);
        __skb_queue_head_init(&failq);

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

        num_skbs = skb_queue_len(&failq);

        while (num_skbs--) {
                skb = __skb_dequeue(&failq);
                if (copy) {
                        cp_skb = skb_copy(skb, GFP_ATOMIC);
                        if (cp_skb)
                                __skb_queue_tail(&failq, cp_skb);
                }

                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
                __skb_queue_tail(&gateq, skb);
        }

        spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
        skb_queue_splice(&gateq, &gate_mpath->frame_queue);
        mpath_dbg("Mpath queue for gate %pM has %d frames\n",
                        gate_mpath->dst,
                        skb_queue_len(&gate_mpath->frame_queue));
        spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

        if (!copy)
                return;

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice(&failq, &from_mpath->frame_queue);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
                                      struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mpath_node *node;

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    compare_ether_addr(dst, mpath->dst) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
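
/*
 * Caller pattern (sketch, mirroring users such as the mesh forwarding
 * code): the returned mpath is only valid inside the RCU read section,
 * so it must be used, or its contents copied, before unlocking:
 *
 *        rcu_read_lock();
 *        mpath = mesh_path_lookup(dst, sdata);
 *        if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *                ... use mpath here ...
 *        rcu_read_unlock();
 */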

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, p, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath to the list of known mesh gates
 * @mpath: gate path to add to the gate list
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
        struct mesh_table *tbl;
        struct mpath_node *gate, *new_gate;
        struct hlist_node *n;
        int err;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);

        hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        err = -EEXIST;
                        goto err_rcu;
                }

        new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_gate) {
                err = -ENOMEM;
                goto err_rcu;
        }

        mpath->is_gate = true;
        mpath->sdata->u.mesh.num_gates++;
        new_gate->mpath = mpath;
        spin_lock_bh(&tbl->gates_lock);
        hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
        spin_unlock_bh(&tbl->gates_lock);
        rcu_read_unlock();
        mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
                  mpath->sdata->name, mpath->dst,
                  mpath->sdata->u.mesh.num_gates);
        return 0;
err_rcu:
        rcu_read_unlock();
        return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate;
        struct hlist_node *p, *q;

        hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        spin_lock_bh(&tbl->gates_lock);
                        hlist_del_rcu(&gate->list);
                        kfree_rcu(gate, rcu);
                        spin_unlock_bh(&tbl->gates_lock);
                        mpath->sdata->u.mesh.num_gates--;
                        mpath->is_gate = false;
                        mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
                                  "%d known gates\n", mpath->sdata->name,
                                  mpath->dst, mpath->sdata->u.mesh.num_gates);
                        break;
                }

        return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (compare_ether_addr(dst, sdata->vif.addr) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return -ENOSPC;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        tbl = resize_dereference_mesh_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    compare_ether_addr(dst, mpath->dst) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        return err;
}
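
/*
 * Caller pattern (sketch, modelled on the HWMP path discovery code):
 * mesh_path_add() does not return the new entry, so callers typically
 * create the path and then look it up again under an RCU read section:
 *
 *        rcu_read_lock();
 *        mpath = mesh_path_lookup(dst, sdata);
 *        if (!mpath) {
 *                mesh_path_add(dst, sdata);
 *                mpath = mesh_path_lookup(dst, sdata);
 *                if (!mpath) {
 *                        rcu_read_unlock();
 *                        return;
 *                }
 *        }
 *        ...
 *        rcu_read_unlock();
 */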

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

        mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mesh_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mesh_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mpp_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mpp_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (compare_ether_addr(dst, sdata->vif.addr) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        init_timer(&new_mpath->timer);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        tbl = resize_dereference_mpp_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    compare_ether_addr(dst, mpath->dst) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_table *tbl;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
                                        mpath->dst, cpu_to_le32(mpath->sn),
                                        reason, bcast, sdata);
                }
        }
        rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
        struct mesh_path *mpath;

        mpath = node->mpath;
        spin_lock(&mpath->state_lock);
        mpath->flags |= MESH_PATH_RESOLVING;
        if (mpath->is_gate)
                mesh_gate_del(tbl, mpath);
        hlist_del_rcu(&node->list);
        call_rcu(&node->rcu, mesh_path_node_reclaim);
        spin_unlock(&mpath->state_lock);
        atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta) {
                        spin_lock(&tbl->hashwlock[i]);
                        __mesh_path_del(tbl, node);
                        spin_unlock(&tbl->hashwlock[i]);
                }
        }
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        WARN_ON(!rcu_read_lock_held());
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (mpath->sdata != sdata)
                        continue;
                spin_lock_bh(&tbl->hashwlock[i]);
                __mesh_path_del(tbl, node);
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        table_flush_by_iface(tbl, sdata);
        tbl = resize_dereference_mpp_paths();
        table_flush_by_iface(tbl, sdata);
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int hash_idx;
        int err = 0;

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    compare_ether_addr(addr, mpath->dst) == 0) {
                        __mesh_path_del(tbl, node);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        mesh_paths_generation++;
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
        struct hlist_node *n;
        struct mesh_table *tbl;
        struct mesh_path *from_mpath = mpath;
        struct mpath_node *gate = NULL;
        bool copy = false;
        struct hlist_head *known_gates;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        known_gates = tbl->known_gates;
        rcu_read_unlock();

        if (!known_gates)
                return -EHOSTUNREACH;

        hlist_for_each_entry_rcu(gate, n, known_gates, list) {
                if (gate->mpath->sdata != sdata)
                        continue;

                if (gate->mpath->flags & MESH_PATH_ACTIVE) {
                        mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
                        mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
                        from_mpath = gate->mpath;
                        copy = true;
                } else {
                        mpath_dbg("Not forwarding %p\n", gate->mpath);
                        mpath_dbg("flags %x\n", gate->mpath->flags);
                }
        }

        hlist_for_each_entry_rcu(gate, n, known_gates, list)
                if (gate->mpath->sdata == sdata) {
                        mpath_dbg("Sending to %pM\n", gate->mpath->dst);
                        mesh_path_tx_pending(gate->mpath);
                }

        return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
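
/*
 * Worked example (sketch): with two active gates G1 and G2, the failed
 * mpath's frames are first moved to G1's queue (copy == false), then
 * copied from G1's queue to G2's (copy becomes true after the first
 * transfer), and the second loop calls mesh_path_tx_pending() on both
 * gates so each copy goes out.  If no gate was active, from_mpath is
 * still mpath and -EHOSTUNREACH is returned.
 */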

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs) {
                del_timer_sync(&mpath->timer);
                kfree(mpath);
        }
        kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                       &newtbl->hash_buckets[hash_idx]);
        return 0;
}

int mesh_pathtbl_init(void)
{
        struct mesh_table *tbl_path, *tbl_mpp;
        int ret;

        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                ret = -ENOMEM;
                goto free_path;
        }
        INIT_HLIST_HEAD(tbl_path->known_gates);

        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
                ret = -ENOMEM;
                goto free_path;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                ret = -ENOMEM;
                goto free_mpp;
        }
        INIT_HLIST_HEAD(tbl_mpp->known_gates);

        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);

        return 0;

free_mpp:
        mesh_table_free(tbl_mpp, true);
free_path:
        mesh_table_free(tbl_path, true);
        return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
        rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
        /* no need for locking during exit path */
        mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
        mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}