mesh_pathtbl.c

/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER 2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN 2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
                              time_after(jiffies, mpath->exp_time) && \
                              !(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets of a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

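/*
 * For illustration: in this file the read side of pathtbl_resize_lock is
 * taken with read_lock_bh() around bucket insertions and deletions (see
 * mesh_path_add() and mesh_path_del() below), while mesh_mpath_table_grow()
 * and mesh_mpp_table_grow() take write_lock_bh() before swapping in a
 * larger table.
 */
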
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
        return rcu_dereference_protected(mesh_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
        return rcu_dereference_protected(mpp_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
                hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

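/*
 * Typical (illustrative) use of the macro above, with "tbl" already
 * obtained through rcu_dereference() or one of the resize_dereference_*()
 * helpers, and never written as an expression:
 *
 *        for_each_mesh_entry(tbl, p, node, i)
 *                visit(node->mpath);
 *
 * where visit() is a placeholder for whatever the caller does per entry.
 */
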
static struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries, 0);
        get_random_bytes(&newtbl->hash_rnd,
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);
        spin_lock_init(&newtbl->gates_lock);

        return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        struct mpath_node *gate;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock_bh(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
        if (free_leafs) {
                spin_lock_bh(&tbl->gates_lock);
                hlist_for_each_entry_safe(gate, p, q,
                                          tbl->known_gates, list) {
                        hlist_del(&gate->list);
                        kfree(gate);
                }
                kfree(tbl->known_gates);
                spin_unlock_bh(&tbl->gates_lock);
        }

        __mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
                           struct mesh_table *newtbl)
{
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&oldtbl->entries)
                        < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
                return -EAGAIN;

        newtbl->free_node = oldtbl->free_node;
        newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

        oldhash = oldtbl->hash_buckets;
        for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return 0;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        oldtbl->free_node(p, 0);
        }
        return -ENOMEM;
}

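/*
 * Bucket selection: hash the last four bytes of the destination address
 * together with the interface index, seeded by the table's random
 * hash_rnd, then mask the result down to the table's bucket count.
 */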
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
                & tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head tmpq;
        unsigned long flags;

        rcu_assign_pointer(mpath->next_hop, sta);

        __skb_queue_head_init(&tmpq);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);

        while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                __skb_queue_tail(&tmpq, skb);
        }

        skb_queue_splice(&tmpq, &mpath->frame_queue);
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

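/*
 * Rewrite a queued frame so it can be forwarded through a mesh gate: if
 * the mesh header carries no Address Extension yet, convert it to the
 * A5/A6 form (copying the original addr3/addr4 into eaddr1/eaddr2), then
 * point addr1 at the gate's next hop and addr3 at the final destination.
 */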
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
                             struct mesh_path *gate_mpath)
{
        struct ieee80211_hdr *hdr;
        struct ieee80211s_hdr *mshdr;
        int mesh_hdrlen, hdrlen;
        char *next_hop;

        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

        if (!(mshdr->flags & MESH_FLAGS_AE)) {
                /* size of the fixed part of the mesh header */
                mesh_hdrlen = 6;

                /* make room for the two extended addresses */
                skb_push(skb, 2 * ETH_ALEN);
                memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

                hdr = (struct ieee80211_hdr *) skb->data;

                /* we preserve the previous mesh header and only add
                 * the new addresses */
                mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
                mshdr->flags = MESH_FLAGS_AE_A5_A6;
                memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
                memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
        }

        /* update next hop */
        hdr = (struct ieee80211_hdr *) skb->data;
        rcu_read_lock();
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
{
        struct sk_buff *skb, *cp_skb = NULL;
        struct sk_buff_head gateq, failq;
        unsigned long flags;
        int num_skbs;

        BUG_ON(gate_mpath == from_mpath);
        BUG_ON(!gate_mpath->next_hop);

        __skb_queue_head_init(&gateq);
        __skb_queue_head_init(&failq);

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

        num_skbs = skb_queue_len(&failq);

        while (num_skbs--) {
                skb = __skb_dequeue(&failq);
                if (copy) {
                        cp_skb = skb_copy(skb, GFP_ATOMIC);
                        if (cp_skb)
                                __skb_queue_tail(&failq, cp_skb);
                }

                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
                __skb_queue_tail(&gateq, skb);
        }

        spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
        skb_queue_splice(&gateq, &gate_mpath->frame_queue);
        mpath_dbg("Mpath queue for gate %pM has %d frames\n",
                  gate_mpath->dst,
                  skb_queue_len(&gate_mpath->frame_queue));
        spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

        if (!copy)
                return;

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice(&failq, &from_mpath->frame_queue);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

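/*
 * Common lookup helper for both path tables. Note that an expired entry
 * is not removed here; its MESH_PATH_ACTIVE flag is simply cleared before
 * the entry is returned to the caller.
 */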
static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
                                     struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mpath_node *node;

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, p, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

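/* RCU callback used by mesh_gate_del() to free a gate node once a grace
 * period has elapsed.
 */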
static void mesh_gate_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        kfree(node);
}

/**
 * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
 * @tbl: table which contains known_gates list
 * @mpath: mpath to known mesh gate
 *
 * Returns: 0 on success
 */
static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate, *new_gate;
        struct hlist_node *n;
        int err;

        rcu_read_lock();
        tbl = rcu_dereference(tbl);

        hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        err = -EEXIST;
                        goto err_rcu;
                }

        new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_gate) {
                err = -ENOMEM;
                goto err_rcu;
        }

        mpath->is_gate = true;
        mpath->sdata->u.mesh.num_gates++;
        new_gate->mpath = mpath;
        spin_lock_bh(&tbl->gates_lock);
        hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
        spin_unlock_bh(&tbl->gates_lock);
        rcu_read_unlock();
        mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
                  mpath->sdata->name, mpath->dst,
                  mpath->sdata->u.mesh.num_gates);
        return 0;
err_rcu:
        rcu_read_unlock();
        return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate;
        struct hlist_node *p, *q;

        tbl = rcu_dereference(tbl);

        hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        spin_lock_bh(&tbl->gates_lock);
                        hlist_del_rcu(&gate->list);
                        call_rcu(&gate->rcu, mesh_gate_node_reclaim);
                        spin_unlock_bh(&tbl->gates_lock);
                        mpath->sdata->u.mesh.num_gates--;
                        mpath->is_gate = false;
                        mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
                                  "%d known gates\n", mpath->sdata->name,
                                  mpath->dst, mpath->sdata->u.mesh.num_gates);
                        break;
                }

        return 0;
}

/**
 * mesh_path_add_gate - add the given mpath to our table's list of mesh gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
        return mesh_gate_add(mesh_paths, mpath);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return -ENOSPC;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        tbl = resize_dereference_mesh_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        return err;
}

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

        mesh_table_free(tbl, false);
}

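/*
 * Table growth, scheduled from mesh_path_add()/mpp_path_add() via the
 * MESH_WORK_GROW_MPATH_TABLE / MESH_WORK_GROW_MPP_TABLE work flags once a
 * table's mean chain length is exceeded: allocate a table twice the size,
 * copy the nodes across under the resize write lock, publish the new table
 * with rcu_assign_pointer() and free the old one after a grace period.
 */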
void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mesh_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mesh_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
        write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mpp_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mpp_paths, newtbl);
        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
        write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        init_timer(&new_mpath->timer);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        tbl = resize_dereference_mpp_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_table *tbl;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
                                           mpath->dst, cpu_to_le32(mpath->sn),
                                           reason, bcast, sdata);
                }
        }
        rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}

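/*
 * __mesh_path_del() below unlinks a node from its bucket with
 * hlist_del_rcu() and defers the actual free to mesh_path_node_reclaim()
 * via call_rcu(), so concurrent RCU readers can keep traversing the list.
 */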
/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
        struct mesh_path *mpath;

        mpath = node->mpath;
        spin_lock(&mpath->state_lock);
        mpath->flags |= MESH_PATH_RESOLVING;
        if (mpath->is_gate)
                mesh_gate_del(tbl, mpath);
        hlist_del_rcu(&node->list);
        call_rcu(&node->rcu, mesh_path_node_reclaim);
        spin_unlock(&mpath->state_lock);
        atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta) {
                        spin_lock_bh(&tbl->hashwlock[i]);
                        __mesh_path_del(tbl, node);
                        spin_unlock_bh(&tbl->hashwlock[i]);
                }
        }
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        WARN_ON(!rcu_read_lock_held());
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (mpath->sdata != sdata)
                        continue;
                spin_lock_bh(&tbl->hashwlock[i]);
                __mesh_path_del(tbl, node);
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        table_flush_by_iface(tbl, sdata);
        tbl = resize_dereference_mpp_paths();
        table_flush_by_iface(tbl, sdata);
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int hash_idx;
        int err = 0;

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
                        __mesh_path_del(tbl, node);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        mesh_paths_generation++;
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
        struct hlist_node *n;
        struct mesh_table *tbl;
        struct mesh_path *from_mpath = mpath;
        struct mpath_node *gate = NULL;
        bool copy = false;
        struct hlist_head *known_gates;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        known_gates = tbl->known_gates;
        rcu_read_unlock();

        if (!known_gates)
                return -EHOSTUNREACH;

        hlist_for_each_entry_rcu(gate, n, known_gates, list) {
                if (gate->mpath->sdata != sdata)
                        continue;

                if (gate->mpath->flags & MESH_PATH_ACTIVE) {
                        mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
                        mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
                        from_mpath = gate->mpath;
                        copy = true;
                } else {
                        mpath_dbg("Not forwarding %p\n", gate->mpath);
                        mpath_dbg("flags %x\n", gate->mpath->flags);
                }
        }

        hlist_for_each_entry_rcu(gate, n, known_gates, list)
                if (gate->mpath->sdata == sdata) {
                        mpath_dbg("Sending to %pM\n", gate->mpath->dst);
                        mesh_path_tx_pending(gate->mpath);
                }

        return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mesh_path *mpath;
        u32 sn = 0;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);

        if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
                u8 *ra, *da;

                da = hdr->addr3;
                ra = hdr->addr1;
                rcu_read_lock();
                mpath = mesh_path_lookup(da, sdata);
                if (mpath) {
                        spin_lock_bh(&mpath->state_lock);
                        sn = ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                }
                rcu_read_unlock();
                mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
                                   cpu_to_le32(sn), reason, ra, sdata);
        }

        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs) {
                del_timer_sync(&mpath->timer);
                kfree(mpath);
        }
        kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                       &newtbl->hash_buckets[hash_idx]);
        return 0;
}

int mesh_pathtbl_init(void)
{
        struct mesh_table *tbl_path, *tbl_mpp;
        int ret;

        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                ret = -ENOMEM;
                goto free_path;
        }
        INIT_HLIST_HEAD(tbl_path->known_gates);

        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
                ret = -ENOMEM;
                goto free_path;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                ret = -ENOMEM;
                goto free_mpp;
        }
        INIT_HLIST_HEAD(tbl_mpp->known_gates);

        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);

        return 0;

free_mpp:
        mesh_table_free(tbl_mpp, true);
free_path:
        mesh_table_free(tbl_path, true);
        return ret;
}

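/*
 * Walk the mesh path table for this interface and delete entries that are
 * neither being resolved nor fixed and whose expiration time plus
 * MESH_PATH_EXPIRE has passed.
 */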
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
        rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
        /* no need for locking during exit path */
        mesh_table_free(rcu_dereference_raw(mesh_paths), true);
        mesh_table_free(rcu_dereference_raw(mpp_paths), true);
}