mesh_pathtbl.c

/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER   2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN          2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
                              time_after(jiffies, mpath->exp_time) && \
                              !(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};
static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}
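
/**
 * mesh_table_free - free a path table and, optionally, the paths it holds
 *
 * @tbl: table to free
 * @free_leafs: whether the mesh_path structures themselves should be freed
 *
 * Every node is released through tbl->free_node() under its per-bucket lock.
 */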
void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock(&tbl->hashwlock[i]);
        }
        __mesh_table_free(tbl);
}
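
/*
 * Allocate a table with twice as many buckets as @tbl and rehash every node
 * into it. Returns NULL if the mean chain length is still below the target
 * or if allocation or copying fails; the caller is responsible for
 * publishing the new table and freeing the old one.
 */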
static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
{
        struct mesh_table *newtbl;
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&tbl->entries)
                        < tbl->mean_chain_len * (tbl->hash_mask + 1))
                goto endgrow;

        newtbl = mesh_table_alloc(tbl->size_order + 1);
        if (!newtbl)
                goto endgrow;

        newtbl->free_node = tbl->free_node;
        newtbl->mean_chain_len = tbl->mean_chain_len;
        newtbl->copy_node = tbl->copy_node;
        atomic_set(&newtbl->entries, atomic_read(&tbl->entries));

        oldhash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (tbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return newtbl;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        tbl->free_node(p, 0);
        }
        __mesh_table_free(newtbl);
endgrow:
        return NULL;
}

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head tmpq;
        unsigned long flags;

        rcu_assign_pointer(mpath->next_hop, sta);

        __skb_queue_head_init(&tmpq);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);

        while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                __skb_queue_tail(&tmpq, skb);
        }

        skb_queue_splice(&tmpq, &mpath->frame_queue);
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mesh_table *tbl;
        struct mpath_node *node;

        tbl = rcu_dereference(mesh_paths);

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                                memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                if (MPATH_EXPIRED(mpath))
                                        mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}
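
/**
 * mpp_path_lookup - look up a proxy path in the MPP path table
 * @dst: hardware address (ETH_ALEN length) of the proxied destination
 * @sdata: local subif
 *
 * Returns: pointer to the proxy path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */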
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mesh_table *tbl;
        struct mpath_node *node;

        tbl = rcu_dereference(mpp_paths);

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                                memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                if (MPATH_EXPIRED(mpath))
                                        mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
        int j = 0;

        for_each_mesh_entry(mesh_paths, p, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                if (MPATH_EXPIRED(node->mpath))
                                        node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return -ENOSPC;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
        bucket = &mesh_paths->hash_buckets[hash_idx];
        spin_lock(&mesh_paths->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&mesh_paths->entries) >=
                mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        spin_unlock(&mesh_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &ifmsh->work);
        }
        return 0;

err_exists:
        spin_unlock(&mesh_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        return err;
}
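
/*
 * Grow the mesh path table. The resize lock is taken as writer so that
 * add and delete (which take it as readers) are excluded while the new
 * table is swapped in; lookups are protected by RCU, so the old table
 * is freed only after synchronize_rcu().
 */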
void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock(&pathtbl_resize_lock);
        oldtbl = mesh_paths;
        newtbl = mesh_table_grow(mesh_paths);
        if (!newtbl) {
                write_unlock(&pathtbl_resize_lock);
                return;
        }
        rcu_assign_pointer(mesh_paths, newtbl);
        write_unlock(&pathtbl_resize_lock);

        synchronize_rcu();
        mesh_table_free(oldtbl, false);
}
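
/* As above, but for the MPP proxy-path table */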
void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock(&pathtbl_resize_lock);
        oldtbl = mpp_paths;
        newtbl = mesh_table_grow(mpp_paths);
        if (!newtbl) {
                write_unlock(&pathtbl_resize_lock);
                return;
        }
        rcu_assign_pointer(mpp_paths, newtbl);
        write_unlock(&pathtbl_resize_lock);

        synchronize_rcu();
        mesh_table_free(oldtbl, false);
}
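
/**
 * mpp_path_add - allocate and add a new proxy path to the MPP path table
 * @dst: destination address of the proxied station (ETH_ALEN length)
 * @mpp: address of the mesh proxy through which @dst can be reached
 * @sdata: local subif
 *
 * Returns: 0 on success
 */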
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
        bucket = &mpp_paths->hash_buckets[hash_idx];
        spin_lock(&mpp_paths->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&mpp_paths->entries) >=
                mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
                grow = 1;

        spin_unlock(&mpp_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &ifmsh->work);
        }
        return 0;

err_exists:
        spin_unlock(&mpp_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;

        rcu_read_lock();
        for_each_mesh_entry(mesh_paths, p, node, i) {
                mpath = node->mpath;
                spin_lock_bh(&mpath->state_lock);
                if (mpath->next_hop == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(MESH_TTL, mpath->dst,
                                        cpu_to_le32(mpath->sn),
                                        PERR_RCODE_DEST_UNREACH,
                                        sdata->dev->broadcast, sdata);
                } else
                        spin_unlock_bh(&mpath->state_lock);
        }
        rcu_read_unlock();
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        for_each_mesh_entry(mesh_paths, p, node, i) {
                mpath = node->mpath;
                if (mpath->next_hop == sta)
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
}
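
/**
 * mesh_path_flush - removes all the paths that belong to a given interface
 *
 * @sdata: interface whose mesh paths should be deleted
 */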
void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        for_each_mesh_entry(mesh_paths, p, node, i) {
                mpath = node->mpath;
                if (mpath->sdata == sdata)
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
}
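
/*
 * RCU callback: once no reader can still see the node, stop the path
 * timer, drop the per-interface path count and free both the mesh_path
 * and its table node.
 */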
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int hash_idx;
        int err = 0;

        read_lock(&pathtbl_resize_lock);
        hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
        bucket = &mesh_paths->hash_buckets[hash_idx];

        spin_lock(&mesh_paths->hashwlock[hash_idx]);
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                                memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags |= MESH_PATH_RESOLVING;
                        hlist_del_rcu(&node->list);
                        call_rcu(&node->rcu, mesh_path_node_reclaim);
                        atomic_dec(&mesh_paths->entries);
                        spin_unlock_bh(&mpath->state_lock);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        mesh_paths_generation++;
        spin_unlock(&mesh_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                &mpath->frame_queue);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mesh_path *mpath;
        u32 sn = 0;

        if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
                u8 *ra, *da;

                da = hdr->addr3;
                ra = hdr->addr1;
                mpath = mesh_path_lookup(da, sdata);
                if (mpath)
                        sn = ++mpath->sn;
                mesh_path_error_tx(MESH_TTL, skb->data, cpu_to_le32(sn),
                                PERR_RCODE_NO_ROUTE, ra, sdata);
        }

        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) &&
                        (mpath->flags & MESH_PATH_ACTIVE))
                mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must NOT be called holding mpath->state_lock;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}
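
/*
 * free_node callback for the path tables: unlink the node and free the
 * mesh_path itself only when @free_leafs is set (i.e. the table being
 * freed owns the leaves).
 */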
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs)
                kfree(mpath);
        kfree(node);
}
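
/*
 * copy_node callback used while growing a table: allocate a new node
 * pointing at the same mesh_path and hash it into @newtbl.
 */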
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                        &newtbl->hash_buckets[hash_idx]);
        return 0;
}
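
/* Allocate the mesh and MPP path tables and install their node callbacks */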
int mesh_pathtbl_init(void)
{
        mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!mesh_paths)
                return -ENOMEM;
        mesh_paths->free_node = &mesh_path_node_free;
        mesh_paths->copy_node = &mesh_path_node_copy;
        mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

        mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!mpp_paths) {
                mesh_table_free(mesh_paths, true);
                return -ENOMEM;
        }
        mpp_paths->free_node = &mesh_path_node_free;
        mpp_paths->copy_node = &mesh_path_node_copy;
        mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

        return 0;
}
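
/**
 * mesh_path_expire - delete the expired mesh paths of a given interface
 *
 * @sdata: interface whose paths should be checked for expiration
 *
 * A path is deleted once MESH_PATH_EXPIRE has elapsed past its expiration
 * time, unless it is fixed or currently being resolved.
 */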
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        read_lock(&pathtbl_resize_lock);
        for_each_mesh_entry(mesh_paths, p, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                spin_lock_bh(&mpath->state_lock);
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                    time_after(jiffies,
                                mpath->exp_time + MESH_PATH_EXPIRE)) {
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_del(mpath->dst, mpath->sdata);
                } else
                        spin_unlock_bh(&mpath->state_lock);
        }
        read_unlock(&pathtbl_resize_lock);
}
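
/* Free both tables, including the mesh_path structures themselves */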
void mesh_pathtbl_unregister(void)
{
        mesh_table_free(mesh_paths, true);
        mesh_table_free(mpp_paths, true);
}