main.c

  1. /*
  2. * Copyright 2002-2005, Instant802 Networks, Inc.
  3. * Copyright 2005-2006, Devicescape Software, Inc.
  4. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <net/mac80211.h>
  11. #include <net/ieee80211_radiotap.h>
  12. #include <linux/module.h>
  13. #include <linux/init.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/types.h>
  16. #include <linux/slab.h>
  17. #include <linux/skbuff.h>
  18. #include <linux/etherdevice.h>
  19. #include <linux/if_arp.h>
  20. #include <linux/wireless.h>
  21. #include <linux/rtnetlink.h>
  22. #include <linux/bitmap.h>
  23. #include <net/net_namespace.h>
  24. #include <net/cfg80211.h>
  25. #include "ieee80211_i.h"
  26. #include "rate.h"
  27. #include "mesh.h"
  28. #include "wep.h"
  29. #include "wme.h"
  30. #include "aes_ccm.h"
  31. #include "led.h"
  32. #include "cfg.h"
  33. #include "debugfs.h"
  34. #include "debugfs_netdev.h"
  35. /*
  36. * For seeing transmitted packets on monitor interfaces
  37. * we have a radiotap header too.
  38. */
  39. struct ieee80211_tx_status_rtap_hdr {
  40. struct ieee80211_radiotap_header hdr;
  41. __le16 tx_flags;
  42. u8 data_retries;
  43. } __attribute__ ((packed));
  44. /* common interface routines */
  45. static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
  46. {
  47. memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
  48. return ETH_ALEN;
  49. }
  50. /* must be called under mdev tx lock */
  51. static void ieee80211_configure_filter(struct ieee80211_local *local)
  52. {
  53. unsigned int changed_flags;
  54. unsigned int new_flags = 0;
  55. if (atomic_read(&local->iff_promiscs))
  56. new_flags |= FIF_PROMISC_IN_BSS;
  57. if (atomic_read(&local->iff_allmultis))
  58. new_flags |= FIF_ALLMULTI;
  59. if (local->monitors)
  60. new_flags |= FIF_BCN_PRBRESP_PROMISC;
  61. if (local->fif_fcsfail)
  62. new_flags |= FIF_FCSFAIL;
  63. if (local->fif_plcpfail)
  64. new_flags |= FIF_PLCPFAIL;
  65. if (local->fif_control)
  66. new_flags |= FIF_CONTROL;
  67. if (local->fif_other_bss)
  68. new_flags |= FIF_OTHER_BSS;
  69. changed_flags = local->filter_flags ^ new_flags;
  70. /* be a bit nasty */
  71. new_flags |= (1<<31);
  72. local->ops->configure_filter(local_to_hw(local),
  73. changed_flags, &new_flags,
  74. local->mdev->mc_count,
  75. local->mdev->mc_list);
  76. WARN_ON(new_flags & (1<<31));
  77. local->filter_flags = new_flags & ~(1<<31);
  78. }
  79. /* master interface */
  80. static int ieee80211_master_open(struct net_device *dev)
  81. {
  82. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  83. struct ieee80211_sub_if_data *sdata;
  84. int res = -EOPNOTSUPP;
  85. /* we hold the RTNL here so can safely walk the list */
  86. list_for_each_entry(sdata, &local->interfaces, list) {
  87. if (sdata->dev != dev && netif_running(sdata->dev)) {
  88. res = 0;
  89. break;
  90. }
  91. }
  92. if (res)
  93. return res;
  94. netif_start_queue(local->mdev);
  95. return 0;
  96. }
  97. static int ieee80211_master_stop(struct net_device *dev)
  98. {
  99. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  100. struct ieee80211_sub_if_data *sdata;
  101. /* we hold the RTNL here so can safely walk the list */
  102. list_for_each_entry(sdata, &local->interfaces, list)
  103. if (sdata->dev != dev && netif_running(sdata->dev))
  104. dev_close(sdata->dev);
  105. return 0;
  106. }
  107. static void ieee80211_master_set_multicast_list(struct net_device *dev)
  108. {
  109. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  110. ieee80211_configure_filter(local);
  111. }
  112. /* regular interfaces */
  113. static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
  114. {
  115. int meshhdrlen;
  116. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  117. meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;
  118. /* FIX: what would be proper limits for MTU?
  119. * This interface uses 802.3 frames. */
  120. if (new_mtu < 256 ||
  121. new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
  122. return -EINVAL;
  123. }
  124. #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
  125. printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
  126. #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
  127. dev->mtu = new_mtu;
  128. return 0;
  129. }
  130. static inline int identical_mac_addr_allowed(int type1, int type2)
  131. {
  132. return (type1 == IEEE80211_IF_TYPE_MNTR ||
  133. type2 == IEEE80211_IF_TYPE_MNTR ||
  134. (type1 == IEEE80211_IF_TYPE_AP &&
  135. type2 == IEEE80211_IF_TYPE_WDS) ||
  136. (type1 == IEEE80211_IF_TYPE_WDS &&
  137. (type2 == IEEE80211_IF_TYPE_WDS ||
  138. type2 == IEEE80211_IF_TYPE_AP)) ||
  139. (type1 == IEEE80211_IF_TYPE_AP &&
  140. type2 == IEEE80211_IF_TYPE_VLAN) ||
  141. (type1 == IEEE80211_IF_TYPE_VLAN &&
  142. (type2 == IEEE80211_IF_TYPE_AP ||
  143. type2 == IEEE80211_IF_TYPE_VLAN)));
  144. }
  145. static int ieee80211_open(struct net_device *dev)
  146. {
  147. struct ieee80211_sub_if_data *sdata, *nsdata;
  148. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  149. struct ieee80211_if_init_conf conf;
  150. int res;
  151. bool need_hw_reconfig = 0;
  152. struct sta_info *sta;
  153. sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  154. /* we hold the RTNL here so can safely walk the list */
  155. list_for_each_entry(nsdata, &local->interfaces, list) {
  156. struct net_device *ndev = nsdata->dev;
  157. if (ndev != dev && ndev != local->mdev && netif_running(ndev)) {
  158. /*
  159. * Allow only a single IBSS interface to be up at any
  160. * time. This is restricted because beacon distribution
  161. * cannot work properly if both are in the same IBSS.
  162. *
  163. * To remove this restriction we'd have to disallow them
  164. * from setting the same SSID on different IBSS interfaces
  165. * belonging to the same hardware. Then, however, we're
  166. * faced with having to adopt two different TSF timers...
  167. */
  168. if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
  169. nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
  170. return -EBUSY;
  171. /*
  172. * Disallow multiple IBSS/STA mode interfaces.
  173. *
  174. * This is a technical restriction; it is possible, although
  175. * most likely not IEEE 802.11 compliant, to have multiple
  176. * STAs with just a single piece of hardware (the TSF timer will
  177. * not be adjusted properly).
  178. *
  179. * However, because mac80211 uses the master device's BSS
  180. * information for each STA/IBSS interface, doing this will
  181. * currently corrupt that BSS information completely, unless,
  182. * a not very useful case, both STAs are associated to the
  183. * same BSS.
  184. *
  185. * To remove this restriction, the BSS information needs to
  186. * be embedded in the STA/IBSS mode sdata instead of using
  187. * the master device's BSS structure.
  188. */
  189. if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
  190. sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
  191. (nsdata->vif.type == IEEE80211_IF_TYPE_STA ||
  192. nsdata->vif.type == IEEE80211_IF_TYPE_IBSS))
  193. return -EBUSY;
  194. /*
  195. * The remaining checks are only performed for interfaces
  196. * with the same MAC address.
  197. */
  198. if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
  199. continue;
  200. /*
  201. * check whether it may have the same address
  202. */
  203. if (!identical_mac_addr_allowed(sdata->vif.type,
  204. nsdata->vif.type))
  205. return -ENOTUNIQ;
  206. /*
  207. * can only add VLANs to enabled APs
  208. */
  209. if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
  210. nsdata->vif.type == IEEE80211_IF_TYPE_AP)
  211. sdata->u.vlan.ap = nsdata;
  212. }
  213. }
  214. switch (sdata->vif.type) {
  215. case IEEE80211_IF_TYPE_WDS:
  216. if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
  217. return -ENOLINK;
  218. break;
  219. case IEEE80211_IF_TYPE_VLAN:
  220. if (!sdata->u.vlan.ap)
  221. return -ENOLINK;
  222. break;
  223. case IEEE80211_IF_TYPE_AP:
  224. case IEEE80211_IF_TYPE_STA:
  225. case IEEE80211_IF_TYPE_MNTR:
  226. case IEEE80211_IF_TYPE_IBSS:
  227. case IEEE80211_IF_TYPE_MESH_POINT:
  228. /* no special treatment */
  229. break;
  230. case IEEE80211_IF_TYPE_INVALID:
  231. /* cannot happen */
  232. WARN_ON(1);
  233. break;
  234. }
  235. if (local->open_count == 0) {
  236. res = 0;
  237. if (local->ops->start)
  238. res = local->ops->start(local_to_hw(local));
  239. if (res)
  240. return res;
  241. need_hw_reconfig = 1;
  242. ieee80211_led_radio(local, local->hw.conf.radio_enabled);
  243. }
  244. switch (sdata->vif.type) {
  245. case IEEE80211_IF_TYPE_VLAN:
  246. list_add(&sdata->u.vlan.list, &sdata->u.vlan.ap->u.ap.vlans);
  247. /* no need to tell driver */
  248. break;
  249. case IEEE80211_IF_TYPE_MNTR:
  250. if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
  251. local->cooked_mntrs++;
  252. break;
  253. }
  254. /* must be before the call to ieee80211_configure_filter */
  255. local->monitors++;
  256. if (local->monitors == 1)
  257. local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
  258. if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
  259. local->fif_fcsfail++;
  260. if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
  261. local->fif_plcpfail++;
  262. if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
  263. local->fif_control++;
  264. if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
  265. local->fif_other_bss++;
  266. netif_tx_lock_bh(local->mdev);
  267. ieee80211_configure_filter(local);
  268. netif_tx_unlock_bh(local->mdev);
  269. break;
  270. case IEEE80211_IF_TYPE_STA:
  271. case IEEE80211_IF_TYPE_IBSS:
  272. sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
  273. /* fall through */
  274. default:
  275. conf.vif = &sdata->vif;
  276. conf.type = sdata->vif.type;
  277. conf.mac_addr = dev->dev_addr;
  278. res = local->ops->add_interface(local_to_hw(local), &conf);
  279. if (res)
  280. goto err_stop;
  281. ieee80211_if_config(dev);
  282. ieee80211_reset_erp_info(dev);
  283. ieee80211_enable_keys(sdata);
  284. if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
  285. !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
  286. netif_carrier_off(dev);
  287. else
  288. netif_carrier_on(dev);
  289. }
  290. if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
  291. /* Create STA entry for the WDS peer */
  292. sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
  293. GFP_KERNEL);
  294. if (!sta) {
  295. res = -ENOMEM;
  296. goto err_del_interface;
  297. }
  298. /* no locking required since STA is not live yet */
  299. sta->flags |= WLAN_STA_AUTHORIZED;
  300. res = sta_info_insert(sta);
  301. if (res) {
  302. /* STA has been freed */
  303. goto err_del_interface;
  304. }
  305. }
  306. if (local->open_count == 0) {
  307. res = dev_open(local->mdev);
  308. WARN_ON(res);
  309. if (res)
  310. goto err_del_interface;
  311. tasklet_enable(&local->tx_pending_tasklet);
  312. tasklet_enable(&local->tasklet);
  313. }
  314. /*
  315. * set_multicast_list will be invoked by the networking core
  316. * which will check whether any increments here were done in
  317. * error and sync them down to the hardware as filter flags.
  318. */
  319. if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
  320. atomic_inc(&local->iff_allmultis);
  321. if (sdata->flags & IEEE80211_SDATA_PROMISC)
  322. atomic_inc(&local->iff_promiscs);
  323. local->open_count++;
  324. if (need_hw_reconfig)
  325. ieee80211_hw_config(local);
  326. /*
  327. * ieee80211_sta_work is disabled while network interface
  328. * is down. Therefore, some configuration changes may not
  329. * yet be effective. Trigger execution of ieee80211_sta_work
  330. * to fix this.
  331. */
  332. if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
  333. sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
  334. struct ieee80211_if_sta *ifsta = &sdata->u.sta;
  335. queue_work(local->hw.workqueue, &ifsta->work);
  336. }
  337. netif_start_queue(dev);
  338. return 0;
  339. err_del_interface:
  340. local->ops->remove_interface(local_to_hw(local), &conf);
  341. err_stop:
  342. if (!local->open_count && local->ops->stop)
  343. local->ops->stop(local_to_hw(local));
  344. return res;
  345. }
  346. static int ieee80211_stop(struct net_device *dev)
  347. {
  348. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  349. struct ieee80211_local *local = sdata->local;
  350. struct ieee80211_if_init_conf conf;
  351. struct sta_info *sta;
  352. /*
  353. * Stop TX on this interface first.
  354. */
  355. netif_stop_queue(dev);
  356. /*
  357. * Now delete all active aggregation sessions.
  358. */
  359. rcu_read_lock();
  360. list_for_each_entry_rcu(sta, &local->sta_list, list) {
  361. if (sta->sdata == sdata)
  362. ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
  363. }
  364. rcu_read_unlock();
  365. /*
  366. * Remove all stations associated with this interface.
  367. *
  368. * This must be done before calling ops->remove_interface()
  369. * because otherwise we can later invoke ops->sta_notify()
  370. * whenever the STAs are removed, and that invalidates driver
  371. * assumptions about always getting a vif pointer that is valid
  372. * (because if we remove a STA after ops->remove_interface()
  373. * the driver will have removed the vif info already!)
  374. *
  375. * We could relax this and only unlink the stations from the
  376. * hash table and list but keep them on a per-sdata list that
  377. * will be inserted back again when the interface is brought
  378. * up again, but I don't currently see a use case for that,
  379. * except with WDS which gets a STA entry created when it is
  380. * brought up.
  381. */
  382. sta_info_flush(local, sdata);
  383. /*
  384. * Don't count this interface for promisc/allmulti while it
  385. * is down. dev_mc_unsync() will invoke set_multicast_list
  386. * on the master interface which will sync these down to the
  387. * hardware as filter flags.
  388. */
  389. if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
  390. atomic_dec(&local->iff_allmultis);
  391. if (sdata->flags & IEEE80211_SDATA_PROMISC)
  392. atomic_dec(&local->iff_promiscs);
  393. dev_mc_unsync(local->mdev, dev);
  394. /* APs need special treatment */
  395. if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
  396. struct ieee80211_sub_if_data *vlan, *tmp;
  397. struct beacon_data *old_beacon = sdata->u.ap.beacon;
  398. /* remove beacon */
  399. rcu_assign_pointer(sdata->u.ap.beacon, NULL);
  400. synchronize_rcu();
  401. kfree(old_beacon);
  402. /* down all dependent devices, that is VLANs */
  403. list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
  404. u.vlan.list)
  405. dev_close(vlan->dev);
  406. WARN_ON(!list_empty(&sdata->u.ap.vlans));
  407. }
  408. local->open_count--;
  409. switch (sdata->vif.type) {
  410. case IEEE80211_IF_TYPE_VLAN:
  411. list_del(&sdata->u.vlan.list);
  412. sdata->u.vlan.ap = NULL;
  413. /* no need to tell driver */
  414. break;
  415. case IEEE80211_IF_TYPE_MNTR:
  416. if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
  417. local->cooked_mntrs--;
  418. break;
  419. }
  420. local->monitors--;
  421. if (local->monitors == 0)
  422. local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
  423. if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
  424. local->fif_fcsfail--;
  425. if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
  426. local->fif_plcpfail--;
  427. if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
  428. local->fif_control--;
  429. if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
  430. local->fif_other_bss--;
  431. netif_tx_lock_bh(local->mdev);
  432. ieee80211_configure_filter(local);
  433. netif_tx_unlock_bh(local->mdev);
  434. break;
  435. case IEEE80211_IF_TYPE_MESH_POINT:
  436. case IEEE80211_IF_TYPE_STA:
  437. case IEEE80211_IF_TYPE_IBSS:
  438. sdata->u.sta.state = IEEE80211_DISABLED;
  439. memset(sdata->u.sta.bssid, 0, ETH_ALEN);
  440. del_timer_sync(&sdata->u.sta.timer);
  441. /*
  442. * When we get here, the interface is marked down.
  443. * Call synchronize_rcu() to wait for the RX path
  444. * should it be using the interface and enqueuing
  445. * frames at this very time on another CPU.
  446. */
  447. synchronize_rcu();
  448. skb_queue_purge(&sdata->u.sta.skb_queue);
  449. if (local->scan_dev == sdata->dev) {
  450. if (!local->ops->hw_scan) {
  451. local->sta_sw_scanning = 0;
  452. cancel_delayed_work(&local->scan_work);
  453. } else
  454. local->sta_hw_scanning = 0;
  455. }
  456. flush_workqueue(local->hw.workqueue);
  457. sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
  458. kfree(sdata->u.sta.extra_ie);
  459. sdata->u.sta.extra_ie = NULL;
  460. sdata->u.sta.extra_ie_len = 0;
  461. /* fall through */
  462. default:
  463. conf.vif = &sdata->vif;
  464. conf.type = sdata->vif.type;
  465. conf.mac_addr = dev->dev_addr;
  466. /* disable all keys for as long as this netdev is down */
  467. ieee80211_disable_keys(sdata);
  468. local->ops->remove_interface(local_to_hw(local), &conf);
  469. }
  470. if (local->open_count == 0) {
  471. if (netif_running(local->mdev))
  472. dev_close(local->mdev);
  473. if (local->ops->stop)
  474. local->ops->stop(local_to_hw(local));
  475. ieee80211_led_radio(local, 0);
  476. tasklet_disable(&local->tx_pending_tasklet);
  477. tasklet_disable(&local->tasklet);
  478. }
  479. return 0;
  480. }
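/*
 * Start a TX block-ack (A-MPDU aggregation) session with the station
 * at @ra for TID @tid: allocate the per-TID aggregation state, set up
 * the ADDBA response timer, move pending frames onto a dedicated
 * aggregation queue, notify the driver via ampdu_action() and send an
 * ADDBA request to the peer.
 */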
  481. int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
  482. {
  483. struct ieee80211_local *local = hw_to_local(hw);
  484. struct sta_info *sta;
  485. struct ieee80211_sub_if_data *sdata;
  486. u16 start_seq_num = 0;
  487. u8 *state;
  488. int ret;
  489. DECLARE_MAC_BUF(mac);
  490. if (tid >= STA_TID_NUM)
  491. return -EINVAL;
  492. #ifdef CONFIG_MAC80211_HT_DEBUG
  493. printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
  494. print_mac(mac, ra), tid);
  495. #endif /* CONFIG_MAC80211_HT_DEBUG */
  496. rcu_read_lock();
  497. sta = sta_info_get(local, ra);
  498. if (!sta) {
  499. #ifdef CONFIG_MAC80211_HT_DEBUG
  500. printk(KERN_DEBUG "Could not find the station\n");
  501. #endif
  502. ret = -ENOENT;
  503. goto exit;
  504. }
  505. spin_lock_bh(&sta->lock);
  506. /* we have tried too many times, receiver does not want A-MPDU */
  507. if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
  508. ret = -EBUSY;
  509. goto err_unlock_sta;
  510. }
  511. state = &sta->ampdu_mlme.tid_state_tx[tid];
  512. /* check if the TID is not in aggregation flow already */
  513. if (*state != HT_AGG_STATE_IDLE) {
  514. #ifdef CONFIG_MAC80211_HT_DEBUG
  515. printk(KERN_DEBUG "BA request denied - session is not "
  516. "idle on tid %u\n", tid);
  517. #endif /* CONFIG_MAC80211_HT_DEBUG */
  518. ret = -EAGAIN;
  519. goto err_unlock_sta;
  520. }
  521. /* prepare A-MPDU MLME for Tx aggregation */
  522. sta->ampdu_mlme.tid_tx[tid] =
  523. kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
  524. if (!sta->ampdu_mlme.tid_tx[tid]) {
  525. #ifdef CONFIG_MAC80211_HT_DEBUG
  526. if (net_ratelimit())
  527. printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
  528. tid);
  529. #endif
  530. ret = -ENOMEM;
  531. goto err_unlock_sta;
  532. }
  533. /* Tx timer */
  534. sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
  535. sta_addba_resp_timer_expired;
  536. sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
  537. (unsigned long)&sta->timer_to_tid[tid];
  538. init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
  539. /* ensure that TX flow won't interrupt us
  540. * until the end of the call to the requeue function */
  541. spin_lock_bh(&local->mdev->queue_lock);
  542. /* create a new queue for this aggregation */
  543. ret = ieee80211_ht_agg_queue_add(local, sta, tid);
  544. /* in case no queue is available for aggregation,
  545. * don't switch to aggregation */
  546. if (ret) {
  547. #ifdef CONFIG_MAC80211_HT_DEBUG
  548. printk(KERN_DEBUG "BA request denied - queue unavailable for"
  549. " tid %d\n", tid);
  550. #endif /* CONFIG_MAC80211_HT_DEBUG */
  551. goto err_unlock_queue;
  552. }
  553. sdata = sta->sdata;
  554. /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
  555. * callback right away, it must see that the flow has begun */
  556. *state |= HT_ADDBA_REQUESTED_MSK;
  557. if (local->ops->ampdu_action)
  558. ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
  559. ra, tid, &start_seq_num);
  560. if (ret) {
  561. /* No need to requeue the packets in the agg queue, since we
  562. * held the tx lock: no packet could be enqueued to the newly
  563. * allocated queue */
  564. ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
  565. #ifdef CONFIG_MAC80211_HT_DEBUG
  566. printk(KERN_DEBUG "BA request denied - HW unavailable for"
  567. " tid %d\n", tid);
  568. #endif /* CONFIG_MAC80211_HT_DEBUG */
  569. *state = HT_AGG_STATE_IDLE;
  570. goto err_unlock_queue;
  571. }
  572. /* Will put all the packets in the new SW queue */
  573. ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
  574. spin_unlock_bh(&local->mdev->queue_lock);
  575. spin_unlock_bh(&sta->lock);
  576. /* send an addBA request */
  577. sta->ampdu_mlme.dialog_token_allocator++;
  578. sta->ampdu_mlme.tid_tx[tid]->dialog_token =
  579. sta->ampdu_mlme.dialog_token_allocator;
  580. sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
  581. ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
  582. sta->ampdu_mlme.tid_tx[tid]->dialog_token,
  583. sta->ampdu_mlme.tid_tx[tid]->ssn,
  584. 0x40, 5000);
  585. /* activate the timer for the recipient's addBA response */
  586. sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
  587. jiffies + ADDBA_RESP_INTERVAL;
  588. add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
  589. #ifdef CONFIG_MAC80211_HT_DEBUG
  590. printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
  591. #endif
  592. goto exit;
  593. err_unlock_queue:
  594. kfree(sta->ampdu_mlme.tid_tx[tid]);
  595. sta->ampdu_mlme.tid_tx[tid] = NULL;
  596. spin_unlock_bh(&local->mdev->queue_lock);
  597. ret = -EBUSY;
  598. err_unlock_sta:
  599. spin_unlock_bh(&sta->lock);
  600. exit:
  601. rcu_read_unlock();
  602. return ret;
  603. }
  604. EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
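/*
 * Tear down an operational TX block-ack session for @ra/@tid: stop the
 * aggregation queue, mark the session as being stopped and notify the
 * driver via ampdu_action().
 */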
  605. int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
  606. u8 *ra, u16 tid,
  607. enum ieee80211_back_parties initiator)
  608. {
  609. struct ieee80211_local *local = hw_to_local(hw);
  610. struct sta_info *sta;
  611. u8 *state;
  612. int ret = 0;
  613. DECLARE_MAC_BUF(mac);
  614. if (tid >= STA_TID_NUM)
  615. return -EINVAL;
  616. rcu_read_lock();
  617. sta = sta_info_get(local, ra);
  618. if (!sta) {
  619. rcu_read_unlock();
  620. return -ENOENT;
  621. }
  622. /* check if the TID is in aggregation */
  623. state = &sta->ampdu_mlme.tid_state_tx[tid];
  624. spin_lock_bh(&sta->lock);
  625. if (*state != HT_AGG_STATE_OPERATIONAL) {
  626. ret = -ENOENT;
  627. goto stop_BA_exit;
  628. }
  629. #ifdef CONFIG_MAC80211_HT_DEBUG
  630. printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
  631. print_mac(mac, ra), tid);
  632. #endif /* CONFIG_MAC80211_HT_DEBUG */
  633. ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
  634. *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
  635. (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
  636. if (local->ops->ampdu_action)
  637. ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
  638. ra, tid, NULL);
  639. /* in case HW denied going back to legacy */
  640. if (ret) {
  641. WARN_ON(ret != -EBUSY);
  642. *state = HT_AGG_STATE_OPERATIONAL;
  643. ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
  644. goto stop_BA_exit;
  645. }
  646. stop_BA_exit:
  647. spin_unlock_bh(&sta->lock);
  648. rcu_read_unlock();
  649. return ret;
  650. }
  651. EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
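/*
 * Driver callback: the hardware is ready for TX aggregation on @ra/@tid.
 * If the peer's ADDBA response has already arrived, the session becomes
 * operational and the aggregation queue is woken.
 */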
  652. void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
  653. {
  654. struct ieee80211_local *local = hw_to_local(hw);
  655. struct sta_info *sta;
  656. u8 *state;
  657. DECLARE_MAC_BUF(mac);
  658. if (tid >= STA_TID_NUM) {
  659. #ifdef CONFIG_MAC80211_HT_DEBUG
  660. printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
  661. tid, STA_TID_NUM);
  662. #endif
  663. return;
  664. }
  665. rcu_read_lock();
  666. sta = sta_info_get(local, ra);
  667. if (!sta) {
  668. rcu_read_unlock();
  669. #ifdef CONFIG_MAC80211_HT_DEBUG
  670. printk(KERN_DEBUG "Could not find station: %s\n",
  671. print_mac(mac, ra));
  672. #endif
  673. return;
  674. }
  675. state = &sta->ampdu_mlme.tid_state_tx[tid];
  676. spin_lock_bh(&sta->lock);
  677. if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
  678. #ifdef CONFIG_MAC80211_HT_DEBUG
  679. printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
  680. *state);
  681. #endif
  682. spin_unlock_bh(&sta->lock);
  683. rcu_read_unlock();
  684. return;
  685. }
  686. WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
  687. *state |= HT_ADDBA_DRV_READY_MSK;
  688. if (*state == HT_AGG_STATE_OPERATIONAL) {
  689. #ifdef CONFIG_MAC80211_HT_DEBUG
  690. printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
  691. #endif
  692. ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
  693. }
  694. spin_unlock_bh(&sta->lock);
  695. rcu_read_unlock();
  696. }
  697. EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
  698. void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
  699. {
  700. struct ieee80211_local *local = hw_to_local(hw);
  701. struct sta_info *sta;
  702. u8 *state;
  703. int agg_queue;
  704. DECLARE_MAC_BUF(mac);
  705. if (tid >= STA_TID_NUM) {
  706. #ifdef CONFIG_MAC80211_HT_DEBUG
  707. printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
  708. tid, STA_TID_NUM);
  709. #endif
  710. return;
  711. }
  712. #ifdef CONFIG_MAC80211_HT_DEBUG
  713. printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
  714. print_mac(mac, ra), tid);
  715. #endif /* CONFIG_MAC80211_HT_DEBUG */
  716. rcu_read_lock();
  717. sta = sta_info_get(local, ra);
  718. if (!sta) {
  719. #ifdef CONFIG_MAC80211_HT_DEBUG
  720. printk(KERN_DEBUG "Could not find station: %s\n",
  721. print_mac(mac, ra));
  722. #endif
  723. rcu_read_unlock();
  724. return;
  725. }
  726. state = &sta->ampdu_mlme.tid_state_tx[tid];
  727. /* NOTE: no need to use sta->lock in this state check, as
  728. * ieee80211_stop_tx_ba_session will let only
  729. * one stop call pass through per sta/tid */
  730. if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
  731. #ifdef CONFIG_MAC80211_HT_DEBUG
  732. printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
  733. #endif
  734. rcu_read_unlock();
  735. return;
  736. }
  737. if (*state & HT_AGG_STATE_INITIATOR_MSK)
  738. ieee80211_send_delba(sta->sdata->dev, ra, tid,
  739. WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
  740. agg_queue = sta->tid_to_tx_q[tid];
  741. /* avoid ordering issues: we are the only one that can modify
  742. * the content of the qdiscs */
  743. spin_lock_bh(&local->mdev->queue_lock);
  744. /* remove the queue for this aggregation */
  745. ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
  746. spin_unlock_bh(&local->mdev->queue_lock);
  747. /* we just requeued all the frames that were in the removed
  748. * queue, and since we might miss a softirq we do netif_schedule.
  749. * ieee80211_wake_queue is not used here as this queue is not
  750. * necessarily stopped */
  751. netif_schedule(local->mdev);
  752. spin_lock_bh(&sta->lock);
  753. *state = HT_AGG_STATE_IDLE;
  754. sta->ampdu_mlme.addba_req_num[tid] = 0;
  755. kfree(sta->ampdu_mlme.tid_tx[tid]);
  756. sta->ampdu_mlme.tid_tx[tid] = NULL;
  757. spin_unlock_bh(&sta->lock);
  758. rcu_read_unlock();
  759. }
  760. EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
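/*
 * IRQ-safe versions of the two callbacks above: the RA/TID pair is
 * packed into a zero-length skb and handed to the tasklet, which then
 * invokes the non-IRQ-safe callback in softirq context.
 */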
  761. void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
  762. const u8 *ra, u16 tid)
  763. {
  764. struct ieee80211_local *local = hw_to_local(hw);
  765. struct ieee80211_ra_tid *ra_tid;
  766. struct sk_buff *skb = dev_alloc_skb(0);
  767. if (unlikely(!skb)) {
  768. #ifdef CONFIG_MAC80211_HT_DEBUG
  769. if (net_ratelimit())
  770. printk(KERN_WARNING "%s: Not enough memory, "
  771. "dropping start BA session", skb->dev->name);
  772. #endif
  773. return;
  774. }
  775. ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
  776. memcpy(&ra_tid->ra, ra, ETH_ALEN);
  777. ra_tid->tid = tid;
  778. skb->pkt_type = IEEE80211_ADDBA_MSG;
  779. skb_queue_tail(&local->skb_queue, skb);
  780. tasklet_schedule(&local->tasklet);
  781. }
  782. EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
  783. void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
  784. const u8 *ra, u16 tid)
  785. {
  786. struct ieee80211_local *local = hw_to_local(hw);
  787. struct ieee80211_ra_tid *ra_tid;
  788. struct sk_buff *skb = dev_alloc_skb(0);
  789. if (unlikely(!skb)) {
  790. #ifdef CONFIG_MAC80211_HT_DEBUG
  791. if (net_ratelimit())
  792. printk(KERN_WARNING "%s: Not enough memory, "
  793. "dropping stop BA session", skb->dev->name);
  794. #endif
  795. return;
  796. }
  797. ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
  798. memcpy(&ra_tid->ra, ra, ETH_ALEN);
  799. ra_tid->tid = tid;
  800. skb->pkt_type = IEEE80211_DELBA_MSG;
  801. skb_queue_tail(&local->skb_queue, skb);
  802. tasklet_schedule(&local->tasklet);
  803. }
  804. EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
  805. static void ieee80211_set_multicast_list(struct net_device *dev)
  806. {
  807. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  808. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  809. int allmulti, promisc, sdata_allmulti, sdata_promisc;
  810. allmulti = !!(dev->flags & IFF_ALLMULTI);
  811. promisc = !!(dev->flags & IFF_PROMISC);
  812. sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
  813. sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
  814. if (allmulti != sdata_allmulti) {
  815. if (dev->flags & IFF_ALLMULTI)
  816. atomic_inc(&local->iff_allmultis);
  817. else
  818. atomic_dec(&local->iff_allmultis);
  819. sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
  820. }
  821. if (promisc != sdata_promisc) {
  822. if (dev->flags & IFF_PROMISC)
  823. atomic_inc(&local->iff_promiscs);
  824. else
  825. atomic_dec(&local->iff_promiscs);
  826. sdata->flags ^= IEEE80211_SDATA_PROMISC;
  827. }
  828. dev_mc_sync(local->mdev, dev);
  829. }
  830. static const struct header_ops ieee80211_header_ops = {
  831. .create = eth_header,
  832. .parse = header_parse_80211,
  833. .rebuild = eth_rebuild_header,
  834. .cache = eth_header_cache,
  835. .cache_update = eth_header_cache_update,
  836. };
  837. /* Must not be called for mdev */
  838. void ieee80211_if_setup(struct net_device *dev)
  839. {
  840. ether_setup(dev);
  841. dev->hard_start_xmit = ieee80211_subif_start_xmit;
  842. dev->wireless_handlers = &ieee80211_iw_handler_def;
  843. dev->set_multicast_list = ieee80211_set_multicast_list;
  844. dev->change_mtu = ieee80211_change_mtu;
  845. dev->open = ieee80211_open;
  846. dev->stop = ieee80211_stop;
  847. dev->destructor = ieee80211_if_free;
  848. }
  849. /* everything else */
  850. static int __ieee80211_if_config(struct net_device *dev,
  851. struct sk_buff *beacon)
  852. {
  853. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  854. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  855. struct ieee80211_if_conf conf;
  856. if (!local->ops->config_interface || !netif_running(dev))
  857. return 0;
  858. memset(&conf, 0, sizeof(conf));
  859. conf.type = sdata->vif.type;
  860. if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
  861. sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
  862. conf.bssid = sdata->u.sta.bssid;
  863. conf.ssid = sdata->u.sta.ssid;
  864. conf.ssid_len = sdata->u.sta.ssid_len;
  865. } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
  866. conf.beacon = beacon;
  867. ieee80211_start_mesh(dev);
  868. } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
  869. conf.ssid = sdata->u.ap.ssid;
  870. conf.ssid_len = sdata->u.ap.ssid_len;
  871. conf.beacon = beacon;
  872. }
  873. return local->ops->config_interface(local_to_hw(local),
  874. &sdata->vif, &conf);
  875. }
  876. int ieee80211_if_config(struct net_device *dev)
  877. {
  878. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  879. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  880. if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
  881. (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
  882. return ieee80211_if_config_beacon(dev);
  883. return __ieee80211_if_config(dev, NULL);
  884. }
  885. int ieee80211_if_config_beacon(struct net_device *dev)
  886. {
  887. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  888. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  889. struct sk_buff *skb;
  890. if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
  891. return 0;
  892. skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif);
  893. if (!skb)
  894. return -ENOMEM;
  895. return __ieee80211_if_config(dev, skb);
  896. }
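/*
 * Push the current channel, TX power and antenna gain settings down to
 * the driver via ops->config(); only done while at least one interface
 * is up.
 */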
  897. int ieee80211_hw_config(struct ieee80211_local *local)
  898. {
  899. struct ieee80211_channel *chan;
  900. int ret = 0;
  901. if (local->sta_sw_scanning)
  902. chan = local->scan_channel;
  903. else
  904. chan = local->oper_channel;
  905. local->hw.conf.channel = chan;
  906. if (!local->hw.conf.power_level)
  907. local->hw.conf.power_level = chan->max_power;
  908. else
  909. local->hw.conf.power_level = min(chan->max_power,
  910. local->hw.conf.power_level);
  911. local->hw.conf.max_antenna_gain = chan->max_antenna_gain;
  912. #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
  913. printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
  914. wiphy_name(local->hw.wiphy), chan->center_freq);
  915. #endif
  916. if (local->open_count)
  917. ret = local->ops->config(local_to_hw(local), &local->hw.conf);
  918. return ret;
  919. }
  920. /**
  921. * ieee80211_handle_ht should be used only after legacy configuration
  922. * has been determined, namely the band, as HT configuration depends upon
  923. * the hardware's HT abilities for a _specific_ band.
  924. */
  925. u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
  926. struct ieee80211_ht_info *req_ht_cap,
  927. struct ieee80211_ht_bss_info *req_bss_cap)
  928. {
  929. struct ieee80211_conf *conf = &local->hw.conf;
  930. struct ieee80211_supported_band *sband;
  931. struct ieee80211_ht_info ht_conf;
  932. struct ieee80211_ht_bss_info ht_bss_conf;
  933. u32 changed = 0;
  934. int i;
  935. u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS;
  936. u8 tx_mcs_set_cap;
  937. sband = local->hw.wiphy->bands[conf->channel->band];
  938. memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
  939. memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));
  940. /* HT is not supported */
  941. if (!sband->ht_info.ht_supported) {
  942. conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
  943. goto out;
  944. }
  945. /* disable HT */
  946. if (!enable_ht) {
  947. if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
  948. changed |= BSS_CHANGED_HT;
  949. conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
  950. conf->ht_conf.ht_supported = 0;
  951. goto out;
  952. }
  953. if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
  954. changed |= BSS_CHANGED_HT;
  955. conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
  956. ht_conf.ht_supported = 1;
  957. ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
  958. ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
  959. ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
  960. ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
  961. ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
  962. ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
  963. ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
  964. ht_conf.ampdu_density = req_ht_cap->ampdu_density;
  965. /* Bits 96-100 */
  966. tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];
  967. /* configure supported Tx MCS according to requested MCS
  968. * (based in most cases on Rx capabilities of peer) and self
  969. * Tx MCS capabilities (as defined by low level driver HW
  970. * Tx capabilities) */
  971. if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED))
  972. goto check_changed;
  973. /* Counting from 0, therefore + 1 */
  974. if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
  975. max_tx_streams = ((tx_mcs_set_cap &
  976. IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;
  977. for (i = 0; i < max_tx_streams; i++)
  978. ht_conf.supp_mcs_set[i] =
  979. sband->ht_info.supp_mcs_set[i] &
  980. req_ht_cap->supp_mcs_set[i];
  981. if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM)
  982. for (i = IEEE80211_SUPP_MCS_SET_UEQM;
  983. i < IEEE80211_SUPP_MCS_SET_LEN; i++)
  984. ht_conf.supp_mcs_set[i] =
  985. sband->ht_info.supp_mcs_set[i] &
  986. req_ht_cap->supp_mcs_set[i];
  987. check_changed:
  988. /* if bss configuration changed store the new one */
  989. if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
  990. memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
  991. changed |= BSS_CHANGED_HT;
  992. memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
  993. memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
  994. }
  995. out:
  996. return changed;
  997. }
  998. void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
  999. u32 changed)
  1000. {
  1001. struct ieee80211_local *local = sdata->local;
  1002. if (!changed)
  1003. return;
  1004. if (local->ops->bss_info_changed)
  1005. local->ops->bss_info_changed(local_to_hw(local),
  1006. &sdata->vif,
  1007. &sdata->bss_conf,
  1008. changed);
  1009. }
  1010. void ieee80211_reset_erp_info(struct net_device *dev)
  1011. {
  1012. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  1013. sdata->bss_conf.use_cts_prot = 0;
  1014. sdata->bss_conf.use_short_preamble = 0;
  1015. ieee80211_bss_info_change_notify(sdata,
  1016. BSS_CHANGED_ERP_CTS_PROT |
  1017. BSS_CHANGED_ERP_PREAMBLE);
  1018. }
  1019. void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
  1020. struct sk_buff *skb)
  1021. {
  1022. struct ieee80211_local *local = hw_to_local(hw);
  1023. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1024. int tmp;
  1025. skb->dev = local->mdev;
  1026. skb->pkt_type = IEEE80211_TX_STATUS_MSG;
  1027. skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
  1028. &local->skb_queue : &local->skb_queue_unreliable, skb);
  1029. tmp = skb_queue_len(&local->skb_queue) +
  1030. skb_queue_len(&local->skb_queue_unreliable);
  1031. while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
  1032. (skb = skb_dequeue(&local->skb_queue_unreliable))) {
  1033. dev_kfree_skb_irq(skb);
  1034. tmp--;
  1035. I802_DEBUG_INC(local->tx_status_drop);
  1036. }
  1037. tasklet_schedule(&local->tasklet);
  1038. }
  1039. EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
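/*
 * Tasklet that drains local->skb_queue and local->skb_queue_unreliable
 * and dispatches each entry by pkt_type: received frames, TX status
 * and the aggregation start/stop messages queued by the *_irqsafe
 * helpers.
 */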
  1040. static void ieee80211_tasklet_handler(unsigned long data)
  1041. {
  1042. struct ieee80211_local *local = (struct ieee80211_local *) data;
  1043. struct sk_buff *skb;
  1044. struct ieee80211_rx_status rx_status;
  1045. struct ieee80211_ra_tid *ra_tid;
  1046. while ((skb = skb_dequeue(&local->skb_queue)) ||
  1047. (skb = skb_dequeue(&local->skb_queue_unreliable))) {
  1048. switch (skb->pkt_type) {
  1049. case IEEE80211_RX_MSG:
  1050. /* status is in skb->cb */
  1051. memcpy(&rx_status, skb->cb, sizeof(rx_status));
  1052. /* Clear skb->pkt_type in order to not confuse kernel
  1053. * netstack. */
  1054. skb->pkt_type = 0;
  1055. __ieee80211_rx(local_to_hw(local), skb, &rx_status);
  1056. break;
  1057. case IEEE80211_TX_STATUS_MSG:
  1058. skb->pkt_type = 0;
  1059. ieee80211_tx_status(local_to_hw(local), skb);
  1060. break;
  1061. case IEEE80211_DELBA_MSG:
  1062. ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
  1063. ieee80211_stop_tx_ba_cb(local_to_hw(local),
  1064. ra_tid->ra, ra_tid->tid);
  1065. dev_kfree_skb(skb);
  1066. break;
  1067. case IEEE80211_ADDBA_MSG:
  1068. ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
  1069. ieee80211_start_tx_ba_cb(local_to_hw(local),
  1070. ra_tid->ra, ra_tid->tid);
  1071. dev_kfree_skb(skb);
  1072. break;
  1073. default:
  1074. WARN_ON(1);
  1075. dev_kfree_skb(skb);
  1076. break;
  1077. }
  1078. }
  1079. }
  1080. /* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to
  1081. * make a prepared TX frame (one that has been given to hw) look like a brand
  1082. * new IEEE 802.11 frame that is ready to go through TX processing again.
  1083. * Also, tx_packet_data in cb is restored from tx_control. */
  1084. static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
  1085. struct ieee80211_key *key,
  1086. struct sk_buff *skb)
  1087. {
  1088. int hdrlen, iv_len, mic_len;
  1089. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1090. info->flags &= IEEE80211_TX_CTL_REQ_TX_STATUS |
  1091. IEEE80211_TX_CTL_DO_NOT_ENCRYPT |
  1092. IEEE80211_TX_CTL_REQUEUE |
  1093. IEEE80211_TX_CTL_EAPOL_FRAME;
  1094. hdrlen = ieee80211_get_hdrlen_from_skb(skb);
  1095. if (!key)
  1096. goto no_key;
  1097. switch (key->conf.alg) {
  1098. case ALG_WEP:
  1099. iv_len = WEP_IV_LEN;
  1100. mic_len = WEP_ICV_LEN;
  1101. break;
  1102. case ALG_TKIP:
  1103. iv_len = TKIP_IV_LEN;
  1104. mic_len = TKIP_ICV_LEN;
  1105. break;
  1106. case ALG_CCMP:
  1107. iv_len = CCMP_HDR_LEN;
  1108. mic_len = CCMP_MIC_LEN;
  1109. break;
  1110. default:
  1111. goto no_key;
  1112. }
  1113. if (skb->len >= mic_len &&
  1114. !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
  1115. skb_trim(skb, skb->len - mic_len);
  1116. if (skb->len >= iv_len && skb->len > hdrlen) {
  1117. memmove(skb->data + iv_len, skb->data, hdrlen);
  1118. skb_pull(skb, iv_len);
  1119. }
  1120. no_key:
  1121. {
  1122. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1123. u16 fc = le16_to_cpu(hdr->frame_control);
  1124. if ((fc & 0x8C) == 0x88) /* QoS Control Field */ {
  1125. fc &= ~IEEE80211_STYPE_QOS_DATA;
  1126. hdr->frame_control = cpu_to_le16(fc);
  1127. memmove(skb->data + 2, skb->data, hdrlen - 2);
  1128. skb_pull(skb, 2);
  1129. }
  1130. }
  1131. }
  1132. static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
  1133. struct sta_info *sta,
  1134. struct sk_buff *skb)
  1135. {
  1136. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1137. sta->tx_filtered_count++;
  1138. /*
  1139. * Clear the TX filter mask for this STA when sending the next
  1140. * packet. If the STA went to power save mode, this will happen
  1141. * when it wakes up for the next time.
  1142. */
  1143. set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
  1144. /*
  1145. * This code races in the following way:
  1146. *
  1147. * (1) STA sends frame indicating it will go to sleep and does so
  1148. * (2) hardware/firmware adds STA to filter list, passes frame up
  1149. * (3) hardware/firmware processes TX fifo and suppresses a frame
  1150. * (4) we get TX status before having processed the frame and
  1151. * knowing that the STA has gone to sleep.
  1152. *
  1153. * This is actually quite unlikely even when both those events are
  1154. * processed from interrupts coming in quickly after one another or
  1155. * even at the same time because we queue both TX status events and
  1156. * RX frames to be processed by a tasklet and process them in the
  1157. * same order that they were received or TX status last. Hence, there
  1158. * is no race as long as the frame RX is processed before the next TX
  1159. * status, which drivers can ensure, see below.
  1160. *
  1161. * Note that this can only happen if the hardware or firmware can
  1162. * actually add STAs to the filter list; if this is done by the
  1163. * driver in response to set_tim() (which will only reduce the race
  1164. * this whole filtering tries to solve, not completely solve it)
  1165. * this situation cannot happen.
  1166. *
  1167. * To completely solve this race drivers need to make sure that they
  1168. * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
  1169. * functions and
  1170. * (b) always process RX events before TX status events if ordering
  1171. * can be unknown, for example with different interrupt status
  1172. * bits.
  1173. */
  1174. if (test_sta_flags(sta, WLAN_STA_PS) &&
  1175. skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
  1176. ieee80211_remove_tx_extra(local, sta->key, skb);
  1177. skb_queue_tail(&sta->tx_filtered, skb);
  1178. return;
  1179. }
  1180. if (!test_sta_flags(sta, WLAN_STA_PS) &&
  1181. !(info->flags & IEEE80211_TX_CTL_REQUEUE)) {
  1182. /* Software retry the packet once */
  1183. info->flags |= IEEE80211_TX_CTL_REQUEUE;
  1184. ieee80211_remove_tx_extra(local, sta->key, skb);
  1185. dev_queue_xmit(skb);
  1186. return;
  1187. }
  1188. #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
  1189. if (net_ratelimit())
  1190. printk(KERN_DEBUG "%s: dropped TX filtered frame, "
  1191. "queue_len=%d PS=%d @%lu\n",
  1192. wiphy_name(local->hw.wiphy),
  1193. skb_queue_len(&sta->tx_filtered),
  1194. !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
  1195. #endif
  1196. dev_kfree_skb(skb);
  1197. }
  1198. void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
  1199. {
  1200. struct sk_buff *skb2;
  1201. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1202. struct ieee80211_local *local = hw_to_local(hw);
  1203. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1204. u16 frag, type;
  1205. struct ieee80211_tx_status_rtap_hdr *rthdr;
  1206. struct ieee80211_sub_if_data *sdata;
  1207. struct net_device *prev_dev = NULL;
  1208. rcu_read_lock();
  1209. if (info->status.excessive_retries) {
  1210. struct sta_info *sta;
  1211. sta = sta_info_get(local, hdr->addr1);
  1212. if (sta) {
  1213. if (test_sta_flags(sta, WLAN_STA_PS)) {
  1214. /*
  1215. * The STA is in power save mode, so assume
  1216. * that this TX packet failed because of that.
  1217. */
  1218. ieee80211_handle_filtered_frame(local, sta, skb);
  1219. rcu_read_unlock();
  1220. return;
  1221. }
  1222. }
  1223. }
  1224. if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
  1225. struct sta_info *sta;
  1226. sta = sta_info_get(local, hdr->addr1);
  1227. if (sta) {
  1228. ieee80211_handle_filtered_frame(local, sta, skb);
  1229. rcu_read_unlock();
  1230. return;
  1231. }
  1232. } else
  1233. rate_control_tx_status(local->mdev, skb);
  1234. rcu_read_unlock();
  1235. ieee80211_led_tx(local, 0);
  1236. /* SNMP counters
  1237. * Fragments are passed to low-level drivers as separate skbs, so these
  1238. * are actually fragments, not frames. Update frame counters only for
  1239. * the first fragment of the frame. */
  1240. frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
  1241. type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
  1242. if (info->flags & IEEE80211_TX_STAT_ACK) {
  1243. if (frag == 0) {
  1244. local->dot11TransmittedFrameCount++;
  1245. if (is_multicast_ether_addr(hdr->addr1))
  1246. local->dot11MulticastTransmittedFrameCount++;
  1247. if (info->status.retry_count > 0)
  1248. local->dot11RetryCount++;
  1249. if (info->status.retry_count > 1)
  1250. local->dot11MultipleRetryCount++;
  1251. }
  1252. /* This counter shall be incremented for an acknowledged MPDU
  1253. * with an individual address in the address 1 field or an MPDU
  1254. * with a multicast address in the address 1 field of type Data
  1255. * or Management. */
  1256. if (!is_multicast_ether_addr(hdr->addr1) ||
  1257. type == IEEE80211_FTYPE_DATA ||
  1258. type == IEEE80211_FTYPE_MGMT)
  1259. local->dot11TransmittedFragmentCount++;
  1260. } else {
  1261. if (frag == 0)
  1262. local->dot11FailedCount++;
  1263. }
  1264. /* this was a transmitted frame, but now we want to reuse it */
  1265. skb_orphan(skb);
  1266. /*
  1267. * This is a bit racy but we can avoid a lot of work
  1268. * with this test...
  1269. */
  1270. if (!local->monitors && !local->cooked_mntrs) {
  1271. dev_kfree_skb(skb);
  1272. return;
  1273. }
  1274. /* send frame to monitor interfaces now */
  1275. if (skb_headroom(skb) < sizeof(*rthdr)) {
  1276. printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
  1277. dev_kfree_skb(skb);
  1278. return;
  1279. }
  1280. rthdr = (struct ieee80211_tx_status_rtap_hdr *)
  1281. skb_push(skb, sizeof(*rthdr));
  1282. memset(rthdr, 0, sizeof(*rthdr));
  1283. rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
  1284. rthdr->hdr.it_present =
  1285. cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
  1286. (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
  1287. if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
  1288. !is_multicast_ether_addr(hdr->addr1))
  1289. rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
  1290. if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) &&
  1291. (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
  1292. rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
  1293. else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
  1294. rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
  1295. rthdr->data_retries = info->status.retry_count;
  1296. /* XXX: is this sufficient for BPF? */
  1297. skb_set_mac_header(skb, 0);
  1298. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1299. skb->pkt_type = PACKET_OTHERHOST;
  1300. skb->protocol = htons(ETH_P_802_2);
  1301. memset(skb->cb, 0, sizeof(skb->cb));
	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
			if (!netif_running(sdata->dev))
				continue;

			if (prev_dev) {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2) {
					skb2->dev = prev_dev;
					netif_rx(skb2);
				}
			}

			prev_dev = sdata->dev;
		}
	}
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	}
	rcu_read_unlock();
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status);
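
/*
 * Allocate the hardware structure: the wiphy, our ieee80211_local and the
 * driver's private area come out of a single allocation. The caller passes
 * the size of its private data and the set of driver callbacks that
 * mac80211 will invoke.
 */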
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy            |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
	priv_size = ((sizeof(struct ieee80211_local) +
		      NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
		    priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);
	if (!wiphy)
		return NULL;

	wiphy->privid = mac80211_wiphy_privid;

	local = wiphy_priv(wiphy);
	local->hw.wiphy = wiphy;

	local->hw.priv = (char *)local +
			 ((sizeof(struct ieee80211_local) +
			   NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);

	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	local->hw.queues = 1; /* default */

	local->bridge_packets = 1;

	local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
	local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	local->short_retry_limit = 7;
	local->long_retry_limit = 4;
	local->hw.conf.radio_enabled = 1;

	INIT_LIST_HEAD(&local->interfaces);

	spin_lock_init(&local->key_lock);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);

	sta_info_init(local);

	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);
	tasklet_disable(&local->tx_pending_tasklet);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long) local);
	tasklet_disable(&local->tasklet);

	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
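
/*
 * Register the hardware: pick the initial channel, register the wiphy,
 * create the wmaster%d master device and one default "wlan%d" STA
 * interface, and set up rate control, WEP and the qdisc.
 */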
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	const char *name;
	int result;
	enum ieee80211_band band;
	struct net_device *mdev;
	struct ieee80211_sub_if_data *sdata;

	/*
	 * generic code guarantees at least one band,
	 * set this very early because much code assumes
	 * that hw.conf.channel is assigned
	 */
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = local->hw.wiphy->bands[band];
		if (sband) {
			/* init channel we're on */
			local->hw.conf.channel =
			local->oper_channel =
			local->scan_channel = &sband->channels[0];
			break;
		}
	}

	result = wiphy_register(local->hw.wiphy);
	if (result < 0)
		return result;

	/*
	 * We use the number of queues for feature tests (QoS, HT) internally
	 * so restrict them appropriately.
	 */
#ifdef CONFIG_MAC80211_QOS
	if (hw->queues > IEEE80211_MAX_QUEUES)
		hw->queues = IEEE80211_MAX_QUEUES;
	if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
		hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
	if (hw->queues < 4)
		hw->ampdu_queues = 0;
#else
	hw->queues = 1;
	hw->ampdu_queues = 0;
#endif
	/* for now, mdev needs sub_if_data :/ */
	mdev = alloc_netdev_mq(sizeof(struct ieee80211_sub_if_data),
			       "wmaster%d", ether_setup,
			       ieee80211_num_queues(hw));
	if (!mdev)
		goto fail_mdev_alloc;

	if (ieee80211_num_queues(hw) > 1)
		mdev->features |= NETIF_F_MULTI_QUEUE;

	sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
	mdev->ieee80211_ptr = &sdata->wdev;
	sdata->wdev.wiphy = local->hw.wiphy;

	local->mdev = mdev;

	ieee80211_rx_bss_list_init(mdev);

	mdev->hard_start_xmit = ieee80211_master_start_xmit;
	mdev->open = ieee80211_master_open;
	mdev->stop = ieee80211_master_stop;
	mdev->type = ARPHRD_IEEE80211;
	mdev->header_ops = &ieee80211_header_ops;
	mdev->set_multicast_list = ieee80211_master_set_multicast_list;

	sdata->vif.type = IEEE80211_IF_TYPE_AP;
	sdata->dev = mdev;
	sdata->local = local;
	sdata->u.ap.force_unicast_rateidx = -1;
	sdata->u.ap.max_ratectrl_rateidx = -1;
	ieee80211_if_sdata_init(sdata);

	/* no RCU needed since we're still during init phase */
	list_add_tail(&sdata->list, &local->interfaces);
	name = wiphy_dev(local->hw.wiphy)->driver->name;
	local->hw.workqueue = create_freezeable_workqueue(name);
	if (!local->hw.workqueue) {
		result = -ENOMEM;
		goto fail_workqueue;
	}

	/*
	 * The hardware needs headroom for sending the frame,
	 * and we need some headroom for passing the frame to monitor
	 * interfaces, but never both at the same time.
	 */
	local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
				   sizeof(struct ieee80211_tx_status_rtap_hdr));

	debugfs_hw_add(local);

	if (local->hw.conf.beacon_int < 10)
		local->hw.conf.beacon_int = 100;

	local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
						  IEEE80211_HW_SIGNAL_DB |
						  IEEE80211_HW_SIGNAL_DBM) ?
			       IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
	local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
			       IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		local->wstats_flags |= IW_QUAL_DBM;
	result = sta_info_start(local);
	if (result < 0)
		goto fail_sta_info;

	rtnl_lock();
	result = dev_alloc_name(local->mdev, local->mdev->name);
	if (result < 0)
		goto fail_dev;

	memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
	SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy));

	result = register_netdevice(local->mdev);
	if (result < 0)
		goto fail_dev;

	ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP);

	result = ieee80211_init_rate_ctrl_alg(local,
					      hw->rate_control_algorithm);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize rate control "
		       "algorithm\n", wiphy_name(local->hw.wiphy));
		goto fail_rate;
	}

	result = ieee80211_wep_init(local);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize wep\n",
		       wiphy_name(local->hw.wiphy));
		goto fail_wep;
	}

	ieee80211_install_qdisc(local->mdev);

	/* add one default STA interface */
	result = ieee80211_if_add(local->mdev, "wlan%d", NULL,
				  IEEE80211_IF_TYPE_STA, NULL);
	if (result)
		printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
		       wiphy_name(local->hw.wiphy));

	local->reg_state = IEEE80211_DEV_REGISTERED;
	rtnl_unlock();

	ieee80211_led_init(local);

	return 0;
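
	/*
	 * Error unwinding: each label below undoes the setup steps that had
	 * completed before the corresponding failure, in reverse order.
	 */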
fail_wep:
	rate_control_deinitialize(local);
fail_rate:
	ieee80211_debugfs_remove_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	unregister_netdevice(local->mdev);
	local->mdev = NULL;
fail_dev:
	rtnl_unlock();
	sta_info_stop(local);
fail_sta_info:
	debugfs_hw_del(local);
	destroy_workqueue(local->hw.workqueue);
fail_workqueue:
	if (local->mdev != NULL) {
		ieee80211_if_free(local->mdev);
		local->mdev = NULL;
	}
fail_mdev_alloc:
	wiphy_unregister(local->hw.wiphy);
	return result;
}
EXPORT_SYMBOL(ieee80211_register_hw);
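
/*
 * Tear down what ieee80211_register_hw() set up: remove all virtual
 * interfaces (non-master first, master last), stop station handling,
 * rate control and the workqueue, and unregister the wiphy.
 */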
void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata, *tmp;

	tasklet_kill(&local->tx_pending_tasklet);
	tasklet_kill(&local->tasklet);

	rtnl_lock();

	BUG_ON(local->reg_state != IEEE80211_DEV_REGISTERED);

	local->reg_state = IEEE80211_DEV_UNREGISTERED;

	/*
	 * At this point, interface list manipulations are fine
	 * because the driver cannot be handing us frames any
	 * more and the tasklet is killed.
	 */

	/*
	 * First, we remove all non-master interfaces. Do this because they
	 * may have bss pointer dependency on the master, and when we free
	 * the master these would be freed as well, breaking our list
	 * iteration completely.
	 */
	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
		if (sdata->dev == local->mdev)
			continue;
		list_del(&sdata->list);
		__ieee80211_if_del(local, sdata);
	}

	/* then, finally, remove the master interface */
	__ieee80211_if_del(local, IEEE80211_DEV_TO_SUB_IF(local->mdev));

	rtnl_unlock();

	ieee80211_rx_bss_list_deinit(local->mdev);
	ieee80211_clear_tx_pending(local);
	sta_info_stop(local);
	rate_control_deinitialize(local);
	debugfs_hw_del(local);

	if (skb_queue_len(&local->skb_queue)
			|| skb_queue_len(&local->skb_queue_unreliable))
		printk(KERN_WARNING "%s: skb_queue not empty\n",
		       wiphy_name(local->hw.wiphy));
	skb_queue_purge(&local->skb_queue);
	skb_queue_purge(&local->skb_queue_unreliable);

	destroy_workqueue(local->hw.workqueue);
	wiphy_unregister(local->hw.wiphy);
	ieee80211_wep_free(local);
	ieee80211_led_exit(local);
	ieee80211_if_free(local->mdev);
	local->mdev = NULL;
}
EXPORT_SYMBOL(ieee80211_unregister_hw);
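
/*
 * Final free: the wiphy allocation also holds ieee80211_local and the
 * driver's private data, so freeing the wiphy releases all of them.
 */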
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
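
/*
 * Module init/exit. The BUILD_BUG_ON checks below ensure that
 * struct ieee80211_tx_info (including its driver_data area) still fits
 * into skb->cb, since mac80211 keeps the TX info in the skb control block.
 */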
static int __init ieee80211_init(void)
{
	struct sk_buff *skb;
	int ret;

	BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));

	ret = rc80211_pid_init();
	if (ret)
		goto out;

	ret = ieee80211_wme_register();
	if (ret) {
		printk(KERN_DEBUG "ieee80211_init: failed to "
		       "initialize WME (err=%d)\n", ret);
		goto out_cleanup_pid;
	}

	ieee80211_debugfs_netdev_init();

	return 0;

 out_cleanup_pid:
	rc80211_pid_exit();
 out:
	return ret;
}
static void __exit ieee80211_exit(void)
{
	rc80211_pid_exit();

	/*
	 * For key todo, it'll be empty by now but the work
	 * might still be scheduled.
	 */
	flush_scheduled_work();

	if (mesh_allocated)
		ieee80211s_stop();

	ieee80211_wme_unregister();
	ieee80211_debugfs_netdev_exit();
}


subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);

MODULE_DESCRIPTION("IEEE 802.11 subsystem");
MODULE_LICENSE("GPL");