main.c 52 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
66116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910
  1. /*
  2. * Copyright 2002-2005, Instant802 Networks, Inc.
  3. * Copyright 2005-2006, Devicescape Software, Inc.
  4. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <net/mac80211.h>
  11. #include <net/ieee80211_radiotap.h>
  12. #include <linux/module.h>
  13. #include <linux/init.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/types.h>
  16. #include <linux/slab.h>
  17. #include <linux/skbuff.h>
  18. #include <linux/etherdevice.h>
  19. #include <linux/if_arp.h>
  20. #include <linux/wireless.h>
  21. #include <linux/rtnetlink.h>
  22. #include <linux/bitmap.h>
  23. #include <net/net_namespace.h>
  24. #include <net/cfg80211.h>
  25. #include "ieee80211_i.h"
  26. #include "rate.h"
  27. #include "mesh.h"
  28. #include "wep.h"
  29. #include "wme.h"
  30. #include "aes_ccm.h"
  31. #include "led.h"
  32. #include "cfg.h"
  33. #include "debugfs.h"
  34. #include "debugfs_netdev.h"
  35. /*
  36. * For seeing transmitted packets on monitor interfaces
  37. * we have a radiotap header too.
  38. */
  39. struct ieee80211_tx_status_rtap_hdr {
  40. struct ieee80211_radiotap_header hdr;
  41. __le16 tx_flags;
  42. u8 data_retries;
  43. } __attribute__ ((packed));
  44. /* common interface routines */
  45. static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
  46. {
  47. memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
  48. return ETH_ALEN;
  49. }
  50. /* must be called under mdev tx lock */
  51. static void ieee80211_configure_filter(struct ieee80211_local *local)
  52. {
  53. unsigned int changed_flags;
  54. unsigned int new_flags = 0;
  55. if (atomic_read(&local->iff_promiscs))
  56. new_flags |= FIF_PROMISC_IN_BSS;
  57. if (atomic_read(&local->iff_allmultis))
  58. new_flags |= FIF_ALLMULTI;
  59. if (local->monitors)
  60. new_flags |= FIF_BCN_PRBRESP_PROMISC;
  61. if (local->fif_fcsfail)
  62. new_flags |= FIF_FCSFAIL;
  63. if (local->fif_plcpfail)
  64. new_flags |= FIF_PLCPFAIL;
  65. if (local->fif_control)
  66. new_flags |= FIF_CONTROL;
  67. if (local->fif_other_bss)
  68. new_flags |= FIF_OTHER_BSS;
  69. changed_flags = local->filter_flags ^ new_flags;
  70. /* be a bit nasty */
  71. new_flags |= (1<<31);
  72. local->ops->configure_filter(local_to_hw(local),
  73. changed_flags, &new_flags,
  74. local->mdev->mc_count,
  75. local->mdev->mc_list);
  76. WARN_ON(new_flags & (1<<31));
  77. local->filter_flags = new_flags & ~(1<<31);
  78. }
  79. /* master interface */
  80. static int ieee80211_master_open(struct net_device *dev)
  81. {
  82. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  83. struct ieee80211_sub_if_data *sdata;
  84. int res = -EOPNOTSUPP;
  85. /* we hold the RTNL here so can safely walk the list */
  86. list_for_each_entry(sdata, &local->interfaces, list) {
  87. if (sdata->dev != dev && netif_running(sdata->dev)) {
  88. res = 0;
  89. break;
  90. }
  91. }
  92. if (res)
  93. return res;
  94. netif_start_queue(local->mdev);
  95. return 0;
  96. }
  97. static int ieee80211_master_stop(struct net_device *dev)
  98. {
  99. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  100. struct ieee80211_sub_if_data *sdata;
  101. /* we hold the RTNL here so can safely walk the list */
  102. list_for_each_entry(sdata, &local->interfaces, list)
  103. if (sdata->dev != dev && netif_running(sdata->dev))
  104. dev_close(sdata->dev);
  105. return 0;
  106. }
  107. static void ieee80211_master_set_multicast_list(struct net_device *dev)
  108. {
  109. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  110. ieee80211_configure_filter(local);
  111. }
  112. /* regular interfaces */
  113. static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
  114. {
  115. int meshhdrlen;
  116. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  117. meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;
  118. /* FIX: what would be proper limits for MTU?
  119. * This interface uses 802.3 frames. */
  120. if (new_mtu < 256 ||
  121. new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
  122. printk(KERN_WARNING "%s: invalid MTU %d\n",
  123. dev->name, new_mtu);
  124. return -EINVAL;
  125. }
  126. #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
  127. printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
  128. #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
  129. dev->mtu = new_mtu;
  130. return 0;
  131. }
  132. static inline int identical_mac_addr_allowed(int type1, int type2)
  133. {
  134. return (type1 == IEEE80211_IF_TYPE_MNTR ||
  135. type2 == IEEE80211_IF_TYPE_MNTR ||
  136. (type1 == IEEE80211_IF_TYPE_AP &&
  137. type2 == IEEE80211_IF_TYPE_WDS) ||
  138. (type1 == IEEE80211_IF_TYPE_WDS &&
  139. (type2 == IEEE80211_IF_TYPE_WDS ||
  140. type2 == IEEE80211_IF_TYPE_AP)) ||
  141. (type1 == IEEE80211_IF_TYPE_AP &&
  142. type2 == IEEE80211_IF_TYPE_VLAN) ||
  143. (type1 == IEEE80211_IF_TYPE_VLAN &&
  144. (type2 == IEEE80211_IF_TYPE_AP ||
  145. type2 == IEEE80211_IF_TYPE_VLAN)));
  146. }
  147. static int ieee80211_open(struct net_device *dev)
  148. {
  149. struct ieee80211_sub_if_data *sdata, *nsdata;
  150. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  151. struct ieee80211_if_init_conf conf;
  152. int res;
  153. bool need_hw_reconfig = 0;
  154. struct sta_info *sta;
  155. sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  156. /* we hold the RTNL here so can safely walk the list */
  157. list_for_each_entry(nsdata, &local->interfaces, list) {
  158. struct net_device *ndev = nsdata->dev;
  159. if (ndev != dev && ndev != local->mdev && netif_running(ndev)) {
  160. /*
  161. * Allow only a single IBSS interface to be up at any
  162. * time. This is restricted because beacon distribution
  163. * cannot work properly if both are in the same IBSS.
  164. *
  165. * To remove this restriction we'd have to disallow them
  166. * from setting the same SSID on different IBSS interfaces
  167. * belonging to the same hardware. Then, however, we're
  168. * faced with having to adopt two different TSF timers...
  169. */
  170. if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
  171. nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
  172. return -EBUSY;
  173. /*
  174. * Disallow multiple IBSS/STA mode interfaces.
  175. *
  176. * This is a technical restriction, it is possible although
  177. * most likely not IEEE 802.11 compliant to have multiple
  178. * STAs with just a single hardware (the TSF timer will not
  179. * be adjusted properly.)
  180. *
  181. * However, because mac80211 uses the master device's BSS
  182. * information for each STA/IBSS interface, doing this will
  183. * currently corrupt that BSS information completely, unless,
  184. * a not very useful case, both STAs are associated to the
  185. * same BSS.
  186. *
  187. * To remove this restriction, the BSS information needs to
  188. * be embedded in the STA/IBSS mode sdata instead of using
  189. * the master device's BSS structure.
  190. */
  191. if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
  192. sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
  193. (nsdata->vif.type == IEEE80211_IF_TYPE_STA ||
  194. nsdata->vif.type == IEEE80211_IF_TYPE_IBSS))
  195. return -EBUSY;
  196. /*
  197. * The remaining checks are only performed for interfaces
  198. * with the same MAC address.
  199. */
  200. if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
  201. continue;
  202. /*
  203. * check whether it may have the same address
  204. */
  205. if (!identical_mac_addr_allowed(sdata->vif.type,
  206. nsdata->vif.type))
  207. return -ENOTUNIQ;
  208. /*
  209. * can only add VLANs to enabled APs
  210. */
  211. if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
  212. nsdata->vif.type == IEEE80211_IF_TYPE_AP)
  213. sdata->u.vlan.ap = nsdata;
  214. }
  215. }
  216. switch (sdata->vif.type) {
  217. case IEEE80211_IF_TYPE_WDS:
  218. if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
  219. return -ENOLINK;
  220. break;
  221. case IEEE80211_IF_TYPE_VLAN:
  222. if (!sdata->u.vlan.ap)
  223. return -ENOLINK;
  224. break;
  225. case IEEE80211_IF_TYPE_AP:
  226. case IEEE80211_IF_TYPE_STA:
  227. case IEEE80211_IF_TYPE_MNTR:
  228. case IEEE80211_IF_TYPE_IBSS:
  229. case IEEE80211_IF_TYPE_MESH_POINT:
  230. /* no special treatment */
  231. break;
  232. case IEEE80211_IF_TYPE_INVALID:
  233. /* cannot happen */
  234. WARN_ON(1);
  235. break;
  236. }
  237. if (local->open_count == 0) {
  238. res = 0;
  239. if (local->ops->start)
  240. res = local->ops->start(local_to_hw(local));
  241. if (res)
  242. return res;
  243. need_hw_reconfig = 1;
  244. ieee80211_led_radio(local, local->hw.conf.radio_enabled);
  245. }
  246. switch (sdata->vif.type) {
  247. case IEEE80211_IF_TYPE_VLAN:
  248. list_add(&sdata->u.vlan.list, &sdata->u.vlan.ap->u.ap.vlans);
  249. /* no need to tell driver */
  250. break;
  251. case IEEE80211_IF_TYPE_MNTR:
  252. if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
  253. local->cooked_mntrs++;
  254. break;
  255. }
  256. /* must be before the call to ieee80211_configure_filter */
  257. local->monitors++;
  258. if (local->monitors == 1)
  259. local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
  260. if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
  261. local->fif_fcsfail++;
  262. if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
  263. local->fif_plcpfail++;
  264. if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
  265. local->fif_control++;
  266. if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
  267. local->fif_other_bss++;
  268. netif_tx_lock_bh(local->mdev);
  269. ieee80211_configure_filter(local);
  270. netif_tx_unlock_bh(local->mdev);
  271. break;
  272. case IEEE80211_IF_TYPE_STA:
  273. case IEEE80211_IF_TYPE_IBSS:
  274. sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
  275. /* fall through */
  276. default:
  277. conf.vif = &sdata->vif;
  278. conf.type = sdata->vif.type;
  279. conf.mac_addr = dev->dev_addr;
  280. res = local->ops->add_interface(local_to_hw(local), &conf);
  281. if (res)
  282. goto err_stop;
  283. ieee80211_if_config(dev);
  284. ieee80211_reset_erp_info(dev);
  285. ieee80211_enable_keys(sdata);
  286. if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
  287. !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
  288. netif_carrier_off(dev);
  289. else
  290. netif_carrier_on(dev);
  291. }
  292. if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
  293. /* Create STA entry for the WDS peer */
  294. sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
  295. GFP_KERNEL);
  296. if (!sta) {
  297. res = -ENOMEM;
  298. goto err_del_interface;
  299. }
  300. /* no locking required since STA is not live yet */
  301. sta->flags |= WLAN_STA_AUTHORIZED;
  302. res = sta_info_insert(sta);
  303. if (res) {
  304. /* STA has been freed */
  305. goto err_del_interface;
  306. }
  307. }
  308. if (local->open_count == 0) {
  309. res = dev_open(local->mdev);
  310. WARN_ON(res);
  311. if (res)
  312. goto err_del_interface;
  313. tasklet_enable(&local->tx_pending_tasklet);
  314. tasklet_enable(&local->tasklet);
  315. }
  316. /*
  317. * set_multicast_list will be invoked by the networking core
  318. * which will check whether any increments here were done in
  319. * error and sync them down to the hardware as filter flags.
  320. */
  321. if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
  322. atomic_inc(&local->iff_allmultis);
  323. if (sdata->flags & IEEE80211_SDATA_PROMISC)
  324. atomic_inc(&local->iff_promiscs);
  325. local->open_count++;
  326. if (need_hw_reconfig)
  327. ieee80211_hw_config(local);
  328. /*
  329. * ieee80211_sta_work is disabled while network interface
  330. * is down. Therefore, some configuration changes may not
  331. * yet be effective. Trigger execution of ieee80211_sta_work
  332. * to fix this.
  333. */
  334. if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
  335. sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
  336. struct ieee80211_if_sta *ifsta = &sdata->u.sta;
  337. queue_work(local->hw.workqueue, &ifsta->work);
  338. }
  339. netif_start_queue(dev);
  340. return 0;
  341. err_del_interface:
  342. local->ops->remove_interface(local_to_hw(local), &conf);
  343. err_stop:
  344. if (!local->open_count && local->ops->stop)
  345. local->ops->stop(local_to_hw(local));
  346. return res;
  347. }
  348. static int ieee80211_stop(struct net_device *dev)
  349. {
  350. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  351. struct ieee80211_local *local = sdata->local;
  352. struct ieee80211_if_init_conf conf;
  353. struct sta_info *sta;
  354. /*
  355. * Stop TX on this interface first.
  356. */
  357. netif_stop_queue(dev);
  358. /*
  359. * Now delete all active aggregation sessions.
  360. */
  361. rcu_read_lock();
  362. list_for_each_entry_rcu(sta, &local->sta_list, list) {
  363. if (sta->sdata == sdata)
  364. ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
  365. }
  366. rcu_read_unlock();
  367. /*
  368. * Remove all stations associated with this interface.
  369. *
  370. * This must be done before calling ops->remove_interface()
  371. * because otherwise we can later invoke ops->sta_notify()
  372. * whenever the STAs are removed, and that invalidates driver
  373. * assumptions about always getting a vif pointer that is valid
  374. * (because if we remove a STA after ops->remove_interface()
  375. * the driver will have removed the vif info already!)
  376. *
  377. * We could relax this and only unlink the stations from the
  378. * hash table and list but keep them on a per-sdata list that
  379. * will be inserted back again when the interface is brought
  380. * up again, but I don't currently see a use case for that,
  381. * except with WDS which gets a STA entry created when it is
  382. * brought up.
  383. */
  384. sta_info_flush(local, sdata);
  385. /*
  386. * Don't count this interface for promisc/allmulti while it
  387. * is down. dev_mc_unsync() will invoke set_multicast_list
  388. * on the master interface which will sync these down to the
  389. * hardware as filter flags.
  390. */
  391. if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
  392. atomic_dec(&local->iff_allmultis);
  393. if (sdata->flags & IEEE80211_SDATA_PROMISC)
  394. atomic_dec(&local->iff_promiscs);
  395. dev_mc_unsync(local->mdev, dev);
  396. /* APs need special treatment */
  397. if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
  398. struct ieee80211_sub_if_data *vlan, *tmp;
  399. struct beacon_data *old_beacon = sdata->u.ap.beacon;
  400. /* remove beacon */
  401. rcu_assign_pointer(sdata->u.ap.beacon, NULL);
  402. synchronize_rcu();
  403. kfree(old_beacon);
  404. /* down all dependent devices, that is VLANs */
  405. list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
  406. u.vlan.list)
  407. dev_close(vlan->dev);
  408. WARN_ON(!list_empty(&sdata->u.ap.vlans));
  409. }
  410. local->open_count--;
  411. switch (sdata->vif.type) {
  412. case IEEE80211_IF_TYPE_VLAN:
  413. list_del(&sdata->u.vlan.list);
  414. sdata->u.vlan.ap = NULL;
  415. /* no need to tell driver */
  416. break;
  417. case IEEE80211_IF_TYPE_MNTR:
  418. if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
  419. local->cooked_mntrs--;
  420. break;
  421. }
  422. local->monitors--;
  423. if (local->monitors == 0)
  424. local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
  425. if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
  426. local->fif_fcsfail--;
  427. if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
  428. local->fif_plcpfail--;
  429. if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
  430. local->fif_control--;
  431. if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
  432. local->fif_other_bss--;
  433. netif_tx_lock_bh(local->mdev);
  434. ieee80211_configure_filter(local);
  435. netif_tx_unlock_bh(local->mdev);
  436. break;
  437. case IEEE80211_IF_TYPE_MESH_POINT:
  438. case IEEE80211_IF_TYPE_STA:
  439. case IEEE80211_IF_TYPE_IBSS:
  440. sdata->u.sta.state = IEEE80211_DISABLED;
  441. del_timer_sync(&sdata->u.sta.timer);
  442. /*
  443. * When we get here, the interface is marked down.
  444. * Call synchronize_rcu() to wait for the RX path
  445. * should it be using the interface and enqueuing
  446. * frames at this very time on another CPU.
  447. */
  448. synchronize_rcu();
  449. skb_queue_purge(&sdata->u.sta.skb_queue);
  450. if (local->scan_dev == sdata->dev) {
  451. if (!local->ops->hw_scan) {
  452. local->sta_sw_scanning = 0;
  453. cancel_delayed_work(&local->scan_work);
  454. } else
  455. local->sta_hw_scanning = 0;
  456. }
  457. flush_workqueue(local->hw.workqueue);
  458. sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
  459. kfree(sdata->u.sta.extra_ie);
  460. sdata->u.sta.extra_ie = NULL;
  461. sdata->u.sta.extra_ie_len = 0;
  462. /* fall through */
  463. default:
  464. conf.vif = &sdata->vif;
  465. conf.type = sdata->vif.type;
  466. conf.mac_addr = dev->dev_addr;
  467. /* disable all keys for as long as this netdev is down */
  468. ieee80211_disable_keys(sdata);
  469. local->ops->remove_interface(local_to_hw(local), &conf);
  470. }
  471. if (local->open_count == 0) {
  472. if (netif_running(local->mdev))
  473. dev_close(local->mdev);
  474. if (local->ops->stop)
  475. local->ops->stop(local_to_hw(local));
  476. ieee80211_led_radio(local, 0);
  477. tasklet_disable(&local->tx_pending_tasklet);
  478. tasklet_disable(&local->tasklet);
  479. }
  480. return 0;
  481. }
  482. int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
  483. {
  484. struct ieee80211_local *local = hw_to_local(hw);
  485. struct sta_info *sta;
  486. struct ieee80211_sub_if_data *sdata;
  487. u16 start_seq_num = 0;
  488. u8 *state;
  489. int ret;
  490. DECLARE_MAC_BUF(mac);
  491. if (tid >= STA_TID_NUM)
  492. return -EINVAL;
  493. #ifdef CONFIG_MAC80211_HT_DEBUG
  494. printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
  495. print_mac(mac, ra), tid);
  496. #endif /* CONFIG_MAC80211_HT_DEBUG */
  497. rcu_read_lock();
  498. sta = sta_info_get(local, ra);
  499. if (!sta) {
  500. printk(KERN_DEBUG "Could not find the station\n");
  501. rcu_read_unlock();
  502. return -ENOENT;
  503. }
  504. spin_lock_bh(&sta->lock);
  505. /* we have tried too many times, receiver does not want A-MPDU */
  506. if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
  507. ret = -EBUSY;
  508. goto start_ba_exit;
  509. }
  510. state = &sta->ampdu_mlme.tid_state_tx[tid];
  511. /* check if the TID is not in aggregation flow already */
  512. if (*state != HT_AGG_STATE_IDLE) {
  513. #ifdef CONFIG_MAC80211_HT_DEBUG
  514. printk(KERN_DEBUG "BA request denied - session is not "
  515. "idle on tid %u\n", tid);
  516. #endif /* CONFIG_MAC80211_HT_DEBUG */
  517. ret = -EAGAIN;
  518. goto start_ba_exit;
  519. }
  520. /* prepare A-MPDU MLME for Tx aggregation */
  521. sta->ampdu_mlme.tid_tx[tid] =
  522. kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
  523. if (!sta->ampdu_mlme.tid_tx[tid]) {
  524. if (net_ratelimit())
  525. printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
  526. tid);
  527. ret = -ENOMEM;
  528. goto start_ba_exit;
  529. }
  530. /* Tx timer */
  531. sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
  532. sta_addba_resp_timer_expired;
  533. sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
  534. (unsigned long)&sta->timer_to_tid[tid];
  535. init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
  536. /* ensure that TX flow won't interrupt us
  537. * until the end of the call to requeue function */
  538. spin_lock_bh(&local->mdev->queue_lock);
  539. /* create a new queue for this aggregation */
  540. ret = ieee80211_ht_agg_queue_add(local, sta, tid);
  541. /* case no queue is available to aggregation
  542. * don't switch to aggregation */
  543. if (ret) {
  544. #ifdef CONFIG_MAC80211_HT_DEBUG
  545. printk(KERN_DEBUG "BA request denied - queue unavailable for"
  546. " tid %d\n", tid);
  547. #endif /* CONFIG_MAC80211_HT_DEBUG */
  548. goto start_ba_err;
  549. }
  550. sdata = sta->sdata;
  551. /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
  552. * call back right away, it must see that the flow has begun */
  553. *state |= HT_ADDBA_REQUESTED_MSK;
  554. if (local->ops->ampdu_action)
  555. ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
  556. ra, tid, &start_seq_num);
  557. if (ret) {
  558. /* No need to requeue the packets in the agg queue, since we
  559. * held the tx lock: no packet could be enqueued to the newly
  560. * allocated queue */
  561. ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
  562. #ifdef CONFIG_MAC80211_HT_DEBUG
  563. printk(KERN_DEBUG "BA request denied - HW unavailable for"
  564. " tid %d\n", tid);
  565. #endif /* CONFIG_MAC80211_HT_DEBUG */
  566. *state = HT_AGG_STATE_IDLE;
  567. goto start_ba_err;
  568. }
  569. /* Will put all the packets in the new SW queue */
  570. ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
  571. spin_unlock_bh(&local->mdev->queue_lock);
  572. /* send an addBA request */
  573. sta->ampdu_mlme.dialog_token_allocator++;
  574. sta->ampdu_mlme.tid_tx[tid]->dialog_token =
  575. sta->ampdu_mlme.dialog_token_allocator;
  576. sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
  577. ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
  578. sta->ampdu_mlme.tid_tx[tid]->dialog_token,
  579. sta->ampdu_mlme.tid_tx[tid]->ssn,
  580. 0x40, 5000);
  581. /* activate the timer for the recipient's addBA response */
  582. sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
  583. jiffies + ADDBA_RESP_INTERVAL;
  584. add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
  585. printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
  586. goto start_ba_exit;
  587. start_ba_err:
  588. kfree(sta->ampdu_mlme.tid_tx[tid]);
  589. sta->ampdu_mlme.tid_tx[tid] = NULL;
  590. spin_unlock_bh(&local->mdev->queue_lock);
  591. ret = -EBUSY;
  592. start_ba_exit:
  593. spin_unlock_bh(&sta->lock);
  594. rcu_read_unlock();
  595. return ret;
  596. }
  597. EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
  598. int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
  599. u8 *ra, u16 tid,
  600. enum ieee80211_back_parties initiator)
  601. {
  602. struct ieee80211_local *local = hw_to_local(hw);
  603. struct sta_info *sta;
  604. u8 *state;
  605. int ret = 0;
  606. DECLARE_MAC_BUF(mac);
  607. if (tid >= STA_TID_NUM)
  608. return -EINVAL;
  609. rcu_read_lock();
  610. sta = sta_info_get(local, ra);
  611. if (!sta) {
  612. rcu_read_unlock();
  613. return -ENOENT;
  614. }
  615. /* check if the TID is in aggregation */
  616. state = &sta->ampdu_mlme.tid_state_tx[tid];
  617. spin_lock_bh(&sta->lock);
  618. if (*state != HT_AGG_STATE_OPERATIONAL) {
  619. ret = -ENOENT;
  620. goto stop_BA_exit;
  621. }
  622. #ifdef CONFIG_MAC80211_HT_DEBUG
  623. printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
  624. print_mac(mac, ra), tid);
  625. #endif /* CONFIG_MAC80211_HT_DEBUG */
  626. ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
  627. *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
  628. (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
  629. if (local->ops->ampdu_action)
  630. ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
  631. ra, tid, NULL);
  632. /* case HW denied going back to legacy */
  633. if (ret) {
  634. WARN_ON(ret != -EBUSY);
  635. *state = HT_AGG_STATE_OPERATIONAL;
  636. ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
  637. goto stop_BA_exit;
  638. }
  639. stop_BA_exit:
  640. spin_unlock_bh(&sta->lock);
  641. rcu_read_unlock();
  642. return ret;
  643. }
  644. EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
  645. void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
  646. {
  647. struct ieee80211_local *local = hw_to_local(hw);
  648. struct sta_info *sta;
  649. u8 *state;
  650. DECLARE_MAC_BUF(mac);
  651. if (tid >= STA_TID_NUM) {
  652. printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
  653. tid, STA_TID_NUM);
  654. return;
  655. }
  656. rcu_read_lock();
  657. sta = sta_info_get(local, ra);
  658. if (!sta) {
  659. rcu_read_unlock();
  660. printk(KERN_DEBUG "Could not find station: %s\n",
  661. print_mac(mac, ra));
  662. return;
  663. }
  664. state = &sta->ampdu_mlme.tid_state_tx[tid];
  665. spin_lock_bh(&sta->lock);
  666. if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
  667. printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
  668. *state);
  669. spin_unlock_bh(&sta->lock);
  670. rcu_read_unlock();
  671. return;
  672. }
  673. WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
  674. *state |= HT_ADDBA_DRV_READY_MSK;
  675. if (*state == HT_AGG_STATE_OPERATIONAL) {
  676. printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
  677. ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
  678. }
  679. spin_unlock_bh(&sta->lock);
  680. rcu_read_unlock();
  681. }
  682. EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
  683. void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
  684. {
  685. struct ieee80211_local *local = hw_to_local(hw);
  686. struct sta_info *sta;
  687. u8 *state;
  688. int agg_queue;
  689. DECLARE_MAC_BUF(mac);
  690. if (tid >= STA_TID_NUM) {
  691. printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
  692. tid, STA_TID_NUM);
  693. return;
  694. }
  695. #ifdef CONFIG_MAC80211_HT_DEBUG
  696. printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
  697. print_mac(mac, ra), tid);
  698. #endif /* CONFIG_MAC80211_HT_DEBUG */
  699. rcu_read_lock();
  700. sta = sta_info_get(local, ra);
  701. if (!sta) {
  702. printk(KERN_DEBUG "Could not find station: %s\n",
  703. print_mac(mac, ra));
  704. rcu_read_unlock();
  705. return;
  706. }
  707. state = &sta->ampdu_mlme.tid_state_tx[tid];
  708. spin_lock_bh(&sta->lock);
  709. if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
  710. printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
  711. spin_unlock_bh(&sta->lock);
  712. rcu_read_unlock();
  713. return;
  714. }
  715. if (*state & HT_AGG_STATE_INITIATOR_MSK)
  716. ieee80211_send_delba(sta->sdata->dev, ra, tid,
  717. WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
  718. agg_queue = sta->tid_to_tx_q[tid];
  719. /* avoid ordering issues: we are the only one that can modify
  720. * the content of the qdiscs */
  721. spin_lock_bh(&local->mdev->queue_lock);
  722. /* remove the queue for this aggregation */
  723. ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
  724. spin_unlock_bh(&local->mdev->queue_lock);
  725. /* we just requeued the all the frames that were in the removed
  726. * queue, and since we might miss a softirq we do netif_schedule.
  727. * ieee80211_wake_queue is not used here as this queue is not
  728. * necessarily stopped */
  729. netif_schedule(local->mdev);
  730. *state = HT_AGG_STATE_IDLE;
  731. sta->ampdu_mlme.addba_req_num[tid] = 0;
  732. kfree(sta->ampdu_mlme.tid_tx[tid]);
  733. sta->ampdu_mlme.tid_tx[tid] = NULL;
  734. spin_unlock_bh(&sta->lock);
  735. rcu_read_unlock();
  736. }
  737. EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
  738. void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
  739. const u8 *ra, u16 tid)
  740. {
  741. struct ieee80211_local *local = hw_to_local(hw);
  742. struct ieee80211_ra_tid *ra_tid;
  743. struct sk_buff *skb = dev_alloc_skb(0);
  744. if (unlikely(!skb)) {
  745. if (net_ratelimit())
  746. printk(KERN_WARNING "%s: Not enough memory, "
  747. "dropping start BA session", skb->dev->name);
  748. return;
  749. }
  750. ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
  751. memcpy(&ra_tid->ra, ra, ETH_ALEN);
  752. ra_tid->tid = tid;
  753. skb->pkt_type = IEEE80211_ADDBA_MSG;
  754. skb_queue_tail(&local->skb_queue, skb);
  755. tasklet_schedule(&local->tasklet);
  756. }
  757. EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
  758. void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
  759. const u8 *ra, u16 tid)
  760. {
  761. struct ieee80211_local *local = hw_to_local(hw);
  762. struct ieee80211_ra_tid *ra_tid;
  763. struct sk_buff *skb = dev_alloc_skb(0);
  764. if (unlikely(!skb)) {
  765. if (net_ratelimit())
  766. printk(KERN_WARNING "%s: Not enough memory, "
  767. "dropping stop BA session", skb->dev->name);
  768. return;
  769. }
  770. ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
  771. memcpy(&ra_tid->ra, ra, ETH_ALEN);
  772. ra_tid->tid = tid;
  773. skb->pkt_type = IEEE80211_DELBA_MSG;
  774. skb_queue_tail(&local->skb_queue, skb);
  775. tasklet_schedule(&local->tasklet);
  776. }
  777. EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
  778. static void ieee80211_set_multicast_list(struct net_device *dev)
  779. {
  780. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  781. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  782. int allmulti, promisc, sdata_allmulti, sdata_promisc;
  783. allmulti = !!(dev->flags & IFF_ALLMULTI);
  784. promisc = !!(dev->flags & IFF_PROMISC);
  785. sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
  786. sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
  787. if (allmulti != sdata_allmulti) {
  788. if (dev->flags & IFF_ALLMULTI)
  789. atomic_inc(&local->iff_allmultis);
  790. else
  791. atomic_dec(&local->iff_allmultis);
  792. sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
  793. }
  794. if (promisc != sdata_promisc) {
  795. if (dev->flags & IFF_PROMISC)
  796. atomic_inc(&local->iff_promiscs);
  797. else
  798. atomic_dec(&local->iff_promiscs);
  799. sdata->flags ^= IEEE80211_SDATA_PROMISC;
  800. }
  801. dev_mc_sync(local->mdev, dev);
  802. }
  803. static const struct header_ops ieee80211_header_ops = {
  804. .create = eth_header,
  805. .parse = header_parse_80211,
  806. .rebuild = eth_rebuild_header,
  807. .cache = eth_header_cache,
  808. .cache_update = eth_header_cache_update,
  809. };
  810. /* Must not be called for mdev */
  811. void ieee80211_if_setup(struct net_device *dev)
  812. {
  813. ether_setup(dev);
  814. dev->hard_start_xmit = ieee80211_subif_start_xmit;
  815. dev->wireless_handlers = &ieee80211_iw_handler_def;
  816. dev->set_multicast_list = ieee80211_set_multicast_list;
  817. dev->change_mtu = ieee80211_change_mtu;
  818. dev->open = ieee80211_open;
  819. dev->stop = ieee80211_stop;
  820. dev->destructor = ieee80211_if_free;
  821. }
  822. /* everything else */
  823. static int __ieee80211_if_config(struct net_device *dev,
  824. struct sk_buff *beacon)
  825. {
  826. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  827. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  828. struct ieee80211_if_conf conf;
  829. if (!local->ops->config_interface || !netif_running(dev))
  830. return 0;
  831. memset(&conf, 0, sizeof(conf));
  832. conf.type = sdata->vif.type;
  833. if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
  834. sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
  835. conf.bssid = sdata->u.sta.bssid;
  836. conf.ssid = sdata->u.sta.ssid;
  837. conf.ssid_len = sdata->u.sta.ssid_len;
  838. } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
  839. conf.beacon = beacon;
  840. ieee80211_start_mesh(dev);
  841. } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
  842. conf.ssid = sdata->u.ap.ssid;
  843. conf.ssid_len = sdata->u.ap.ssid_len;
  844. conf.beacon = beacon;
  845. }
  846. return local->ops->config_interface(local_to_hw(local),
  847. &sdata->vif, &conf);
  848. }
  849. int ieee80211_if_config(struct net_device *dev)
  850. {
  851. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  852. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  853. if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
  854. (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
  855. return ieee80211_if_config_beacon(dev);
  856. return __ieee80211_if_config(dev, NULL);
  857. }
  858. int ieee80211_if_config_beacon(struct net_device *dev)
  859. {
  860. struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
  861. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  862. struct sk_buff *skb;
  863. if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
  864. return 0;
  865. skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif);
  866. if (!skb)
  867. return -ENOMEM;
  868. return __ieee80211_if_config(dev, skb);
  869. }
  870. int ieee80211_hw_config(struct ieee80211_local *local)
  871. {
  872. struct ieee80211_channel *chan;
  873. int ret = 0;
  874. if (local->sta_sw_scanning)
  875. chan = local->scan_channel;
  876. else
  877. chan = local->oper_channel;
  878. local->hw.conf.channel = chan;
  879. if (!local->hw.conf.power_level)
  880. local->hw.conf.power_level = chan->max_power;
  881. else
  882. local->hw.conf.power_level = min(chan->max_power,
  883. local->hw.conf.power_level);
  884. local->hw.conf.max_antenna_gain = chan->max_antenna_gain;
  885. #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
  886. printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
  887. wiphy_name(local->hw.wiphy), chan->center_freq);
  888. #endif
  889. if (local->open_count)
  890. ret = local->ops->config(local_to_hw(local), &local->hw.conf);
  891. return ret;
  892. }
  893. /**
  894. * ieee80211_handle_ht should be used only after legacy configuration
  895. * has been determined namely band, as ht configuration depends upon
  896. * the hardware's HT abilities for a _specific_ band.
  897. */
  898. u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
  899. struct ieee80211_ht_info *req_ht_cap,
  900. struct ieee80211_ht_bss_info *req_bss_cap)
  901. {
  902. struct ieee80211_conf *conf = &local->hw.conf;
  903. struct ieee80211_supported_band *sband;
  904. struct ieee80211_ht_info ht_conf;
  905. struct ieee80211_ht_bss_info ht_bss_conf;
  906. u32 changed = 0;
  907. int i;
  908. u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS;
  909. u8 tx_mcs_set_cap;
  910. sband = local->hw.wiphy->bands[conf->channel->band];
  911. memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
  912. memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));
  913. /* HT is not supported */
  914. if (!sband->ht_info.ht_supported) {
  915. conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
  916. goto out;
  917. }
  918. /* disable HT */
  919. if (!enable_ht) {
  920. if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
  921. changed |= BSS_CHANGED_HT;
  922. conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
  923. conf->ht_conf.ht_supported = 0;
  924. goto out;
  925. }
  926. if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
  927. changed |= BSS_CHANGED_HT;
  928. conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
  929. ht_conf.ht_supported = 1;
  930. ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
  931. ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
  932. ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
  933. ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
  934. ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
  935. ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
  936. ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
  937. ht_conf.ampdu_density = req_ht_cap->ampdu_density;
  938. /* Bits 96-100 */
  939. tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];
  940. /* configure suppoerted Tx MCS according to requested MCS
  941. * (based in most cases on Rx capabilities of peer) and self
  942. * Tx MCS capabilities (as defined by low level driver HW
  943. * Tx capabilities) */
  944. if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED))
  945. goto check_changed;
  946. /* Counting from 0 therfore + 1 */
  947. if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
  948. max_tx_streams = ((tx_mcs_set_cap &
  949. IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;
  950. for (i = 0; i < max_tx_streams; i++)
  951. ht_conf.supp_mcs_set[i] =
  952. sband->ht_info.supp_mcs_set[i] &
  953. req_ht_cap->supp_mcs_set[i];
  954. if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM)
  955. for (i = IEEE80211_SUPP_MCS_SET_UEQM;
  956. i < IEEE80211_SUPP_MCS_SET_LEN; i++)
  957. ht_conf.supp_mcs_set[i] =
  958. sband->ht_info.supp_mcs_set[i] &
  959. req_ht_cap->supp_mcs_set[i];
  960. check_changed:
  961. /* if bss configuration changed store the new one */
  962. if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
  963. memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
  964. changed |= BSS_CHANGED_HT;
  965. memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
  966. memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
  967. }
  968. out:
  969. return changed;
  970. }
  971. void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
  972. u32 changed)
  973. {
  974. struct ieee80211_local *local = sdata->local;
  975. if (!changed)
  976. return;
  977. if (local->ops->bss_info_changed)
  978. local->ops->bss_info_changed(local_to_hw(local),
  979. &sdata->vif,
  980. &sdata->bss_conf,
  981. changed);
  982. }
  983. void ieee80211_reset_erp_info(struct net_device *dev)
  984. {
  985. struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
  986. sdata->bss_conf.use_cts_prot = 0;
  987. sdata->bss_conf.use_short_preamble = 0;
  988. ieee80211_bss_info_change_notify(sdata,
  989. BSS_CHANGED_ERP_CTS_PROT |
  990. BSS_CHANGED_ERP_PREAMBLE);
  991. }
  992. void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
  993. struct sk_buff *skb)
  994. {
  995. struct ieee80211_local *local = hw_to_local(hw);
  996. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  997. int tmp;
  998. skb->dev = local->mdev;
  999. skb->pkt_type = IEEE80211_TX_STATUS_MSG;
  1000. skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
  1001. &local->skb_queue : &local->skb_queue_unreliable, skb);
  1002. tmp = skb_queue_len(&local->skb_queue) +
  1003. skb_queue_len(&local->skb_queue_unreliable);
  1004. while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
  1005. (skb = skb_dequeue(&local->skb_queue_unreliable))) {
  1006. dev_kfree_skb_irq(skb);
  1007. tmp--;
  1008. I802_DEBUG_INC(local->tx_status_drop);
  1009. }
  1010. tasklet_schedule(&local->tasklet);
  1011. }
  1012. EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
  1013. static void ieee80211_tasklet_handler(unsigned long data)
  1014. {
  1015. struct ieee80211_local *local = (struct ieee80211_local *) data;
  1016. struct sk_buff *skb;
  1017. struct ieee80211_rx_status rx_status;
  1018. struct ieee80211_ra_tid *ra_tid;
  1019. while ((skb = skb_dequeue(&local->skb_queue)) ||
  1020. (skb = skb_dequeue(&local->skb_queue_unreliable))) {
  1021. switch (skb->pkt_type) {
  1022. case IEEE80211_RX_MSG:
  1023. /* status is in skb->cb */
  1024. memcpy(&rx_status, skb->cb, sizeof(rx_status));
  1025. /* Clear skb->pkt_type in order to not confuse kernel
  1026. * netstack. */
  1027. skb->pkt_type = 0;
  1028. __ieee80211_rx(local_to_hw(local), skb, &rx_status);
  1029. break;
  1030. case IEEE80211_TX_STATUS_MSG:
  1031. skb->pkt_type = 0;
  1032. ieee80211_tx_status(local_to_hw(local), skb);
  1033. break;
  1034. case IEEE80211_DELBA_MSG:
  1035. ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
  1036. ieee80211_stop_tx_ba_cb(local_to_hw(local),
  1037. ra_tid->ra, ra_tid->tid);
  1038. dev_kfree_skb(skb);
  1039. break;
  1040. case IEEE80211_ADDBA_MSG:
  1041. ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
  1042. ieee80211_start_tx_ba_cb(local_to_hw(local),
  1043. ra_tid->ra, ra_tid->tid);
  1044. dev_kfree_skb(skb);
  1045. break ;
  1046. default: /* should never get here! */
  1047. printk(KERN_ERR "%s: Unknown message type (%d)\n",
  1048. wiphy_name(local->hw.wiphy), skb->pkt_type);
  1049. dev_kfree_skb(skb);
  1050. break;
  1051. }
  1052. }
  1053. }
  1054. /* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to
  1055. * make a prepared TX frame (one that has been given to hw) to look like brand
  1056. * new IEEE 802.11 frame that is ready to go through TX processing again.
  1057. * Also, tx_packet_data in cb is restored from tx_control. */
  1058. static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
  1059. struct ieee80211_key *key,
  1060. struct sk_buff *skb)
  1061. {
  1062. int hdrlen, iv_len, mic_len;
  1063. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1064. info->flags &= IEEE80211_TX_CTL_REQ_TX_STATUS |
  1065. IEEE80211_TX_CTL_DO_NOT_ENCRYPT |
  1066. IEEE80211_TX_CTL_REQUEUE |
  1067. IEEE80211_TX_CTL_EAPOL_FRAME;
  1068. hdrlen = ieee80211_get_hdrlen_from_skb(skb);
  1069. if (!key)
  1070. goto no_key;
  1071. switch (key->conf.alg) {
  1072. case ALG_WEP:
  1073. iv_len = WEP_IV_LEN;
  1074. mic_len = WEP_ICV_LEN;
  1075. break;
  1076. case ALG_TKIP:
  1077. iv_len = TKIP_IV_LEN;
  1078. mic_len = TKIP_ICV_LEN;
  1079. break;
  1080. case ALG_CCMP:
  1081. iv_len = CCMP_HDR_LEN;
  1082. mic_len = CCMP_MIC_LEN;
  1083. break;
  1084. default:
  1085. goto no_key;
  1086. }
  1087. if (skb->len >= mic_len &&
  1088. !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
  1089. skb_trim(skb, skb->len - mic_len);
  1090. if (skb->len >= iv_len && skb->len > hdrlen) {
  1091. memmove(skb->data + iv_len, skb->data, hdrlen);
  1092. skb_pull(skb, iv_len);
  1093. }
  1094. no_key:
  1095. {
  1096. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1097. u16 fc = le16_to_cpu(hdr->frame_control);
  1098. if ((fc & 0x8C) == 0x88) /* QoS Control Field */ {
  1099. fc &= ~IEEE80211_STYPE_QOS_DATA;
  1100. hdr->frame_control = cpu_to_le16(fc);
  1101. memmove(skb->data + 2, skb->data, hdrlen - 2);
  1102. skb_pull(skb, 2);
  1103. }
  1104. }
  1105. }
  1106. static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
  1107. struct sta_info *sta,
  1108. struct sk_buff *skb)
  1109. {
  1110. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1111. sta->tx_filtered_count++;
  1112. /*
  1113. * Clear the TX filter mask for this STA when sending the next
  1114. * packet. If the STA went to power save mode, this will happen
  1115. * happen when it wakes up for the next time.
  1116. */
  1117. set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
  1118. /*
  1119. * This code races in the following way:
  1120. *
  1121. * (1) STA sends frame indicating it will go to sleep and does so
  1122. * (2) hardware/firmware adds STA to filter list, passes frame up
  1123. * (3) hardware/firmware processes TX fifo and suppresses a frame
  1124. * (4) we get TX status before having processed the frame and
  1125. * knowing that the STA has gone to sleep.
  1126. *
  1127. * This is actually quite unlikely even when both those events are
  1128. * processed from interrupts coming in quickly after one another or
  1129. * even at the same time because we queue both TX status events and
  1130. * RX frames to be processed by a tasklet and process them in the
  1131. * same order that they were received or TX status last. Hence, there
  1132. * is no race as long as the frame RX is processed before the next TX
  1133. * status, which drivers can ensure, see below.
  1134. *
  1135. * Note that this can only happen if the hardware or firmware can
  1136. * actually add STAs to the filter list, if this is done by the
  1137. * driver in response to set_tim() (which will only reduce the race
  1138. * this whole filtering tries to solve, not completely solve it)
  1139. * this situation cannot happen.
  1140. *
  1141. * To completely solve this race drivers need to make sure that they
  1142. * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
  1143. * functions and
  1144. * (b) always process RX events before TX status events if ordering
  1145. * can be unknown, for example with different interrupt status
  1146. * bits.
  1147. */
  1148. if (test_sta_flags(sta, WLAN_STA_PS) &&
  1149. skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
  1150. ieee80211_remove_tx_extra(local, sta->key, skb);
  1151. skb_queue_tail(&sta->tx_filtered, skb);
  1152. return;
  1153. }
  1154. if (!test_sta_flags(sta, WLAN_STA_PS) &&
  1155. !(info->flags & IEEE80211_TX_CTL_REQUEUE)) {
  1156. /* Software retry the packet once */
  1157. info->flags |= IEEE80211_TX_CTL_REQUEUE;
  1158. ieee80211_remove_tx_extra(local, sta->key, skb);
  1159. dev_queue_xmit(skb);
  1160. return;
  1161. }
  1162. if (net_ratelimit())
  1163. printk(KERN_DEBUG "%s: dropped TX filtered frame, "
  1164. "queue_len=%d PS=%d @%lu\n",
  1165. wiphy_name(local->hw.wiphy),
  1166. skb_queue_len(&sta->tx_filtered),
  1167. !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
  1168. dev_kfree_skb(skb);
  1169. }
  1170. void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
  1171. {
  1172. struct sk_buff *skb2;
  1173. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1174. struct ieee80211_local *local = hw_to_local(hw);
  1175. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1176. u16 frag, type;
  1177. struct ieee80211_tx_status_rtap_hdr *rthdr;
  1178. struct ieee80211_sub_if_data *sdata;
  1179. struct net_device *prev_dev = NULL;
  1180. rcu_read_lock();
  1181. if (info->status.excessive_retries) {
  1182. struct sta_info *sta;
  1183. sta = sta_info_get(local, hdr->addr1);
  1184. if (sta) {
  1185. if (test_sta_flags(sta, WLAN_STA_PS)) {
  1186. /*
  1187. * The STA is in power save mode, so assume
  1188. * that this TX packet failed because of that.
  1189. */
  1190. ieee80211_handle_filtered_frame(local, sta, skb);
  1191. rcu_read_unlock();
  1192. return;
  1193. }
  1194. }
  1195. }
  1196. if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
  1197. struct sta_info *sta;
  1198. sta = sta_info_get(local, hdr->addr1);
  1199. if (sta) {
  1200. ieee80211_handle_filtered_frame(local, sta, skb);
  1201. rcu_read_unlock();
  1202. return;
  1203. }
  1204. } else
  1205. rate_control_tx_status(local->mdev, skb);
  1206. rcu_read_unlock();
  1207. ieee80211_led_tx(local, 0);
  1208. /* SNMP counters
  1209. * Fragments are passed to low-level drivers as separate skbs, so these
  1210. * are actually fragments, not frames. Update frame counters only for
  1211. * the first fragment of the frame. */
  1212. frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
  1213. type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
  1214. if (info->flags & IEEE80211_TX_STAT_ACK) {
  1215. if (frag == 0) {
  1216. local->dot11TransmittedFrameCount++;
  1217. if (is_multicast_ether_addr(hdr->addr1))
  1218. local->dot11MulticastTransmittedFrameCount++;
  1219. if (info->status.retry_count > 0)
  1220. local->dot11RetryCount++;
  1221. if (info->status.retry_count > 1)
  1222. local->dot11MultipleRetryCount++;
  1223. }
  1224. /* This counter shall be incremented for an acknowledged MPDU
  1225. * with an individual address in the address 1 field or an MPDU
  1226. * with a multicast address in the address 1 field of type Data
  1227. * or Management. */
  1228. if (!is_multicast_ether_addr(hdr->addr1) ||
  1229. type == IEEE80211_FTYPE_DATA ||
  1230. type == IEEE80211_FTYPE_MGMT)
  1231. local->dot11TransmittedFragmentCount++;
  1232. } else {
  1233. if (frag == 0)
  1234. local->dot11FailedCount++;
  1235. }
  1236. /* this was a transmitted frame, but now we want to reuse it */
  1237. skb_orphan(skb);
  1238. /*
  1239. * This is a bit racy but we can avoid a lot of work
  1240. * with this test...
  1241. */
  1242. if (!local->monitors && !local->cooked_mntrs) {
  1243. dev_kfree_skb(skb);
  1244. return;
  1245. }
  1246. /* send frame to monitor interfaces now */
  1247. if (skb_headroom(skb) < sizeof(*rthdr)) {
  1248. printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
  1249. dev_kfree_skb(skb);
  1250. return;
  1251. }
  1252. rthdr = (struct ieee80211_tx_status_rtap_hdr *)
  1253. skb_push(skb, sizeof(*rthdr));
  1254. memset(rthdr, 0, sizeof(*rthdr));
  1255. rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
  1256. rthdr->hdr.it_present =
  1257. cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
  1258. (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
  1259. if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
  1260. !is_multicast_ether_addr(hdr->addr1))
  1261. rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
  1262. if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) &&
  1263. (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
  1264. rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
  1265. else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
  1266. rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
  1267. rthdr->data_retries = info->status.retry_count;
  1268. /* XXX: is this sufficient for BPF? */
  1269. skb_set_mac_header(skb, 0);
  1270. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1271. skb->pkt_type = PACKET_OTHERHOST;
  1272. skb->protocol = htons(ETH_P_802_2);
  1273. memset(skb->cb, 0, sizeof(skb->cb));
  1274. rcu_read_lock();
  1275. list_for_each_entry_rcu(sdata, &local->interfaces, list) {
  1276. if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
  1277. if (!netif_running(sdata->dev))
  1278. continue;
  1279. if (prev_dev) {
  1280. skb2 = skb_clone(skb, GFP_ATOMIC);
  1281. if (skb2) {
  1282. skb2->dev = prev_dev;
  1283. netif_rx(skb2);
  1284. }
  1285. }
  1286. prev_dev = sdata->dev;
  1287. }
  1288. }
  1289. if (prev_dev) {
  1290. skb->dev = prev_dev;
  1291. netif_rx(skb);
  1292. skb = NULL;
  1293. }
  1294. rcu_read_unlock();
  1295. dev_kfree_skb(skb);
  1296. }
  1297. EXPORT_SYMBOL(ieee80211_tx_status);
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy            |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
	priv_size = ((sizeof(struct ieee80211_local) +
		      NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
		    priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);
	if (!wiphy)
		return NULL;

	wiphy->privid = mac80211_wiphy_privid;

	local = wiphy_priv(wiphy);
	local->hw.wiphy = wiphy;

	local->hw.priv = (char *)local +
			 ((sizeof(struct ieee80211_local) +
			   NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
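
	/* every driver must provide these basic callbacks */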
	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	local->hw.queues = 1; /* default */

	local->bridge_packets = 1;

	local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
	local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	local->short_retry_limit = 7;
	local->long_retry_limit = 4;
	local->hw.conf.radio_enabled = 1;

	INIT_LIST_HEAD(&local->interfaces);

	spin_lock_init(&local->key_lock);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);

	sta_info_init(local);
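
	/*
	 * Set up the deferred-work tasklets for pending TX and for
	 * RX/TX-status processing; both start out disabled.
	 */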
	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);
	tasklet_disable(&local->tx_pending_tasklet);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long) local);
	tasklet_disable(&local->tasklet);
	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
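
/*
 * Register the hardware with cfg80211, create the master netdev and a
 * default STA interface, and bring up rate control, WEP and debugfs.
 */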
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	const char *name;
	int result;
	enum ieee80211_band band;
	struct net_device *mdev;
	struct ieee80211_sub_if_data *sdata;

	/*
	 * generic code guarantees at least one band,
	 * set this very early because much code assumes
	 * that hw.conf.channel is assigned
	 */
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband;
		sband = local->hw.wiphy->bands[band];
		if (sband) {
			/* init channel we're on */
			local->hw.conf.channel =
			local->oper_channel =
			local->scan_channel = &sband->channels[0];
			break;
		}
	}

	result = wiphy_register(local->hw.wiphy);
	if (result < 0)
		return result;

	/*
	 * We use the number of queues for feature tests (QoS, HT) internally
	 * so restrict them appropriately.
	 */
#ifdef CONFIG_MAC80211_QOS
	if (hw->queues > IEEE80211_MAX_QUEUES)
		hw->queues = IEEE80211_MAX_QUEUES;
	if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
		hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
	if (hw->queues < 4)
		hw->ampdu_queues = 0;
#else
	hw->queues = 1;
	hw->ampdu_queues = 0;
#endif

	/* for now, mdev needs sub_if_data :/ */
	mdev = alloc_netdev_mq(sizeof(struct ieee80211_sub_if_data),
			       "wmaster%d", ether_setup,
			       ieee80211_num_queues(hw));
	if (!mdev)
		goto fail_mdev_alloc;

	if (ieee80211_num_queues(hw) > 1)
		mdev->features |= NETIF_F_MULTI_QUEUE;

	sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
	mdev->ieee80211_ptr = &sdata->wdev;
	sdata->wdev.wiphy = local->hw.wiphy;

	local->mdev = mdev;

	ieee80211_rx_bss_list_init(mdev);

	mdev->hard_start_xmit = ieee80211_master_start_xmit;
	mdev->open = ieee80211_master_open;
	mdev->stop = ieee80211_master_stop;
	mdev->type = ARPHRD_IEEE80211;
	mdev->header_ops = &ieee80211_header_ops;
	mdev->set_multicast_list = ieee80211_master_set_multicast_list;

	sdata->vif.type = IEEE80211_IF_TYPE_AP;
	sdata->dev = mdev;
	sdata->local = local;
	sdata->u.ap.force_unicast_rateidx = -1;
	sdata->u.ap.max_ratectrl_rateidx = -1;
	ieee80211_if_sdata_init(sdata);

	/* no RCU needed since we're still during init phase */
	list_add_tail(&sdata->list, &local->interfaces);

	name = wiphy_dev(local->hw.wiphy)->driver->name;
	local->hw.workqueue = create_singlethread_workqueue(name);
	if (!local->hw.workqueue) {
		result = -ENOMEM;
		goto fail_workqueue;
	}

	/*
	 * The hardware needs headroom for sending the frame,
	 * and we need some headroom for passing the frame to monitor
	 * interfaces, but never both at the same time.
	 */
	local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
				   sizeof(struct ieee80211_tx_status_rtap_hdr));

	debugfs_hw_add(local);

	local->hw.conf.beacon_int = 1000;

	local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
						  IEEE80211_HW_SIGNAL_DB |
						  IEEE80211_HW_SIGNAL_DBM) ?
			       IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
	local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
			       IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		local->wstats_flags |= IW_QUAL_DBM;

	result = sta_info_start(local);
	if (result < 0)
		goto fail_sta_info;

	rtnl_lock();
	result = dev_alloc_name(local->mdev, local->mdev->name);
	if (result < 0)
		goto fail_dev;

	memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
	SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy));

	result = register_netdevice(local->mdev);
	if (result < 0)
		goto fail_dev;

	ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP);

	result = ieee80211_init_rate_ctrl_alg(local,
					      hw->rate_control_algorithm);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize rate control "
		       "algorithm\n", wiphy_name(local->hw.wiphy));
		goto fail_rate;
	}

	result = ieee80211_wep_init(local);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize wep\n",
		       wiphy_name(local->hw.wiphy));
		goto fail_wep;
	}

	ieee80211_install_qdisc(local->mdev);

	/* add one default STA interface */
	result = ieee80211_if_add(local->mdev, "wlan%d", NULL,
				  IEEE80211_IF_TYPE_STA, NULL);
	if (result)
		printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
		       wiphy_name(local->hw.wiphy));

	local->reg_state = IEEE80211_DEV_REGISTERED;
	rtnl_unlock();

	ieee80211_led_init(local);

	return 0;
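
	/*
	 * Error unwinding: each label below undoes the steps that had
	 * succeeded before the corresponding failure, in reverse order.
	 */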
fail_wep:
	rate_control_deinitialize(local);
fail_rate:
	ieee80211_debugfs_remove_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	unregister_netdevice(local->mdev);
	local->mdev = NULL;
fail_dev:
	rtnl_unlock();
	sta_info_stop(local);
fail_sta_info:
	debugfs_hw_del(local);
	destroy_workqueue(local->hw.workqueue);
fail_workqueue:
	if (local->mdev != NULL) {
		ieee80211_if_free(local->mdev);
		local->mdev = NULL;
	}
fail_mdev_alloc:
	wiphy_unregister(local->hw.wiphy);
	return result;
}
EXPORT_SYMBOL(ieee80211_register_hw);
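
/*
 * Tear down everything ieee80211_register_hw() set up: remove all virtual
 * interfaces and the master interface, stop deferred work, and unregister
 * the wiphy.
 */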
void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata, *tmp;

	tasklet_kill(&local->tx_pending_tasklet);
	tasklet_kill(&local->tasklet);

	rtnl_lock();

	BUG_ON(local->reg_state != IEEE80211_DEV_REGISTERED);

	local->reg_state = IEEE80211_DEV_UNREGISTERED;

	/*
	 * At this point, interface list manipulations are fine
	 * because the driver cannot be handing us frames any
	 * more and the tasklet is killed.
	 */

	/*
	 * First, we remove all non-master interfaces. Do this because they
	 * may have bss pointer dependency on the master, and when we free
	 * the master these would be freed as well, breaking our list
	 * iteration completely.
	 */
	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
		if (sdata->dev == local->mdev)
			continue;
		list_del(&sdata->list);
		__ieee80211_if_del(local, sdata);
	}

	/* then, finally, remove the master interface */
	__ieee80211_if_del(local, IEEE80211_DEV_TO_SUB_IF(local->mdev));

	rtnl_unlock();

	ieee80211_rx_bss_list_deinit(local->mdev);
	ieee80211_clear_tx_pending(local);
	sta_info_stop(local);
	rate_control_deinitialize(local);
	debugfs_hw_del(local);

	if (skb_queue_len(&local->skb_queue)
			|| skb_queue_len(&local->skb_queue_unreliable))
		printk(KERN_WARNING "%s: skb_queue not empty\n",
		       wiphy_name(local->hw.wiphy));
	skb_queue_purge(&local->skb_queue);
	skb_queue_purge(&local->skb_queue_unreliable);

	destroy_workqueue(local->hw.workqueue);
	wiphy_unregister(local->hw.wiphy);
	ieee80211_wep_free(local);
	ieee80211_led_exit(local);
	ieee80211_if_free(local->mdev);
	local->mdev = NULL;
}
EXPORT_SYMBOL(ieee80211_unregister_hw);
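
/* Final teardown step: release the wiphy (and with it the private data). */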
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
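
/*
 * Module init: sanity-check that ieee80211_tx_info fits into skb->cb, then
 * register the PID rate control algorithm, WME support and the debugfs
 * netdev hooks.
 */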
static int __init ieee80211_init(void)
{
	struct sk_buff *skb;
	int ret;

	BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));

	ret = rc80211_pid_init();
	if (ret)
		goto out;

	ret = ieee80211_wme_register();
	if (ret) {
		printk(KERN_DEBUG "ieee80211_init: failed to "
		       "initialize WME (err=%d)\n", ret);
		goto out_cleanup_pid;
	}

	ieee80211_debugfs_netdev_init();

	return 0;

 out_cleanup_pid:
	rc80211_pid_exit();
 out:
	return ret;
}
static void __exit ieee80211_exit(void)
{
	rc80211_pid_exit();

	/*
	 * For key todo, it'll be empty by now but the work
	 * might still be scheduled.
	 */
	flush_scheduled_work();

	if (mesh_allocated)
		ieee80211s_stop();

	ieee80211_wme_unregister();
	ieee80211_debugfs_netdev_exit();
}


subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);

MODULE_DESCRIPTION("IEEE 802.11 subsystem");
MODULE_LICENSE("GPL");