main.c

/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>

#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "wep.h"
#include "wme.h"
#include "aes_ccm.h"
#include "led.h"
#include "cfg.h"
#include "debugfs.h"
#include "debugfs_netdev.h"

#define SUPP_MCS_SET_LEN 16

/*
 * For seeing transmitted packets on monitor interfaces
 * we have a radiotap header too.
 */
struct ieee80211_tx_status_rtap_hdr {
	struct ieee80211_radiotap_header hdr;
	__le16 tx_flags;
	u8 data_retries;
} __attribute__ ((packed));

/* common interface routines */

static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
{
	memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
	return ETH_ALEN;
}

/* must be called under mdev tx lock */
static void ieee80211_configure_filter(struct ieee80211_local *local)
{
	unsigned int changed_flags;
	unsigned int new_flags = 0;

	if (atomic_read(&local->iff_promiscs))
		new_flags |= FIF_PROMISC_IN_BSS;

	if (atomic_read(&local->iff_allmultis))
		new_flags |= FIF_ALLMULTI;

	if (local->monitors)
		new_flags |= FIF_BCN_PRBRESP_PROMISC;

	if (local->fif_fcsfail)
		new_flags |= FIF_FCSFAIL;

	if (local->fif_plcpfail)
		new_flags |= FIF_PLCPFAIL;

	if (local->fif_control)
		new_flags |= FIF_CONTROL;

	if (local->fif_other_bss)
		new_flags |= FIF_OTHER_BSS;

	changed_flags = local->filter_flags ^ new_flags;

	/* be a bit nasty */
	new_flags |= (1<<31);

	local->ops->configure_filter(local_to_hw(local),
				     changed_flags, &new_flags,
				     local->mdev->mc_count,
				     local->mdev->mc_list);

	WARN_ON(new_flags & (1<<31));

	local->filter_flags = new_flags & ~(1<<31);
}
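/*
 * Note on the "nasty" 1<<31 bit in ieee80211_configure_filter() above: it is
 * not a real FIF_* filter flag. Since no hardware can support it, a driver
 * that correctly clears the flags it cannot honour in its configure_filter()
 * callback will drop it, while a driver that blindly echoes the requested
 * flags trips the WARN_ON(). The bit is masked out again before the result
 * is cached in local->filter_flags.
 */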
/* master interface */

static int ieee80211_master_open(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata;
	int res = -EOPNOTSUPP;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata->dev != dev && netif_running(sdata->dev)) {
			res = 0;
			break;
		}
	}
	return res;
}

static int ieee80211_master_stop(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(sdata, &local->interfaces, list)
		if (sdata->dev != dev && netif_running(sdata->dev))
			dev_close(sdata->dev);

	return 0;
}

static void ieee80211_master_set_multicast_list(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);

	ieee80211_configure_filter(local);
}

/* regular interfaces */

static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
{
	int meshhdrlen;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;

	/* FIX: what would be proper limits for MTU?
	 * This interface uses 802.3 frames. */
	if (new_mtu < 256 ||
	    new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
		printk(KERN_WARNING "%s: invalid MTU %d\n",
		       dev->name, new_mtu);
		return -EINVAL;
	}

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
	dev->mtu = new_mtu;
	return 0;
}

static inline int identical_mac_addr_allowed(int type1, int type2)
{
	return (type1 == IEEE80211_IF_TYPE_MNTR ||
		type2 == IEEE80211_IF_TYPE_MNTR ||
		(type1 == IEEE80211_IF_TYPE_AP &&
		 type2 == IEEE80211_IF_TYPE_WDS) ||
		(type1 == IEEE80211_IF_TYPE_WDS &&
		 (type2 == IEEE80211_IF_TYPE_WDS ||
		  type2 == IEEE80211_IF_TYPE_AP)) ||
		(type1 == IEEE80211_IF_TYPE_AP &&
		 type2 == IEEE80211_IF_TYPE_VLAN) ||
		(type1 == IEEE80211_IF_TYPE_VLAN &&
		 (type2 == IEEE80211_IF_TYPE_AP ||
		  type2 == IEEE80211_IF_TYPE_VLAN)));
}
static int ieee80211_open(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata, *nsdata;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_if_init_conf conf;
	int res;
	bool need_hw_reconfig = 0;
	struct sta_info *sta;

	sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(nsdata, &local->interfaces, list) {
		struct net_device *ndev = nsdata->dev;

		if (ndev != dev && ndev != local->mdev && netif_running(ndev)) {
			/*
			 * Allow only a single IBSS interface to be up at any
			 * time. This is restricted because beacon distribution
			 * cannot work properly if both are in the same IBSS.
			 *
			 * To remove this restriction we'd have to disallow them
			 * from setting the same SSID on different IBSS interfaces
			 * belonging to the same hardware. Then, however, we're
			 * faced with having to adopt two different TSF timers...
			 */
			if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
			    nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
				return -EBUSY;

			/*
			 * Disallow multiple IBSS/STA mode interfaces.
			 *
			 * This is a technical restriction, it is possible although
			 * most likely not IEEE 802.11 compliant to have multiple
			 * STAs with just a single hardware (the TSF timer will not
			 * be adjusted properly.)
			 *
			 * However, because mac80211 uses the master device's BSS
			 * information for each STA/IBSS interface, doing this will
			 * currently corrupt that BSS information completely, unless,
			 * a not very useful case, both STAs are associated to the
			 * same BSS.
			 *
			 * To remove this restriction, the BSS information needs to
			 * be embedded in the STA/IBSS mode sdata instead of using
			 * the master device's BSS structure.
			 */
			if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
			     sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
			    (nsdata->vif.type == IEEE80211_IF_TYPE_STA ||
			     nsdata->vif.type == IEEE80211_IF_TYPE_IBSS))
				return -EBUSY;

			/*
			 * The remaining checks are only performed for interfaces
			 * with the same MAC address.
			 */
			if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
				continue;

			/*
			 * check whether it may have the same address
			 */
			if (!identical_mac_addr_allowed(sdata->vif.type,
							nsdata->vif.type))
				return -ENOTUNIQ;

			/*
			 * can only add VLANs to enabled APs
			 */
			if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
			    nsdata->vif.type == IEEE80211_IF_TYPE_AP)
				sdata->u.vlan.ap = nsdata;
		}
	}

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_WDS:
		if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
			return -ENOLINK;

		/* Create STA entry for the WDS peer */
		sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
				     GFP_KERNEL);
		if (!sta)
			return -ENOMEM;

		sta->flags |= WLAN_STA_AUTHORIZED;

		res = sta_info_insert(sta);
		if (res) {
			/* STA has been freed */
			return res;
		}
		break;
	case IEEE80211_IF_TYPE_VLAN:
		if (!sdata->u.vlan.ap)
			return -ENOLINK;
		break;
	case IEEE80211_IF_TYPE_AP:
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_MNTR:
	case IEEE80211_IF_TYPE_IBSS:
	case IEEE80211_IF_TYPE_MESH_POINT:
		/* no special treatment */
		break;
	case IEEE80211_IF_TYPE_INVALID:
		/* cannot happen */
		WARN_ON(1);
		break;
	}

	if (local->open_count == 0) {
		res = 0;
		if (local->ops->start)
			res = local->ops->start(local_to_hw(local));
		if (res)
			return res;
		need_hw_reconfig = 1;
		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
	}

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_VLAN:
		list_add(&sdata->u.vlan.list, &sdata->u.vlan.ap->u.ap.vlans);
		/* no need to tell driver */
		break;
	case IEEE80211_IF_TYPE_MNTR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs++;
			break;
		}

		/* must be before the call to ieee80211_configure_filter */
		local->monitors++;
		if (local->monitors == 1)
			local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;

		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
			local->fif_fcsfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
			local->fif_plcpfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
			local->fif_control++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
			local->fif_other_bss++;

		netif_tx_lock_bh(local->mdev);
		ieee80211_configure_filter(local);
		netif_tx_unlock_bh(local->mdev);
		break;
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
		/* fall through */
	default:
		conf.vif = &sdata->vif;
		conf.type = sdata->vif.type;
		conf.mac_addr = dev->dev_addr;
		res = local->ops->add_interface(local_to_hw(local), &conf);
		if (res && !local->open_count && local->ops->stop)
			local->ops->stop(local_to_hw(local));
		if (res)
			return res;

		ieee80211_if_config(dev);
		ieee80211_reset_erp_info(dev);
		ieee80211_enable_keys(sdata);

		if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
		    !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
			netif_carrier_off(dev);
		else
			netif_carrier_on(dev);
	}

	if (local->open_count == 0) {
		res = dev_open(local->mdev);
		WARN_ON(res);
		tasklet_enable(&local->tx_pending_tasklet);
		tasklet_enable(&local->tasklet);
	}

	/*
	 * set_multicast_list will be invoked by the networking core
	 * which will check whether any increments here were done in
	 * error and sync them down to the hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_inc(&local->iff_allmultis);

	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_inc(&local->iff_promiscs);

	local->open_count++;
	if (need_hw_reconfig)
		ieee80211_hw_config(local);

	/*
	 * ieee80211_sta_work is disabled while network interface
	 * is down. Therefore, some configuration changes may not
	 * yet be effective. Trigger execution of ieee80211_sta_work
	 * to fix this.
	 */
	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
		struct ieee80211_if_sta *ifsta = &sdata->u.sta;
		queue_work(local->hw.workqueue, &ifsta->work);
	}

	netif_start_queue(dev);

	return 0;
}
static int ieee80211_stop(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_init_conf conf;
	struct sta_info *sta;

	/*
	 * Stop TX on this interface first.
	 */
	netif_stop_queue(dev);

	/*
	 * Now delete all active aggregation sessions.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sta->sdata == sdata)
			ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
	}

	rcu_read_unlock();

	/*
	 * Remove all stations associated with this interface.
	 *
	 * This must be done before calling ops->remove_interface()
	 * because otherwise we can later invoke ops->sta_notify()
	 * whenever the STAs are removed, and that invalidates driver
	 * assumptions about always getting a vif pointer that is valid
	 * (because if we remove a STA after ops->remove_interface()
	 * the driver will have removed the vif info already!)
	 *
	 * We could relax this and only unlink the stations from the
	 * hash table and list but keep them on a per-sdata list that
	 * will be inserted back again when the interface is brought
	 * up again, but I don't currently see a use case for that,
	 * except with WDS which gets a STA entry created when it is
	 * brought up.
	 */
	sta_info_flush(local, sdata);

	/*
	 * Don't count this interface for promisc/allmulti while it
	 * is down. dev_mc_unsync() will invoke set_multicast_list
	 * on the master interface which will sync these down to the
	 * hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_dec(&local->iff_allmultis);

	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_dec(&local->iff_promiscs);

	dev_mc_unsync(local->mdev, dev);

	/* APs need special treatment */
	if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
		struct ieee80211_sub_if_data *vlan, *tmp;
		struct beacon_data *old_beacon = sdata->u.ap.beacon;

		/* remove beacon */
		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
		synchronize_rcu();
		kfree(old_beacon);

		/* down all dependent devices, that is VLANs */
		list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
					 u.vlan.list)
			dev_close(vlan->dev);
		WARN_ON(!list_empty(&sdata->u.ap.vlans));
	}

	local->open_count--;

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_VLAN:
		list_del(&sdata->u.vlan.list);
		sdata->u.vlan.ap = NULL;
		/* no need to tell driver */
		break;
	case IEEE80211_IF_TYPE_MNTR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs--;
			break;
		}

		local->monitors--;
		if (local->monitors == 0)
			local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;

		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
			local->fif_fcsfail--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
			local->fif_plcpfail--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
			local->fif_control--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
			local->fif_other_bss--;

		netif_tx_lock_bh(local->mdev);
		ieee80211_configure_filter(local);
		netif_tx_unlock_bh(local->mdev);
		break;
	case IEEE80211_IF_TYPE_MESH_POINT:
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		sdata->u.sta.state = IEEE80211_DISABLED;
		del_timer_sync(&sdata->u.sta.timer);
		/*
		 * When we get here, the interface is marked down.
		 * Call synchronize_rcu() to wait for the RX path
		 * should it be using the interface and enqueuing
		 * frames at this very time on another CPU.
		 */
		synchronize_rcu();
		skb_queue_purge(&sdata->u.sta.skb_queue);

		if (local->scan_dev == sdata->dev) {
			if (!local->ops->hw_scan) {
				local->sta_sw_scanning = 0;
				cancel_delayed_work(&local->scan_work);
			} else
				local->sta_hw_scanning = 0;
		}

		flush_workqueue(local->hw.workqueue);

		sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
		kfree(sdata->u.sta.extra_ie);
		sdata->u.sta.extra_ie = NULL;
		sdata->u.sta.extra_ie_len = 0;
		/* fall through */
	default:
		conf.vif = &sdata->vif;
		conf.type = sdata->vif.type;
		conf.mac_addr = dev->dev_addr;
		/* disable all keys for as long as this netdev is down */
		ieee80211_disable_keys(sdata);
		local->ops->remove_interface(local_to_hw(local), &conf);
	}

	if (local->open_count == 0) {
		if (netif_running(local->mdev))
			dev_close(local->mdev);

		if (local->ops->stop)
			local->ops->stop(local_to_hw(local));

		ieee80211_led_radio(local, 0);

		tasklet_disable(&local->tx_pending_tasklet);
		tasklet_disable(&local->tasklet);
	}

	return 0;
}
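/*
 * Note on the open/stop pairing above: local->open_count tracks how many
 * virtual interfaces are currently up, so the driver's ops->start()/
 * ops->stop(), the master device and the tasklets are only touched on the
 * first open and the last close.
 */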
int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	struct ieee80211_sub_if_data *sdata;
	u16 start_seq_num = 0;
	u8 *state;
	int ret;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM)
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();

	sta = sta_info_get(local, ra);
	if (!sta) {
		printk(KERN_DEBUG "Could not find the station\n");
		rcu_read_unlock();
		return -ENOENT;
	}

	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto start_ba_exit;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	/* check if the TID is not in aggregation flow already */
	if (*state != HT_AGG_STATE_IDLE) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
		       "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto start_ba_exit;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	sta->ampdu_mlme.tid_tx[tid] =
			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!sta->ampdu_mlme.tid_tx[tid]) {
		if (net_ratelimit())
			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
			       tid);
		ret = -ENOMEM;
		goto start_ba_exit;
	}
	/* Tx timer */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
			sta_addba_resp_timer_expired;
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
			(unsigned long)&sta->timer_to_tid[tid];
	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);

	/* ensure that TX flow won't interrupt us
	 * until the end of the call to requeue function */
	spin_lock_bh(&local->mdev->queue_lock);

	/* create a new queue for this aggregation */
	ret = ieee80211_ht_agg_queue_add(local, sta, tid);

	/* if no queue is available for aggregation,
	 * don't switch to aggregation */
	if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - queue unavailable for"
		       " tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		goto start_ba_err;
	}
	sdata = sta->sdata;

	/* Ok, the addBA frame hasn't been sent yet, but if the driver calls
	 * the callback right away, it must see that the flow has begun */
	*state |= HT_ADDBA_REQUESTED_MSK;

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
					       ra, tid, &start_seq_num);

	if (ret) {
		/* No need to requeue the packets in the agg queue, since we
		 * held the tx lock: no packet could be enqueued to the newly
		 * allocated queue */
		ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
		       " tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		*state = HT_AGG_STATE_IDLE;
		goto start_ba_err;
	}

	/* Will put all the packets in the new SW queue */
	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
	spin_unlock_bh(&local->mdev->queue_lock);

	/* send an addBA request */
	sta->ampdu_mlme.dialog_token_allocator++;
	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
			sta->ampdu_mlme.dialog_token_allocator;
	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;

	ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
				     sta->ampdu_mlme.tid_tx[tid]->dialog_token,
				     sta->ampdu_mlme.tid_tx[tid]->ssn,
				     0x40, 5000);

	/* activate the timer for the recipient's addBA response */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
			jiffies + ADDBA_RESP_INTERVAL;
	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
	goto start_ba_exit;

start_ba_err:
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&local->mdev->queue_lock);
	ret = -EBUSY;
start_ba_exit:
	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
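/*
 * Illustrative sketch (not code from this file): a driver or rate control
 * algorithm would typically kick off aggregation for a peer/TID once it
 * sees sustained QoS data traffic, e.g.
 *
 *	ret = ieee80211_start_tx_ba_session(hw, sta_addr, tid);
 *	if (ret)
 *		... keep sending non-aggregated frames ...
 *
 * The addBA handshake then completes asynchronously: the driver reports
 * readiness through ieee80211_start_tx_ba_cb()/_irqsafe() below, and the
 * peer's addBA response (or the response timer expiring) finishes or tears
 * down the setup. sta_addr and tid above are placeholders.
 */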
int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
				 u8 *ra, u16 tid,
				 enum ieee80211_back_parties initiator)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int ret = 0;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM)
		return -EINVAL;

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* check if the TID is in aggregation */
	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);

	if (*state != HT_AGG_STATE_OPERATIONAL) {
		ret = -ENOENT;
		goto stop_BA_exit;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);

	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
					       ra, tid, NULL);

	/* case HW denied going back to legacy */
	if (ret) {
		WARN_ON(ret != -EBUSY);
		*state = HT_AGG_STATE_OPERATIONAL;
		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
		goto stop_BA_exit;
	}

stop_BA_exit:
	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM) {
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
		return;
	}

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
		printk(KERN_DEBUG "Could not find station: %s\n",
		       print_mac(mac, ra));
		return;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);

	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
		       *state);
		spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
		rcu_read_unlock();
		return;
	}

	WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);

	*state |= HT_ADDBA_DRV_READY_MSK;

	if (*state == HT_AGG_STATE_OPERATIONAL) {
		printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
	}
	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int agg_queue;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM) {
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		printk(KERN_DEBUG "Could not find station: %s\n",
		       print_mac(mac, ra));
		rcu_read_unlock();
		return;
	}
	state = &sta->ampdu_mlme.tid_state_tx[tid];

	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
		spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
		rcu_read_unlock();
		return;
	}

	if (*state & HT_AGG_STATE_INITIATOR_MSK)
		ieee80211_send_delba(sta->sdata->dev, ra, tid,
				     WLAN_BACK_INITIATOR,
				     WLAN_REASON_QSTA_NOT_USE);

	agg_queue = sta->tid_to_tx_q[tid];

	/* avoid ordering issues: we are the only one that can modify
	 * the content of the qdiscs */
	spin_lock_bh(&local->mdev->queue_lock);
	/* remove the queue for this aggregation */
	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
	spin_unlock_bh(&local->mdev->queue_lock);

	/* we just requeued all the frames that were in the removed queue;
	 * since we might miss a softirq we do netif_schedule.
	 * ieee80211_wake_queue is not used here as this queue is not
	 * necessarily stopped */
	netif_schedule(local->mdev);
	*state = HT_AGG_STATE_IDLE;
	sta->ampdu_mlme.addba_req_num[tid] = 0;
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);

	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
		/* skb is NULL here, so use the wiphy name for the message */
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping start BA session",
			       wiphy_name(hw->wiphy));
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_ADDBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
		/* skb is NULL here, so use the wiphy name for the message */
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping stop BA session",
			       wiphy_name(hw->wiphy));
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_DELBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
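/*
 * The two _irqsafe variants above allocate a zero-length skb purely as a
 * message carrier: the receiver address and TID are stashed in skb->cb and
 * skb->pkt_type selects the message, which the tasklet handler further
 * below dispatches back to ieee80211_start_tx_ba_cb() or
 * ieee80211_stop_tx_ba_cb() in softirq context.
 */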
static void ieee80211_set_multicast_list(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	int allmulti, promisc, sdata_allmulti, sdata_promisc;

	allmulti = !!(dev->flags & IFF_ALLMULTI);
	promisc = !!(dev->flags & IFF_PROMISC);
	sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
	sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);

	if (allmulti != sdata_allmulti) {
		if (dev->flags & IFF_ALLMULTI)
			atomic_inc(&local->iff_allmultis);
		else
			atomic_dec(&local->iff_allmultis);
		sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
	}

	if (promisc != sdata_promisc) {
		if (dev->flags & IFF_PROMISC)
			atomic_inc(&local->iff_promiscs);
		else
			atomic_dec(&local->iff_promiscs);
		sdata->flags ^= IEEE80211_SDATA_PROMISC;
	}

	dev_mc_sync(local->mdev, dev);
}

static const struct header_ops ieee80211_header_ops = {
	.create		= eth_header,
	.parse		= header_parse_80211,
	.rebuild	= eth_rebuild_header,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};
/* Must not be called for mdev */
void ieee80211_if_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->hard_start_xmit = ieee80211_subif_start_xmit;
	dev->wireless_handlers = &ieee80211_iw_handler_def;
	dev->set_multicast_list = ieee80211_set_multicast_list;
	dev->change_mtu = ieee80211_change_mtu;
	dev->open = ieee80211_open;
	dev->stop = ieee80211_stop;
	dev->destructor = ieee80211_if_free;
}

/* everything else */

static int __ieee80211_if_config(struct net_device *dev,
				 struct sk_buff *beacon,
				 struct ieee80211_tx_control *control)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_if_conf conf;

	if (!local->ops->config_interface || !netif_running(dev))
		return 0;

	memset(&conf, 0, sizeof(conf));
	conf.type = sdata->vif.type;
	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
		conf.bssid = sdata->u.sta.bssid;
		conf.ssid = sdata->u.sta.ssid;
		conf.ssid_len = sdata->u.sta.ssid_len;
	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		conf.beacon = beacon;
		conf.beacon_control = control;
		ieee80211_start_mesh(dev);
	} else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
		conf.ssid = sdata->u.ap.ssid;
		conf.ssid_len = sdata->u.ap.ssid_len;
		conf.beacon = beacon;
		conf.beacon_control = control;
	}
	return local->ops->config_interface(local_to_hw(local),
					    &sdata->vif, &conf);
}
int ieee80211_if_config(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);

	if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
	    (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
		return ieee80211_if_config_beacon(dev);

	return __ieee80211_if_config(dev, NULL, NULL);
}

int ieee80211_if_config_beacon(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_tx_control control;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct sk_buff *skb;

	if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
		return 0;

	skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif,
				   &control);
	if (!skb)
		return -ENOMEM;

	return __ieee80211_if_config(dev, skb, &control);
}

int ieee80211_hw_config(struct ieee80211_local *local)
{
	struct ieee80211_channel *chan;
	int ret = 0;

	if (local->sta_sw_scanning)
		chan = local->scan_channel;
	else
		chan = local->oper_channel;

	local->hw.conf.channel = chan;

	if (!local->hw.conf.power_level)
		local->hw.conf.power_level = chan->max_power;
	else
		local->hw.conf.power_level = min(chan->max_power,
						 local->hw.conf.power_level);

	local->hw.conf.max_antenna_gain = chan->max_antenna_gain;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
	       wiphy_name(local->hw.wiphy), chan->center_freq);
#endif

	if (local->open_count)
		ret = local->ops->config(local_to_hw(local), &local->hw.conf);

	return ret;
}
/**
 * ieee80211_handle_ht should be used only after the legacy configuration
 * has been determined, namely the band, as the HT configuration depends
 * on the hardware's HT abilities for a _specific_ band.
 */
u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
			struct ieee80211_ht_info *req_ht_cap,
			struct ieee80211_ht_bss_info *req_bss_cap)
{
	struct ieee80211_conf *conf = &local->hw.conf;
	struct ieee80211_supported_band *sband;
	struct ieee80211_ht_info ht_conf;
	struct ieee80211_ht_bss_info ht_bss_conf;
	int i;
	u32 changed = 0;

	sband = local->hw.wiphy->bands[conf->channel->band];

	/* HT is not supported */
	if (!sband->ht_info.ht_supported) {
		conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
		return 0;
	}

	memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
	memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));

	if (enable_ht) {
		if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
			changed |= BSS_CHANGED_HT;

		conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
		ht_conf.ht_supported = 1;

		ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
		ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
		ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;

		for (i = 0; i < SUPP_MCS_SET_LEN; i++)
			ht_conf.supp_mcs_set[i] =
					sband->ht_info.supp_mcs_set[i] &
					req_ht_cap->supp_mcs_set[i];

		ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
		ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
		ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;

		ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
		ht_conf.ampdu_density = req_ht_cap->ampdu_density;

		/* if bss configuration changed store the new one */
		if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
		    memcmp(&conf->ht_bss_conf, &ht_bss_conf,
			   sizeof(ht_bss_conf))) {
			changed |= BSS_CHANGED_HT;
			memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
			memcpy(&conf->ht_bss_conf, &ht_bss_conf,
			       sizeof(ht_bss_conf));
		}
	} else {
		if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
			changed |= BSS_CHANGED_HT;
		conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
	}

	return changed;
}
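/*
 * Illustrative usage (a sketch, not code from this file): the STA MLME
 * would typically call ieee80211_handle_ht() after (re)association, once
 * the AP's HT capability and HT operation elements have been parsed for
 * the current band, and then push the result out, e.g.
 *
 *	changed = ieee80211_handle_ht(local, 1, &ap_ht_cap, &ap_ht_bss);
 *	ieee80211_bss_info_change_notify(sdata, changed);
 *
 * where ap_ht_cap/ap_ht_bss are placeholders for the parsed elements.
 */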
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
				      u32 changed)
{
	struct ieee80211_local *local = sdata->local;

	if (!changed)
		return;

	if (local->ops->bss_info_changed)
		local->ops->bss_info_changed(local_to_hw(local),
					     &sdata->vif,
					     &sdata->bss_conf,
					     changed);
}

void ieee80211_reset_erp_info(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	sdata->bss_conf.use_cts_prot = 0;
	sdata->bss_conf.use_short_preamble = 0;
	ieee80211_bss_info_change_notify(sdata,
					 BSS_CHANGED_ERP_CTS_PROT |
					 BSS_CHANGED_ERP_PREAMBLE);
}

void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
				 struct sk_buff *skb,
				 struct ieee80211_tx_status *status)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_status *saved;
	int tmp;

	skb->dev = local->mdev;
	saved = kmalloc(sizeof(struct ieee80211_tx_status), GFP_ATOMIC);
	if (unlikely(!saved)) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping tx status", skb->dev->name);
		/* should be dev_kfree_skb_irq, but due to this function being
		 * named _irqsafe instead of just _irq we can't be sure that
		 * people won't call it from non-irq contexts */
		dev_kfree_skb_any(skb);
		return;
	}
	memcpy(saved, status, sizeof(struct ieee80211_tx_status));
	/* copy pointer to saved status into skb->cb for use by tasklet */
	memcpy(skb->cb, &saved, sizeof(saved));

	skb->pkt_type = IEEE80211_TX_STATUS_MSG;
	skb_queue_tail(status->control.flags & IEEE80211_TXCTL_REQ_TX_STATUS ?
		       &local->skb_queue : &local->skb_queue_unreliable, skb);
	tmp = skb_queue_len(&local->skb_queue) +
		skb_queue_len(&local->skb_queue_unreliable);
	while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		memcpy(&saved, skb->cb, sizeof(saved));
		kfree(saved);
		dev_kfree_skb_irq(skb);
		tmp--;
		I802_DEBUG_INC(local->tx_status_drop);
	}
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
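/*
 * Note on the two queues used above: statuses for frames that requested TX
 * status reporting go to local->skb_queue and are always delivered, while
 * the rest land on skb_queue_unreliable and may be dropped (oldest first)
 * once the combined backlog exceeds IEEE80211_IRQSAFE_QUEUE_LIMIT. The
 * actual reporting is deferred to the tasklet handler below.
 */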
static void ieee80211_tasklet_handler(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *) data;
	struct sk_buff *skb;
	struct ieee80211_rx_status rx_status;
	struct ieee80211_tx_status *tx_status;
	struct ieee80211_ra_tid *ra_tid;

	while ((skb = skb_dequeue(&local->skb_queue)) ||
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		switch (skb->pkt_type) {
		case IEEE80211_RX_MSG:
			/* status is in skb->cb */
			memcpy(&rx_status, skb->cb, sizeof(rx_status));
			/* Clear skb->pkt_type in order to not confuse kernel
			 * netstack. */
			skb->pkt_type = 0;
			__ieee80211_rx(local_to_hw(local), skb, &rx_status);
			break;
		case IEEE80211_TX_STATUS_MSG:
			/* get pointer to saved status out of skb->cb */
			memcpy(&tx_status, skb->cb, sizeof(tx_status));
			skb->pkt_type = 0;
			ieee80211_tx_status(local_to_hw(local),
					    skb, tx_status);
			kfree(tx_status);
			break;
		case IEEE80211_DELBA_MSG:
			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
			ieee80211_stop_tx_ba_cb(local_to_hw(local),
						ra_tid->ra, ra_tid->tid);
			dev_kfree_skb(skb);
			break;
		case IEEE80211_ADDBA_MSG:
			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
			ieee80211_start_tx_ba_cb(local_to_hw(local),
						 ra_tid->ra, ra_tid->tid);
			dev_kfree_skb(skb);
			break;
		default: /* should never get here! */
			printk(KERN_ERR "%s: Unknown message type (%d)\n",
			       wiphy_name(local->hw.wiphy), skb->pkt_type);
			dev_kfree_skb(skb);
			break;
		}
	}
}
/* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to
 * make a prepared TX frame (one that has been given to the hw) look like a
 * brand new IEEE 802.11 frame that is ready to go through TX processing
 * again. Also, tx_packet_data in cb is restored from tx_control. */
static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
				      struct ieee80211_key *key,
				      struct sk_buff *skb,
				      struct ieee80211_tx_control *control)
{
	int hdrlen, iv_len, mic_len;
	struct ieee80211_tx_packet_data *pkt_data;

	pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
	pkt_data->ifindex = vif_to_sdata(control->vif)->dev->ifindex;
	pkt_data->flags = 0;
	if (control->flags & IEEE80211_TXCTL_REQ_TX_STATUS)
		pkt_data->flags |= IEEE80211_TXPD_REQ_TX_STATUS;
	if (control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)
		pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT;
	if (control->flags & IEEE80211_TXCTL_REQUEUE)
		pkt_data->flags |= IEEE80211_TXPD_REQUEUE;
	if (control->flags & IEEE80211_TXCTL_EAPOL_FRAME)
		pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME;
	pkt_data->queue = control->queue;

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);

	if (!key)
		goto no_key;

	switch (key->conf.alg) {
	case ALG_WEP:
		iv_len = WEP_IV_LEN;
		mic_len = WEP_ICV_LEN;
		break;
	case ALG_TKIP:
		iv_len = TKIP_IV_LEN;
		mic_len = TKIP_ICV_LEN;
		break;
	case ALG_CCMP:
		iv_len = CCMP_HDR_LEN;
		mic_len = CCMP_MIC_LEN;
		break;
	default:
		goto no_key;
	}

	if (skb->len >= mic_len &&
	    !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
		skb_trim(skb, skb->len - mic_len);
	if (skb->len >= iv_len && skb->len > hdrlen) {
		memmove(skb->data + iv_len, skb->data, hdrlen);
		skb_pull(skb, iv_len);
	}

no_key:
	{
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		u16 fc = le16_to_cpu(hdr->frame_control);
		if ((fc & 0x8C) == 0x88) /* QoS Control Field */ {
			fc &= ~IEEE80211_STYPE_QOS_DATA;
			hdr->frame_control = cpu_to_le16(fc);
			memmove(skb->data + 2, skb->data, hdrlen - 2);
			skb_pull(skb, 2);
		}
	}
}
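/*
 * The helper above effectively reverses what the TX path added (IV, MIC,
 * QoS control field) so that a filtered or software-retried frame can be
 * handed back to dev_queue_xmit() and processed as if it were new.
 */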
static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
					    struct sta_info *sta,
					    struct sk_buff *skb,
					    struct ieee80211_tx_status *status)
{
	sta->tx_filtered_count++;

	/*
	 * Clear the TX filter mask for this STA when sending the next
	 * packet. If the STA went to power save mode, this will happen
	 * when it wakes up the next time.
	 */
	sta->flags |= WLAN_STA_CLEAR_PS_FILT;

	/*
	 * This code races in the following way:
	 *
	 *  (1) STA sends frame indicating it will go to sleep and does so
	 *  (2) hardware/firmware adds STA to filter list, passes frame up
	 *  (3) hardware/firmware processes TX fifo and suppresses a frame
	 *  (4) we get TX status before having processed the frame and
	 *	knowing that the STA has gone to sleep.
	 *
	 * This is actually quite unlikely even when both those events are
	 * processed from interrupts coming in quickly after one another or
	 * even at the same time because we queue both TX status events and
	 * RX frames to be processed by a tasklet and process them in the
	 * same order that they were received or TX status last. Hence, there
	 * is no race as long as the frame RX is processed before the next TX
	 * status, which drivers can ensure, see below.
	 *
	 * Note that this can only happen if the hardware or firmware can
	 * actually add STAs to the filter list, if this is done by the
	 * driver in response to set_tim() (which will only reduce the race
	 * this whole filtering tries to solve, not completely solve it)
	 * this situation cannot happen.
	 *
	 * To completely solve this race drivers need to make sure that they
	 *  (a) don't mix the irq-safe/not irq-safe TX status/RX processing
	 *	functions and
	 *  (b) always process RX events before TX status events if ordering
	 *	can be unknown, for example with different interrupt status
	 *	bits.
	 */
	if (sta->flags & WLAN_STA_PS &&
	    skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
		ieee80211_remove_tx_extra(local, sta->key, skb,
					  &status->control);
		skb_queue_tail(&sta->tx_filtered, skb);
		return;
	}

	if (!(sta->flags & WLAN_STA_PS) &&
	    !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) {
		/* Software retry the packet once */
		status->control.flags |= IEEE80211_TXCTL_REQUEUE;
		ieee80211_remove_tx_extra(local, sta->key, skb,
					  &status->control);
		dev_queue_xmit(skb);
		return;
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "%s: dropped TX filtered frame, "
		       "queue_len=%d PS=%d @%lu\n",
		       wiphy_name(local->hw.wiphy),
		       skb_queue_len(&sta->tx_filtered),
		       !!(sta->flags & WLAN_STA_PS), jiffies);
	dev_kfree_skb(skb);
}
  1171. void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
  1172. struct ieee80211_tx_status *status)
  1173. {
  1174. struct sk_buff *skb2;
  1175. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1176. struct ieee80211_local *local = hw_to_local(hw);
  1177. u16 frag, type;
  1178. struct ieee80211_tx_status_rtap_hdr *rthdr;
  1179. struct ieee80211_sub_if_data *sdata;
  1180. struct net_device *prev_dev = NULL;
  1181. if (!status) {
  1182. printk(KERN_ERR
  1183. "%s: ieee80211_tx_status called with NULL status\n",
  1184. wiphy_name(local->hw.wiphy));
  1185. dev_kfree_skb(skb);
  1186. return;
  1187. }
  1188. rcu_read_lock();
  1189. if (status->excessive_retries) {
  1190. struct sta_info *sta;
  1191. sta = sta_info_get(local, hdr->addr1);
  1192. if (sta) {
  1193. if (sta->flags & WLAN_STA_PS) {
  1194. /*
  1195. * The STA is in power save mode, so assume
  1196. * that this TX packet failed because of that.
  1197. */
  1198. status->excessive_retries = 0;
  1199. status->flags |= IEEE80211_TX_STATUS_TX_FILTERED;
  1200. ieee80211_handle_filtered_frame(local, sta,
  1201. skb, status);
  1202. rcu_read_unlock();
  1203. return;
  1204. }
  1205. }
  1206. }
  1207. if (status->flags & IEEE80211_TX_STATUS_TX_FILTERED) {
  1208. struct sta_info *sta;
  1209. sta = sta_info_get(local, hdr->addr1);
  1210. if (sta) {
  1211. ieee80211_handle_filtered_frame(local, sta, skb,
  1212. status);
  1213. rcu_read_unlock();
  1214. return;
  1215. }
  1216. } else
  1217. rate_control_tx_status(local->mdev, skb, status);
  1218. rcu_read_unlock();
  1219. ieee80211_led_tx(local, 0);
  1220. /* SNMP counters
  1221. * Fragments are passed to low-level drivers as separate skbs, so these
  1222. * are actually fragments, not frames. Update frame counters only for
  1223. * the first fragment of the frame. */
  1224. frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
  1225. type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
  1226. if (status->flags & IEEE80211_TX_STATUS_ACK) {
  1227. if (frag == 0) {
  1228. local->dot11TransmittedFrameCount++;
  1229. if (is_multicast_ether_addr(hdr->addr1))
  1230. local->dot11MulticastTransmittedFrameCount++;
  1231. if (status->retry_count > 0)
  1232. local->dot11RetryCount++;
  1233. if (status->retry_count > 1)
  1234. local->dot11MultipleRetryCount++;
  1235. }
  1236. /* This counter shall be incremented for an acknowledged MPDU
  1237. * with an individual address in the address 1 field or an MPDU
  1238. * with a multicast address in the address 1 field of type Data
  1239. * or Management. */
  1240. if (!is_multicast_ether_addr(hdr->addr1) ||
  1241. type == IEEE80211_FTYPE_DATA ||
  1242. type == IEEE80211_FTYPE_MGMT)
  1243. local->dot11TransmittedFragmentCount++;
  1244. } else {
  1245. if (frag == 0)
  1246. local->dot11FailedCount++;
  1247. }
  1248. /* this was a transmitted frame, but now we want to reuse it */
  1249. skb_orphan(skb);
  1250. /*
  1251. * This is a bit racy but we can avoid a lot of work
  1252. * with this test...
  1253. */
  1254. if (!local->monitors && !local->cooked_mntrs) {
  1255. dev_kfree_skb(skb);
  1256. return;
  1257. }
  1258. /* send frame to monitor interfaces now */
  1259. if (skb_headroom(skb) < sizeof(*rthdr)) {
  1260. printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
  1261. dev_kfree_skb(skb);
  1262. return;
  1263. }
  1264. rthdr = (struct ieee80211_tx_status_rtap_hdr*)
  1265. skb_push(skb, sizeof(*rthdr));
  1266. memset(rthdr, 0, sizeof(*rthdr));
  1267. rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
  1268. rthdr->hdr.it_present =
  1269. cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
  1270. (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
  1271. if (!(status->flags & IEEE80211_TX_STATUS_ACK) &&
  1272. !is_multicast_ether_addr(hdr->addr1))
  1273. rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
  1274. if ((status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) &&
  1275. (status->control.flags & IEEE80211_TXCTL_USE_CTS_PROTECT))
  1276. rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
  1277. else if (status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS)
  1278. rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
  1279. rthdr->data_retries = status->retry_count;
  1280. /* XXX: is this sufficient for BPF? */
  1281. skb_set_mac_header(skb, 0);
  1282. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1283. skb->pkt_type = PACKET_OTHERHOST;
  1284. skb->protocol = htons(ETH_P_802_2);
  1285. memset(skb->cb, 0, sizeof(skb->cb));
	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
			if (!netif_running(sdata->dev))
				continue;

			if (prev_dev) {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2) {
					skb2->dev = prev_dev;
					netif_rx(skb2);
				}
			}

			prev_dev = sdata->dev;
		}
	}
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	}
	rcu_read_unlock();
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status);
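
/*
 * Illustrative sketch (not part of mac80211): how a hypothetical driver's
 * TX-completion path could feed completed frames back into
 * ieee80211_tx_status(). The names foo_tx_done(), "acked" and "retries"
 * are made up; only the ieee80211_tx_status fields actually used above
 * (control, flags, retry_count) are assumed. A driver that completes
 * frames from hard interrupt context would typically use the irqsafe
 * variant, ieee80211_tx_status_irqsafe(), instead.
 *
 *	static void foo_tx_done(struct ieee80211_hw *hw, struct sk_buff *skb,
 *				struct ieee80211_tx_control *control,
 *				bool acked, u8 retries)
 *	{
 *		struct ieee80211_tx_status status;
 *
 *		memset(&status, 0, sizeof(status));
 *		memcpy(&status.control, control, sizeof(status.control));
 *		status.retry_count = retries;
 *		if (acked)
 *			status.flags |= IEEE80211_TX_STATUS_ACK;
 *
 *		ieee80211_tx_status(hw, skb, &status);
 *	}
 */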

struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy            |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
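	/*
	 * (Illustrative arithmetic only: with 32-byte alignment,
	 * NETDEV_ALIGN_CONST is 31, so a struct ieee80211_local of, say,
	 * 1234 bytes would be rounded up to 1248 bytes before the driver's
	 * private area begins.)
	 */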
	priv_size = ((sizeof(struct ieee80211_local) +
		      NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
		    priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);
	if (!wiphy)
		return NULL;

	wiphy->privid = mac80211_wiphy_privid;

	local = wiphy_priv(wiphy);
	local->hw.wiphy = wiphy;

	local->hw.priv = (char *)local +
			 ((sizeof(struct ieee80211_local) +
			   NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);

	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	local->hw.queues = 1; /* default */

	local->bridge_packets = 1;

	local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
	local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	local->short_retry_limit = 7;
	local->long_retry_limit = 4;
	local->hw.conf.radio_enabled = 1;

	INIT_LIST_HEAD(&local->interfaces);

	spin_lock_init(&local->key_lock);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);

	sta_info_init(local);

	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);
	tasklet_disable(&local->tx_pending_tasklet);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long)local);
	tasklet_disable(&local->tasklet);

	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
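
/*
 * Illustrative sketch (not part of mac80211): the minimal set of callbacks
 * a hypothetical driver "foo" would have to provide before calling
 * ieee80211_alloc_hw(), matching the BUG_ON() checks above. hw->priv then
 * points at the aligned per-driver area shown in the layout comment.
 * All foo_* names and struct foo_priv are made up.
 *
 *	struct foo_priv {
 *		void __iomem *regs;
 *	};
 *
 *	static const struct ieee80211_ops foo_ops = {
 *		.tx			= foo_tx,
 *		.start			= foo_start,
 *		.stop			= foo_stop,
 *		.config			= foo_config,
 *		.add_interface		= foo_add_interface,
 *		.remove_interface	= foo_remove_interface,
 *		.configure_filter	= foo_configure_filter,
 *	};
 *
 *	struct ieee80211_hw *hw;
 *	struct foo_priv *priv;
 *
 *	hw = ieee80211_alloc_hw(sizeof(struct foo_priv), &foo_ops);
 *	if (!hw)
 *		return -ENOMEM;
 *	priv = hw->priv;
 */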

int ieee80211_register_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	const char *name;
	int result;
	enum ieee80211_band band;
	struct net_device *mdev;
	struct ieee80211_sub_if_data *sdata;

	/*
	 * generic code guarantees at least one band,
	 * set this very early because much code assumes
	 * that hw.conf.channel is assigned
	 */
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband;
		sband = local->hw.wiphy->bands[band];
		if (sband) {
			/* init channel we're on */
			local->hw.conf.channel =
			local->oper_channel =
			local->scan_channel = &sband->channels[0];
			break;
		}
	}

	result = wiphy_register(local->hw.wiphy);
	if (result < 0)
		return result;

	/* for now, mdev needs sub_if_data :/ */
	mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data),
			    "wmaster%d", ether_setup);
	if (!mdev) {
		result = -ENOMEM;
		goto fail_mdev_alloc;
	}

	sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
	mdev->ieee80211_ptr = &sdata->wdev;
	sdata->wdev.wiphy = local->hw.wiphy;

	local->mdev = mdev;

	ieee80211_rx_bss_list_init(mdev);

	mdev->hard_start_xmit = ieee80211_master_start_xmit;
	mdev->open = ieee80211_master_open;
	mdev->stop = ieee80211_master_stop;
	mdev->type = ARPHRD_IEEE80211;
	mdev->header_ops = &ieee80211_header_ops;
	mdev->set_multicast_list = ieee80211_master_set_multicast_list;

	sdata->vif.type = IEEE80211_IF_TYPE_AP;
	sdata->dev = mdev;
	sdata->local = local;
	sdata->u.ap.force_unicast_rateidx = -1;
	sdata->u.ap.max_ratectrl_rateidx = -1;
	ieee80211_if_sdata_init(sdata);

	/* no RCU needed since we're still during init phase */
	list_add_tail(&sdata->list, &local->interfaces);

	name = wiphy_dev(local->hw.wiphy)->driver->name;
	local->hw.workqueue = create_singlethread_workqueue(name);
	if (!local->hw.workqueue) {
		result = -ENOMEM;
		goto fail_workqueue;
	}

	/*
	 * The hardware needs headroom for sending the frame,
	 * and we need some headroom for passing the frame to monitor
	 * interfaces, but never both at the same time.
	 */
	local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
				   sizeof(struct ieee80211_tx_status_rtap_hdr));

	debugfs_hw_add(local);

	local->hw.conf.beacon_int = 1000;

	local->wstats_flags |= local->hw.max_rssi ?
			       IW_QUAL_LEVEL_UPDATED : IW_QUAL_LEVEL_INVALID;
	local->wstats_flags |= local->hw.max_signal ?
			       IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
	local->wstats_flags |= local->hw.max_noise ?
			       IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
	if (local->hw.max_rssi < 0 || local->hw.max_noise < 0)
		local->wstats_flags |= IW_QUAL_DBM;

	result = sta_info_start(local);
	if (result < 0)
		goto fail_sta_info;

	rtnl_lock();
	result = dev_alloc_name(local->mdev, local->mdev->name);
	if (result < 0)
		goto fail_dev;

	memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
	SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy));

	result = register_netdevice(local->mdev);
	if (result < 0)
		goto fail_dev;

	ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP);

	result = ieee80211_init_rate_ctrl_alg(local,
					      hw->rate_control_algorithm);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize rate control "
		       "algorithm\n", wiphy_name(local->hw.wiphy));
		goto fail_rate;
	}

	result = ieee80211_wep_init(local);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize wep\n",
		       wiphy_name(local->hw.wiphy));
		goto fail_wep;
	}

	ieee80211_install_qdisc(local->mdev);

	/* add one default STA interface */
	result = ieee80211_if_add(local->mdev, "wlan%d", NULL,
				  IEEE80211_IF_TYPE_STA, NULL);
	if (result)
		printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
		       wiphy_name(local->hw.wiphy));

	local->reg_state = IEEE80211_DEV_REGISTERED;
	rtnl_unlock();

	ieee80211_led_init(local);

	return 0;

fail_wep:
	rate_control_deinitialize(local);
fail_rate:
	ieee80211_debugfs_remove_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	unregister_netdevice(local->mdev);
fail_dev:
	rtnl_unlock();
	sta_info_stop(local);
fail_sta_info:
	debugfs_hw_del(local);
	destroy_workqueue(local->hw.workqueue);
fail_workqueue:
	ieee80211_if_free(local->mdev);
	local->mdev = NULL;
fail_mdev_alloc:
	wiphy_unregister(local->hw.wiphy);
	return result;
}
EXPORT_SYMBOL(ieee80211_register_hw);
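
/*
 * Illustrative sketch (not part of mac80211): the registration order a
 * hypothetical driver probe routine would follow, reusing the foo_ops and
 * struct foo_priv sketch above. At least one band must be populated and
 * the permanent MAC address set before ieee80211_register_hw() is called,
 * since the code above picks the initial channel from the first band and
 * copies wiphy->perm_addr into the master device. foo_probe() and
 * foo_band_2ghz are made-up names.
 *
 *	static int foo_probe(struct device *parent, const u8 *mac_addr)
 *	{
 *		struct ieee80211_hw *hw;
 *		int err;
 *
 *		hw = ieee80211_alloc_hw(sizeof(struct foo_priv), &foo_ops);
 *		if (!hw)
 *			return -ENOMEM;
 *
 *		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &foo_band_2ghz;
 *		SET_IEEE80211_DEV(hw, parent);
 *		SET_IEEE80211_PERM_ADDR(hw, mac_addr);
 *
 *		err = ieee80211_register_hw(hw);
 *		if (err) {
 *			ieee80211_free_hw(hw);
 *			return err;
 *		}
 *		return 0;
 *	}
 */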

void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata, *tmp;

	tasklet_kill(&local->tx_pending_tasklet);
	tasklet_kill(&local->tasklet);

	rtnl_lock();

	BUG_ON(local->reg_state != IEEE80211_DEV_REGISTERED);
	local->reg_state = IEEE80211_DEV_UNREGISTERED;

	/*
	 * At this point, interface list manipulations are fine
	 * because the driver cannot be handing us frames any
	 * more and the tasklet is killed.
	 */

	/*
	 * First, we remove all non-master interfaces. Do this because they
	 * may have bss pointer dependency on the master, and when we free
	 * the master these would be freed as well, breaking our list
	 * iteration completely.
	 */
	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
		if (sdata->dev == local->mdev)
			continue;
		list_del(&sdata->list);
		__ieee80211_if_del(local, sdata);
	}

	/* then, finally, remove the master interface */
	__ieee80211_if_del(local, IEEE80211_DEV_TO_SUB_IF(local->mdev));

	rtnl_unlock();

	ieee80211_rx_bss_list_deinit(local->mdev);
	ieee80211_clear_tx_pending(local);
	sta_info_stop(local);
	rate_control_deinitialize(local);
	debugfs_hw_del(local);

	if (skb_queue_len(&local->skb_queue)
	    || skb_queue_len(&local->skb_queue_unreliable))
		printk(KERN_WARNING "%s: skb_queue not empty\n",
		       wiphy_name(local->hw.wiphy));
	skb_queue_purge(&local->skb_queue);
	skb_queue_purge(&local->skb_queue_unreliable);

	destroy_workqueue(local->hw.workqueue);
	wiphy_unregister(local->hw.wiphy);
	ieee80211_wep_free(local);
	ieee80211_led_exit(local);
	ieee80211_if_free(local->mdev);
	local->mdev = NULL;
}
EXPORT_SYMBOL(ieee80211_unregister_hw);

void ieee80211_free_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
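
/*
 * Illustrative sketch (not part of mac80211): a hypothetical driver's
 * remove path. ieee80211_unregister_hw() must come first, while the device
 * can still be quiesced, and ieee80211_free_hw() must come last, since it
 * frees the wiphy that also backs hw->priv (see the layout comment in
 * ieee80211_alloc_hw() above). foo_remove() and foo_stop_device() are
 * made-up names.
 *
 *	static void foo_remove(struct ieee80211_hw *hw)
 *	{
 *		ieee80211_unregister_hw(hw);
 *		foo_stop_device(hw->priv);
 *		ieee80211_free_hw(hw);
 *	}
 */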

static int __init ieee80211_init(void)
{
	struct sk_buff *skb;
	int ret;

	BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb));

	ret = rc80211_pid_init();
	if (ret)
		goto out;

	ret = ieee80211_wme_register();
	if (ret) {
		printk(KERN_DEBUG "ieee80211_init: failed to "
		       "initialize WME (err=%d)\n", ret);
		goto out_cleanup_pid;
	}

	ieee80211_debugfs_netdev_init();

	return 0;

 out_cleanup_pid:
	rc80211_pid_exit();
 out:
	return ret;
}

static void __exit ieee80211_exit(void)
{
	rc80211_pid_exit();

	/*
	 * The key todo list will be empty by now, but the work
	 * might still be scheduled.
	 */
	flush_scheduled_work();

	if (mesh_allocated)
		ieee80211s_stop();

	ieee80211_wme_unregister();
	ieee80211_debugfs_netdev_exit();
}


subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);

MODULE_DESCRIPTION("IEEE 802.11 subsystem");
MODULE_LICENSE("GPL");