/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>

#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "wep.h"
#include "wme.h"
#include "aes_ccm.h"
#include "led.h"
#include "cfg.h"
#include "debugfs.h"
#include "debugfs_netdev.h"

/*
 * For seeing transmitted packets on monitor interfaces
 * we have a radiotap header too.
 */
struct ieee80211_tx_status_rtap_hdr {
	struct ieee80211_radiotap_header hdr;
	__le16 tx_flags;
	u8 data_retries;
} __attribute__ ((packed));

/* common interface routines */

static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
{
	memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
	return ETH_ALEN;
}
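
/*
 * Recompute the RX filter flags from the per-interface counters and hand
 * them to the driver.  Bit 31 is set as a canary before the call: the
 * driver is expected to clear the flags it does not honour, so a canary
 * that survives (see the WARN_ON below) means the driver never updated
 * the flags at all.
 */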
/* must be called under mdev tx lock */
static void ieee80211_configure_filter(struct ieee80211_local *local)
{
	unsigned int changed_flags;
	unsigned int new_flags = 0;

	if (atomic_read(&local->iff_promiscs))
		new_flags |= FIF_PROMISC_IN_BSS;
	if (atomic_read(&local->iff_allmultis))
		new_flags |= FIF_ALLMULTI;
	if (local->monitors)
		new_flags |= FIF_BCN_PRBRESP_PROMISC;
	if (local->fif_fcsfail)
		new_flags |= FIF_FCSFAIL;
	if (local->fif_plcpfail)
		new_flags |= FIF_PLCPFAIL;
	if (local->fif_control)
		new_flags |= FIF_CONTROL;
	if (local->fif_other_bss)
		new_flags |= FIF_OTHER_BSS;

	changed_flags = local->filter_flags ^ new_flags;

	/* be a bit nasty */
	new_flags |= (1<<31);

	local->ops->configure_filter(local_to_hw(local),
				     changed_flags, &new_flags,
				     local->mdev->mc_count,
				     local->mdev->mc_list);

	WARN_ON(new_flags & (1<<31));

	local->filter_flags = new_flags & ~(1<<31);
}

/* master interface */

static int ieee80211_master_open(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata;
	int res = -EOPNOTSUPP;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata->dev != dev && netif_running(sdata->dev)) {
			res = 0;
			break;
		}
	}

	if (res)
		return res;

	netif_start_queue(local->mdev);
	return 0;
}

static int ieee80211_master_stop(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(sdata, &local->interfaces, list)
		if (sdata->dev != dev && netif_running(sdata->dev))
			dev_close(sdata->dev);

	return 0;
}

static void ieee80211_master_set_multicast_list(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);

	ieee80211_configure_filter(local);
}

/* regular interfaces */

static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
{
	int meshhdrlen;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;

	/* FIX: what would be proper limits for MTU?
	 * This interface uses 802.3 frames. */
	if (new_mtu < 256 ||
	    new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
		printk(KERN_WARNING "%s: invalid MTU %d\n",
		       dev->name, new_mtu);
		return -EINVAL;
	}

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
	dev->mtu = new_mtu;
	return 0;
}
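
/*
 * Returns true if two interface types may share the same MAC address:
 * a monitor may share with anything, and AP/WDS, WDS/WDS, AP/VLAN and
 * VLAN/VLAN combinations are allowed as well.
 */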
static inline int identical_mac_addr_allowed(int type1, int type2)
{
	return (type1 == IEEE80211_IF_TYPE_MNTR ||
		type2 == IEEE80211_IF_TYPE_MNTR ||
		(type1 == IEEE80211_IF_TYPE_AP &&
		 type2 == IEEE80211_IF_TYPE_WDS) ||
		(type1 == IEEE80211_IF_TYPE_WDS &&
		 (type2 == IEEE80211_IF_TYPE_WDS ||
		  type2 == IEEE80211_IF_TYPE_AP)) ||
		(type1 == IEEE80211_IF_TYPE_AP &&
		 type2 == IEEE80211_IF_TYPE_VLAN) ||
		(type1 == IEEE80211_IF_TYPE_VLAN &&
		 (type2 == IEEE80211_IF_TYPE_AP ||
		  type2 == IEEE80211_IF_TYPE_VLAN)));
}

static int ieee80211_open(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata, *nsdata;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_if_init_conf conf;
	int res;
	bool need_hw_reconfig = 0;
	struct sta_info *sta;

	sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(nsdata, &local->interfaces, list) {
		struct net_device *ndev = nsdata->dev;

		if (ndev != dev && ndev != local->mdev && netif_running(ndev)) {
			/*
			 * Allow only a single IBSS interface to be up at any
			 * time. This is restricted because beacon distribution
			 * cannot work properly if both are in the same IBSS.
			 *
			 * To remove this restriction we'd have to disallow them
			 * from setting the same SSID on different IBSS interfaces
			 * belonging to the same hardware. Then, however, we're
			 * faced with having to adopt two different TSF timers...
			 */
			if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
			    nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
				return -EBUSY;

			/*
			 * Disallow multiple IBSS/STA mode interfaces.
			 *
			 * This is a technical restriction, it is possible although
			 * most likely not IEEE 802.11 compliant to have multiple
			 * STAs with just a single hardware (the TSF timer will not
			 * be adjusted properly.)
			 *
			 * However, because mac80211 uses the master device's BSS
			 * information for each STA/IBSS interface, doing this will
			 * currently corrupt that BSS information completely, unless,
			 * a not very useful case, both STAs are associated to the
			 * same BSS.
			 *
			 * To remove this restriction, the BSS information needs to
			 * be embedded in the STA/IBSS mode sdata instead of using
			 * the master device's BSS structure.
			 */
			if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
			     sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
			    (nsdata->vif.type == IEEE80211_IF_TYPE_STA ||
			     nsdata->vif.type == IEEE80211_IF_TYPE_IBSS))
				return -EBUSY;

			/*
			 * The remaining checks are only performed for interfaces
			 * with the same MAC address.
			 */
			if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
				continue;

			/*
			 * check whether it may have the same address
			 */
			if (!identical_mac_addr_allowed(sdata->vif.type,
							nsdata->vif.type))
				return -ENOTUNIQ;

			/*
			 * can only add VLANs to enabled APs
			 */
			if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
			    nsdata->vif.type == IEEE80211_IF_TYPE_AP)
				sdata->u.vlan.ap = nsdata;
		}
	}

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_WDS:
		if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
			return -ENOLINK;
		break;
	case IEEE80211_IF_TYPE_VLAN:
		if (!sdata->u.vlan.ap)
			return -ENOLINK;
		break;
	case IEEE80211_IF_TYPE_AP:
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_MNTR:
	case IEEE80211_IF_TYPE_IBSS:
	case IEEE80211_IF_TYPE_MESH_POINT:
		/* no special treatment */
		break;
	case IEEE80211_IF_TYPE_INVALID:
		/* cannot happen */
		WARN_ON(1);
		break;
	}

	if (local->open_count == 0) {
		res = 0;
		if (local->ops->start)
			res = local->ops->start(local_to_hw(local));
		if (res)
			return res;
		need_hw_reconfig = 1;
		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
	}

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_VLAN:
		list_add(&sdata->u.vlan.list, &sdata->u.vlan.ap->u.ap.vlans);
		/* no need to tell driver */
		break;
	case IEEE80211_IF_TYPE_MNTR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs++;
			break;
		}

		/* must be before the call to ieee80211_configure_filter */
		local->monitors++;
		if (local->monitors == 1)
			local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;

		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
			local->fif_fcsfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
			local->fif_plcpfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
			local->fif_control++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
			local->fif_other_bss++;

		netif_tx_lock_bh(local->mdev);
		ieee80211_configure_filter(local);
		netif_tx_unlock_bh(local->mdev);
		break;
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
		/* fall through */
	default:
		conf.vif = &sdata->vif;
		conf.type = sdata->vif.type;
		conf.mac_addr = dev->dev_addr;
		res = local->ops->add_interface(local_to_hw(local), &conf);
		if (res)
			goto err_stop;

		ieee80211_if_config(dev);
		ieee80211_reset_erp_info(dev);
		ieee80211_enable_keys(sdata);

		if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
		    !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
			netif_carrier_off(dev);
		else
			netif_carrier_on(dev);
	}

	if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
		/* Create STA entry for the WDS peer */
		sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
				     GFP_KERNEL);
		if (!sta) {
			res = -ENOMEM;
			goto err_del_interface;
		}

		/* no locking required since STA is not live yet */
		sta->flags |= WLAN_STA_AUTHORIZED;

		res = sta_info_insert(sta);
		if (res) {
			/* STA has been freed */
			goto err_del_interface;
		}
	}

	if (local->open_count == 0) {
		res = dev_open(local->mdev);
		WARN_ON(res);
		if (res)
			goto err_del_interface;
		tasklet_enable(&local->tx_pending_tasklet);
		tasklet_enable(&local->tasklet);
	}

	/*
	 * set_multicast_list will be invoked by the networking core
	 * which will check whether any increments here were done in
	 * error and sync them down to the hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_inc(&local->iff_allmultis);
	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_inc(&local->iff_promiscs);

	local->open_count++;
	if (need_hw_reconfig)
		ieee80211_hw_config(local);

	/*
	 * ieee80211_sta_work is disabled while network interface
	 * is down. Therefore, some configuration changes may not
	 * yet be effective. Trigger execution of ieee80211_sta_work
	 * to fix this.
	 */
	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
		struct ieee80211_if_sta *ifsta = &sdata->u.sta;
		queue_work(local->hw.workqueue, &ifsta->work);
	}

	netif_start_queue(dev);

	return 0;
 err_del_interface:
	local->ops->remove_interface(local_to_hw(local), &conf);
 err_stop:
	if (!local->open_count && local->ops->stop)
		local->ops->stop(local_to_hw(local));
	return res;
}

static int ieee80211_stop(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_init_conf conf;
	struct sta_info *sta;

	/*
	 * Stop TX on this interface first.
	 */
	netif_stop_queue(dev);

	/*
	 * Now delete all active aggregation sessions.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sta->sdata == sdata)
			ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
	}

	rcu_read_unlock();

	/*
	 * Remove all stations associated with this interface.
	 *
	 * This must be done before calling ops->remove_interface()
	 * because otherwise we can later invoke ops->sta_notify()
	 * whenever the STAs are removed, and that invalidates driver
	 * assumptions about always getting a vif pointer that is valid
	 * (because if we remove a STA after ops->remove_interface()
	 * the driver will have removed the vif info already!)
	 *
	 * We could relax this and only unlink the stations from the
	 * hash table and list but keep them on a per-sdata list that
	 * will be inserted back again when the interface is brought
	 * up again, but I don't currently see a use case for that,
	 * except with WDS which gets a STA entry created when it is
	 * brought up.
	 */
	sta_info_flush(local, sdata);

	/*
	 * Don't count this interface for promisc/allmulti while it
	 * is down. dev_mc_unsync() will invoke set_multicast_list
	 * on the master interface which will sync these down to the
	 * hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_dec(&local->iff_allmultis);
	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_dec(&local->iff_promiscs);

	dev_mc_unsync(local->mdev, dev);

	/* APs need special treatment */
	if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
		struct ieee80211_sub_if_data *vlan, *tmp;
		struct beacon_data *old_beacon = sdata->u.ap.beacon;

		/* remove beacon */
		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
		synchronize_rcu();
		kfree(old_beacon);

		/* down all dependent devices, that is VLANs */
		list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
					 u.vlan.list)
			dev_close(vlan->dev);
		WARN_ON(!list_empty(&sdata->u.ap.vlans));
	}

	local->open_count--;

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_VLAN:
		list_del(&sdata->u.vlan.list);
		sdata->u.vlan.ap = NULL;
		/* no need to tell driver */
		break;
	case IEEE80211_IF_TYPE_MNTR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs--;
			break;
		}

		local->monitors--;
		if (local->monitors == 0)
			local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;

		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
			local->fif_fcsfail--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
			local->fif_plcpfail--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
			local->fif_control--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
			local->fif_other_bss--;

		netif_tx_lock_bh(local->mdev);
		ieee80211_configure_filter(local);
		netif_tx_unlock_bh(local->mdev);
		break;
	case IEEE80211_IF_TYPE_MESH_POINT:
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		sdata->u.sta.state = IEEE80211_DISABLED;
		memset(sdata->u.sta.bssid, 0, ETH_ALEN);
		del_timer_sync(&sdata->u.sta.timer);
		/*
		 * When we get here, the interface is marked down.
		 * Call synchronize_rcu() to wait for the RX path
		 * should it be using the interface and enqueuing
		 * frames at this very time on another CPU.
		 */
		synchronize_rcu();
		skb_queue_purge(&sdata->u.sta.skb_queue);

		if (local->scan_dev == sdata->dev) {
			if (!local->ops->hw_scan) {
				local->sta_sw_scanning = 0;
				cancel_delayed_work(&local->scan_work);
			} else
				local->sta_hw_scanning = 0;
		}

		flush_workqueue(local->hw.workqueue);

		sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
		kfree(sdata->u.sta.extra_ie);
		sdata->u.sta.extra_ie = NULL;
		sdata->u.sta.extra_ie_len = 0;
		/* fall through */
	default:
		conf.vif = &sdata->vif;
		conf.type = sdata->vif.type;
		conf.mac_addr = dev->dev_addr;
		/* disable all keys for as long as this netdev is down */
		ieee80211_disable_keys(sdata);
		local->ops->remove_interface(local_to_hw(local), &conf);
	}

	if (local->open_count == 0) {
		if (netif_running(local->mdev))
			dev_close(local->mdev);

		if (local->ops->stop)
			local->ops->stop(local_to_hw(local));

		ieee80211_led_radio(local, 0);

		tasklet_disable(&local->tx_pending_tasklet);
		tasklet_disable(&local->tasklet);
	}

	return 0;
}
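
/*
 * A-MPDU TX block-ack session setup: the functions below allocate the
 * per-TID aggregation state, move the traffic onto a dedicated queue,
 * ask the driver to start aggregation via ops->ampdu_action() and then
 * send the ADDBA request to the peer; the stop path below tears all of
 * this down again.
 */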
int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	struct ieee80211_sub_if_data *sdata;
	u16 start_seq_num = 0;
	u8 *state;
	int ret;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM)
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();

	sta = sta_info_get(local, ra);
	if (!sta) {
		printk(KERN_DEBUG "Could not find the station\n");
		ret = -ENOENT;
		goto exit;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	/* check if the TID is not in aggregation flow already */
	if (*state != HT_AGG_STATE_IDLE) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
				 "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	sta->ampdu_mlme.tid_tx[tid] =
			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!sta->ampdu_mlme.tid_tx[tid]) {
		if (net_ratelimit())
			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
					tid);
		ret = -ENOMEM;
		goto err_unlock_sta;
	}
	/* Tx timer */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
			sta_addba_resp_timer_expired;
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
			(unsigned long)&sta->timer_to_tid[tid];
	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);

	/* ensure that TX flow won't interrupt us
	 * until the end of the call to requeue function */
	spin_lock_bh(&local->mdev->queue_lock);

	/* create a new queue for this aggregation */
	ret = ieee80211_ht_agg_queue_add(local, sta, tid);

	/* in case no queue is available for aggregation,
	 * don't switch to aggregation */
	if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - queue unavailable for"
					" tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		goto err_unlock_queue;
	}
	sdata = sta->sdata;

	/* Ok, the Addba frame hasn't been sent yet, but if the driver calls
	 * the callback right away, it must see that the flow has begun */
	*state |= HT_ADDBA_REQUESTED_MSK;

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
					       ra, tid, &start_seq_num);

	if (ret) {
		/* No need to requeue the packets in the agg queue, since we
		 * held the tx lock: no packet could be enqueued to the newly
		 * allocated queue */
		ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
					" tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		*state = HT_AGG_STATE_IDLE;
		goto err_unlock_queue;
	}

	/* Will put all the packets in the new SW queue */
	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
	spin_unlock_bh(&local->mdev->queue_lock);
	spin_unlock_bh(&sta->lock);

	/* send an addBA request */
	sta->ampdu_mlme.dialog_token_allocator++;
	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
			sta->ampdu_mlme.dialog_token_allocator;
	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;

	ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
				     sta->ampdu_mlme.tid_tx[tid]->dialog_token,
				     sta->ampdu_mlme.tid_tx[tid]->ssn,
				     0x40, 5000);

	/* activate the timer for the recipient's addBA response */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
				jiffies + ADDBA_RESP_INTERVAL;
	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
	goto exit;

err_unlock_queue:
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&local->mdev->queue_lock);
	ret = -EBUSY;
err_unlock_sta:
	spin_unlock_bh(&sta->lock);
exit:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);

int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
				 u8 *ra, u16 tid,
				 enum ieee80211_back_parties initiator)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int ret = 0;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM)
		return -EINVAL;

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* check if the TID is in aggregation */
	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->lock);

	if (*state != HT_AGG_STATE_OPERATIONAL) {
		ret = -ENOENT;
		goto stop_BA_exit;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);

	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
					       ra, tid, NULL);

	/* in case the HW denied the stop, go back to legacy operation */
	if (ret) {
		WARN_ON(ret != -EBUSY);
		*state = HT_AGG_STATE_OPERATIONAL;
		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
		goto stop_BA_exit;
	}

stop_BA_exit:
	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);

void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM) {
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
				tid, STA_TID_NUM);
		return;
	}

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
		printk(KERN_DEBUG "Could not find station: %s\n",
				print_mac(mac, ra));
		return;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->lock);

	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
				*state);
		spin_unlock_bh(&sta->lock);
		rcu_read_unlock();
		return;
	}

	WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);

	*state |= HT_ADDBA_DRV_READY_MSK;

	if (*state == HT_AGG_STATE_OPERATIONAL) {
		printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
	}
	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);

void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int agg_queue;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM) {
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
				tid, STA_TID_NUM);
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
			print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		printk(KERN_DEBUG "Could not find station: %s\n",
				print_mac(mac, ra));
		rcu_read_unlock();
		return;
	}
	state = &sta->ampdu_mlme.tid_state_tx[tid];

	/* NOTE: no need to use sta->lock in this state check, as
	 * ieee80211_stop_tx_ba_session will let only
	 * one stop call to pass through per sta/tid */
	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
		rcu_read_unlock();
		return;
	}

	if (*state & HT_AGG_STATE_INITIATOR_MSK)
		ieee80211_send_delba(sta->sdata->dev, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	agg_queue = sta->tid_to_tx_q[tid];

	/* avoid ordering issues: we are the only one that can modify
	 * the content of the qdiscs */
	spin_lock_bh(&local->mdev->queue_lock);
	/* remove the queue for this aggregation */
	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
	spin_unlock_bh(&local->mdev->queue_lock);

	/* we just requeued all the frames that were in the removed
	 * queue, and since we might miss a softirq we do netif_schedule.
	 * ieee80211_wake_queue is not used here as this queue is not
	 * necessarily stopped */
	netif_schedule(local->mdev);
	spin_lock_bh(&sta->lock);
	*state = HT_AGG_STATE_IDLE;
	sta->ampdu_mlme.addba_req_num[tid] = 0;
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&sta->lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
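
/*
 * The _irqsafe variants below defer the work to the tasklet: the RA/TID
 * pair is stashed in the cb of a zero-length skb that is queued on
 * local->skb_queue and later dispatched by ieee80211_tasklet_handler()
 * based on skb->pkt_type (IEEE80211_ADDBA_MSG / IEEE80211_DELBA_MSG).
 */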
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping start BA session", skb->dev->name);
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_ADDBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping stop BA session", skb->dev->name);
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_DELBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);

static void ieee80211_set_multicast_list(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	int allmulti, promisc, sdata_allmulti, sdata_promisc;

	allmulti = !!(dev->flags & IFF_ALLMULTI);
	promisc = !!(dev->flags & IFF_PROMISC);
	sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
	sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);

	if (allmulti != sdata_allmulti) {
		if (dev->flags & IFF_ALLMULTI)
			atomic_inc(&local->iff_allmultis);
		else
			atomic_dec(&local->iff_allmultis);
		sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
	}

	if (promisc != sdata_promisc) {
		if (dev->flags & IFF_PROMISC)
			atomic_inc(&local->iff_promiscs);
		else
			atomic_dec(&local->iff_promiscs);
		sdata->flags ^= IEEE80211_SDATA_PROMISC;
	}

	dev_mc_sync(local->mdev, dev);
}

static const struct header_ops ieee80211_header_ops = {
	.create		= eth_header,
	.parse		= header_parse_80211,
	.rebuild	= eth_rebuild_header,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};

/* Must not be called for mdev */
void ieee80211_if_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->hard_start_xmit = ieee80211_subif_start_xmit;
	dev->wireless_handlers = &ieee80211_iw_handler_def;
	dev->set_multicast_list = ieee80211_set_multicast_list;
	dev->change_mtu = ieee80211_change_mtu;
	dev->open = ieee80211_open;
	dev->stop = ieee80211_stop;
	dev->destructor = ieee80211_if_free;
}

/* everything else */

static int __ieee80211_if_config(struct net_device *dev,
				 struct sk_buff *beacon)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_if_conf conf;

	if (!local->ops->config_interface || !netif_running(dev))
		return 0;

	memset(&conf, 0, sizeof(conf));
	conf.type = sdata->vif.type;
	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
		conf.bssid = sdata->u.sta.bssid;
		conf.ssid = sdata->u.sta.ssid;
		conf.ssid_len = sdata->u.sta.ssid_len;
	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		conf.beacon = beacon;
		ieee80211_start_mesh(dev);
	} else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
		conf.ssid = sdata->u.ap.ssid;
		conf.ssid_len = sdata->u.ap.ssid_len;
		conf.beacon = beacon;
	}
	return local->ops->config_interface(local_to_hw(local),
					    &sdata->vif, &conf);
}

int ieee80211_if_config(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);

	if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
	    (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
		return ieee80211_if_config_beacon(dev);
	return __ieee80211_if_config(dev, NULL);
}

int ieee80211_if_config_beacon(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct sk_buff *skb;

	if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
		return 0;
	skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif);
	if (!skb)
		return -ENOMEM;
	return __ieee80211_if_config(dev, skb);
}

int ieee80211_hw_config(struct ieee80211_local *local)
{
	struct ieee80211_channel *chan;
	int ret = 0;

	if (local->sta_sw_scanning)
		chan = local->scan_channel;
	else
		chan = local->oper_channel;

	local->hw.conf.channel = chan;

	if (!local->hw.conf.power_level)
		local->hw.conf.power_level = chan->max_power;
	else
		local->hw.conf.power_level = min(chan->max_power,
						 local->hw.conf.power_level);

	local->hw.conf.max_antenna_gain = chan->max_antenna_gain;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
	       wiphy_name(local->hw.wiphy), chan->center_freq);
#endif

	if (local->open_count)
		ret = local->ops->config(local_to_hw(local), &local->hw.conf);

	return ret;
}

/**
 * ieee80211_handle_ht should be used only after the legacy (non-HT)
 * configuration, in particular the band, has been determined, since the
 * HT configuration depends on the hardware's HT abilities for a
 * _specific_ band.
 */
u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
			struct ieee80211_ht_info *req_ht_cap,
			struct ieee80211_ht_bss_info *req_bss_cap)
{
	struct ieee80211_conf *conf = &local->hw.conf;
	struct ieee80211_supported_band *sband;
	struct ieee80211_ht_info ht_conf;
	struct ieee80211_ht_bss_info ht_bss_conf;
	u32 changed = 0;
	int i;
	u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS;
	u8 tx_mcs_set_cap;

	sband = local->hw.wiphy->bands[conf->channel->band];

	memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
	memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));

	/* HT is not supported */
	if (!sband->ht_info.ht_supported) {
		conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
		goto out;
	}

	/* disable HT */
	if (!enable_ht) {
		if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
			changed |= BSS_CHANGED_HT;
		conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
		conf->ht_conf.ht_supported = 0;
		goto out;
	}

	if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
		changed |= BSS_CHANGED_HT;

	conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
	ht_conf.ht_supported = 1;

	ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
	ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
	ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
	ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
	ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
	ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;

	ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
	ht_conf.ampdu_density = req_ht_cap->ampdu_density;

	/* Bits 96-100 */
	tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];

	/* configure supported Tx MCS according to requested MCS
	 * (based in most cases on Rx capabilities of peer) and self
	 * Tx MCS capabilities (as defined by low level driver HW
	 * Tx capabilities) */
	if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED))
		goto check_changed;

	/* Counting from 0, therefore + 1 */
	if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
		max_tx_streams = ((tx_mcs_set_cap &
				   IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;

	for (i = 0; i < max_tx_streams; i++)
		ht_conf.supp_mcs_set[i] =
			sband->ht_info.supp_mcs_set[i] &
				req_ht_cap->supp_mcs_set[i];

	if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM)
		for (i = IEEE80211_SUPP_MCS_SET_UEQM;
		     i < IEEE80211_SUPP_MCS_SET_LEN; i++)
			ht_conf.supp_mcs_set[i] =
				sband->ht_info.supp_mcs_set[i] &
					req_ht_cap->supp_mcs_set[i];

check_changed:
	/* if bss configuration changed store the new one */
	if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
	    memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
		changed |= BSS_CHANGED_HT;
		memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
		memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
	}
out:
	return changed;
}

void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
				      u32 changed)
{
	struct ieee80211_local *local = sdata->local;

	if (!changed)
		return;

	if (local->ops->bss_info_changed)
		local->ops->bss_info_changed(local_to_hw(local),
					     &sdata->vif,
					     &sdata->bss_conf,
					     changed);
}

void ieee80211_reset_erp_info(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	sdata->bss_conf.use_cts_prot = 0;
	sdata->bss_conf.use_short_preamble = 0;
	ieee80211_bss_info_change_notify(sdata,
					 BSS_CHANGED_ERP_CTS_PROT |
					 BSS_CHANGED_ERP_PREAMBLE);
}

void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
				 struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int tmp;

	skb->dev = local->mdev;
	skb->pkt_type = IEEE80211_TX_STATUS_MSG;
	skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
		       &local->skb_queue : &local->skb_queue_unreliable, skb);
	tmp = skb_queue_len(&local->skb_queue) +
		skb_queue_len(&local->skb_queue_unreliable);
	while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		dev_kfree_skb_irq(skb);
		tmp--;
		I802_DEBUG_INC(local->tx_status_drop);
	}
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
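
/*
 * Tasklet that demultiplexes the skbs queued by the _irqsafe helpers
 * using skb->pkt_type: RX frames, TX status reports and ADDBA/DELBA
 * callbacks are all dispatched from here outside hard-IRQ context.
 */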
static void ieee80211_tasklet_handler(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *) data;
	struct sk_buff *skb;
	struct ieee80211_rx_status rx_status;
	struct ieee80211_ra_tid *ra_tid;

	while ((skb = skb_dequeue(&local->skb_queue)) ||
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		switch (skb->pkt_type) {
		case IEEE80211_RX_MSG:
			/* status is in skb->cb */
			memcpy(&rx_status, skb->cb, sizeof(rx_status));
			/* Clear skb->pkt_type in order to not confuse kernel
			 * netstack. */
			skb->pkt_type = 0;
			__ieee80211_rx(local_to_hw(local), skb, &rx_status);
			break;
		case IEEE80211_TX_STATUS_MSG:
			skb->pkt_type = 0;
			ieee80211_tx_status(local_to_hw(local), skb);
			break;
		case IEEE80211_DELBA_MSG:
			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
			ieee80211_stop_tx_ba_cb(local_to_hw(local),
						ra_tid->ra, ra_tid->tid);
			dev_kfree_skb(skb);
			break;
		case IEEE80211_ADDBA_MSG:
			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
			ieee80211_start_tx_ba_cb(local_to_hw(local),
						 ra_tid->ra, ra_tid->tid);
			dev_kfree_skb(skb);
			break;
		default: /* should never get here! */
			printk(KERN_ERR "%s: Unknown message type (%d)\n",
			       wiphy_name(local->hw.wiphy), skb->pkt_type);
			dev_kfree_skb(skb);
			break;
		}
	}
}

/* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to
 * make a prepared TX frame (one that has been given to hw) look like a brand
 * new IEEE 802.11 frame that is ready to go through TX processing again.
 * Also, tx_packet_data in cb is restored from tx_control. */
static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
				      struct ieee80211_key *key,
				      struct sk_buff *skb)
{
	int hdrlen, iv_len, mic_len;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->flags &= IEEE80211_TX_CTL_REQ_TX_STATUS |
		       IEEE80211_TX_CTL_DO_NOT_ENCRYPT |
		       IEEE80211_TX_CTL_REQUEUE |
		       IEEE80211_TX_CTL_EAPOL_FRAME;

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);

	if (!key)
		goto no_key;

	switch (key->conf.alg) {
	case ALG_WEP:
		iv_len = WEP_IV_LEN;
		mic_len = WEP_ICV_LEN;
		break;
	case ALG_TKIP:
		iv_len = TKIP_IV_LEN;
		mic_len = TKIP_ICV_LEN;
		break;
	case ALG_CCMP:
		iv_len = CCMP_HDR_LEN;
		mic_len = CCMP_MIC_LEN;
		break;
	default:
		goto no_key;
	}

	if (skb->len >= mic_len &&
	    !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
		skb_trim(skb, skb->len - mic_len);
	if (skb->len >= iv_len && skb->len > hdrlen) {
		memmove(skb->data + iv_len, skb->data, hdrlen);
		skb_pull(skb, iv_len);
	}

no_key:
	{
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		u16 fc = le16_to_cpu(hdr->frame_control);

		if ((fc & 0x8C) == 0x88) /* QoS Control Field */ {
			fc &= ~IEEE80211_STYPE_QOS_DATA;
			hdr->frame_control = cpu_to_le16(fc);
			memmove(skb->data + 2, skb->data, hdrlen - 2);
			skb_pull(skb, 2);
		}
	}
}

static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
					    struct sta_info *sta,
					    struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	sta->tx_filtered_count++;

	/*
	 * Clear the TX filter mask for this STA when sending the next
	 * packet. If the STA went to power save mode, this will happen
	 * when it wakes up for the next time.
	 */
	set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);

	/*
	 * This code races in the following way:
	 *
	 *  (1) STA sends frame indicating it will go to sleep and does so
	 *  (2) hardware/firmware adds STA to filter list, passes frame up
	 *  (3) hardware/firmware processes TX fifo and suppresses a frame
	 *  (4) we get TX status before having processed the frame and
	 *	knowing that the STA has gone to sleep.
	 *
	 * This is actually quite unlikely even when both those events are
	 * processed from interrupts coming in quickly after one another or
	 * even at the same time because we queue both TX status events and
	 * RX frames to be processed by a tasklet and process them in the
	 * same order that they were received or TX status last. Hence, there
	 * is no race as long as the frame RX is processed before the next TX
	 * status, which drivers can ensure, see below.
	 *
	 * Note that this can only happen if the hardware or firmware can
	 * actually add STAs to the filter list, if this is done by the
	 * driver in response to set_tim() (which will only reduce the race
	 * this whole filtering tries to solve, not completely solve it)
	 * this situation cannot happen.
	 *
	 * To completely solve this race drivers need to make sure that they
	 *  (a) don't mix the irq-safe/not irq-safe TX status/RX processing
	 *	functions and
	 *  (b) always process RX events before TX status events if ordering
	 *	can be unknown, for example with different interrupt status
	 *	bits.
	 */
	if (test_sta_flags(sta, WLAN_STA_PS) &&
	    skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
		ieee80211_remove_tx_extra(local, sta->key, skb);
		skb_queue_tail(&sta->tx_filtered, skb);
		return;
	}

	if (!test_sta_flags(sta, WLAN_STA_PS) &&
	    !(info->flags & IEEE80211_TX_CTL_REQUEUE)) {
		/* Software retry the packet once */
		info->flags |= IEEE80211_TX_CTL_REQUEUE;
		ieee80211_remove_tx_extra(local, sta->key, skb);
		dev_queue_xmit(skb);
		return;
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "%s: dropped TX filtered frame, "
		       "queue_len=%d PS=%d @%lu\n",
		       wiphy_name(local->hw.wiphy),
		       skb_queue_len(&sta->tx_filtered),
		       !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
	dev_kfree_skb(skb);
}
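
/*
 * TX status entry point: frames that were filtered or failed because the
 * destination station is asleep are handed back to
 * ieee80211_handle_filtered_frame() above, the dot11 SNMP counters are
 * updated, and finally the frame is cloned to any running monitor
 * interfaces with a radiotap TX status header prepended.
 */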
void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct sk_buff *skb2;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u16 frag, type;
	struct ieee80211_tx_status_rtap_hdr *rthdr;
	struct ieee80211_sub_if_data *sdata;
	struct net_device *prev_dev = NULL;

	rcu_read_lock();

	if (info->status.excessive_retries) {
		struct sta_info *sta;
		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			if (test_sta_flags(sta, WLAN_STA_PS)) {
				/*
				 * The STA is in power save mode, so assume
				 * that this TX packet failed because of that.
				 */
				ieee80211_handle_filtered_frame(local, sta, skb);
				rcu_read_unlock();
				return;
			}
		}
	}

	if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
		struct sta_info *sta;
		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			ieee80211_handle_filtered_frame(local, sta, skb);
			rcu_read_unlock();
			return;
		}
	} else
		rate_control_tx_status(local->mdev, skb);

	rcu_read_unlock();

	ieee80211_led_tx(local, 0);

	/* SNMP counters
	 * Fragments are passed to low-level drivers as separate skbs, so these
	 * are actually fragments, not frames. Update frame counters only for
	 * the first fragment of the frame. */

	frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
	type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;

	if (info->flags & IEEE80211_TX_STAT_ACK) {
		if (frag == 0) {
			local->dot11TransmittedFrameCount++;
			if (is_multicast_ether_addr(hdr->addr1))
				local->dot11MulticastTransmittedFrameCount++;
			if (info->status.retry_count > 0)
				local->dot11RetryCount++;
			if (info->status.retry_count > 1)
				local->dot11MultipleRetryCount++;
		}

		/* This counter shall be incremented for an acknowledged MPDU
		 * with an individual address in the address 1 field or an MPDU
		 * with a multicast address in the address 1 field of type Data
		 * or Management. */
		if (!is_multicast_ether_addr(hdr->addr1) ||
		    type == IEEE80211_FTYPE_DATA ||
		    type == IEEE80211_FTYPE_MGMT)
			local->dot11TransmittedFragmentCount++;
	} else {
		if (frag == 0)
			local->dot11FailedCount++;
	}

	/* this was a transmitted frame, but now we want to reuse it */
	skb_orphan(skb);

	/*
	 * This is a bit racy but we can avoid a lot of work
	 * with this test...
	 */
	if (!local->monitors && !local->cooked_mntrs) {
		dev_kfree_skb(skb);
		return;
	}

	/* send frame to monitor interfaces now */

	if (skb_headroom(skb) < sizeof(*rthdr)) {
		printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
		dev_kfree_skb(skb);
		return;
	}

	rthdr = (struct ieee80211_tx_status_rtap_hdr *)
				skb_push(skb, sizeof(*rthdr));

	memset(rthdr, 0, sizeof(*rthdr));
	rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
	rthdr->hdr.it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_DATA_RETRIES));

	if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
	    !is_multicast_ether_addr(hdr->addr1))
		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);

	if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) &&
	    (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
	else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);

	rthdr->data_retries = info->status.retry_count;

	/* XXX: is this sufficient for BPF? */
	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
			if (!netif_running(sdata->dev))
				continue;

			if (prev_dev) {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2) {
					skb2->dev = prev_dev;
					netif_rx(skb2);
				}
			}

			prev_dev = sdata->dev;
		}
	}
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	}
	rcu_read_unlock();
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status);
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy            |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
	priv_size = ((sizeof(struct ieee80211_local) +
		      NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
		    priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);
	if (!wiphy)
		return NULL;

	wiphy->privid = mac80211_wiphy_privid;

	local = wiphy_priv(wiphy);
	local->hw.wiphy = wiphy;

	local->hw.priv = (char *)local +
			 ((sizeof(struct ieee80211_local) +
			   NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);

	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	local->hw.queues = 1; /* default */

	local->bridge_packets = 1;

	local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
	local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	local->short_retry_limit = 7;
	local->long_retry_limit = 4;
	local->hw.conf.radio_enabled = 1;

	INIT_LIST_HEAD(&local->interfaces);

	spin_lock_init(&local->key_lock);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);

	sta_info_init(local);

	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);
	tasklet_disable(&local->tx_pending_tasklet);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long) local);
	tasklet_disable(&local->tasklet);

	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
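
/*
 * ieee80211_register_hw - finish registration of a device allocated with
 * ieee80211_alloc_hw: register the wiphy, create the "wmaster%d" master
 * netdev plus a default "wlan%d" STA interface, and set up rate control,
 * WEP and the driver workqueue.  Call this only after the supported bands
 * and hardware flags have been filled in.
 */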
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	const char *name;
	int result;
	enum ieee80211_band band;
	struct net_device *mdev;
	struct ieee80211_sub_if_data *sdata;

	/*
	 * generic code guarantees at least one band,
	 * set this very early because much code assumes
	 * that hw.conf.channel is assigned
	 */
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband;
		sband = local->hw.wiphy->bands[band];
		if (sband) {
			/* init channel we're on */
			local->hw.conf.channel =
			local->oper_channel =
			local->scan_channel = &sband->channels[0];
			break;
		}
	}

	result = wiphy_register(local->hw.wiphy);
	if (result < 0)
		return result;

	/*
	 * We use the number of queues for feature tests (QoS, HT) internally
	 * so restrict them appropriately.
	 */
#ifdef CONFIG_MAC80211_QOS
	if (hw->queues > IEEE80211_MAX_QUEUES)
		hw->queues = IEEE80211_MAX_QUEUES;
	if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
		hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
	if (hw->queues < 4)
		hw->ampdu_queues = 0;
#else
	hw->queues = 1;
	hw->ampdu_queues = 0;
#endif

	/* for now, mdev needs sub_if_data :/ */
	mdev = alloc_netdev_mq(sizeof(struct ieee80211_sub_if_data),
			       "wmaster%d", ether_setup,
			       ieee80211_num_queues(hw));
	if (!mdev) {
		result = -ENOMEM;
		goto fail_mdev_alloc;
	}

	if (ieee80211_num_queues(hw) > 1)
		mdev->features |= NETIF_F_MULTI_QUEUE;

	sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
	mdev->ieee80211_ptr = &sdata->wdev;
	sdata->wdev.wiphy = local->hw.wiphy;

	local->mdev = mdev;

	ieee80211_rx_bss_list_init(mdev);

	mdev->hard_start_xmit = ieee80211_master_start_xmit;
	mdev->open = ieee80211_master_open;
	mdev->stop = ieee80211_master_stop;
	mdev->type = ARPHRD_IEEE80211;
	mdev->header_ops = &ieee80211_header_ops;
	mdev->set_multicast_list = ieee80211_master_set_multicast_list;

	sdata->vif.type = IEEE80211_IF_TYPE_AP;
	sdata->dev = mdev;
	sdata->local = local;
	sdata->u.ap.force_unicast_rateidx = -1;
	sdata->u.ap.max_ratectrl_rateidx = -1;
	ieee80211_if_sdata_init(sdata);

	/* no RCU needed since we're still during init phase */
	list_add_tail(&sdata->list, &local->interfaces);

	name = wiphy_dev(local->hw.wiphy)->driver->name;
	local->hw.workqueue = create_freezeable_workqueue(name);
	if (!local->hw.workqueue) {
		result = -ENOMEM;
		goto fail_workqueue;
	}

	/*
	 * The hardware needs headroom for sending the frame,
	 * and we need some headroom for passing the frame to monitor
	 * interfaces, but never both at the same time.
	 */
	local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
				   sizeof(struct ieee80211_tx_status_rtap_hdr));

	debugfs_hw_add(local);

	if (local->hw.conf.beacon_int < 10)
		local->hw.conf.beacon_int = 100;

	local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
						  IEEE80211_HW_SIGNAL_DB |
						  IEEE80211_HW_SIGNAL_DBM) ?
			       IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
	local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
			       IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		local->wstats_flags |= IW_QUAL_DBM;

	result = sta_info_start(local);
	if (result < 0)
		goto fail_sta_info;

	rtnl_lock();
	result = dev_alloc_name(local->mdev, local->mdev->name);
	if (result < 0)
		goto fail_dev;

	memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
	SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy));

	result = register_netdevice(local->mdev);
	if (result < 0)
		goto fail_dev;

	ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP);

	result = ieee80211_init_rate_ctrl_alg(local,
					      hw->rate_control_algorithm);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize rate control "
		       "algorithm\n", wiphy_name(local->hw.wiphy));
		goto fail_rate;
	}

	result = ieee80211_wep_init(local);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize wep\n",
		       wiphy_name(local->hw.wiphy));
		goto fail_wep;
	}

	ieee80211_install_qdisc(local->mdev);

	/* add one default STA interface */
	result = ieee80211_if_add(local->mdev, "wlan%d", NULL,
				  IEEE80211_IF_TYPE_STA, NULL);
	if (result)
		printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
		       wiphy_name(local->hw.wiphy));

	local->reg_state = IEEE80211_DEV_REGISTERED;
	rtnl_unlock();

	ieee80211_led_init(local);

	return 0;

fail_wep:
	rate_control_deinitialize(local);
fail_rate:
	ieee80211_debugfs_remove_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	unregister_netdevice(local->mdev);
	local->mdev = NULL;
fail_dev:
	rtnl_unlock();
	sta_info_stop(local);
fail_sta_info:
	debugfs_hw_del(local);
	destroy_workqueue(local->hw.workqueue);
fail_workqueue:
	if (local->mdev != NULL) {
		ieee80211_if_free(local->mdev);
		local->mdev = NULL;
	}
fail_mdev_alloc:
	wiphy_unregister(local->hw.wiphy);
	return result;
}
EXPORT_SYMBOL(ieee80211_register_hw);
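
/*
 * ieee80211_unregister_hw - undo ieee80211_register_hw: remove all virtual
 * interfaces (the master last), flush pending frames and tear down rate
 * control, debugfs, the workqueue and the wiphy.  The driver must not hand
 * mac80211 any more frames or TX status once this has been called.
 */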
void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata, *tmp;

	tasklet_kill(&local->tx_pending_tasklet);
	tasklet_kill(&local->tasklet);

	rtnl_lock();

	BUG_ON(local->reg_state != IEEE80211_DEV_REGISTERED);

	local->reg_state = IEEE80211_DEV_UNREGISTERED;

	/*
	 * At this point, interface list manipulations are fine
	 * because the driver cannot be handing us frames any
	 * more and the tasklet is killed.
	 */

	/*
	 * First, we remove all non-master interfaces. Do this because they
	 * may have bss pointer dependency on the master, and when we free
	 * the master these would be freed as well, breaking our list
	 * iteration completely.
	 */
	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
		if (sdata->dev == local->mdev)
			continue;
		list_del(&sdata->list);
		__ieee80211_if_del(local, sdata);
	}

	/* then, finally, remove the master interface */
	__ieee80211_if_del(local, IEEE80211_DEV_TO_SUB_IF(local->mdev));

	rtnl_unlock();

	ieee80211_rx_bss_list_deinit(local->mdev);
	ieee80211_clear_tx_pending(local);
	sta_info_stop(local);
	rate_control_deinitialize(local);
	debugfs_hw_del(local);

	if (skb_queue_len(&local->skb_queue) ||
	    skb_queue_len(&local->skb_queue_unreliable))
		printk(KERN_WARNING "%s: skb_queue not empty\n",
		       wiphy_name(local->hw.wiphy));
	skb_queue_purge(&local->skb_queue);
	skb_queue_purge(&local->skb_queue_unreliable);

	destroy_workqueue(local->hw.workqueue);
	wiphy_unregister(local->hw.wiphy);
	ieee80211_wep_free(local);
	ieee80211_led_exit(local);
	ieee80211_if_free(local->mdev);
	local->mdev = NULL;
}
EXPORT_SYMBOL(ieee80211_unregister_hw);
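
/*
 * ieee80211_free_hw - free a hardware descriptor obtained from
 * ieee80211_alloc_hw.  Only call this after ieee80211_unregister_hw, or
 * when registration was never attempted or failed.
 */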
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
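
/*
 * Module init/exit.  ieee80211_tx_info must fit into skb->cb, and the PID
 * rate control algorithm and the WME qdisc must be registered before any
 * driver registers hardware, which is why this runs as a subsys_initcall.
 */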
static int __init ieee80211_init(void)
{
	struct sk_buff *skb;
	int ret;

	BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));

	ret = rc80211_pid_init();
	if (ret)
		goto out;

	ret = ieee80211_wme_register();
	if (ret) {
		printk(KERN_DEBUG "ieee80211_init: failed to "
		       "initialize WME (err=%d)\n", ret);
		goto out_cleanup_pid;
	}

	ieee80211_debugfs_netdev_init();

	return 0;

out_cleanup_pid:
	rc80211_pid_exit();
out:
	return ret;
}

static void __exit ieee80211_exit(void)
{
	rc80211_pid_exit();

	/*
	 * For key todo, it'll be empty by now but the work
	 * might still be scheduled.
	 */
	flush_scheduled_work();

	if (mesh_allocated)
		ieee80211s_stop();

	ieee80211_wme_unregister();
	ieee80211_debugfs_netdev_exit();
}


subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);

MODULE_DESCRIPTION("IEEE 802.11 subsystem");
MODULE_LICENSE("GPL");