main.c

/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>

#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "wep.h"
#include "wme.h"
#include "aes_ccm.h"
#include "led.h"
#include "cfg.h"
#include "debugfs.h"
#include "debugfs_netdev.h"

#define SUPP_MCS_SET_LEN 16

/*
 * For seeing transmitted packets on monitor interfaces
 * we have a radiotap header too.
 */
struct ieee80211_tx_status_rtap_hdr {
	struct ieee80211_radiotap_header hdr;
	__le16 tx_flags;
	u8 data_retries;
} __attribute__ ((packed));

/* common interface routines */
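
/*
 * header_ops .parse handler: report the transmitter address (addr2),
 * which starts 10 bytes into the 802.11 header.
 */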
static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
{
	memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
	return ETH_ALEN;
}

/* must be called under mdev tx lock */
static void ieee80211_configure_filter(struct ieee80211_local *local)
{
	unsigned int changed_flags;
	unsigned int new_flags = 0;

	if (atomic_read(&local->iff_promiscs))
		new_flags |= FIF_PROMISC_IN_BSS;

	if (atomic_read(&local->iff_allmultis))
		new_flags |= FIF_ALLMULTI;

	if (local->monitors)
		new_flags |= FIF_BCN_PRBRESP_PROMISC;

	if (local->fif_fcsfail)
		new_flags |= FIF_FCSFAIL;

	if (local->fif_plcpfail)
		new_flags |= FIF_PLCPFAIL;

	if (local->fif_control)
		new_flags |= FIF_CONTROL;

	if (local->fif_other_bss)
		new_flags |= FIF_OTHER_BSS;

	changed_flags = local->filter_flags ^ new_flags;

	/* be a bit nasty */
	new_flags |= (1<<31);

	local->ops->configure_filter(local_to_hw(local),
				     changed_flags, &new_flags,
				     local->mdev->mc_count,
				     local->mdev->mc_list);

	WARN_ON(new_flags & (1<<31));

	local->filter_flags = new_flags & ~(1<<31);
}

/* master interface */
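
/*
 * The master netdev can only be brought up while at least one virtual
 * interface is running on top of it.
 */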
static int ieee80211_master_open(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata;
	int res = -EOPNOTSUPP;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata->dev != dev && netif_running(sdata->dev)) {
			res = 0;
			break;
		}
	}
	return res;
}

static int ieee80211_master_stop(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(sdata, &local->interfaces, list)
		if (sdata->dev != dev && netif_running(sdata->dev))
			dev_close(sdata->dev);

	return 0;
}

static void ieee80211_master_set_multicast_list(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);

	ieee80211_configure_filter(local);
}

/* regular interfaces */
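
/*
 * 802.3 framing is used on these interfaces; reject MTUs outside the
 * range the 802.11 payload can carry (mesh interfaces need extra room
 * for the mesh header).
 */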
static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
{
	int meshhdrlen;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;

	/* FIX: what would be proper limits for MTU?
	 * This interface uses 802.3 frames. */
	if (new_mtu < 256 ||
	    new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
		printk(KERN_WARNING "%s: invalid MTU %d\n",
		       dev->name, new_mtu);
		return -EINVAL;
	}

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
	dev->mtu = new_mtu;
	return 0;
}

static inline int identical_mac_addr_allowed(int type1, int type2)
{
	return (type1 == IEEE80211_IF_TYPE_MNTR ||
		type2 == IEEE80211_IF_TYPE_MNTR ||
		(type1 == IEEE80211_IF_TYPE_AP &&
		 type2 == IEEE80211_IF_TYPE_WDS) ||
		(type1 == IEEE80211_IF_TYPE_WDS &&
		 (type2 == IEEE80211_IF_TYPE_WDS ||
		  type2 == IEEE80211_IF_TYPE_AP)) ||
		(type1 == IEEE80211_IF_TYPE_AP &&
		 type2 == IEEE80211_IF_TYPE_VLAN) ||
		(type1 == IEEE80211_IF_TYPE_VLAN &&
		 (type2 == IEEE80211_IF_TYPE_AP ||
		  type2 == IEEE80211_IF_TYPE_VLAN)));
}
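
/*
 * Bring up a virtual interface: verify that it can coexist with the
 * other running interfaces, start the hardware on the first open,
 * register the interface with the driver and set up filters, keys and
 * (for WDS) the peer STA entry.
 */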
static int ieee80211_open(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata, *nsdata;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_if_init_conf conf;
	int res;
	bool need_hw_reconfig = 0;
	struct sta_info *sta;

	sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(nsdata, &local->interfaces, list) {
		struct net_device *ndev = nsdata->dev;

		if (ndev != dev && ndev != local->mdev && netif_running(ndev)) {
			/*
			 * Allow only a single IBSS interface to be up at any
			 * time. This is restricted because beacon distribution
			 * cannot work properly if both are in the same IBSS.
			 *
			 * To remove this restriction we'd have to disallow them
			 * from setting the same SSID on different IBSS interfaces
			 * belonging to the same hardware. Then, however, we're
			 * faced with having to adopt two different TSF timers...
			 */
			if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
			    nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
				return -EBUSY;

			/*
			 * Disallow multiple IBSS/STA mode interfaces.
			 *
			 * This is a technical restriction; it is possible,
			 * although most likely not IEEE 802.11 compliant,
			 * to have multiple STAs on a single piece of hardware
			 * (the TSF timer will not be adjusted properly).
			 *
			 * However, because mac80211 uses the master device's
			 * BSS information for each STA/IBSS interface, doing
			 * this will currently corrupt that BSS information
			 * completely, unless, in a not very useful case, both
			 * STAs are associated to the same BSS.
			 *
			 * To remove this restriction, the BSS information
			 * needs to be embedded in the STA/IBSS mode sdata
			 * instead of using the master device's BSS structure.
			 */
			if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
			     sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
			    (nsdata->vif.type == IEEE80211_IF_TYPE_STA ||
			     nsdata->vif.type == IEEE80211_IF_TYPE_IBSS))
				return -EBUSY;

			/*
			 * The remaining checks are only performed for interfaces
			 * with the same MAC address.
			 */
			if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
				continue;

			/*
			 * check whether it may have the same address
			 */
			if (!identical_mac_addr_allowed(sdata->vif.type,
							nsdata->vif.type))
				return -ENOTUNIQ;

			/*
			 * can only add VLANs to enabled APs
			 */
			if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
			    nsdata->vif.type == IEEE80211_IF_TYPE_AP)
				sdata->u.vlan.ap = nsdata;
		}
	}

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_WDS:
		if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
			return -ENOLINK;
		break;
	case IEEE80211_IF_TYPE_VLAN:
		if (!sdata->u.vlan.ap)
			return -ENOLINK;
		break;
	case IEEE80211_IF_TYPE_AP:
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_MNTR:
	case IEEE80211_IF_TYPE_IBSS:
	case IEEE80211_IF_TYPE_MESH_POINT:
		/* no special treatment */
		break;
	case IEEE80211_IF_TYPE_INVALID:
		/* cannot happen */
		WARN_ON(1);
		break;
	}

	if (local->open_count == 0) {
		res = 0;
		if (local->ops->start)
			res = local->ops->start(local_to_hw(local));
		if (res)
			return res;
		need_hw_reconfig = 1;
		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
	}

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_VLAN:
		list_add(&sdata->u.vlan.list, &sdata->u.vlan.ap->u.ap.vlans);
		/* no need to tell driver */
		break;
	case IEEE80211_IF_TYPE_MNTR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs++;
			break;
		}

		/* must be before the call to ieee80211_configure_filter */
		local->monitors++;
		if (local->monitors == 1)
			local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;

		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
			local->fif_fcsfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
			local->fif_plcpfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
			local->fif_control++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
			local->fif_other_bss++;

		netif_tx_lock_bh(local->mdev);
		ieee80211_configure_filter(local);
		netif_tx_unlock_bh(local->mdev);
		break;
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
		/* fall through */
	default:
		conf.vif = &sdata->vif;
		conf.type = sdata->vif.type;
		conf.mac_addr = dev->dev_addr;
		res = local->ops->add_interface(local_to_hw(local), &conf);
		if (res)
			goto err_stop;

		ieee80211_if_config(dev);
		ieee80211_reset_erp_info(dev);
		ieee80211_enable_keys(sdata);

		if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
		    !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
			netif_carrier_off(dev);
		else
			netif_carrier_on(dev);
	}

	if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
		/* Create STA entry for the WDS peer */
		sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
				     GFP_KERNEL);
		if (!sta) {
			res = -ENOMEM;
			goto err_del_interface;
		}

		sta->flags |= WLAN_STA_AUTHORIZED;

		res = sta_info_insert(sta);
		if (res) {
			/* STA has been freed */
			goto err_del_interface;
		}
	}

	if (local->open_count == 0) {
		res = dev_open(local->mdev);
		WARN_ON(res);
		if (res)
			goto err_del_interface;
		tasklet_enable(&local->tx_pending_tasklet);
		tasklet_enable(&local->tasklet);
	}

	/*
	 * set_multicast_list will be invoked by the networking core
	 * which will check whether any increments here were done in
	 * error and sync them down to the hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_inc(&local->iff_allmultis);

	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_inc(&local->iff_promiscs);

	local->open_count++;
	if (need_hw_reconfig)
		ieee80211_hw_config(local);

	/*
	 * ieee80211_sta_work is disabled while network interface
	 * is down. Therefore, some configuration changes may not
	 * yet be effective. Trigger execution of ieee80211_sta_work
	 * to fix this.
	 */
	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
		struct ieee80211_if_sta *ifsta = &sdata->u.sta;
		queue_work(local->hw.workqueue, &ifsta->work);
	}

	netif_start_queue(dev);

	return 0;
 err_del_interface:
	local->ops->remove_interface(local_to_hw(local), &conf);
 err_stop:
	if (!local->open_count && local->ops->stop)
		local->ops->stop(local_to_hw(local));
	return res;
}
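
/*
 * Tear down a virtual interface: stop TX, drop aggregation sessions and
 * stations, update filter/multicast state, detach the interface from
 * the driver and stop the hardware on the last close.
 */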
static int ieee80211_stop(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_init_conf conf;
	struct sta_info *sta;

	/*
	 * Stop TX on this interface first.
	 */
	netif_stop_queue(dev);

	/*
	 * Now delete all active aggregation sessions.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sta->sdata == sdata)
			ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
	}

	rcu_read_unlock();

	/*
	 * Remove all stations associated with this interface.
	 *
	 * This must be done before calling ops->remove_interface()
	 * because otherwise we can later invoke ops->sta_notify()
	 * whenever the STAs are removed, and that invalidates driver
	 * assumptions about always getting a vif pointer that is valid
	 * (because if we remove a STA after ops->remove_interface()
	 * the driver will have removed the vif info already!)
	 *
	 * We could relax this and only unlink the stations from the
	 * hash table and list but keep them on a per-sdata list that
	 * will be inserted back again when the interface is brought
	 * up again, but I don't currently see a use case for that,
	 * except with WDS which gets a STA entry created when it is
	 * brought up.
	 */
	sta_info_flush(local, sdata);

	/*
	 * Don't count this interface for promisc/allmulti while it
	 * is down. dev_mc_unsync() will invoke set_multicast_list
	 * on the master interface which will sync these down to the
	 * hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_dec(&local->iff_allmultis);

	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_dec(&local->iff_promiscs);

	dev_mc_unsync(local->mdev, dev);

	/* APs need special treatment */
	if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
		struct ieee80211_sub_if_data *vlan, *tmp;
		struct beacon_data *old_beacon = sdata->u.ap.beacon;

		/* remove beacon */
		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
		synchronize_rcu();
		kfree(old_beacon);

		/* down all dependent devices, that is VLANs */
		list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
					 u.vlan.list)
			dev_close(vlan->dev);
		WARN_ON(!list_empty(&sdata->u.ap.vlans));
	}

	local->open_count--;

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_VLAN:
		list_del(&sdata->u.vlan.list);
		sdata->u.vlan.ap = NULL;
		/* no need to tell driver */
		break;
	case IEEE80211_IF_TYPE_MNTR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs--;
			break;
		}

		local->monitors--;
		if (local->monitors == 0)
			local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;

		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
			local->fif_fcsfail--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
			local->fif_plcpfail--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
			local->fif_control--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
			local->fif_other_bss--;

		netif_tx_lock_bh(local->mdev);
		ieee80211_configure_filter(local);
		netif_tx_unlock_bh(local->mdev);
		break;
	case IEEE80211_IF_TYPE_MESH_POINT:
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		sdata->u.sta.state = IEEE80211_DISABLED;
		del_timer_sync(&sdata->u.sta.timer);
		/*
		 * When we get here, the interface is marked down.
		 * Call synchronize_rcu() to wait for the RX path
		 * should it be using the interface and enqueuing
		 * frames at this very time on another CPU.
		 */
		synchronize_rcu();
		skb_queue_purge(&sdata->u.sta.skb_queue);

		if (local->scan_dev == sdata->dev) {
			if (!local->ops->hw_scan) {
				local->sta_sw_scanning = 0;
				cancel_delayed_work(&local->scan_work);
			} else
				local->sta_hw_scanning = 0;
		}

		flush_workqueue(local->hw.workqueue);

		sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
		kfree(sdata->u.sta.extra_ie);
		sdata->u.sta.extra_ie = NULL;
		sdata->u.sta.extra_ie_len = 0;
		/* fall through */
	default:
		conf.vif = &sdata->vif;
		conf.type = sdata->vif.type;
		conf.mac_addr = dev->dev_addr;
		/* disable all keys for as long as this netdev is down */
		ieee80211_disable_keys(sdata);
		local->ops->remove_interface(local_to_hw(local), &conf);
	}

	if (local->open_count == 0) {
		if (netif_running(local->mdev))
			dev_close(local->mdev);

		if (local->ops->stop)
			local->ops->stop(local_to_hw(local));

		ieee80211_led_radio(local, 0);

		tasklet_disable(&local->tx_pending_tasklet);
		tasklet_disable(&local->tasklet);
	}

	return 0;
}
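
/*
 * Originator side of a TX block-ack (A-MPDU) session: allocate the
 * per-TID MLME state, set up an aggregation queue, let the driver
 * prepare, and send an ADDBA request to the peer.
 */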
int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	struct ieee80211_sub_if_data *sdata;
	u16 start_seq_num = 0;
	u8 *state;
	int ret;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM)
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();

	sta = sta_info_get(local, ra);
	if (!sta) {
		printk(KERN_DEBUG "Could not find the station\n");
		rcu_read_unlock();
		return -ENOENT;
	}

	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto start_ba_exit;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	/* check if the TID is not in aggregation flow already */
	if (*state != HT_AGG_STATE_IDLE) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
		       "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto start_ba_exit;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	sta->ampdu_mlme.tid_tx[tid] =
			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!sta->ampdu_mlme.tid_tx[tid]) {
		if (net_ratelimit())
			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
			       tid);
		ret = -ENOMEM;
		goto start_ba_exit;
	}
	/* Tx timer */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
			sta_addba_resp_timer_expired;
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
			(unsigned long)&sta->timer_to_tid[tid];
	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);

	/* ensure that TX flow won't interrupt us
	 * until the end of the call to requeue function */
	spin_lock_bh(&local->mdev->queue_lock);

	/* create a new queue for this aggregation */
	ret = ieee80211_ht_agg_queue_add(local, sta, tid);

	/* if no queue is available for aggregation,
	 * don't switch to aggregation */
	if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - queue unavailable for"
		       " tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		goto start_ba_err;
	}
	sdata = sta->sdata;

	/* Ok, the Addba frame hasn't been sent yet, but if the driver calls
	 * the callback right away, it must see that the flow has begun */
	*state |= HT_ADDBA_REQUESTED_MSK;

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
					       ra, tid, &start_seq_num);

	if (ret) {
		/* No need to requeue the packets in the agg queue, since we
		 * held the tx lock: no packet could be enqueued to the newly
		 * allocated queue */
		ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
		       " tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		*state = HT_AGG_STATE_IDLE;
		goto start_ba_err;
	}

	/* Will put all the packets in the new SW queue */
	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
	spin_unlock_bh(&local->mdev->queue_lock);

	/* send an addBA request */
	sta->ampdu_mlme.dialog_token_allocator++;
	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
			sta->ampdu_mlme.dialog_token_allocator;
	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;

	ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
				     sta->ampdu_mlme.tid_tx[tid]->dialog_token,
				     sta->ampdu_mlme.tid_tx[tid]->ssn,
				     0x40, 5000);

	/* activate the timer for the recipient's addBA response */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
			jiffies + ADDBA_RESP_INTERVAL;
	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
	goto start_ba_exit;

start_ba_err:
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&local->mdev->queue_lock);
	ret = -EBUSY;
start_ba_exit:
	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);

int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
				 u8 *ra, u16 tid,
				 enum ieee80211_back_parties initiator)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int ret = 0;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM)
		return -EINVAL;

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* check if the TID is in aggregation */
	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);

	if (*state != HT_AGG_STATE_OPERATIONAL) {
		ret = -ENOENT;
		goto stop_BA_exit;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);

	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
		 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
					       ra, tid, NULL);

	/* if the HW refuses to go back to legacy, stay operational */
	if (ret) {
		WARN_ON(ret != -EBUSY);
		*state = HT_AGG_STATE_OPERATIONAL;
		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
		goto stop_BA_exit;
	}

stop_BA_exit:
	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
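
/*
 * Driver callback: the hardware is ready to aggregate on this TID.
 * Once the peer's addBA response has arrived as well (the state becomes
 * operational), the aggregation queue is woken up.
 */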
void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM) {
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
		return;
	}

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
		printk(KERN_DEBUG "Could not find station: %s\n",
		       print_mac(mac, ra));
		return;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);

	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
		       *state);
		spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
		rcu_read_unlock();
		return;
	}

	WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);

	*state |= HT_ADDBA_DRV_READY_MSK;

	if (*state == HT_AGG_STATE_OPERATIONAL) {
		printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
	}
	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);

void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int agg_queue;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM) {
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		printk(KERN_DEBUG "Could not find station: %s\n",
		       print_mac(mac, ra));
		rcu_read_unlock();
		return;
	}
	state = &sta->ampdu_mlme.tid_state_tx[tid];

	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);

	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
		spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
		rcu_read_unlock();
		return;
	}

	if (*state & HT_AGG_STATE_INITIATOR_MSK)
		ieee80211_send_delba(sta->sdata->dev, ra, tid,
				     WLAN_BACK_INITIATOR,
				     WLAN_REASON_QSTA_NOT_USE);

	agg_queue = sta->tid_to_tx_q[tid];

	/* avoid ordering issues: we are the only one that can modify
	 * the content of the qdiscs */
	spin_lock_bh(&local->mdev->queue_lock);
	/* remove the queue for this aggregation */
	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
	spin_unlock_bh(&local->mdev->queue_lock);

	/* we just requeued all the frames that were in the removed queue;
	 * since we might miss a softirq we do netif_schedule.
	 * ieee80211_wake_queue is not used here as this queue is not
	 * necessarily stopped */
	netif_schedule(local->mdev);
	*state = HT_AGG_STATE_IDLE;
	sta->ampdu_mlme.addba_req_num[tid] = 0;
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
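
/*
 * IRQ-safe wrappers for the two callbacks above: queue an internal
 * message so that the tasklet invokes the corresponding callback in
 * softirq context.
 */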
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
		/* skb is NULL here, so don't dereference it for the name */
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping start BA session",
			       wiphy_name(local->hw.wiphy));
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_ADDBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
		/* skb is NULL here, so don't dereference it for the name */
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping stop BA session",
			       wiphy_name(local->hw.wiphy));
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_DELBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
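
/*
 * Propagate per-interface allmulti/promisc changes to the device-global
 * counters and sync the interface's multicast list to the master device.
 */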
static void ieee80211_set_multicast_list(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	int allmulti, promisc, sdata_allmulti, sdata_promisc;

	allmulti = !!(dev->flags & IFF_ALLMULTI);
	promisc = !!(dev->flags & IFF_PROMISC);
	sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
	sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);

	if (allmulti != sdata_allmulti) {
		if (dev->flags & IFF_ALLMULTI)
			atomic_inc(&local->iff_allmultis);
		else
			atomic_dec(&local->iff_allmultis);
		sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
	}

	if (promisc != sdata_promisc) {
		if (dev->flags & IFF_PROMISC)
			atomic_inc(&local->iff_promiscs);
		else
			atomic_dec(&local->iff_promiscs);
		sdata->flags ^= IEEE80211_SDATA_PROMISC;
	}

	dev_mc_sync(local->mdev, dev);
}

static const struct header_ops ieee80211_header_ops = {
	.create = eth_header,
	.parse = header_parse_80211,
	.rebuild = eth_rebuild_header,
	.cache = eth_header_cache,
	.cache_update = eth_header_cache_update,
};

/* Must not be called for mdev */
void ieee80211_if_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->hard_start_xmit = ieee80211_subif_start_xmit;
	dev->wireless_handlers = &ieee80211_iw_handler_def;
	dev->set_multicast_list = ieee80211_set_multicast_list;
	dev->change_mtu = ieee80211_change_mtu;
	dev->open = ieee80211_open;
	dev->stop = ieee80211_stop;
	dev->destructor = ieee80211_if_free;
}

/* everything else */

static int __ieee80211_if_config(struct net_device *dev,
				 struct sk_buff *beacon,
				 struct ieee80211_tx_control *control)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_if_conf conf;

	if (!local->ops->config_interface || !netif_running(dev))
		return 0;

	memset(&conf, 0, sizeof(conf));
	conf.type = sdata->vif.type;
	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
		conf.bssid = sdata->u.sta.bssid;
		conf.ssid = sdata->u.sta.ssid;
		conf.ssid_len = sdata->u.sta.ssid_len;
	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		conf.beacon = beacon;
		conf.beacon_control = control;
		ieee80211_start_mesh(dev);
	} else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
		conf.ssid = sdata->u.ap.ssid;
		conf.ssid_len = sdata->u.ap.ssid_len;
		conf.beacon = beacon;
		conf.beacon_control = control;
	}
	return local->ops->config_interface(local_to_hw(local),
					    &sdata->vif, &conf);
}

int ieee80211_if_config(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);

	if (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT &&
	    (local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
		return ieee80211_if_config_beacon(dev);
	return __ieee80211_if_config(dev, NULL, NULL);
}

int ieee80211_if_config_beacon(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_tx_control control;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct sk_buff *skb;

	if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE))
		return 0;
	skb = ieee80211_beacon_get(local_to_hw(local), &sdata->vif,
				   &control);
	if (!skb)
		return -ENOMEM;
	return __ieee80211_if_config(dev, skb, &control);
}
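
/*
 * Push the current channel, TX power and antenna gain settings to the
 * driver; the config call is only issued while at least one interface
 * is open.
 */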
int ieee80211_hw_config(struct ieee80211_local *local)
{
	struct ieee80211_channel *chan;
	int ret = 0;

	if (local->sta_sw_scanning)
		chan = local->scan_channel;
	else
		chan = local->oper_channel;

	local->hw.conf.channel = chan;

	if (!local->hw.conf.power_level)
		local->hw.conf.power_level = chan->max_power;
	else
		local->hw.conf.power_level = min(chan->max_power,
						 local->hw.conf.power_level);

	local->hw.conf.max_antenna_gain = chan->max_antenna_gain;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
	       wiphy_name(local->hw.wiphy), chan->center_freq);
#endif

	if (local->open_count)
		ret = local->ops->config(local_to_hw(local), &local->hw.conf);

	return ret;
}
/**
 * ieee80211_handle_ht should be used only after the legacy configuration
 * has been determined, namely the band, as HT configuration depends upon
 * the hardware's HT abilities for a _specific_ band.
 */
u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
			struct ieee80211_ht_info *req_ht_cap,
			struct ieee80211_ht_bss_info *req_bss_cap)
{
	struct ieee80211_conf *conf = &local->hw.conf;
	struct ieee80211_supported_band *sband;
	struct ieee80211_ht_info ht_conf;
	struct ieee80211_ht_bss_info ht_bss_conf;
	int i;
	u32 changed = 0;

	sband = local->hw.wiphy->bands[conf->channel->band];

	/* HT is not supported */
	if (!sband->ht_info.ht_supported) {
		conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
		return 0;
	}

	memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
	memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));

	if (enable_ht) {
		if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
			changed |= BSS_CHANGED_HT;

		conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
		ht_conf.ht_supported = 1;

		ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
		ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
		ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;

		for (i = 0; i < SUPP_MCS_SET_LEN; i++)
			ht_conf.supp_mcs_set[i] =
					sband->ht_info.supp_mcs_set[i] &
					req_ht_cap->supp_mcs_set[i];

		ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
		ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
		ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;

		ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
		ht_conf.ampdu_density = req_ht_cap->ampdu_density;

		/* if bss configuration changed store the new one */
		if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
		    memcmp(&conf->ht_bss_conf, &ht_bss_conf,
			   sizeof(ht_bss_conf))) {
			changed |= BSS_CHANGED_HT;
			memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
			memcpy(&conf->ht_bss_conf, &ht_bss_conf,
			       sizeof(ht_bss_conf));
		}
	} else {
		if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
			changed |= BSS_CHANGED_HT;
		conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
	}

	return changed;
}

void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
				      u32 changed)
{
	struct ieee80211_local *local = sdata->local;

	if (!changed)
		return;

	if (local->ops->bss_info_changed)
		local->ops->bss_info_changed(local_to_hw(local),
					     &sdata->vif,
					     &sdata->bss_conf,
					     changed);
}

void ieee80211_reset_erp_info(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	sdata->bss_conf.use_cts_prot = 0;
	sdata->bss_conf.use_short_preamble = 0;
	ieee80211_bss_info_change_notify(sdata,
					 BSS_CHANGED_ERP_CTS_PROT |
					 BSS_CHANGED_ERP_PREAMBLE);
}

void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
				 struct sk_buff *skb,
				 struct ieee80211_tx_status *status)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_status *saved;
	int tmp;

	skb->dev = local->mdev;
	saved = kmalloc(sizeof(struct ieee80211_tx_status), GFP_ATOMIC);
	if (unlikely(!saved)) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping tx status", skb->dev->name);
		/* should be dev_kfree_skb_irq, but due to this function being
		 * named _irqsafe instead of just _irq we can't be sure that
		 * people won't call it from non-irq contexts */
		dev_kfree_skb_any(skb);
		return;
	}

	memcpy(saved, status, sizeof(struct ieee80211_tx_status));
	/* copy pointer to saved status into skb->cb for use by tasklet */
	memcpy(skb->cb, &saved, sizeof(saved));

	skb->pkt_type = IEEE80211_TX_STATUS_MSG;
	skb_queue_tail(status->control.flags & IEEE80211_TXCTL_REQ_TX_STATUS ?
		       &local->skb_queue : &local->skb_queue_unreliable, skb);
	tmp = skb_queue_len(&local->skb_queue) +
		skb_queue_len(&local->skb_queue_unreliable);
	while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		memcpy(&saved, skb->cb, sizeof(saved));
		kfree(saved);
		dev_kfree_skb_irq(skb);
		tmp--;
		I802_DEBUG_INC(local->tx_status_drop);
	}
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
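
/*
 * Tasklet that drains the internal skb queues: received frames, TX
 * status reports and aggregation start/stop messages that were deferred
 * from IRQ context.
 */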
static void ieee80211_tasklet_handler(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *) data;
	struct sk_buff *skb;
	struct ieee80211_rx_status rx_status;
	struct ieee80211_tx_status *tx_status;
	struct ieee80211_ra_tid *ra_tid;

	while ((skb = skb_dequeue(&local->skb_queue)) ||
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		switch (skb->pkt_type) {
		case IEEE80211_RX_MSG:
			/* status is in skb->cb */
			memcpy(&rx_status, skb->cb, sizeof(rx_status));
			/* Clear skb->pkt_type in order to not confuse kernel
			 * netstack. */
			skb->pkt_type = 0;
			__ieee80211_rx(local_to_hw(local), skb, &rx_status);
			break;
		case IEEE80211_TX_STATUS_MSG:
			/* get pointer to saved status out of skb->cb */
			memcpy(&tx_status, skb->cb, sizeof(tx_status));
			skb->pkt_type = 0;
			ieee80211_tx_status(local_to_hw(local),
					    skb, tx_status);
			kfree(tx_status);
			break;
		case IEEE80211_DELBA_MSG:
			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
			ieee80211_stop_tx_ba_cb(local_to_hw(local),
						ra_tid->ra, ra_tid->tid);
			dev_kfree_skb(skb);
			break;
		case IEEE80211_ADDBA_MSG:
			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
			ieee80211_start_tx_ba_cb(local_to_hw(local),
						 ra_tid->ra, ra_tid->tid);
			dev_kfree_skb(skb);
			break;
		default: /* should never get here! */
			printk(KERN_ERR "%s: Unknown message type (%d)\n",
			       wiphy_name(local->hw.wiphy), skb->pkt_type);
			dev_kfree_skb(skb);
			break;
		}
	}
}
/* Remove added headers (e.g. QoS control), encryption header/MIC, etc. to
 * make a prepared TX frame (one that has been given to the hw) look like a
 * brand new IEEE 802.11 frame that is ready to go through TX processing
 * again. Also, tx_packet_data in cb is restored from tx_control. */
static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
				      struct ieee80211_key *key,
				      struct sk_buff *skb,
				      struct ieee80211_tx_control *control)
{
	int hdrlen, iv_len, mic_len;
	struct ieee80211_tx_packet_data *pkt_data;

	pkt_data = (struct ieee80211_tx_packet_data *)skb->cb;
	pkt_data->ifindex = vif_to_sdata(control->vif)->dev->ifindex;
	pkt_data->flags = 0;
	if (control->flags & IEEE80211_TXCTL_REQ_TX_STATUS)
		pkt_data->flags |= IEEE80211_TXPD_REQ_TX_STATUS;
	if (control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)
		pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT;
	if (control->flags & IEEE80211_TXCTL_REQUEUE)
		pkt_data->flags |= IEEE80211_TXPD_REQUEUE;
	if (control->flags & IEEE80211_TXCTL_EAPOL_FRAME)
		pkt_data->flags |= IEEE80211_TXPD_EAPOL_FRAME;
	pkt_data->queue = control->queue;

	hdrlen = ieee80211_get_hdrlen_from_skb(skb);

	if (!key)
		goto no_key;

	switch (key->conf.alg) {
	case ALG_WEP:
		iv_len = WEP_IV_LEN;
		mic_len = WEP_ICV_LEN;
		break;
	case ALG_TKIP:
		iv_len = TKIP_IV_LEN;
		mic_len = TKIP_ICV_LEN;
		break;
	case ALG_CCMP:
		iv_len = CCMP_HDR_LEN;
		mic_len = CCMP_MIC_LEN;
		break;
	default:
		goto no_key;
	}

	if (skb->len >= mic_len &&
	    !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
		skb_trim(skb, skb->len - mic_len);
	if (skb->len >= iv_len && skb->len > hdrlen) {
		memmove(skb->data + iv_len, skb->data, hdrlen);
		skb_pull(skb, iv_len);
	}

no_key:
	{
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		u16 fc = le16_to_cpu(hdr->frame_control);
		if ((fc & 0x8C) == 0x88) /* QoS Control Field */ {
			fc &= ~IEEE80211_STYPE_QOS_DATA;
			hdr->frame_control = cpu_to_le16(fc);
			memmove(skb->data + 2, skb->data, hdrlen - 2);
			skb_pull(skb, 2);
		}
	}
}
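
/*
 * Handle a frame the hardware reported as filtered, typically because
 * the destination STA went to sleep: buffer it for the station,
 * software-retry it once, or drop it.
 */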
static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
					    struct sta_info *sta,
					    struct sk_buff *skb,
					    struct ieee80211_tx_status *status)
{
	sta->tx_filtered_count++;

	/*
	 * Clear the TX filter mask for this STA when sending the next
	 * packet. If the STA went to power save mode, this will happen
	 * when it wakes up for the next time.
	 */
	sta->flags |= WLAN_STA_CLEAR_PS_FILT;

	/*
	 * This code races in the following way:
	 *
	 *  (1) STA sends frame indicating it will go to sleep and does so
	 *  (2) hardware/firmware adds STA to filter list, passes frame up
	 *  (3) hardware/firmware processes TX fifo and suppresses a frame
	 *  (4) we get TX status before having processed the frame and
	 *	knowing that the STA has gone to sleep.
	 *
	 * This is actually quite unlikely even when both those events are
	 * processed from interrupts coming in quickly after one another or
	 * even at the same time because we queue both TX status events and
	 * RX frames to be processed by a tasklet and process them in the
	 * same order that they were received or TX status last. Hence, there
	 * is no race as long as the frame RX is processed before the next TX
	 * status, which drivers can ensure, see below.
	 *
	 * Note that this can only happen if the hardware or firmware can
	 * actually add STAs to the filter list, if this is done by the
	 * driver in response to set_tim() (which will only reduce the race
	 * this whole filtering tries to solve, not completely solve it)
	 * this situation cannot happen.
	 *
	 * To completely solve this race drivers need to make sure that they
	 * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
	 *     functions and
	 * (b) always process RX events before TX status events if ordering
	 *     can be unknown, for example with different interrupt status
	 *     bits.
	 */
	if (sta->flags & WLAN_STA_PS &&
	    skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
		ieee80211_remove_tx_extra(local, sta->key, skb,
					  &status->control);
		skb_queue_tail(&sta->tx_filtered, skb);
		return;
	}

	if (!(sta->flags & WLAN_STA_PS) &&
	    !(status->control.flags & IEEE80211_TXCTL_REQUEUE)) {
		/* Software retry the packet once */
		status->control.flags |= IEEE80211_TXCTL_REQUEUE;
		ieee80211_remove_tx_extra(local, sta->key, skb,
					  &status->control);
		dev_queue_xmit(skb);
		return;
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "%s: dropped TX filtered frame, "
		       "queue_len=%d PS=%d @%lu\n",
		       wiphy_name(local->hw.wiphy),
		       skb_queue_len(&sta->tx_filtered),
		       !!(sta->flags & WLAN_STA_PS), jiffies);
	dev_kfree_skb(skb);
}
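
/*
 * Main TX status entry point: feed rate control and the 802.11 SNMP
 * counters, hand filtered frames back to the PS handling above, and
 * give monitor interfaces a radiotap-tagged copy of the frame.
 */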
  1181. void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
  1182. struct ieee80211_tx_status *status)
  1183. {
  1184. struct sk_buff *skb2;
  1185. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1186. struct ieee80211_local *local = hw_to_local(hw);
  1187. u16 frag, type;
  1188. struct ieee80211_tx_status_rtap_hdr *rthdr;
  1189. struct ieee80211_sub_if_data *sdata;
  1190. struct net_device *prev_dev = NULL;
  1191. if (!status) {
  1192. printk(KERN_ERR
  1193. "%s: ieee80211_tx_status called with NULL status\n",
  1194. wiphy_name(local->hw.wiphy));
  1195. dev_kfree_skb(skb);
  1196. return;
  1197. }
  1198. rcu_read_lock();
  1199. if (status->excessive_retries) {
  1200. struct sta_info *sta;
  1201. sta = sta_info_get(local, hdr->addr1);
  1202. if (sta) {
  1203. if (sta->flags & WLAN_STA_PS) {
  1204. /*
  1205. * The STA is in power save mode, so assume
				 * that this TX packet failed because of that.
				 */
				status->excessive_retries = 0;
				status->flags |= IEEE80211_TX_STATUS_TX_FILTERED;
				ieee80211_handle_filtered_frame(local, sta,
								skb, status);
				rcu_read_unlock();
				return;
			}
		}
	}

	if (status->flags & IEEE80211_TX_STATUS_TX_FILTERED) {
		struct sta_info *sta;
		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			ieee80211_handle_filtered_frame(local, sta, skb,
							status);
			rcu_read_unlock();
			return;
		}
	} else
		rate_control_tx_status(local->mdev, skb, status);

	rcu_read_unlock();

	ieee80211_led_tx(local, 0);

	/* SNMP counters
	 * Fragments are passed to low-level drivers as separate skbs, so these
	 * are actually fragments, not frames. Update frame counters only for
	 * the first fragment of the frame. */

	frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
	type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;

	if (status->flags & IEEE80211_TX_STATUS_ACK) {
		if (frag == 0) {
			local->dot11TransmittedFrameCount++;
			if (is_multicast_ether_addr(hdr->addr1))
				local->dot11MulticastTransmittedFrameCount++;
			if (status->retry_count > 0)
				local->dot11RetryCount++;
			if (status->retry_count > 1)
				local->dot11MultipleRetryCount++;
		}

		/* This counter shall be incremented for an acknowledged MPDU
		 * with an individual address in the address 1 field or an MPDU
		 * with a multicast address in the address 1 field of type Data
		 * or Management. */
		if (!is_multicast_ether_addr(hdr->addr1) ||
		    type == IEEE80211_FTYPE_DATA ||
		    type == IEEE80211_FTYPE_MGMT)
			local->dot11TransmittedFragmentCount++;
	} else {
		if (frag == 0)
			local->dot11FailedCount++;
	}

	/* this was a transmitted frame, but now we want to reuse it */
	skb_orphan(skb);

	/*
	 * This is a bit racy but we can avoid a lot of work
	 * with this test...
	 */
	if (!local->monitors && !local->cooked_mntrs) {
		dev_kfree_skb(skb);
		return;
	}

	/* send frame to monitor interfaces now */

	if (skb_headroom(skb) < sizeof(*rthdr)) {
		printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
		dev_kfree_skb(skb);
		return;
	}

	rthdr = (struct ieee80211_tx_status_rtap_hdr *)
				skb_push(skb, sizeof(*rthdr));

	memset(rthdr, 0, sizeof(*rthdr));
	rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
	rthdr->hdr.it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_DATA_RETRIES));

	if (!(status->flags & IEEE80211_TX_STATUS_ACK) &&
	    !is_multicast_ether_addr(hdr->addr1))
		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);

	if ((status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) &&
	    (status->control.flags & IEEE80211_TXCTL_USE_CTS_PROTECT))
		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
	else if (status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS)
		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);

	rthdr->data_retries = status->retry_count;

	/* XXX: is this sufficient for BPF? */
	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

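	/*
	 * Deliver a copy of the frame to every running monitor interface;
	 * the original skb is handed to the last one found so that no
	 * clone is needed in the common single-monitor case.
	 */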
	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
			if (!netif_running(sdata->dev))
				continue;

			if (prev_dev) {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2) {
					skb2->dev = prev_dev;
					netif_rx(skb2);
				}
			}

			prev_dev = sdata->dev;
		}
	}
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	}
	rcu_read_unlock();
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status);

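/*
 * Allocate a new hardware structure together with the mac80211-internal
 * ieee80211_local and priv_data_len bytes of driver-private data, all
 * backed by a single wiphy allocation.  The ops table must provide at
 * least the callbacks checked by the BUG_ON()s below; returns NULL if
 * the wiphy allocation fails.
 */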
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy            |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
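	/*
	 * (x + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST rounds x up to the
	 * next multiple of the netdev alignment (32 bytes), so the driver's
	 * private area starts on an aligned boundary right after
	 * struct ieee80211_local.
	 */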
	priv_size = ((sizeof(struct ieee80211_local) +
		      NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
		    priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);
	if (!wiphy)
		return NULL;

	wiphy->privid = mac80211_wiphy_privid;

	local = wiphy_priv(wiphy);
	local->hw.wiphy = wiphy;

	local->hw.priv = (char *)local +
			 ((sizeof(struct ieee80211_local) +
			   NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);

	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	local->hw.queues = 1; /* default */

	local->bridge_packets = 1;

	local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
	local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	local->short_retry_limit = 7;
	local->long_retry_limit = 4;
	local->hw.conf.radio_enabled = 1;

	INIT_LIST_HEAD(&local->interfaces);

	spin_lock_init(&local->key_lock);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);

	sta_info_init(local);

	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);
	tasklet_disable(&local->tx_pending_tasklet);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long) local);
	tasklet_disable(&local->tasklet);

	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);

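/*
 * Illustrative driver-side usage (a sketch, not part of this file; the
 * names my_priv and my_ops are hypothetical):
 *
 *	struct ieee80211_hw *hw;
 *	int err;
 *
 *	hw = ieee80211_alloc_hw(sizeof(struct my_priv), &my_ops);
 *	if (!hw)
 *		return -ENOMEM;
 *	(fill in hw->wiphy->bands[], hw->extra_tx_headroom, etc. here)
 *	err = ieee80211_register_hw(hw);
 *	if (err) {
 *		ieee80211_free_hw(hw);
 *		return err;
 *	}
 *
 * On teardown the driver calls ieee80211_unregister_hw(hw) followed by
 * ieee80211_free_hw(hw).
 */
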
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	const char *name;
	int result;
	enum ieee80211_band band;
	struct net_device *mdev;
	struct ieee80211_sub_if_data *sdata;

	/*
	 * generic code guarantees at least one band,
	 * set this very early because much code assumes
	 * that hw.conf.channel is assigned
	 */
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband;
		sband = local->hw.wiphy->bands[band];
		if (sband) {
			/* init channel we're on */
			local->hw.conf.channel =
			local->oper_channel =
			local->scan_channel = &sband->channels[0];
			break;
		}
	}

	result = wiphy_register(local->hw.wiphy);
	if (result < 0)
		return result;

	/* for now, mdev needs sub_if_data :/ */
	mdev = alloc_netdev(sizeof(struct ieee80211_sub_if_data),
			    "wmaster%d", ether_setup);
	if (!mdev)
		goto fail_mdev_alloc;

	sdata = IEEE80211_DEV_TO_SUB_IF(mdev);
	mdev->ieee80211_ptr = &sdata->wdev;
	sdata->wdev.wiphy = local->hw.wiphy;

	local->mdev = mdev;

	ieee80211_rx_bss_list_init(mdev);

	mdev->hard_start_xmit = ieee80211_master_start_xmit;
	mdev->open = ieee80211_master_open;
	mdev->stop = ieee80211_master_stop;
	mdev->type = ARPHRD_IEEE80211;
	mdev->header_ops = &ieee80211_header_ops;
	mdev->set_multicast_list = ieee80211_master_set_multicast_list;

	sdata->vif.type = IEEE80211_IF_TYPE_AP;
	sdata->dev = mdev;
	sdata->local = local;
	sdata->u.ap.force_unicast_rateidx = -1;
	sdata->u.ap.max_ratectrl_rateidx = -1;
	ieee80211_if_sdata_init(sdata);

	/* no RCU needed since we're still during init phase */
	list_add_tail(&sdata->list, &local->interfaces);

	name = wiphy_dev(local->hw.wiphy)->driver->name;
	local->hw.workqueue = create_singlethread_workqueue(name);
	if (!local->hw.workqueue) {
		result = -ENOMEM;
		goto fail_workqueue;
	}

	/*
	 * The hardware needs headroom for sending the frame,
	 * and we need some headroom for passing the frame to monitor
	 * interfaces, but never both at the same time.
	 */
	local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
				   sizeof(struct ieee80211_tx_status_rtap_hdr));

	debugfs_hw_add(local);

	local->hw.conf.beacon_int = 1000;

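	/*
	 * Advertise which wireless-extensions quality fields this hardware
	 * can report; negative maxima indicate the values are in dBm.
	 */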
	local->wstats_flags |= local->hw.max_rssi ?
			       IW_QUAL_LEVEL_UPDATED : IW_QUAL_LEVEL_INVALID;
	local->wstats_flags |= local->hw.max_signal ?
			       IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
	local->wstats_flags |= local->hw.max_noise ?
			       IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
	if (local->hw.max_rssi < 0 || local->hw.max_noise < 0)
		local->wstats_flags |= IW_QUAL_DBM;

	result = sta_info_start(local);
	if (result < 0)
		goto fail_sta_info;

	rtnl_lock();
	result = dev_alloc_name(local->mdev, local->mdev->name);
	if (result < 0)
		goto fail_dev;

	memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
	SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy));

	result = register_netdevice(local->mdev);
	if (result < 0)
		goto fail_dev;

	ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP);

	result = ieee80211_init_rate_ctrl_alg(local,
					      hw->rate_control_algorithm);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize rate control "
		       "algorithm\n", wiphy_name(local->hw.wiphy));
		goto fail_rate;
	}

	result = ieee80211_wep_init(local);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize wep\n",
		       wiphy_name(local->hw.wiphy));
		goto fail_wep;
	}

	ieee80211_install_qdisc(local->mdev);

	/* add one default STA interface */
	result = ieee80211_if_add(local->mdev, "wlan%d", NULL,
				  IEEE80211_IF_TYPE_STA, NULL);
	if (result)
		printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
		       wiphy_name(local->hw.wiphy));

	local->reg_state = IEEE80211_DEV_REGISTERED;
	rtnl_unlock();

	ieee80211_led_init(local);

	return 0;

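	/*
	 * Error unwinding: each label below undoes the setup steps that had
	 * completed before the corresponding failure.
	 */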
fail_wep:
	rate_control_deinitialize(local);
fail_rate:
	ieee80211_debugfs_remove_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
	unregister_netdevice(local->mdev);
	local->mdev = NULL;
fail_dev:
	rtnl_unlock();
	sta_info_stop(local);
fail_sta_info:
	debugfs_hw_del(local);
	destroy_workqueue(local->hw.workqueue);
fail_workqueue:
	if (local->mdev != NULL) {
		ieee80211_if_free(local->mdev);
		local->mdev = NULL;
	}
fail_mdev_alloc:
	wiphy_unregister(local->hw.wiphy);
	return result;
}
EXPORT_SYMBOL(ieee80211_register_hw);

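/*
 * Tear down what ieee80211_register_hw() set up: kill the tasklets, delete
 * all virtual interfaces (non-master first, then the master), purge the
 * pending skb queues, destroy the workqueue and unregister the wiphy.
 * Must be called while the device is in the registered state.
 */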
void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata, *tmp;

	tasklet_kill(&local->tx_pending_tasklet);
	tasklet_kill(&local->tasklet);

	rtnl_lock();

	BUG_ON(local->reg_state != IEEE80211_DEV_REGISTERED);

	local->reg_state = IEEE80211_DEV_UNREGISTERED;

	/*
	 * At this point, interface list manipulations are fine
	 * because the driver cannot be handing us frames any
	 * more and the tasklet is killed.
	 */

	/*
	 * First, we remove all non-master interfaces. Do this because they
	 * may have bss pointer dependency on the master, and when we free
	 * the master these would be freed as well, breaking our list
	 * iteration completely.
	 */
	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
		if (sdata->dev == local->mdev)
			continue;
		list_del(&sdata->list);
		__ieee80211_if_del(local, sdata);
	}

	/* then, finally, remove the master interface */
	__ieee80211_if_del(local, IEEE80211_DEV_TO_SUB_IF(local->mdev));

	rtnl_unlock();

	ieee80211_rx_bss_list_deinit(local->mdev);
	ieee80211_clear_tx_pending(local);
	sta_info_stop(local);
	rate_control_deinitialize(local);
	debugfs_hw_del(local);

	if (skb_queue_len(&local->skb_queue)
			|| skb_queue_len(&local->skb_queue_unreliable))
		printk(KERN_WARNING "%s: skb_queue not empty\n",
		       wiphy_name(local->hw.wiphy));
	skb_queue_purge(&local->skb_queue);
	skb_queue_purge(&local->skb_queue_unreliable);

	destroy_workqueue(local->hw.workqueue);
	wiphy_unregister(local->hw.wiphy);
	ieee80211_wep_free(local);
	ieee80211_led_exit(local);
	ieee80211_if_free(local->mdev);
	local->mdev = NULL;
}
EXPORT_SYMBOL(ieee80211_unregister_hw);

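/*
 * Final step of teardown: freeing the wiphy also releases the embedded
 * ieee80211_local and the driver-private area that were allocated together
 * in ieee80211_alloc_hw().
 */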
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);

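/*
 * Module init/exit: register the PID rate-control algorithm and the WME
 * qdisc and set up the netdev debugfs hooks.  Registered as a
 * subsys_initcall, presumably so the subsystem is available before the
 * device drivers that depend on it initialize.
 */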
static int __init ieee80211_init(void)
{
	struct sk_buff *skb;
	int ret;

	BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb));

	ret = rc80211_pid_init();
	if (ret)
		goto out;

	ret = ieee80211_wme_register();
	if (ret) {
		printk(KERN_DEBUG "ieee80211_init: failed to "
		       "initialize WME (err=%d)\n", ret);
		goto out_cleanup_pid;
	}

	ieee80211_debugfs_netdev_init();

	return 0;

out_cleanup_pid:
	rc80211_pid_exit();
out:
	return ret;
}

static void __exit ieee80211_exit(void)
{
	rc80211_pid_exit();

	/*
	 * For key todo, it'll be empty by now but the work
	 * might still be scheduled.
	 */
	flush_scheduled_work();

	if (mesh_allocated)
		ieee80211s_stop();

	ieee80211_wme_unregister();
	ieee80211_debugfs_netdev_exit();
}

subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);

MODULE_DESCRIPTION("IEEE 802.11 subsystem");
MODULE_LICENSE("GPL");