main.c

/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>

#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "wep.h"
#include "wme.h"
#include "aes_ccm.h"
#include "led.h"
#include "cfg.h"
#include "debugfs.h"
#include "debugfs_netdev.h"

/*
 * For seeing transmitted packets on monitor interfaces
 * we have a radiotap header too.
 */
struct ieee80211_tx_status_rtap_hdr {
	struct ieee80211_radiotap_header hdr;
	__le16 tx_flags;
	u8 data_retries;
} __attribute__ ((packed));

/* common interface routines */

static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
{
	memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
	return ETH_ALEN;
}
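
/*
 * Recompute the hardware filter flags (FIF_*) from the per-interface
 * promisc/allmulti counters and the monitor-mode counters, then hand the
 * changed and new flags to the driver. Bit 31 is not a valid FIF_* flag;
 * it is set below as a sanity check: the driver's configure_filter() is
 * expected to clear flags it does not handle, so the WARN_ON() catches
 * drivers that simply copy the flags back unchanged.
 */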
/* must be called under mdev tx lock */
static void ieee80211_configure_filter(struct ieee80211_local *local)
{
	unsigned int changed_flags;
	unsigned int new_flags = 0;

	if (atomic_read(&local->iff_promiscs))
		new_flags |= FIF_PROMISC_IN_BSS;
	if (atomic_read(&local->iff_allmultis))
		new_flags |= FIF_ALLMULTI;
	if (local->monitors)
		new_flags |= FIF_BCN_PRBRESP_PROMISC;
	if (local->fif_fcsfail)
		new_flags |= FIF_FCSFAIL;
	if (local->fif_plcpfail)
		new_flags |= FIF_PLCPFAIL;
	if (local->fif_control)
		new_flags |= FIF_CONTROL;
	if (local->fif_other_bss)
		new_flags |= FIF_OTHER_BSS;

	changed_flags = local->filter_flags ^ new_flags;

	/* be a bit nasty */
	new_flags |= (1<<31);

	local->ops->configure_filter(local_to_hw(local),
				     changed_flags, &new_flags,
				     local->mdev->mc_count,
				     local->mdev->mc_list);

	WARN_ON(new_flags & (1<<31));

	local->filter_flags = new_flags & ~(1<<31);
}

/* master interface */

static int ieee80211_master_open(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata;
	int res = -EOPNOTSUPP;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (netif_running(sdata->dev)) {
			res = 0;
			break;
		}
	}

	if (res)
		return res;

	netif_tx_start_all_queues(local->mdev);

	return 0;
}

static int ieee80211_master_stop(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(sdata, &local->interfaces, list)
		if (netif_running(sdata->dev))
			dev_close(sdata->dev);

	return 0;
}

static void ieee80211_master_set_multicast_list(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);

	ieee80211_configure_filter(local);
}

/* regular interfaces */

static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
{
	int meshhdrlen;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;

	/* FIX: what would be proper limits for MTU?
	 * This interface uses 802.3 frames. */
	if (new_mtu < 256 ||
	    new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
		return -EINVAL;
	}

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
	dev->mtu = new_mtu;
	return 0;
}

static inline int identical_mac_addr_allowed(int type1, int type2)
{
	return (type1 == IEEE80211_IF_TYPE_MNTR ||
		type2 == IEEE80211_IF_TYPE_MNTR ||
		(type1 == IEEE80211_IF_TYPE_AP &&
		 type2 == IEEE80211_IF_TYPE_WDS) ||
		(type1 == IEEE80211_IF_TYPE_WDS &&
		 (type2 == IEEE80211_IF_TYPE_WDS ||
		  type2 == IEEE80211_IF_TYPE_AP)) ||
		(type1 == IEEE80211_IF_TYPE_AP &&
		 type2 == IEEE80211_IF_TYPE_VLAN) ||
		(type1 == IEEE80211_IF_TYPE_VLAN &&
		 (type2 == IEEE80211_IF_TYPE_AP ||
		  type2 == IEEE80211_IF_TYPE_VLAN)));
}
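
/*
 * Bringing up a virtual interface: validate the MAC address, enforce the
 * coexistence rules against every other running interface (only one IBSS,
 * identical addresses only for the combinations allowed above, VLANs only
 * on top of an enabled AP), start the hardware on the first open, tell the
 * driver about the new vif and, for WDS, pre-create the peer STA entry.
 */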
static int ieee80211_open(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata, *nsdata;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sta_info *sta;
	struct ieee80211_if_init_conf conf;
	u32 changed = 0;
	int res;
	bool need_hw_reconfig = 0;
	u8 null_addr[ETH_ALEN] = {0};

	sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	/* fail early if user set an invalid address */
	if (compare_ether_addr(dev->dev_addr, null_addr) &&
	    !is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	/* we hold the RTNL here so can safely walk the list */
	list_for_each_entry(nsdata, &local->interfaces, list) {
		struct net_device *ndev = nsdata->dev;

		if (ndev != dev && netif_running(ndev)) {
			/*
			 * Allow only a single IBSS interface to be up at any
			 * time. This is restricted because beacon distribution
			 * cannot work properly if both are in the same IBSS.
			 *
			 * To remove this restriction we'd have to disallow them
			 * from setting the same SSID on different IBSS interfaces
			 * belonging to the same hardware. Then, however, we're
			 * faced with having to adopt two different TSF timers...
			 */
			if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
			    nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
				return -EBUSY;

			/*
			 * The remaining checks are only performed for interfaces
			 * with the same MAC address.
			 */
			if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
				continue;

			/*
			 * check whether it may have the same address
			 */
			if (!identical_mac_addr_allowed(sdata->vif.type,
							nsdata->vif.type))
				return -ENOTUNIQ;

			/*
			 * can only add VLANs to enabled APs
			 */
			if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
			    nsdata->vif.type == IEEE80211_IF_TYPE_AP)
				sdata->bss = &nsdata->u.ap;
		}
	}

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_WDS:
		if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
			return -ENOLINK;
		break;
	case IEEE80211_IF_TYPE_VLAN:
		if (!sdata->bss)
			return -ENOLINK;
		list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
		break;
	case IEEE80211_IF_TYPE_AP:
		sdata->bss = &sdata->u.ap;
		break;
	case IEEE80211_IF_TYPE_MESH_POINT:
		/* mesh ifaces must set allmulti to forward mcast traffic */
		atomic_inc(&local->iff_allmultis);
		break;
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_MNTR:
	case IEEE80211_IF_TYPE_IBSS:
		/* no special treatment */
		break;
	case IEEE80211_IF_TYPE_INVALID:
		/* cannot happen */
		WARN_ON(1);
		break;
	}

	if (local->open_count == 0) {
		res = 0;
		if (local->ops->start)
			res = local->ops->start(local_to_hw(local));
		if (res)
			goto err_del_bss;
		need_hw_reconfig = 1;
		ieee80211_led_radio(local, local->hw.conf.radio_enabled);
	}

	/*
	 * Check all interfaces and copy the hopefully now-present
	 * MAC address to those that have the special null one.
	 */
	list_for_each_entry(nsdata, &local->interfaces, list) {
		struct net_device *ndev = nsdata->dev;

		/*
		 * No need to check netif_running since we do not allow
		 * it to start up with this invalid address.
		 */
		if (compare_ether_addr(null_addr, ndev->dev_addr) == 0)
			memcpy(ndev->dev_addr,
			       local->hw.wiphy->perm_addr,
			       ETH_ALEN);
	}

	if (compare_ether_addr(null_addr, local->mdev->dev_addr) == 0)
		memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr,
		       ETH_ALEN);

	/*
	 * Validate the MAC address for this device.
	 */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (!local->open_count && local->ops->stop)
			local->ops->stop(local_to_hw(local));
		return -EADDRNOTAVAIL;
	}

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_VLAN:
		/* no need to tell driver */
		break;
	case IEEE80211_IF_TYPE_MNTR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs++;
			break;
		}

		/* must be before the call to ieee80211_configure_filter */
		local->monitors++;
		if (local->monitors == 1)
			local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;

		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
			local->fif_fcsfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
			local->fif_plcpfail++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
			local->fif_control++;
		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
			local->fif_other_bss++;

		netif_addr_lock_bh(local->mdev);
		ieee80211_configure_filter(local);
		netif_addr_unlock_bh(local->mdev);
		break;
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
		/* fall through */
	default:
		conf.vif = &sdata->vif;
		conf.type = sdata->vif.type;
		conf.mac_addr = dev->dev_addr;
		res = local->ops->add_interface(local_to_hw(local), &conf);
		if (res)
			goto err_stop;

		if (ieee80211_vif_is_mesh(&sdata->vif))
			ieee80211_start_mesh(sdata->dev);
		changed |= ieee80211_reset_erp_info(dev);
		ieee80211_bss_info_change_notify(sdata, changed);
		ieee80211_enable_keys(sdata);

		if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
		    !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
			netif_carrier_off(dev);
		else
			netif_carrier_on(dev);
	}

	if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
		/* Create STA entry for the WDS peer */
		sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
				     GFP_KERNEL);
		if (!sta) {
			res = -ENOMEM;
			goto err_del_interface;
		}

		/* no locking required since STA is not live yet */
		sta->flags |= WLAN_STA_AUTHORIZED;

		res = sta_info_insert(sta);
		if (res) {
			/* STA has been freed */
			goto err_del_interface;
		}
	}

	if (local->open_count == 0) {
		res = dev_open(local->mdev);
		WARN_ON(res);
		if (res)
			goto err_del_interface;
		tasklet_enable(&local->tx_pending_tasklet);
		tasklet_enable(&local->tasklet);
	}

	/*
	 * set_multicast_list will be invoked by the networking core
	 * which will check whether any increments here were done in
	 * error and sync them down to the hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_inc(&local->iff_allmultis);

	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_inc(&local->iff_promiscs);

	local->open_count++;
	if (need_hw_reconfig)
		ieee80211_hw_config(local);

	/*
	 * ieee80211_sta_work is disabled while network interface
	 * is down. Therefore, some configuration changes may not
	 * yet be effective. Trigger execution of ieee80211_sta_work
	 * to fix this.
	 */
	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
		struct ieee80211_if_sta *ifsta = &sdata->u.sta;
		queue_work(local->hw.workqueue, &ifsta->work);
	}

	netif_tx_start_all_queues(dev);

	return 0;

err_del_interface:
	local->ops->remove_interface(local_to_hw(local), &conf);
err_stop:
	if (!local->open_count && local->ops->stop)
		local->ops->stop(local_to_hw(local));
err_del_bss:
	sdata->bss = NULL;
	if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
		list_del(&sdata->u.vlan.list);
	return res;
}
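
/*
 * Tearing down a virtual interface has to undo ieee80211_open() in the
 * right order: stop the TX queues, tear down aggregation sessions, flush
 * all stations *before* the driver's remove_interface() (so sta_notify()
 * never sees a stale vif), drop the filter/allmulti references, and stop
 * the hardware once the last interface goes down.
 */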
static int ieee80211_stop(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_init_conf conf;
	struct sta_info *sta;

	/*
	 * Stop TX on this interface first.
	 */
	netif_tx_stop_all_queues(dev);

	/*
	 * Now delete all active aggregation sessions.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sta->sdata == sdata)
			ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
	}

	rcu_read_unlock();

	/*
	 * Remove all stations associated with this interface.
	 *
	 * This must be done before calling ops->remove_interface()
	 * because otherwise we can later invoke ops->sta_notify()
	 * whenever the STAs are removed, and that invalidates driver
	 * assumptions about always getting a vif pointer that is valid
	 * (because if we remove a STA after ops->remove_interface()
	 * the driver will have removed the vif info already!)
	 *
	 * We could relax this and only unlink the stations from the
	 * hash table and list but keep them on a per-sdata list that
	 * will be inserted back again when the interface is brought
	 * up again, but I don't currently see a use case for that,
	 * except with WDS which gets a STA entry created when it is
	 * brought up.
	 */
	sta_info_flush(local, sdata);

	/*
	 * Don't count this interface for promisc/allmulti while it
	 * is down. dev_mc_unsync() will invoke set_multicast_list
	 * on the master interface which will sync these down to the
	 * hardware as filter flags.
	 */
	if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
		atomic_dec(&local->iff_allmultis);

	if (sdata->flags & IEEE80211_SDATA_PROMISC)
		atomic_dec(&local->iff_promiscs);

	dev_mc_unsync(local->mdev, dev);

	/* APs need special treatment */
	if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
		struct ieee80211_sub_if_data *vlan, *tmp;
		struct beacon_data *old_beacon = sdata->u.ap.beacon;

		/* remove beacon */
		rcu_assign_pointer(sdata->u.ap.beacon, NULL);
		synchronize_rcu();
		kfree(old_beacon);

		/* down all dependent devices, that is VLANs */
		list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
					 u.vlan.list)
			dev_close(vlan->dev);
		WARN_ON(!list_empty(&sdata->u.ap.vlans));
	}

	local->open_count--;

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_VLAN:
		list_del(&sdata->u.vlan.list);
		/* no need to tell driver */
		break;
	case IEEE80211_IF_TYPE_MNTR:
		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
			local->cooked_mntrs--;
			break;
		}

		local->monitors--;
		if (local->monitors == 0)
			local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;

		if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
			local->fif_fcsfail--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
			local->fif_plcpfail--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
			local->fif_control--;
		if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
			local->fif_other_bss--;

		netif_addr_lock_bh(local->mdev);
		ieee80211_configure_filter(local);
		netif_addr_unlock_bh(local->mdev);
		break;
	case IEEE80211_IF_TYPE_MESH_POINT:
		/* allmulti is always set on mesh ifaces */
		atomic_dec(&local->iff_allmultis);
		/* fall through */
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		sdata->u.sta.state = IEEE80211_DISABLED;
		memset(sdata->u.sta.bssid, 0, ETH_ALEN);
		del_timer_sync(&sdata->u.sta.timer);
		/*
		 * When we get here, the interface is marked down.
		 * Call synchronize_rcu() to wait for the RX path
		 * should it be using the interface and enqueuing
		 * frames at this very time on another CPU.
		 */
		synchronize_rcu();
		skb_queue_purge(&sdata->u.sta.skb_queue);

		if (local->scan_dev == sdata->dev) {
			if (!local->ops->hw_scan) {
				local->sta_sw_scanning = 0;
				cancel_delayed_work(&local->scan_work);
			} else
				local->sta_hw_scanning = 0;
		}

		sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
		kfree(sdata->u.sta.extra_ie);
		sdata->u.sta.extra_ie = NULL;
		sdata->u.sta.extra_ie_len = 0;
		/* fall through */
	default:
		conf.vif = &sdata->vif;
		conf.type = sdata->vif.type;
		conf.mac_addr = dev->dev_addr;
		/* disable all keys for as long as this netdev is down */
		ieee80211_disable_keys(sdata);
		local->ops->remove_interface(local_to_hw(local), &conf);
	}

	sdata->bss = NULL;

	if (local->open_count == 0) {
		if (netif_running(local->mdev))
			dev_close(local->mdev);

		if (local->ops->stop)
			local->ops->stop(local_to_hw(local));

		ieee80211_led_radio(local, 0);

		flush_workqueue(local->hw.workqueue);

		tasklet_disable(&local->tx_pending_tasklet);
		tasklet_disable(&local->tasklet);
	}

	return 0;
}
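
/*
 * TX A-MPDU (block-ack) session setup. The per-TID state moves from
 * HT_AGG_STATE_IDLE to HT_ADDBA_REQUESTED_MSK once an addBA request has
 * been sent and the driver has been told via ampdu_action(); it only
 * becomes operational after both the driver callback and the peer's
 * addBA response have arrived.
 */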
int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	struct ieee80211_sub_if_data *sdata;
	u16 start_seq_num = 0;
	u8 *state;
	int ret;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM)
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();

	sta = sta_info_get(local, ra);
	if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find the station\n");
#endif
		ret = -ENOENT;
		goto exit;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	/* check if the TID is not in aggregation flow already */
	if (*state != HT_AGG_STATE_IDLE) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
		       "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	sta->ampdu_mlme.tid_tx[tid] =
			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!sta->ampdu_mlme.tid_tx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
			       tid);
#endif
		ret = -ENOMEM;
		goto err_unlock_sta;
	}
	/* Tx timer */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
			sta_addba_resp_timer_expired;
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
			(unsigned long)&sta->timer_to_tid[tid];
	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);

	/* create a new queue for this aggregation */
	ret = ieee80211_ht_agg_queue_add(local, sta, tid);

	/* in case no queue is available for aggregation,
	 * don't switch to aggregation */
	if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - queue unavailable for"
		       " tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		goto err_unlock_queue;
	}
	sdata = sta->sdata;

	/* Ok, the addBA frame hasn't been sent yet, but if the driver calls
	 * the callback right away, it must see that the flow has begun */
	*state |= HT_ADDBA_REQUESTED_MSK;

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
					       ra, tid, &start_seq_num);

	if (ret) {
		/* No need to requeue the packets in the agg queue, since we
		 * held the tx lock: no packet could be enqueued to the newly
		 * allocated queue */
		ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
		       " tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		*state = HT_AGG_STATE_IDLE;
		goto err_unlock_queue;
	}

	/* Will put all the packets in the new SW queue */
	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
	spin_unlock_bh(&sta->lock);

	/* send an addBA request */
	sta->ampdu_mlme.dialog_token_allocator++;
	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
			sta->ampdu_mlme.dialog_token_allocator;
	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;

	ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
				     sta->ampdu_mlme.tid_tx[tid]->dialog_token,
				     sta->ampdu_mlme.tid_tx[tid]->ssn,
				     0x40, 5000);

	/* activate the timer for the recipient's addBA response */
	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
			jiffies + ADDBA_RESP_INTERVAL;
	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif
	goto exit;

err_unlock_queue:
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	ret = -EBUSY;
err_unlock_sta:
	spin_unlock_bh(&sta->lock);
exit:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
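
/*
 * Stopping a TX BA session is asymmetric: this function only stops the
 * aggregation queue and asks the driver (IEEE80211_AMPDU_TX_STOP); the
 * actual state teardown and delBA transmission happen later, when the
 * driver confirms through ieee80211_stop_tx_ba_cb().
 */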
int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
				 u8 *ra, u16 tid,
				 enum ieee80211_back_parties initiator)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int ret = 0;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM)
		return -EINVAL;

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* check if the TID is in aggregation */
	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->lock);

	if (*state != HT_AGG_STATE_OPERATIONAL) {
		ret = -ENOENT;
		goto stop_BA_exit;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);

	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);

	if (local->ops->ampdu_action)
		ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
					       ra, tid, NULL);

	/* in case the HW denied going back to legacy, keep the session
	 * operational */
	if (ret) {
		WARN_ON(ret != -EBUSY);
		*state = HT_AGG_STATE_OPERATIONAL;
		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
		goto stop_BA_exit;
	}

stop_BA_exit:
	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
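
/*
 * Driver acknowledgement that its TX path is ready for aggregation on
 * this <station, TID>. If the peer's addBA response has already been
 * received, the session becomes operational and the aggregation queue is
 * woken; otherwise we keep waiting for the response (or its timer).
 */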
void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
#endif
		return;
	}

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
		rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %s\n",
		       print_mac(mac, ra));
#endif
		return;
	}

	state = &sta->ampdu_mlme.tid_state_tx[tid];
	spin_lock_bh(&sta->lock);

	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
		       *state);
#endif
		spin_unlock_bh(&sta->lock);
		rcu_read_unlock();
		return;
	}

	WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);

	*state |= HT_ADDBA_DRV_READY_MSK;

	if (*state == HT_AGG_STATE_OPERATIONAL) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
#endif
		ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
	}
	spin_unlock_bh(&sta->lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);

void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int agg_queue;
	DECLARE_MAC_BUF(mac);

	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %s\n",
		       print_mac(mac, ra));
#endif
		rcu_read_unlock();
		return;
	}
	state = &sta->ampdu_mlme.tid_state_tx[tid];

	/* NOTE: no need to use sta->lock in this state check, as
	 * ieee80211_stop_tx_ba_session will let only one stop call to
	 * pass through per sta/tid
	 */
	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
		rcu_read_unlock();
		return;
	}

	if (*state & HT_AGG_STATE_INITIATOR_MSK)
		ieee80211_send_delba(sta->sdata->dev, ra, tid,
				     WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	agg_queue = sta->tid_to_tx_q[tid];

	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);

	/* We just requeued all the frames that were in the removed
	 * queue, and since we might miss a softirq we do
	 * netif_schedule_queue. ieee80211_wake_queue is not used
	 * here as this queue is not necessarily stopped
	 */
	netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue));
	spin_lock_bh(&sta->lock);
	*state = HT_AGG_STATE_IDLE;
	sta->ampdu_mlme.addba_req_num[tid] = 0;
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&sta->lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
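
/*
 * The *_irqsafe variants may be called from hard interrupt context: they
 * do not touch the station table at all, they just stash the RA/TID pair
 * in the cb of a zero-length skb and queue it for the tasklet, which then
 * invokes the regular callbacks above in softirq context.
 */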
void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping start BA session",
			       wiphy_name(local->hw.wiphy));
#endif
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_ADDBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping stop BA session",
			       wiphy_name(local->hw.wiphy));
#endif
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_DELBA_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
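
/*
 * Per-vif multicast/promiscuity handling: translate this interface's
 * IFF_ALLMULTI/IFF_PROMISC state into the global reference counters and
 * sync its multicast list to the master device, which in turn triggers
 * ieee80211_configure_filter() through the master's set_multicast_list.
 */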
static void ieee80211_set_multicast_list(struct net_device *dev)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	int allmulti, promisc, sdata_allmulti, sdata_promisc;

	allmulti = !!(dev->flags & IFF_ALLMULTI);
	promisc = !!(dev->flags & IFF_PROMISC);
	sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
	sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);

	if (allmulti != sdata_allmulti) {
		if (dev->flags & IFF_ALLMULTI)
			atomic_inc(&local->iff_allmultis);
		else
			atomic_dec(&local->iff_allmultis);
		sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
	}

	if (promisc != sdata_promisc) {
		if (dev->flags & IFF_PROMISC)
			atomic_inc(&local->iff_promiscs);
		else
			atomic_dec(&local->iff_promiscs);
		sdata->flags ^= IEEE80211_SDATA_PROMISC;
	}

	dev_mc_sync(local->mdev, dev);
}

static const struct header_ops ieee80211_header_ops = {
	.create		= eth_header,
	.parse		= header_parse_80211,
	.rebuild	= eth_rebuild_header,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};

void ieee80211_if_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->hard_start_xmit = ieee80211_subif_start_xmit;
	dev->wireless_handlers = &ieee80211_iw_handler_def;
	dev->set_multicast_list = ieee80211_set_multicast_list;
	dev->change_mtu = ieee80211_change_mtu;
	dev->open = ieee80211_open;
	dev->stop = ieee80211_stop;
	dev->destructor = free_netdev;
	/* we will validate the address ourselves in ->open */
	dev->validate_addr = NULL;
}

/* everything else */

int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_conf conf;

	if (WARN_ON(!netif_running(sdata->dev)))
		return 0;

	if (!local->ops->config_interface)
		return 0;

	memset(&conf, 0, sizeof(conf));
	conf.changed = changed;

	if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
	    sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
		conf.bssid = sdata->u.sta.bssid;
		conf.ssid = sdata->u.sta.ssid;
		conf.ssid_len = sdata->u.sta.ssid_len;
	} else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
		conf.bssid = sdata->dev->dev_addr;
		conf.ssid = sdata->u.ap.ssid;
		conf.ssid_len = sdata->u.ap.ssid_len;
	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		u8 zero[ETH_ALEN] = { 0 };
		conf.bssid = zero;
		conf.ssid = zero;
		conf.ssid_len = 0;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID)))
		return -EINVAL;

	if (WARN_ON(!conf.ssid && (changed & IEEE80211_IFCC_SSID)))
		return -EINVAL;

	return local->ops->config_interface(local_to_hw(local),
					    &sdata->vif, &conf);
}
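
/*
 * Push the current channel and power configuration to the driver. The
 * scan channel takes precedence while a software scan is running, the TX
 * power level is clamped to the channel's maximum power, and the driver's
 * config() callback is only invoked while at least one interface is up.
 */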
int ieee80211_hw_config(struct ieee80211_local *local)
{
	struct ieee80211_channel *chan;
	int ret = 0;

	if (local->sta_sw_scanning)
		chan = local->scan_channel;
	else
		chan = local->oper_channel;

	local->hw.conf.channel = chan;

	if (!local->hw.conf.power_level)
		local->hw.conf.power_level = chan->max_power;
	else
		local->hw.conf.power_level = min(chan->max_power,
						 local->hw.conf.power_level);

	local->hw.conf.max_antenna_gain = chan->max_antenna_gain;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: HW CONFIG: freq=%d\n",
	       wiphy_name(local->hw.wiphy), chan->center_freq);
#endif

	if (local->open_count)
		ret = local->ops->config(local_to_hw(local), &local->hw.conf);

	return ret;
}

/**
 * ieee80211_handle_ht should be used only after the legacy configuration,
 * most importantly the band, has been determined, as the HT configuration
 * depends on the hardware's HT abilities for a _specific_ band.
 */
u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
			struct ieee80211_ht_info *req_ht_cap,
			struct ieee80211_ht_bss_info *req_bss_cap)
{
	struct ieee80211_conf *conf = &local->hw.conf;
	struct ieee80211_supported_band *sband;
	struct ieee80211_ht_info ht_conf;
	struct ieee80211_ht_bss_info ht_bss_conf;
	u32 changed = 0;
	int i;
	u8 max_tx_streams = IEEE80211_HT_CAP_MAX_STREAMS;
	u8 tx_mcs_set_cap;

	sband = local->hw.wiphy->bands[conf->channel->band];

	memset(&ht_conf, 0, sizeof(struct ieee80211_ht_info));
	memset(&ht_bss_conf, 0, sizeof(struct ieee80211_ht_bss_info));

	/* HT is not supported */
	if (!sband->ht_info.ht_supported) {
		conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
		goto out;
	}

	/* disable HT */
	if (!enable_ht) {
		if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)
			changed |= BSS_CHANGED_HT;
		conf->flags &= ~IEEE80211_CONF_SUPPORT_HT_MODE;
		conf->ht_conf.ht_supported = 0;
		goto out;
	}

	if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE))
		changed |= BSS_CHANGED_HT;

	conf->flags |= IEEE80211_CONF_SUPPORT_HT_MODE;
	ht_conf.ht_supported = 1;

	ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
	ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS);
	ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS;
	ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
	ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
	ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;

	ht_conf.ampdu_factor = req_ht_cap->ampdu_factor;
	ht_conf.ampdu_density = req_ht_cap->ampdu_density;

	/* Bits 96-100 */
	tx_mcs_set_cap = sband->ht_info.supp_mcs_set[12];

	/* configure supported Tx MCS according to requested MCS
	 * (based in most cases on Rx capabilities of peer) and self
	 * Tx MCS capabilities (as defined by low level driver HW
	 * Tx capabilities) */
	if (!(tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_DEFINED))
		goto check_changed;

	/* Counting from 0, therefore +1 */
	if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_RX_DIFF)
		max_tx_streams = ((tx_mcs_set_cap &
				   IEEE80211_HT_CAP_MCS_TX_STREAMS) >> 2) + 1;

	for (i = 0; i < max_tx_streams; i++)
		ht_conf.supp_mcs_set[i] =
			sband->ht_info.supp_mcs_set[i] &
			req_ht_cap->supp_mcs_set[i];

	if (tx_mcs_set_cap & IEEE80211_HT_CAP_MCS_TX_UEQM)
		for (i = IEEE80211_SUPP_MCS_SET_UEQM;
		     i < IEEE80211_SUPP_MCS_SET_LEN; i++)
			ht_conf.supp_mcs_set[i] =
				sband->ht_info.supp_mcs_set[i] &
				req_ht_cap->supp_mcs_set[i];

check_changed:
	/* if bss configuration changed store the new one */
	if (memcmp(&conf->ht_conf, &ht_conf, sizeof(ht_conf)) ||
	    memcmp(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf))) {
		changed |= BSS_CHANGED_HT;
		memcpy(&conf->ht_conf, &ht_conf, sizeof(ht_conf));
		memcpy(&conf->ht_bss_conf, &ht_bss_conf, sizeof(ht_bss_conf));
	}

out:
	return changed;
}

void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
				      u32 changed)
{
	struct ieee80211_local *local = sdata->local;

	if (!changed)
		return;

	if (local->ops->bss_info_changed)
		local->ops->bss_info_changed(local_to_hw(local),
					     &sdata->vif,
					     &sdata->bss_conf,
					     changed);
}

u32 ieee80211_reset_erp_info(struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);

	sdata->bss_conf.use_cts_prot = 0;
	sdata->bss_conf.use_short_preamble = 0;
	return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE;
}

void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
				 struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int tmp;

	skb->dev = local->mdev;
	skb->pkt_type = IEEE80211_TX_STATUS_MSG;
	skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
		       &local->skb_queue : &local->skb_queue_unreliable, skb);
	tmp = skb_queue_len(&local->skb_queue) +
		skb_queue_len(&local->skb_queue_unreliable);
	while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		dev_kfree_skb_irq(skb);
		tmp--;
		I802_DEBUG_INC(local->tx_status_drop);
	}
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);

static void ieee80211_tasklet_handler(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *) data;
	struct sk_buff *skb;
	struct ieee80211_rx_status rx_status;
	struct ieee80211_ra_tid *ra_tid;

	while ((skb = skb_dequeue(&local->skb_queue)) ||
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		switch (skb->pkt_type) {
		case IEEE80211_RX_MSG:
			/* status is in skb->cb */
			memcpy(&rx_status, skb->cb, sizeof(rx_status));
			/* Clear skb->pkt_type in order to not confuse kernel
			 * netstack. */
			skb->pkt_type = 0;
			__ieee80211_rx(local_to_hw(local), skb, &rx_status);
			break;
		case IEEE80211_TX_STATUS_MSG:
			skb->pkt_type = 0;
			ieee80211_tx_status(local_to_hw(local), skb);
			break;
		case IEEE80211_DELBA_MSG:
			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
			ieee80211_stop_tx_ba_cb(local_to_hw(local),
						ra_tid->ra, ra_tid->tid);
			dev_kfree_skb(skb);
			break;
		case IEEE80211_ADDBA_MSG:
			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
			ieee80211_start_tx_ba_cb(local_to_hw(local),
						 ra_tid->ra, ra_tid->tid);
			dev_kfree_skb(skb);
			break;
		default:
			WARN_ON(1);
			dev_kfree_skb(skb);
			break;
		}
	}
}

/* Remove added headers (e.g. QoS control), encryption header/MIC, etc. to
 * make a prepared TX frame (one that has been given to the hw) look like a
 * brand new IEEE 802.11 frame that is ready to go through TX processing
 * again.
 */
static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
				      struct ieee80211_key *key,
				      struct sk_buff *skb)
{
	unsigned int hdrlen, iv_len, mic_len;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (!key)
		goto no_key;

	switch (key->conf.alg) {
	case ALG_WEP:
		iv_len = WEP_IV_LEN;
		mic_len = WEP_ICV_LEN;
		break;
	case ALG_TKIP:
		iv_len = TKIP_IV_LEN;
		mic_len = TKIP_ICV_LEN;
		break;
	case ALG_CCMP:
		iv_len = CCMP_HDR_LEN;
		mic_len = CCMP_MIC_LEN;
		break;
	default:
		goto no_key;
	}

	if (skb->len >= hdrlen + mic_len &&
	    !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
		skb_trim(skb, skb->len - mic_len);
	if (skb->len >= hdrlen + iv_len) {
		memmove(skb->data + iv_len, skb->data, hdrlen);
		hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len);
	}

no_key:
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
		memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data,
			hdrlen - IEEE80211_QOS_CTL_LEN);
		skb_pull(skb, IEEE80211_QOS_CTL_LEN);
	}
}
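
/*
 * Called for frames the hardware reports as filtered (typically because
 * the destination STA went to power save). If the STA is asleep and the
 * buffer has room, the frame is stripped back to a clean 802.11 frame and
 * queued for delivery on wakeup; otherwise it gets one software retry via
 * dev_queue_xmit(), and after that it is dropped.
 */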
static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
					    struct sta_info *sta,
					    struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	sta->tx_filtered_count++;

	/*
	 * Clear the TX filter mask for this STA when sending the next
	 * packet. If the STA went to power save mode, this will happen
	 * when it wakes up for the next time.
	 */
	set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);

	/*
	 * This code races in the following way:
	 *
	 *  (1) STA sends frame indicating it will go to sleep and does so
	 *  (2) hardware/firmware adds STA to filter list, passes frame up
	 *  (3) hardware/firmware processes TX fifo and suppresses a frame
	 *  (4) we get TX status before having processed the frame and
	 *      knowing that the STA has gone to sleep.
	 *
	 * This is actually quite unlikely even when both those events are
	 * processed from interrupts coming in quickly after one another or
	 * even at the same time because we queue both TX status events and
	 * RX frames to be processed by a tasklet and process them in the
	 * same order that they were received or TX status last. Hence, there
	 * is no race as long as the frame RX is processed before the next TX
	 * status, which drivers can ensure, see below.
	 *
	 * Note that this can only happen if the hardware or firmware can
	 * actually add STAs to the filter list, if this is done by the
	 * driver in response to set_tim() (which will only reduce the race
	 * this whole filtering tries to solve, not completely solve it)
	 * this situation cannot happen.
	 *
	 * To completely solve this race drivers need to make sure that they
	 *  (a) don't mix the irq-safe/not irq-safe TX status/RX processing
	 *      functions and
	 *  (b) always process RX events before TX status events if ordering
	 *      can be unknown, for example with different interrupt status
	 *      bits.
	 */
	if (test_sta_flags(sta, WLAN_STA_PS) &&
	    skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
		ieee80211_remove_tx_extra(local, sta->key, skb);
		skb_queue_tail(&sta->tx_filtered, skb);
		return;
	}

	if (!test_sta_flags(sta, WLAN_STA_PS) &&
	    !(info->flags & IEEE80211_TX_CTL_REQUEUE)) {
		/* Software retry the packet once */
		info->flags |= IEEE80211_TX_CTL_REQUEUE;
		ieee80211_remove_tx_extra(local, sta->key, skb);
		dev_queue_xmit(skb);
		return;
	}

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	if (net_ratelimit())
		printk(KERN_DEBUG "%s: dropped TX filtered frame, "
		       "queue_len=%d PS=%d @%lu\n",
		       wiphy_name(local->hw.wiphy),
		       skb_queue_len(&sta->tx_filtered),
		       !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
#endif
	dev_kfree_skb(skb);
}
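
/*
 * Main TX status path: filtered/PS frames are handed to the helper above,
 * a missing block-ack on an A-MPDU triggers a BAR, rate control and the
 * dot11 SNMP counters are updated, and finally the frame is turned into a
 * radiotap-prefixed copy for any monitor interfaces before being freed.
 */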
  1192. void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
  1193. {
  1194. struct sk_buff *skb2;
  1195. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1196. struct ieee80211_local *local = hw_to_local(hw);
  1197. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1198. u16 frag, type;
  1199. __le16 fc;
  1200. struct ieee80211_tx_status_rtap_hdr *rthdr;
  1201. struct ieee80211_sub_if_data *sdata;
  1202. struct net_device *prev_dev = NULL;
  1203. struct sta_info *sta;
  1204. rcu_read_lock();
  1205. if (info->status.excessive_retries) {
  1206. sta = sta_info_get(local, hdr->addr1);
  1207. if (sta) {
  1208. if (test_sta_flags(sta, WLAN_STA_PS)) {
  1209. /*
  1210. * The STA is in power save mode, so assume
  1211. * that this TX packet failed because of that.
  1212. */
  1213. ieee80211_handle_filtered_frame(local, sta, skb);
  1214. rcu_read_unlock();
  1215. return;
  1216. }
  1217. }
  1218. }
  1219. fc = hdr->frame_control;
  1220. if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
  1221. (ieee80211_is_data_qos(fc))) {
  1222. u16 tid, ssn;
  1223. u8 *qc;
  1224. sta = sta_info_get(local, hdr->addr1);
  1225. if (sta) {
  1226. qc = ieee80211_get_qos_ctl(hdr);
  1227. tid = qc[0] & 0xf;
  1228. ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
  1229. & IEEE80211_SCTL_SEQ);
  1230. ieee80211_send_bar(sta->sdata->dev, hdr->addr1,
  1231. tid, ssn);
  1232. }
  1233. }
  1234. if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
  1235. sta = sta_info_get(local, hdr->addr1);
  1236. if (sta) {
  1237. ieee80211_handle_filtered_frame(local, sta, skb);
			rcu_read_unlock();
			return;
		}
	} else
		rate_control_tx_status(local->mdev, skb);

	rcu_read_unlock();

	ieee80211_led_tx(local, 0);

	/* SNMP counters
	 * Fragments are passed to low-level drivers as separate skbs, so these
	 * are actually fragments, not frames. Update frame counters only for
	 * the first fragment of the frame. */

	frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
	type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;

	if (info->flags & IEEE80211_TX_STAT_ACK) {
		if (frag == 0) {
			local->dot11TransmittedFrameCount++;
			if (is_multicast_ether_addr(hdr->addr1))
				local->dot11MulticastTransmittedFrameCount++;
			if (info->status.retry_count > 0)
				local->dot11RetryCount++;
			if (info->status.retry_count > 1)
				local->dot11MultipleRetryCount++;
		}

		/* This counter shall be incremented for an acknowledged MPDU
		 * with an individual address in the address 1 field or an MPDU
		 * with a multicast address in the address 1 field of type Data
		 * or Management. */
		if (!is_multicast_ether_addr(hdr->addr1) ||
		    type == IEEE80211_FTYPE_DATA ||
		    type == IEEE80211_FTYPE_MGMT)
			local->dot11TransmittedFragmentCount++;
	} else {
		if (frag == 0)
			local->dot11FailedCount++;
	}

	/* this was a transmitted frame, but now we want to reuse it */
	skb_orphan(skb);

	/*
	 * This is a bit racy but we can avoid a lot of work
	 * with this test...
	 */
	if (!local->monitors && !local->cooked_mntrs) {
		dev_kfree_skb(skb);
		return;
	}

	/* send frame to monitor interfaces now */

	if (skb_headroom(skb) < sizeof(*rthdr)) {
		printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
		dev_kfree_skb(skb);
		return;
	}

	rthdr = (struct ieee80211_tx_status_rtap_hdr *)
				skb_push(skb, sizeof(*rthdr));

	memset(rthdr, 0, sizeof(*rthdr));
	rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
	rthdr->hdr.it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_DATA_RETRIES));

	if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
	    !is_multicast_ether_addr(hdr->addr1))
		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);

	if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) &&
	    (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT))
		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
	else if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
		rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);

	rthdr->data_retries = info->status.retry_count;

	/* XXX: is this sufficient for BPF? */
	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);
	memset(skb->cb, 0, sizeof(skb->cb));

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) {
			if (!netif_running(sdata->dev))
				continue;

			if (prev_dev) {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2) {
					skb2->dev = prev_dev;
					netif_rx(skb2);
				}
			}

			prev_dev = sdata->dev;
		}
	}
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	}
	rcu_read_unlock();

	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status);
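
/*
 * Illustrative sketch, not part of mac80211 itself: a low-level driver is
 * expected to hand every transmitted frame back through
 * ieee80211_tx_status() once the hardware reports completion, after filling
 * in the tx_info status fields consumed above (IEEE80211_TX_STAT_ACK,
 * status.retry_count).  The driver name, callback shape and "acked"/"retries"
 * inputs below are hypothetical; only the mac80211 symbols come from this
 * API.  From interrupt context a driver would normally use
 * ieee80211_tx_status_irqsafe() rather than calling this function directly.
 *
 *	static void mydrv_tx_complete(struct ieee80211_hw *hw,
 *				      struct sk_buff *skb,
 *				      bool acked, u8 retries)
 *	{
 *		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 *
 *		info->status.retry_count = retries;
 *		if (acked)
 *			info->flags |= IEEE80211_TX_STAT_ACK;
 *
 *		ieee80211_tx_status(hw, skb);
 *	}
 */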
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy            |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
	priv_size = ((sizeof(struct ieee80211_local) +
		      NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST) +
		    priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);
	if (!wiphy)
		return NULL;

	wiphy->privid = mac80211_wiphy_privid;

	local = wiphy_priv(wiphy);
	local->hw.wiphy = wiphy;

	local->hw.priv = (char *)local +
			 ((sizeof(struct ieee80211_local) +
			   NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);

	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	local->hw.queues = 1; /* default */

	local->bridge_packets = 1;

	local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
	local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	local->short_retry_limit = 7;
	local->long_retry_limit = 4;
	local->hw.conf.radio_enabled = 1;

	INIT_LIST_HEAD(&local->interfaces);

	spin_lock_init(&local->key_lock);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work);

	sta_info_init(local);

	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);
	tasklet_disable(&local->tx_pending_tasklet);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long) local);
	tasklet_disable(&local->tasklet);

	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
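
/*
 * Illustrative sketch with a hypothetical driver: the priv_data_len passed
 * here is carved out of the same wiphy allocation described in the layout
 * comment above and comes back to the driver as hw->priv, 32-byte aligned.
 * "mydrv_priv" and "mydrv_ops" are placeholders, not real symbols.
 *
 *	struct mydrv_priv {
 *		void __iomem *regs;
 *		spinlock_t lock;
 *	};
 *
 *	struct ieee80211_hw *hw;
 *	struct mydrv_priv *priv;
 *
 *	hw = ieee80211_alloc_hw(sizeof(struct mydrv_priv), &mydrv_ops);
 *	if (!hw)
 *		return -ENOMEM;
 *	priv = hw->priv;	(points into the aligned tail of the allocation)
 */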
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	const char *name;
	int result;
	enum ieee80211_band band;
	struct net_device *mdev;
	struct wireless_dev *mwdev;

	/*
	 * generic code guarantees at least one band,
	 * set this very early because much code assumes
	 * that hw.conf.channel is assigned
	 */
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = local->hw.wiphy->bands[band];
		if (sband) {
			/* init channel we're on */
			local->hw.conf.channel =
			local->oper_channel =
			local->scan_channel = &sband->channels[0];
			break;
		}
	}

	result = wiphy_register(local->hw.wiphy);
	if (result < 0)
		return result;

	/*
	 * We use the number of queues for feature tests (QoS, HT) internally
	 * so restrict them appropriately.
	 */
	if (hw->queues > IEEE80211_MAX_QUEUES)
		hw->queues = IEEE80211_MAX_QUEUES;
	if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
		hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
	if (hw->queues < 4)
		hw->ampdu_queues = 0;

	mdev = alloc_netdev_mq(sizeof(struct wireless_dev),
			       "wmaster%d", ether_setup,
			       ieee80211_num_queues(hw));
	if (!mdev)
		goto fail_mdev_alloc;

	mwdev = netdev_priv(mdev);
	mdev->ieee80211_ptr = mwdev;
	mwdev->wiphy = local->hw.wiphy;

	local->mdev = mdev;

	ieee80211_rx_bss_list_init(local);

	mdev->hard_start_xmit = ieee80211_master_start_xmit;
	mdev->open = ieee80211_master_open;
	mdev->stop = ieee80211_master_stop;
	mdev->type = ARPHRD_IEEE80211;
	mdev->header_ops = &ieee80211_header_ops;
	mdev->set_multicast_list = ieee80211_master_set_multicast_list;

	name = wiphy_dev(local->hw.wiphy)->driver->name;
	local->hw.workqueue = create_freezeable_workqueue(name);
	if (!local->hw.workqueue) {
		result = -ENOMEM;
		goto fail_workqueue;
	}

	/*
	 * The hardware needs headroom for sending the frame,
	 * and we need some headroom for passing the frame to monitor
	 * interfaces, but never both at the same time.
	 */
	local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
				   sizeof(struct ieee80211_tx_status_rtap_hdr));

	debugfs_hw_add(local);

	if (local->hw.conf.beacon_int < 10)
		local->hw.conf.beacon_int = 100;

	if (local->hw.max_listen_interval == 0)
		local->hw.max_listen_interval = 1;

	local->hw.conf.listen_interval = local->hw.max_listen_interval;

	local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
						  IEEE80211_HW_SIGNAL_DB |
						  IEEE80211_HW_SIGNAL_DBM) ?
			       IW_QUAL_QUAL_UPDATED : IW_QUAL_QUAL_INVALID;
	local->wstats_flags |= local->hw.flags & IEEE80211_HW_NOISE_DBM ?
			       IW_QUAL_NOISE_UPDATED : IW_QUAL_NOISE_INVALID;
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		local->wstats_flags |= IW_QUAL_DBM;

	result = sta_info_start(local);
	if (result < 0)
		goto fail_sta_info;

	rtnl_lock();

	result = dev_alloc_name(local->mdev, local->mdev->name);
	if (result < 0)
		goto fail_dev;

	memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
	SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy));

	result = register_netdevice(local->mdev);
	if (result < 0)
		goto fail_dev;

	result = ieee80211_init_rate_ctrl_alg(local,
					      hw->rate_control_algorithm);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize rate control "
		       "algorithm\n", wiphy_name(local->hw.wiphy));
		goto fail_rate;
	}

	result = ieee80211_wep_init(local);
	if (result < 0) {
		printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n",
		       wiphy_name(local->hw.wiphy), result);
		goto fail_wep;
	}

	local->mdev->select_queue = ieee80211_select_queue;

	/* add one default STA interface */
	result = ieee80211_if_add(local, "wlan%d", NULL,
				  IEEE80211_IF_TYPE_STA, NULL);
	if (result)
		printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
		       wiphy_name(local->hw.wiphy));

	rtnl_unlock();

	ieee80211_led_init(local);

	return 0;

fail_wep:
	rate_control_deinitialize(local);
fail_rate:
	unregister_netdevice(local->mdev);
	local->mdev = NULL;
fail_dev:
	rtnl_unlock();
	sta_info_stop(local);
fail_sta_info:
	debugfs_hw_del(local);
	destroy_workqueue(local->hw.workqueue);
fail_workqueue:
	if (local->mdev)
		free_netdev(local->mdev);
fail_mdev_alloc:
	wiphy_unregister(local->hw.wiphy);
	return result;
}
EXPORT_SYMBOL(ieee80211_register_hw);
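
/*
 * Illustrative registration order as seen from a hypothetical driver's
 * probe routine (only the ieee80211_* and SET_IEEE80211_* calls are part
 * of the mac80211 API; everything prefixed "mydrv_" or "MYDRV_" is a
 * placeholder).  Whatever ieee80211_register_hw() inspects above -- wiphy
 * bands, queue counts, extra_tx_headroom, hardware flags -- must be set
 * before the call, since registration takes its configuration from them.
 *
 *	hw = ieee80211_alloc_hw(sizeof(struct mydrv_priv), &mydrv_ops);
 *	SET_IEEE80211_DEV(hw, &pdev->dev);
 *	SET_IEEE80211_PERM_ADDR(hw, mydrv_mac_addr);
 *	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mydrv_band_2ghz;
 *	hw->queues = 4;
 *	hw->extra_tx_headroom = MYDRV_TX_DESC_LEN;
 *	err = ieee80211_register_hw(hw);
 *	if (err) {
 *		ieee80211_free_hw(hw);
 *		return err;
 *	}
 */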
void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	tasklet_kill(&local->tx_pending_tasklet);
	tasklet_kill(&local->tasklet);

	rtnl_lock();

	/*
	 * At this point, interface list manipulations are fine
	 * because the driver cannot be handing us frames any
	 * more and the tasklet is killed.
	 */

	/* First, we remove all virtual interfaces. */
	ieee80211_remove_interfaces(local);

	/* then, finally, remove the master interface */
	unregister_netdevice(local->mdev);

	rtnl_unlock();

	ieee80211_rx_bss_list_deinit(local);
	ieee80211_clear_tx_pending(local);
	sta_info_stop(local);
	rate_control_deinitialize(local);
	debugfs_hw_del(local);

	if (skb_queue_len(&local->skb_queue)
	    || skb_queue_len(&local->skb_queue_unreliable))
		printk(KERN_WARNING "%s: skb_queue not empty\n",
		       wiphy_name(local->hw.wiphy));
	skb_queue_purge(&local->skb_queue);
	skb_queue_purge(&local->skb_queue_unreliable);

	destroy_workqueue(local->hw.workqueue);
	wiphy_unregister(local->hw.wiphy);
	ieee80211_wep_free(local);
	ieee80211_led_exit(local);
	free_netdev(local->mdev);
}
EXPORT_SYMBOL(ieee80211_unregister_hw);

void ieee80211_free_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
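
/*
 * Illustrative teardown order for a hypothetical driver's remove path:
 * unregister first so mac80211 removes its interfaces and stops calling
 * into the driver, release driver-owned resources next, and free the hw
 * last, since ieee80211_free_hw() drops the wiphy allocation that also
 * backs hw->priv.
 *
 *	ieee80211_unregister_hw(hw);
 *	mydrv_release_resources(priv);
 *	ieee80211_free_hw(hw);
 */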
static int __init ieee80211_init(void)
{
	struct sk_buff *skb;
	int ret;

	BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));

	ret = rc80211_pid_init();
	if (ret)
		return ret;

	ieee80211_debugfs_netdev_init();

	return 0;
}

static void __exit ieee80211_exit(void)
{
	rc80211_pid_exit();

	/*
	 * For key todo, it'll be empty by now but the work
	 * might still be scheduled.
	 */
	flush_scheduled_work();

	if (mesh_allocated)
		ieee80211s_stop();

	ieee80211_debugfs_netdev_exit();
}

subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);

MODULE_DESCRIPTION("IEEE 802.11 subsystem");
MODULE_LICENSE("GPL");