main.c
/*
 * Atheros CARL9170 driver
 *
 * mac80211 interaction code
 *
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, see
 * http://www.gnu.org/licenses/.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 * Copyright (c) 2007-2008 Atheros Communications, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include "hw.h"
#include "carl9170.h"
#include "cmd.h"

static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");

int modparam_noht;
module_param_named(noht, modparam_noht, int, S_IRUGO);
MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");

#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate) | (_txpidx) << 4, \
}

struct ieee80211_rate __carl9170_ratetable[] = {
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE

#define carl9170_g_ratetable	(__carl9170_ratetable + 0)
#define carl9170_g_ratetable_size	12
#define carl9170_a_ratetable	(__carl9170_ratetable + 4)
#define carl9170_a_ratetable_size	8

/*
 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
 * array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 18, /* XXX */ \
}

static struct ieee80211_channel carl9170_2ghz_chantable[] = {
	CHAN(2412, 0),
	CHAN(2417, 1),
	CHAN(2422, 2),
	CHAN(2427, 3),
	CHAN(2432, 4),
	CHAN(2437, 5),
	CHAN(2442, 6),
	CHAN(2447, 7),
	CHAN(2452, 8),
	CHAN(2457, 9),
	CHAN(2462, 10),
	CHAN(2467, 11),
	CHAN(2472, 12),
	CHAN(2484, 13),
};

static struct ieee80211_channel carl9170_5ghz_chantable[] = {
	CHAN(4920, 14),
	CHAN(4940, 15),
	CHAN(4960, 16),
	CHAN(4980, 17),
	CHAN(5040, 18),
	CHAN(5060, 19),
	CHAN(5080, 20),
	CHAN(5180, 21),
	CHAN(5200, 22),
	CHAN(5220, 23),
	CHAN(5240, 24),
	CHAN(5260, 25),
	CHAN(5280, 26),
	CHAN(5300, 27),
	CHAN(5320, 28),
	CHAN(5500, 29),
	CHAN(5520, 30),
	CHAN(5540, 31),
	CHAN(5560, 32),
	CHAN(5580, 33),
	CHAN(5600, 34),
	CHAN(5620, 35),
	CHAN(5640, 36),
	CHAN(5660, 37),
	CHAN(5680, 38),
	CHAN(5700, 39),
	CHAN(5745, 40),
	CHAN(5765, 41),
	CHAN(5785, 42),
	CHAN(5805, 43),
	CHAN(5825, 44),
	CHAN(5170, 45),
	CHAN(5190, 46),
	CHAN(5210, 47),
	CHAN(5230, 48),
};
#undef CHAN

#define CARL9170_HT_CAP \
{ \
	.ht_supported = true, \
	.cap = IEEE80211_HT_CAP_MAX_AMSDU | \
	       IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
	       IEEE80211_HT_CAP_SGI_40 | \
	       IEEE80211_HT_CAP_DSSSCCK40 | \
	       IEEE80211_HT_CAP_SM_PS, \
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
	.mcs = { \
		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
		.rx_highest = cpu_to_le16(300), \
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
	}, \
}

static struct ieee80211_supported_band carl9170_band_2GHz = {
	.channels = carl9170_2ghz_chantable,
	.n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
	.bitrates = carl9170_g_ratetable,
	.n_bitrates = carl9170_g_ratetable_size,
	.ht_cap = CARL9170_HT_CAP,
};

static struct ieee80211_supported_band carl9170_band_5GHz = {
	.channels = carl9170_5ghz_chantable,
	.n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
	.bitrates = carl9170_a_ratetable,
	.n_bitrates = carl9170_a_ratetable_size,
	.ht_cap = CARL9170_HT_CAP,
};
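
/*
 * Garbage-collect TX aggregation sessions: every TID that has reached
 * the SHUTDOWN state is marked KILLED and unlinked from tx_ampdu_list;
 * after an RCU grace period its still-queued frames are completed
 * (unsuccessfully) and the tid_info itself is freed.
 */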
static void carl9170_ampdu_gc(struct ar9170 *ar)
{
	struct carl9170_sta_tid *tid_info;
	LIST_HEAD(tid_gc);

	rcu_read_lock();
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
			tid_info->state = CARL9170_TID_STATE_KILLED;
			list_del_rcu(&tid_info->list);
			ar->tx_ampdu_list_len--;
			list_add_tail(&tid_info->tmp_list, &tid_gc);
		}
		spin_unlock_bh(&ar->tx_ampdu_list_lock);
	}
	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();

	synchronize_rcu();

	while (!list_empty(&tid_gc)) {
		struct sk_buff *skb;

		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
					    tmp_list);

		while ((skb = __skb_dequeue(&tid_info->queue)))
			carl9170_tx_status(ar, skb, false);

		list_del_init(&tid_info->tmp_list);
		kfree(tid_info);
	}
}
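
/*
 * Flush the TX path: optionally drop everything that is still waiting in
 * the host-side tx_pending queues, then give frames that were already
 * uploaded to the device up to one second (HZ) to complete.
 */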
static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
{
	if (drop_queued) {
		int i;

		/*
		 * We can only drop frames which have not been uploaded
		 * to the device yet.
		 */

		for (i = 0; i < ar->hw->queues; i++) {
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
				struct ieee80211_tx_info *info;

				info = IEEE80211_SKB_CB(skb);
				if (info->flags & IEEE80211_TX_CTL_AMPDU)
					atomic_dec(&ar->tx_ampdu_upload);

				carl9170_tx_status(ar, skb, false);
			}
		}
	}

	/* Wait for all other outstanding frames to timeout. */
	if (atomic_read(&ar->tx_total_queued))
		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
}
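
/*
 * Suspend every running block-ack (aggregation) session and complete the
 * frames that are still queued per TID.
 */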
static void carl9170_flush_ba(struct ar9170 *ar)
{
	struct sk_buff_head free;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb;

	__skb_queue_head_init(&free);

	rcu_read_lock();
	spin_lock_bh(&ar->tx_ampdu_list_lock);
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
			tid_info->state = CARL9170_TID_STATE_SUSPEND;

			spin_lock(&tid_info->lock);
			while ((skb = __skb_dequeue(&tid_info->queue)))
				__skb_queue_tail(&free, skb);
			spin_unlock(&tid_info->lock);
		}
	}
	spin_unlock_bh(&ar->tx_ampdu_list_lock);
	rcu_read_unlock();

	while ((skb = __skb_dequeue(&free)))
		carl9170_tx_status(ar, skb, false);
}
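
/*
 * Drop every queued frame, reset the TX bookkeeping (statistics, memory
 * bitmap, counters) and throw away cached beacons. Used on start, stop
 * and restart to bring the TX path back into a known state.
 */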
static void carl9170_zap_queues(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;
	unsigned int i;

	carl9170_ampdu_gc(ar);

	carl9170_flush_ba(ar);
	carl9170_flush(ar, true);

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);
		while (!skb_queue_empty(&ar->tx_status[i])) {
			struct sk_buff *skb;

			skb = skb_peek(&ar->tx_status[i]);
			carl9170_tx_get_skb(skb);
			spin_unlock_bh(&ar->tx_status[i].lock);
			carl9170_tx_drop(ar, skb);
			spin_lock_bh(&ar->tx_status[i].lock);
			carl9170_tx_put_skb(skb);
		}
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ar->hw->queues; i++)
		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;

	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
		ar->mem_bitmap[i] = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(cvif->beacon);
		cvif->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);
	}
	rcu_read_unlock();

	atomic_set(&ar->tx_ampdu_upload, 0);
	atomic_set(&ar->tx_ampdu_scheduler, 0);
	atomic_set(&ar->tx_total_pending, 0);
	atomic_set(&ar->tx_total_queued, 0);
	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
}

#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)	\
do {								\
	queue.aifs = ai_fs;					\
	queue.cw_min = cwmin;					\
	queue.cw_max = cwmax;					\
	queue.txop = _txop;					\
} while (0)

static int carl9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	carl9170_zap_queues(ar);

	/* reset QoS defaults */
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);

	ar->current_factor = ar->current_density = -1;
	/* "The first key is unique." */
	ar->usedkeys = 1;
	ar->filter_state = 0;
	ar->ps.last_action = jiffies;
	ar->ps.last_slept = jiffies;
	ar->erp_mode = CARL9170_ERP_AUTO;
	ar->rx_software_decryption = false;
	ar->disable_offload = false;

	for (i = 0; i < ar->hw->queues; i++) {
		ar->queue_stop_timeout[i] = jiffies;
		ar->max_queue_stop_timeout[i] = 0;
	}

	atomic_set(&ar->mem_allocs, 0);

	err = carl9170_usb_open(ar);
	if (err)
		goto out;

	err = carl9170_init_mac(ar);
	if (err)
		goto out;

	err = carl9170_set_qos(ar);
	if (err)
		goto out;

	if (ar->fw.rx_filter) {
		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
		if (err)
			goto out;
	}

	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
				 AR9170_DMA_TRIGGER_RXQ);
	if (err)
		goto out;

	/* Clear key-cache */
	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  0, NULL, 0);
		if (err)
			goto out;

		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  1, NULL, 0);
		if (err)
			goto out;

		if (i < AR9170_CAM_MAX_USER) {
			err = carl9170_disable_key(ar, i);
			if (err)
				goto out;
		}
	}

	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));

	ieee80211_wake_queues(ar->hw);
	err = 0;

out:
	mutex_unlock(&ar->mutex);
	return err;
}

static void carl9170_cancel_worker(struct ar9170 *ar)
{
	cancel_delayed_work_sync(&ar->stat_work);
	cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_CARL9170_LEDS
	cancel_delayed_work_sync(&ar->led_work);
#endif /* CONFIG_CARL9170_LEDS */
	cancel_work_sync(&ar->ps_work);
	cancel_work_sync(&ar->ping_work);
	cancel_work_sync(&ar->ampdu_work);
}

static void carl9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	ieee80211_stop_queues(ar->hw);

	mutex_lock(&ar->mutex);
	if (IS_ACCEPTING_CMD(ar)) {
		rcu_assign_pointer(ar->beacon_iter, NULL);

		carl9170_led_set_state(ar, 0);

		/* stop DMA */
		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
		carl9170_usb_stop(ar);
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	carl9170_cancel_worker(ar);
}

static void carl9170_restart_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 restart_work);
	int err;

	ar->usedkeys = 0;
	ar->filter_state = 0;
	carl9170_cancel_worker(ar);

	mutex_lock(&ar->mutex);
	err = carl9170_usb_restart(ar);
	if (net_ratelimit()) {
		if (err) {
			dev_err(&ar->udev->dev,
				"Failed to restart device (%d).\n", err);
		} else {
			dev_info(&ar->udev->dev,
				 "device restarted successfully.\n");
		}
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	if (!err) {
		ar->restart_counter++;
		atomic_set(&ar->pending_restarts, 0);

		ieee80211_restart_hw(ar->hw);
	} else {
		/*
		 * The reset was unsuccessful and the device seems to
		 * be dead. But there's still one option: a low-level
		 * usb subsystem reset...
		 */

		carl9170_usb_reset(ar);
	}
}

void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
{
	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	/*
	 * Sometimes, an error can trigger several different reset events.
	 * By ignoring these *surplus* reset events, the device won't be
	 * killed again, right after it has recovered.
	 */
	if (atomic_inc_return(&ar->pending_restarts) > 1) {
		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
		return;
	}

	ieee80211_stop_queues(ar->hw);

	dev_err(&ar->udev->dev, "restart device (%d)\n", r);

	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
	    !WARN_ON(r >= __CARL9170_RR_LAST))
		ar->last_reason = r;

	if (!ar->registered)
		return;

	if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
		ieee80211_queue_work(ar->hw, &ar->restart_work);
	else
		carl9170_usb_reset(ar);

	/*
	 * At this point, the device instance might have vanished or been
	 * disabled. So don't put any code here that accesses the ar9170
	 * struct without proper protection.
	 */
}

static void carl9170_ping_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
	int err;

	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	err = carl9170_echo_test(ar, 0xdeadbeef);
	if (err)
		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
	mutex_unlock(&ar->mutex);
}

static int carl9170_init_interface(struct ar9170 *ar,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = &ar->common;
	int err;

	if (!vif) {
		WARN_ON_ONCE(IS_STARTED(ar));
		return 0;
	}

	memcpy(common->macaddr, vif->addr, ETH_ALEN);

	if (modparam_nohwcrypt ||
	    ((vif->type != NL80211_IFTYPE_STATION) &&
	     (vif->type != NL80211_IFTYPE_AP))) {
		ar->rx_software_decryption = true;
		ar->disable_offload = true;
	}

	err = carl9170_set_operating_mode(ar);
	return err;
}

static int carl9170_op_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	int vif_id = -1, err = 0;

	mutex_lock(&ar->mutex);
	rcu_read_lock();
	if (vif_priv->active) {
		/*
		 * Skip the interface structure initialization,
		 * if the vif survived the _restart call.
		 */
		vif_id = vif_priv->id;
		vif_priv->enable_beacon = false;

		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(vif_priv->beacon);
		vif_priv->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);

		goto init;
	}

	main_vif = carl9170_get_main_vif(ar);

	if (main_vif) {
		switch (main_vif->type) {
		case NL80211_IFTYPE_STATION:
			if (vif->type == NL80211_IFTYPE_STATION)
				break;

			err = -EBUSY;
			rcu_read_unlock();
			goto unlock;

		case NL80211_IFTYPE_AP:
			if ((vif->type == NL80211_IFTYPE_STATION) ||
			    (vif->type == NL80211_IFTYPE_WDS) ||
			    (vif->type == NL80211_IFTYPE_AP))
				break;

			err = -EBUSY;
			rcu_read_unlock();
			goto unlock;

		default:
			rcu_read_unlock();
			goto unlock;
		}
	}

	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);

	if (vif_id < 0) {
		rcu_read_unlock();

		err = -ENOSPC;
		goto unlock;
	}

	BUG_ON(ar->vif_priv[vif_id].id != vif_id);

	vif_priv->active = true;
	vif_priv->id = vif_id;
	vif_priv->enable_beacon = false;
	ar->vifs++;
	list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);

init:
	if (carl9170_get_main_vif(ar) == vif) {
		rcu_assign_pointer(ar->beacon_iter, vif_priv);
		rcu_read_unlock();

		err = carl9170_init_interface(ar, vif);
		if (err)
			goto unlock;
	} else {
		rcu_read_unlock();
		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);

		if (err)
			goto unlock;
	}

	if (ar->fw.tx_seq_table) {
		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
					 0);
		if (err)
			goto unlock;
	}

unlock:
	if (err && (vif_id >= 0)) {
		vif_priv->active = false;
		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
		ar->vifs--;
		rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
		list_del_rcu(&vif_priv->list);
		mutex_unlock(&ar->mutex);
		synchronize_rcu();
	} else {
		if (ar->vifs > 1)
			ar->ps.off_override |= PS_OFF_VIF;

		mutex_unlock(&ar->mutex);
	}

	return err;
}

static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	unsigned int id;

	mutex_lock(&ar->mutex);

	if (WARN_ON_ONCE(!vif_priv->active))
		goto unlock;

	ar->vifs--;

	rcu_read_lock();
	main_vif = carl9170_get_main_vif(ar);

	id = vif_priv->id;

	vif_priv->active = false;
	WARN_ON(vif_priv->enable_beacon);
	vif_priv->enable_beacon = false;
	list_del_rcu(&vif_priv->list);
	rcu_assign_pointer(ar->vif_priv[id].vif, NULL);

	if (vif == main_vif) {
		rcu_read_unlock();

		if (ar->vifs) {
			WARN_ON(carl9170_init_interface(ar,
					carl9170_get_main_vif(ar)));
		} else {
			carl9170_set_operating_mode(ar);
		}
	} else {
		rcu_read_unlock();

		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
	}

	carl9170_update_beacon(ar, false);
	carl9170_flush_cab(ar, id);

	spin_lock_bh(&ar->beacon_lock);
	dev_kfree_skb_any(vif_priv->beacon);
	vif_priv->beacon = NULL;
	spin_unlock_bh(&ar->beacon_lock);

	bitmap_release_region(&ar->vif_bitmap, id, 0);

	carl9170_set_beacon_timers(ar);

	if (ar->vifs == 1)
		ar->ps.off_override &= ~PS_OFF_VIF;

unlock:
	mutex_unlock(&ar->mutex);

	synchronize_rcu();
}

void carl9170_ps_check(struct ar9170 *ar)
{
	ieee80211_queue_work(ar->hw, &ar->ps_work);
}

/* caller must hold ar->mutex */
static int carl9170_ps_update(struct ar9170 *ar)
{
	bool ps = false;
	int err = 0;

	if (!ar->ps.off_override)
		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);

	if (ps != ar->ps.state) {
		err = carl9170_powersave(ar, ps);
		if (err)
			return err;

		if (ar->ps.state && !ps) {
			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
				ar->ps.last_action);
		}

		if (ps)
			ar->ps.last_slept = jiffies;

		ar->ps.last_action = jiffies;
		ar->ps.state = ps;
	}

	return 0;
}

static void carl9170_ps_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ps_work);
	mutex_lock(&ar->mutex);
	if (IS_STARTED(ar))
		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
	mutex_unlock(&ar->mutex);
}

static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
{
	int err;

	if (noise) {
		err = carl9170_get_noisefloor(ar);
		if (err)
			return err;
	}

	if (ar->fw.hw_counters) {
		err = carl9170_collect_tally(ar);
		if (err)
			return err;
	}

	if (flush)
		memset(&ar->tally, 0, sizeof(ar->tally));

	return 0;
}

static void carl9170_stat_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_update_survey(ar, false, true);
	mutex_unlock(&ar->mutex);
	if (err)
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
}

static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);
	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		err = carl9170_ps_update(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		/* adjust slot time for 5 GHz */
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;

		err = carl9170_update_survey(ar, true, false);
		if (err)
			goto out;

		err = carl9170_set_channel(ar, hw->conf.channel,
			hw->conf.channel_type, CARL9170_RFI_NONE);
		if (err)
			goto out;

		err = carl9170_update_survey(ar, false, true);
		if (err)
			goto out;

		err = carl9170_set_dyn_sifs_ack(ar);
		if (err)
			goto out;

		err = carl9170_set_rts_cts_rate(ar);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;
}
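
/*
 * Build the 64-bit multicast hash for the hardware filter: each address
 * sets the bit indexed by the upper six bits of its last octet; bit 63
 * (the broadcast address) is always set.
 */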
static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
					 struct netdev_hw_addr_list *mc_list)
{
	struct netdev_hw_addr *ha;
	u64 mchash;

	/* always get broadcast frames */
	mchash = 1ULL << (0xff >> 2);

	netdev_hw_addr_list_for_each(ha, mc_list)
		mchash |= 1ULL << (ha->addr[5] >> 2);

	return mchash;
}

static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *new_flags,
					 u64 multicast)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;

	if (!IS_ACCEPTING_CMD(ar))
		return;

	mutex_lock(&ar->mutex);

	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (*new_flags & FIF_ALLMULTI)
		multicast = ~0ULL;

	if (multicast != ar->cur_mc_hash)
		WARN_ON(carl9170_update_multicast(ar, multicast));

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		ar->sniffer_enabled = !!(*new_flags &
			(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));

		WARN_ON(carl9170_set_operating_mode(ar));
	}

	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
		u32 rx_filter = 0;

		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
			rx_filter |= CARL9170_RX_FILTER_BAD;

		if (!(*new_flags & FIF_CONTROL))
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & FIF_PSPOLL))
			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;

		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
		}

		WARN_ON(carl9170_rx_filter(ar, rx_filter));
	}

	mutex_unlock(&ar->mutex);
}

static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
					 u32 changed)
{
	struct ar9170 *ar = hw->priv;
	struct ath_common *common = &ar->common;
	int err = 0;
	struct carl9170_vif_info *vif_priv;
	struct ieee80211_vif *main_vif;

	mutex_lock(&ar->mutex);
	vif_priv = (void *) vif->drv_priv;
	main_vif = carl9170_get_main_vif(ar);
	if (WARN_ON(!main_vif))
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		struct carl9170_vif_info *iter;
		int i = 0;

		vif_priv->enable_beacon = bss_conf->enable_beacon;
		rcu_read_lock();
		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
			if (iter->active && iter->enable_beacon)
				i++;
		}
		rcu_read_unlock();

		ar->beacon_enabled = i;
	}

	if (changed & BSS_CHANGED_BEACON) {
		err = carl9170_update_beacon(ar, false);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
		       BSS_CHANGED_BEACON_INT)) {
		if (main_vif != vif) {
			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
		}

		/*
		 * A hard limit on the combined beacon interval and DTIM
		 * period keeps buffered broadcast (CAB) traffic from
		 * tripping the queue-stuck watchdog with false alarms.
		 */
		if (vif->type != NL80211_IFTYPE_STATION &&
		    (bss_conf->beacon_int * bss_conf->dtim_period >=
		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
			err = -EINVAL;
			goto out;
		}

		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
		if (err)
			goto out;
	}

	if (main_vif != vif)
		goto out;

	/*
	 * The following settings can only be changed by the
	 * master interface.
	 */

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = carl9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
		ar->common.curaid = bss_conf->aid;
		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = carl9170_set_mac_rates(ar);
		if (err)
			goto out;
	}

out:
	WARN_ON_ONCE(err && IS_STARTED(ar));
	mutex_unlock(&ar->mutex);
}

static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_tsf_rsp tsf;
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
				0, NULL, sizeof(tsf), &tsf);
	mutex_unlock(&ar->mutex);
	if (WARN_ON(err))
		return 0;

	return le64_to_cpu(tsf.tsf_64);
}

static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if (ar->disable_offload || !vif)
		return -EOPNOTSUPP;

	/*
	 * We have to fall back to software encryption whenever
	 * the user chooses to participate in an IBSS or is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high throughput speeds of 802.11n networks.
	 */
	if (!is_main_vif(ar, vif)) {
		mutex_lock(&ar->mutex);
		goto err_softw;
	}

	/*
	 * While the hardware supports a *catch-all* key for offloading
	 * group-key en-/decryption, the way the hardware decides which
	 * keyId maps to which key remains a mystery...
	 */
	if ((vif->type != NL80211_IFTYPE_STATION &&
	     vif->type != NL80211_IFTYPE_ADHOC) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		ktype = AR9170_ENC_ALG_WEP64;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		ktype = AR9170_ENC_ALG_WEP128;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (!IS_STARTED(ar)) {
			err = -EOPNOTSUPP;
			goto out;
		}

		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
			sta = NULL;

			i = 64 + key->keyidx;
		} else {
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64)
				goto err_softw;
		}

		key->hw_key_idx = i;

		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
					  ktype, 0, key->key,
					  min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			err = carl9170_upload_key(ar, i, sta ? sta->addr :
						  NULL, ktype, 1,
						  key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * The hardware is not capable of generating the
			 * MMIC for fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (!IS_STARTED(ar)) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
						  AR9170_ENC_ALG_NONE, 0,
						  NULL, 0);
			if (err)
				goto out;

			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
				err = carl9170_upload_key(ar, key->hw_key_idx,
							  NULL,
							  AR9170_ENC_ALG_NONE,
							  1, NULL, 0);
				if (err)
					goto out;
			}
		}

		err = carl9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;

err_softw:
	if (!ar->rx_software_decryption) {
		ar->rx_software_decryption = true;
		carl9170_set_operating_mode(ar);
	}
	mutex_unlock(&ar->mutex);
	return -ENOSPC;
}

static int carl9170_op_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;

	atomic_set(&sta_info->pending_frames, 0);

	if (sta->ht_cap.ht_supported) {
		if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * The HW does not support a 16 us AMPDU density;
			 * disable HT transmission for this station.
			 */
			return 0;
		}

		for (i = 0; i < CARL9170_NUM_TID; i++)
			rcu_assign_pointer(sta_info->agg[i], NULL);

		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
		sta_info->ht_sta = true;
	}

	return 0;
}

static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;
	bool cleanup = false;

	if (sta->ht_cap.ht_supported) {
		sta_info->ht_sta = false;

		rcu_read_lock();
		for (i = 0; i < CARL9170_NUM_TID; i++) {
			struct carl9170_sta_tid *tid_info;

			tid_info = rcu_dereference(sta_info->agg[i]);
			rcu_assign_pointer(sta_info->agg[i], NULL);

			if (!tid_info)
				continue;

			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
			cleanup = true;
		}
		rcu_read_unlock();

		if (cleanup)
			carl9170_ampdu_gc(ar);
	}

	return 0;
}

static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, u16 queue,
			       const struct ieee80211_tx_queue_params *param)
{
	struct ar9170 *ar = hw->priv;
	int ret;

	mutex_lock(&ar->mutex);
	if (queue < ar->hw->queues) {
		memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
		ret = carl9170_set_qos(ar);
	} else {
		ret = -EINVAL;
	}

	mutex_unlock(&ar->mutex);
	return ret;
}

static void carl9170_ampdu_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ampdu_work);

	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	carl9170_ampdu_gc(ar);
	mutex_unlock(&ar->mutex);
}
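
/*
 * mac80211 TX aggregation hooks: one carl9170_sta_tid is allocated and
 * tracked per (station, TID) on TX_START, marked for shutdown on TX_STOP
 * and reaped later by the ampdu_work garbage collector. RX aggregation
 * is handled entirely by the hardware.
 */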
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta,
				    u16 tid, u16 *ssn, u8 buf_size)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct carl9170_sta_tid *tid_info;

	if (modparam_noht)
		return -EOPNOTSUPP;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		if (!sta_info->ht_sta)
			return -EOPNOTSUPP;

		rcu_read_lock();
		if (rcu_dereference(sta_info->agg[tid])) {
			rcu_read_unlock();
			return -EBUSY;
		}

		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
				   GFP_ATOMIC);
		if (!tid_info) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
		tid_info->state = CARL9170_TID_STATE_PROGRESS;
		tid_info->tid = tid;
		tid_info->max = sta_info->ampdu_max_len;

		INIT_LIST_HEAD(&tid_info->list);
		INIT_LIST_HEAD(&tid_info->tmp_list);
		skb_queue_head_init(&tid_info->queue);
		spin_lock_init(&tid_info->lock);

		spin_lock_bh(&ar->tx_ampdu_list_lock);
		ar->tx_ampdu_list_len++;
		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
		rcu_assign_pointer(sta_info->agg[tid], tid_info);
		spin_unlock_bh(&ar->tx_ampdu_list_lock);
		rcu_read_unlock();

		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_STOP:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);
		if (tid_info) {
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
		}

		rcu_assign_pointer(sta_info->agg[tid], NULL);
		rcu_read_unlock();

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);

		sta_info->stats[tid].clear = true;
		sta_info->stats[tid].req = false;

		if (tid_info) {
			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		rcu_read_unlock();

		if (WARN_ON_ONCE(!tid_info))
			return -EFAULT;

		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by hardware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

#ifdef CONFIG_CARL9170_WPC
static int carl9170_register_wps_button(struct ar9170 *ar)
{
	struct input_dev *input;
	int err;

	if (!(ar->features & CARL9170_WPS_BUTTON))
		return 0;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
		 wiphy_name(ar->hw->wiphy));

	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));

	input->name = ar->wps.name;
	input->phys = ar->wps.phys;
	input->id.bustype = BUS_USB;
	input->dev.parent = &ar->hw->wiphy->dev;

	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);

	err = input_register_device(input);
	if (err) {
		input_free_device(input);
		return err;
	}

	ar->wps.pbc = input;
	return 0;
}
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
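/*
 * Refill the RNG cache: read the AR9170_RAND_REG_NUM register over and
 * over again, RW words per CARL9170_CMD_RREG round-trip, until the
 * cache is full again.
 */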
static int carl9170_rng_get(struct ar9170 *ar)
{
#define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
#define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)

	static const __le32 rng_load[RW] = {
		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};

	u32 buf[RW];

	unsigned int i, off = 0, transfer, count;
	int err;

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);

	if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
		return -EAGAIN;

	count = ARRAY_SIZE(ar->rng.cache);
	while (count) {
		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) rng_load,
					RB, (u8 *) buf);
		if (err)
			return err;

		transfer = min_t(unsigned int, count, RW);
		for (i = 0; i < transfer; i++)
			ar->rng.cache[off + i] = buf[i];

		off += transfer;
		count -= transfer;
	}

	ar->rng.cache_idx = 0;

#undef RW
#undef RB
	return 0;
}

static int carl9170_rng_read(struct hwrng *rng, u32 *data)
{
	struct ar9170 *ar = (struct ar9170 *)rng->priv;
	int ret = -EIO;

	mutex_lock(&ar->mutex);
	if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
		ret = carl9170_rng_get(ar);
		if (ret) {
			mutex_unlock(&ar->mutex);
			return ret;
		}
	}

	*data = ar->rng.cache[ar->rng.cache_idx++];
	mutex_unlock(&ar->mutex);

	return sizeof(u16);
}

static void carl9170_unregister_hwrng(struct ar9170 *ar)
{
	if (ar->rng.initialized) {
		hwrng_unregister(&ar->rng.rng);
		ar->rng.initialized = false;
	}
}

static int carl9170_register_hwrng(struct ar9170 *ar)
{
	int err;

	snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
		 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
	ar->rng.rng.name = ar->rng.name;
	ar->rng.rng.data_read = carl9170_rng_read;
	ar->rng.rng.priv = (unsigned long)ar;

	if (WARN_ON(ar->rng.initialized))
		return -EALREADY;

	err = hwrng_register(&ar->rng.rng);
	if (err) {
		dev_err(&ar->udev->dev, "Failed to register the random "
			"number generator (%d)\n", err);
		return err;
	}

	ar->rng.initialized = true;

	err = carl9170_rng_get(ar);
	if (err) {
		carl9170_unregister_hwrng(ar);
		return err;
	}

	return 0;
}
#endif /* CONFIG_CARL9170_HWRNG */

static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
				  struct survey_info *survey)
{
	struct ar9170 *ar = hw->priv;
	struct ieee80211_channel *chan;
	struct ieee80211_supported_band *band;
	int err, b, i;

	chan = ar->channel;
	if (!chan)
		return -ENODEV;

	if (idx == chan->hw_value) {
		mutex_lock(&ar->mutex);
		err = carl9170_update_survey(ar, false, true);
		mutex_unlock(&ar->mutex);
		if (err)
			return err;
	}

	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
		band = ar->hw->wiphy->bands[b];

		if (!band)
			continue;

		for (i = 0; i < band->n_channels; i++) {
			if (band->channels[i].hw_value == idx) {
				chan = &band->channels[i];
				goto found;
			}
		}
	}

	return -ENOENT;

found:
	memcpy(survey, &ar->survey[idx], sizeof(*survey));

	survey->channel = chan;
	survey->filled = SURVEY_INFO_NOISE_DBM;

	if (ar->channel == chan)
		survey->filled |= SURVEY_INFO_IN_USE;

	if (ar->fw.hw_counters) {
		survey->filled |= SURVEY_INFO_CHANNEL_TIME |
				  SURVEY_INFO_CHANNEL_TIME_BUSY |
				  SURVEY_INFO_CHANNEL_TIME_TX;
	}

	return 0;
}

static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
{
	struct ar9170 *ar = hw->priv;
	unsigned int vid;

	mutex_lock(&ar->mutex);
	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
		carl9170_flush_cab(ar, vid);

	carl9170_flush(ar, drop);
	mutex_unlock(&ar->mutex);
}

static int carl9170_op_get_stats(struct ieee80211_hw *hw,
				 struct ieee80211_low_level_stats *stats)
{
	struct ar9170 *ar = hw->priv;

	memset(stats, 0, sizeof(*stats));
	stats->dot11ACKFailureCount = ar->tx_ack_failures;
	stats->dot11FCSErrorCount = ar->tx_fcs_errors;
	return 0;
}

static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		sta_info->sleeping = true;
		if (atomic_read(&sta_info->pending_frames))
			ieee80211_sta_block_awake(hw, sta, true);
		break;

	case STA_NOTIFY_AWAKE:
		sta_info->sleeping = false;
		break;
	}
}

static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	return !!atomic_read(&ar->tx_total_queued);
}

static const struct ieee80211_ops carl9170_ops = {
	.start = carl9170_op_start,
	.stop = carl9170_op_stop,
	.tx = carl9170_op_tx,
	.flush = carl9170_op_flush,
	.add_interface = carl9170_op_add_interface,
	.remove_interface = carl9170_op_remove_interface,
	.config = carl9170_op_config,
	.prepare_multicast = carl9170_op_prepare_multicast,
	.configure_filter = carl9170_op_configure_filter,
	.conf_tx = carl9170_op_conf_tx,
	.bss_info_changed = carl9170_op_bss_info_changed,
	.get_tsf = carl9170_op_get_tsf,
	.set_key = carl9170_op_set_key,
	.sta_add = carl9170_op_sta_add,
	.sta_remove = carl9170_op_sta_remove,
	.sta_notify = carl9170_op_sta_notify,
	.get_survey = carl9170_op_get_survey,
	.get_stats = carl9170_op_get_stats,
	.ampdu_action = carl9170_op_ampdu_action,
	.tx_frames_pending = carl9170_tx_frames_pending,
};

void *carl9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into separate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
	ar->rx_has_plcp = false;

	/*
	 * Here's a hidden pitfall!
	 *
	 * All 4 AC queues work perfectly well under _legacy_ operation.
	 * However, as soon as aggregation is enabled, the traffic flow
	 * gets very bumpy. Therefore we have to _switch_ to a
	 * software AC with a single HW queue.
	 */
	hw->queues = __AR9170_NUM_TXQ;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->beacon_lock);
	spin_lock_init(&ar->cmd_lock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	spin_lock_init(&ar->mem_lock);
	spin_lock_init(&ar->state_lock);
	atomic_set(&ar->pending_restarts, 0);
	ar->vifs = 0;
	for (i = 0; i < ar->hw->queues; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);
	}
	INIT_WORK(&ar->ps_work, carl9170_ps_work);
	INIT_WORK(&ar->ping_work, carl9170_ping_work);
	INIT_WORK(&ar->restart_work, carl9170_restart_work);
	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
	INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);
	rcu_assign_pointer(ar->tx_ampdu_iter,
			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);

	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
	INIT_LIST_HEAD(&ar->vif_list);
	init_completion(&ar->tx_flush);

	/* firmware decides which modes we support */
	hw->wiphy->interface_modes = 0;

	hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		     IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_PS_NULLFUNC_STACK |
		     IEEE80211_HW_NEED_DTIM_PERIOD |
		     IEEE80211_HW_SIGNAL_DBM;

	if (!modparam_noht) {
		/*
		 * See the comment above for why we allow the user
		 * to disable HT via a module parameter.
		 */
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
	}

	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
	hw->sta_data_size = sizeof(struct carl9170_sta_info);
	hw->vif_data_size = sizeof(struct carl9170_vif_info);

	hw->max_rates = CARL9170_TX_MAX_RATES;
	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	return ar;

err_nomem:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
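
/*
 * Download the EEPROM image into ar->eeprom, RW 32-bit words per
 * CARL9170_CMD_RREG command round-trip.
 */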
static int carl9170_read_eeprom(struct ar9170 *ar)
{
#define RW	8	/* number of words to read at once */
#define RB	(sizeof(u32) * RW)
	u8 *eeprom = (void *)&ar->eeprom;
	__le32 offsets[RW];
	int i, j, err;

	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
#ifndef __CHECKER__
	/* don't want to handle trailing remains */
	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
#endif

	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
		for (j = 0; j < RW; j++)
			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
						 RB * i + 4 * j);

		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) &offsets,
					RB, eeprom + RB * i);
		if (err)
			return err;
	}

#undef RW
#undef RB
	return 0;
}

static int carl9170_parse_eeprom(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	unsigned int rx_streams, tx_streams, tx_params = 0;
	int bands = 0;
	int chans = 0;

	if (ar->eeprom.length == cpu_to_le16(0xffff))
		return -ENODATA;

	rx_streams = hweight8(ar->eeprom.rx_mask);
	tx_streams = hweight8(ar->eeprom.tx_mask);

	if (rx_streams != tx_streams) {
		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;

		WARN_ON(!(tx_streams >= 1 && tx_streams <=
			  IEEE80211_HT_MCS_TX_MAX_STREAMS));

		tx_params |= (tx_streams - 1) <<
			     IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
	}

	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&carl9170_band_2GHz;
		chans += carl9170_band_2GHz.n_channels;
		bands++;
	}

	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&carl9170_band_5GHz;
		chans += carl9170_band_5GHz.n_channels;
		bands++;
	}

	if (!bands)
		return -EINVAL;

	ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
	if (!ar->survey)
		return -ENOMEM;
	ar->num_channels = chans;

	/*
	 * I measured this, a bandswitch takes roughly
	 * 135 ms and a frequency switch about 80.
	 *
	 * FIXME: measure these values again once EEPROM settings
	 *	  are used, that will influence them!
	 */
	if (bands == 2)
		ar->hw->channel_change_time = 135 * 1000;
	else
		ar->hw->channel_change_time = 80 * 1000;

	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);

	/* second part of wiphy init */
	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);

	return 0;
}

static int carl9170_reg_notifier(struct wiphy *wiphy,
				 struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ar9170 *ar = hw->priv;

	return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
}

int carl9170_register(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	int err = 0, i;

	if (WARN_ON(ar->mem_bitmap))
		return -EINVAL;

	ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
				 sizeof(unsigned long), GFP_KERNEL);

	if (!ar->mem_bitmap)
		return -ENOMEM;

	/* try to read EEPROM, init MAC addr */
	err = carl9170_read_eeprom(ar);
	if (err)
		return err;

	err = carl9170_fw_fix_eeprom(ar);
	if (err)
		return err;

	err = carl9170_parse_eeprom(ar);
	if (err)
		return err;

	err = ath_regd_init(regulatory, ar->hw->wiphy,
			    carl9170_reg_notifier);
	if (err)
		return err;

	if (modparam_noht) {
		carl9170_band_2GHz.ht_cap.ht_supported = false;
		carl9170_band_5GHz.ht_cap.ht_supported = false;
	}

	for (i = 0; i < ar->fw.vif_num; i++) {
		ar->vif_priv[i].id = i;
		ar->vif_priv[i].vif = NULL;
	}

	err = ieee80211_register_hw(ar->hw);
	if (err)
		return err;

	/* mac80211 interface is now registered */
	ar->registered = true;

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_register(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

	err = carl9170_led_init(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_CARL9170_LEDS
	err = carl9170_led_register(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_WPC
	err = carl9170_register_wps_button(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	err = carl9170_register_hwrng(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_HWRNG */

	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return 0;

err_unreg:
	carl9170_unregister(ar);
	return err;
}

void carl9170_unregister(struct ar9170 *ar)
{
	if (!ar->registered)
		return;

	ar->registered = false;

#ifdef CONFIG_CARL9170_LEDS
	carl9170_led_unregister(ar);
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

#ifdef CONFIG_CARL9170_WPC
	if (ar->wps.pbc) {
		input_unregister_device(ar->wps.pbc);
		ar->wps.pbc = NULL;
	}
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	carl9170_unregister_hwrng(ar);
#endif /* CONFIG_CARL9170_HWRNG */

	carl9170_cancel_worker(ar);
	cancel_work_sync(&ar->restart_work);

	ieee80211_unregister_hw(ar->hw);
}

void carl9170_free(struct ar9170 *ar)
{
	WARN_ON(ar->registered);
	WARN_ON(IS_INITIALIZED(ar));

	kfree_skb(ar->rx_failover);
	ar->rx_failover = NULL;

	kfree(ar->mem_bitmap);
	ar->mem_bitmap = NULL;

	kfree(ar->survey);
	ar->survey = NULL;

	mutex_destroy(&ar->mutex);

	ieee80211_free_hw(ar->hw);
}