init.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/slab.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
        .band = IEEE80211_BAND_2GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
        .band = IEEE80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}
/*
 * Some 2 GHz radios are actually tunable from 2312-2732 MHz in 5 MHz
 * steps; to keep this table static we only list the channels for which
 * we have calibration data on all cards.
 */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
        CHAN2G(2412, 0), /* Channel 1 */
        CHAN2G(2417, 1), /* Channel 2 */
        CHAN2G(2422, 2), /* Channel 3 */
        CHAN2G(2427, 3), /* Channel 4 */
        CHAN2G(2432, 4), /* Channel 5 */
        CHAN2G(2437, 5), /* Channel 6 */
        CHAN2G(2442, 6), /* Channel 7 */
        CHAN2G(2447, 7), /* Channel 8 */
        CHAN2G(2452, 8), /* Channel 9 */
        CHAN2G(2457, 9), /* Channel 10 */
        CHAN2G(2462, 10), /* Channel 11 */
        CHAN2G(2467, 11), /* Channel 12 */
        CHAN2G(2472, 12), /* Channel 13 */
        CHAN2G(2484, 13), /* Channel 14 */
};
/*
 * Some 5 GHz radios are actually tunable from XXXX-YYYY MHz in 5 MHz
 * steps; to keep this table static we only list the channels for which
 * we have calibration data on all cards.
 */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
        /* _We_ call this UNII 1 */
        CHAN5G(5180, 14), /* Channel 36 */
        CHAN5G(5200, 15), /* Channel 40 */
        CHAN5G(5220, 16), /* Channel 44 */
        CHAN5G(5240, 17), /* Channel 48 */
        /* _We_ call this UNII 2 */
        CHAN5G(5260, 18), /* Channel 52 */
        CHAN5G(5280, 19), /* Channel 56 */
        CHAN5G(5300, 20), /* Channel 60 */
        CHAN5G(5320, 21), /* Channel 64 */
        /* _We_ call this "Middle band" */
        CHAN5G(5500, 22), /* Channel 100 */
        CHAN5G(5520, 23), /* Channel 104 */
        CHAN5G(5540, 24), /* Channel 108 */
        CHAN5G(5560, 25), /* Channel 112 */
        CHAN5G(5580, 26), /* Channel 116 */
        CHAN5G(5600, 27), /* Channel 120 */
        CHAN5G(5620, 28), /* Channel 124 */
        CHAN5G(5640, 29), /* Channel 128 */
        CHAN5G(5660, 30), /* Channel 132 */
        CHAN5G(5680, 31), /* Channel 136 */
        CHAN5G(5700, 32), /* Channel 140 */
        /* _We_ call this UNII 3 */
        CHAN5G(5745, 33), /* Channel 149 */
        CHAN5G(5765, 34), /* Channel 153 */
        CHAN5G(5785, 35), /* Channel 157 */
        CHAN5G(5805, 36), /* Channel 161 */
        CHAN5G(5825, 37), /* Channel 165 */
};
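
/*
 * Note (editorial): per the comment above CHAN2G/CHAN5G, hw_value is used
 * as an index into the driver's private channel structure.  The 2 GHz
 * table occupies indices 0-13 and the 5 GHz table continues at 14-37, so
 * the two tables form one contiguous index space.
 */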
/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
        ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
        .bitrate = (_bitrate), \
        .flags = (_flags), \
        .hw_value = (_hw_rate), \
        .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}
static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(10, 0x1b, 0),
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(60, 0x0b, 0),
        RATE(90, 0x0f, 0),
        RATE(120, 0x0a, 0),
        RATE(180, 0x0e, 0),
        RATE(240, 0x09, 0),
        RATE(360, 0x0d, 0),
        RATE(480, 0x08, 0),
        RATE(540, 0x0c, 0),
};
static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock.  We do this to serialize register
 * accesses on Atheros 802.11n PCI devices only.  This is required as the
 * FIFO on these devices can only sanely accept two requests at a time.
 */
static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                iowrite32(val, sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        u32 val;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = ioread32(sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = ioread32(sc->mem + reg_offset);
        return val;
}
static const struct ath_ops ath9k_common_ops = {
        .read = ath9k_ioread32,
        .write = ath9k_iowrite32,
};
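
/*
 * count_streams() returns the number of set bits in the chainmask (each
 * set bit is one RX/TX chain), capped at 'max': the expression
 * chainmask & (chainmask - 1) clears the lowest set bit on every loop
 * iteration.  For example, count_streams(0x7, 3) returns 3, while
 * count_streams(0x7, 2) caps the result at 2.
 */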
static int count_streams(unsigned int chainmask, int max)
{
        int streams = 0;

        do {
                if (++streams == max)
                        break;
        } while ((chainmask = chainmask & (chainmask - 1)));

        return streams;
}
/**************************/
/*     Initialization     */
/**************************/
static void setup_ht_cap(struct ath_softc *sc,
                         struct ieee80211_sta_ht_cap *ht_info)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        u8 tx_streams, rx_streams;
        int i, max_streams;

        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                       IEEE80211_HT_CAP_SM_PS |
                       IEEE80211_HT_CAP_SGI_40 |
                       IEEE80211_HT_CAP_DSSSCCK40;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
                ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

        if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
                max_streams = 2;

        if (AR_SREV_9280_10_OR_LATER(ah)) {
                if (max_streams >= 2)
                        ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
                ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
        }

        /* set up supported mcs set */
        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
        tx_streams = count_streams(common->tx_chainmask, max_streams);
        rx_streams = count_streams(common->rx_chainmask, max_streams);

        ath_print(common, ATH_DBG_CONFIG,
                  "TX streams %d, RX streams: %d\n",
                  tx_streams, rx_streams);

        if (tx_streams != rx_streams) {
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
                                IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
        }

        for (i = 0; i < rx_streams; i++)
                ht_info->mcs.rx_mask[i] = 0xff;

        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

        return ath_reg_notifier_apply(wiphy, request, reg);
}
/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains; these hold the descriptors used by the system.
 */
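/*
 * Illustrative usage sketch (the buffer counts and rx.* field names below
 * are for illustration only; the actual call sites are the TX/RX setup
 * paths, e.g. ath_tx_init() and ath_rx_init()):
 *
 *      error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
 *                                "rx", ATH_RXBUF, 1, false);
 *
 * On success the ath_buf entries are linked onto 'head' and their
 * bf_desc/bf_daddr fields point into the coherent descriptor block.
 */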
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds)                                               \
        ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 *ds;
        struct ath_buf *bf;
        int i, bsize, error, desc_len;

        ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
                  name, nbuf, ndesc);

        INIT_LIST_HEAD(head);

        if (is_tx)
                desc_len = sc->sc_ah->caps.tx_desc_len;
        else
                desc_len = sizeof(struct ath_desc);

        /* ath_desc must be a multiple of DWORDs */
        if ((desc_len % 4) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "ath_desc not DWORD aligned\n");
                BUG_ON((desc_len % 4) != 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_desc_len = desc_len * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * desc_len;
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = (u8 *) dd->dd_desc;
        ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
                  name, ds, (u32) dd->dd_desc_len,
                  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kzalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        dd->dd_bufptr = bf;

        for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                BUG_ON((caddr_t) bf->bf_desc >=
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += (desc_len * ndesc);
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
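
/*
 * The matching teardown path is ath_descdma_cleanup() near the end of this
 * file: it frees the coherent descriptor block and the ath_buf array and
 * re-initializes the buffer list head.
 */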
static void ath9k_init_crypto(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        /* Get the hardware key cache size. */
        common->keymax = sc->sc_ah->caps.keycache_size;
        if (common->keymax > ATH_KEYMAX) {
                ath_print(common, ATH_DBG_ANY,
                          "Warning, using only %u entries in %u key cache\n",
                          ATH_KEYMAX, common->keymax);
                common->keymax = ATH_KEYMAX;
        }

        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < common->keymax; i++)
                ath9k_hw_keyreset(sc->sc_ah, (u16) i);

        if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)) {
                /*
                 * Whether we should enable h/w TKIP MIC.
                 * XXX: if we don't support WME TKIP MIC, then we wouldn't
                 * report WMM capable, so it's always safe to turn on
                 * TKIP MIC in this case.
                 */
                ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
        }

        /*
         * Check whether the separate key cache entries
         * are required to handle both tx+rx MIC keys.
         * With split mic keys the number of stations is limited
         * to 27 otherwise 59.
         */
        if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)
            && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
                                      ATH9K_CIPHER_MIC, NULL)
            && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
                                      0, NULL))
                common->splitmic = 1;

        /* turn on mcast key search if possible */
        if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
                (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
                                             1, 1, NULL);
}
static int ath9k_init_btcoex(struct ath_softc *sc)
{
        int r, qnum;

        switch (sc->sc_ah->btcoex_hw.scheme) {
        case ATH_BTCOEX_CFG_NONE:
                break;
        case ATH_BTCOEX_CFG_2WIRE:
                ath9k_hw_btcoex_init_2wire(sc->sc_ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
                ath9k_hw_btcoex_init_3wire(sc->sc_ah);
                r = ath_init_btcoex_timer(sc);
                if (r)
                        return -1;
                qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
                ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                break;
        default:
                WARN_ON(1);
                break;
        }

        return 0;
}
static int ath9k_init_queues(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
                sc->tx.hwq_map[i] = -1;

        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        if (sc->beacon.beaconq == -1) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup a beacon xmit queue\n");
                goto err;
        }

        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
        if (sc->beacon.cabq == NULL) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup CAB xmit queue\n");
                goto err;
        }

        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BK traffic\n");
                goto err;
        }

        if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BE traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VI traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VO traffic\n");
                goto err;
        }

        return 0;

err:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
        return -EIO;
}
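
/*
 * Note (editorial): ath9k_init_channels_rates() below points the 5 GHz
 * band at ath9k_legacy_rates + 4, i.e. it skips the first four entries of
 * the rate table (the 1, 2, 5.5 and 11 Mbps CCK rates), since only the
 * OFDM rates apply on 5 GHz.
 */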
static void ath9k_init_channels_rates(struct ath_softc *sc)
{
        if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
                sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
                sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
                sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
                sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
                sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }

        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
                sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
                sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
                sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
}
static void ath9k_init_misc(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->config.txpowlimit = ATH_TXPOWER_MAX;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                sc->sc_flags |= SC_OP_TXAGGR;
                sc->sc_flags |= SC_OP_RXAGGR;
        }

        common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
        common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

        ath9k_hw_set_diversity(sc->sc_ah, true);
        sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
                memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

        sc->beacon.slottime = ATH9K_SLOT_TIME_9;

        for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
                sc->beacon.bslot[i] = NULL;
                sc->beacon.bslot_aphy[i] = NULL;
        }
}
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
                            const struct ath_bus_ops *bus_ops)
{
        struct ath_hw *ah = NULL;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;

        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;

        ah->hw_version.devid = devid;
        ah->hw_version.subsysid = subsysid;
        sc->sc_ah = ah;

        common = ath9k_hw_common(ah);
        common->ops = &ath9k_common_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;

        spin_lock_init(&sc->wiphy_lock);
        spin_lock_init(&sc->sc_resetlock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
                     (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        ath_read_cachesize(common, &csz);
        common->cachelsz = csz << 2; /* convert to bytes */

        /* Initializes the hardware for all supported chipsets */
        ret = ath9k_hw_init(ah);
        if (ret)
                goto err_hw;

        ret = ath9k_init_debug(ah);
        if (ret) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to create debugfs files\n");
                goto err_debug;
        }

        ret = ath9k_init_queues(sc);
        if (ret)
                goto err_queues;

        ret = ath9k_init_btcoex(sc);
        if (ret)
                goto err_btcoex;

        ath9k_init_crypto(sc);
        ath9k_init_channels_rates(sc);
        ath9k_init_misc(sc);

        return 0;

err_btcoex:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
        ath9k_exit_debug(ah);
err_debug:
        ath9k_hw_deinit(ah);
err_hw:
        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(ah);
        sc->sc_ah = NULL;

        return ret;
}
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_PS_NULLFUNC_STACK |
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;

        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);

        hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
        hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);

        hw->rate_control_algorithm = "ath9k_rate_control";

        if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &sc->sbands[IEEE80211_BAND_2GHZ];
        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &sc->sbands[IEEE80211_BAND_5GHZ];

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
                if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
                        setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
        }

        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
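
/*
 * Note (editorial): ath9k_init_device() is the top-level bring-up routine;
 * it is expected to be invoked from the bus glue (e.g. the PCI/AHB probe
 * paths registered by ath9k_init() below) and unwinds in reverse order on
 * failure via the error_* labels.
 */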
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                      const struct ath_bus_ops *bus_ops)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;

        /* Bring up device */
        error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
        if (error != 0)
                goto error_init;

        ah = sc->sc_ah;
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);

        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
        if (error)
                goto error_regd;

        reg = &common->regulatory;

        /* Setup TX DMA */
        error = ath_tx_init(sc, ATH_TXBUF);
        if (error != 0)
                goto error_tx;

        /* Setup RX DMA */
        error = ath_rx_init(sc, ATH_RXBUF);
        if (error != 0)
                goto error_rx;

        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
                goto error_register;

        /* Handle world regulatory */
        if (!ath_is_world_regd(reg)) {
                error = regulatory_hint(hw->wiphy, reg->alpha2);
                if (error)
                        goto error_world;
        }

        INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
        INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
        sc->wiphy_scheduler_int = msecs_to_jiffies(500);

        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);

        return 0;

error_world:
        ieee80211_unregister_hw(hw);
error_register:
        ath_rx_cleanup(sc);
error_rx:
        ath_tx_cleanup(sc);
error_tx:
        /* Nothing */
error_regd:
        ath9k_deinit_softc(sc);
error_init:
        return error;
}
/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
        int i = 0;

        if ((sc->btcoex.no_stomp_timer) &&
            sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        ath9k_exit_debug(sc->sc_ah);
        ath9k_hw_deinit(sc->sc_ah);

        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
}
void ath9k_deinit_device(struct ath_softc *sc)
{
        struct ieee80211_hw *hw = sc->hw;
        int i = 0;

        ath9k_ps_wakeup(sc);

        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);

        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                sc->sec_wiphy[i] = NULL;
                ieee80211_unregister_hw(aphy->hw);
                ieee80211_free_hw(aphy->hw);
        }
        kfree(sc->sec_wiphy);

        ieee80211_unregister_hw(hw);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
}
void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}
/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
        int error;

        /* Register rate control algorithm */
        error = ath_rate_control_register();
        if (error != 0) {
                printk(KERN_ERR
                        "ath9k: Unable to register rate control "
                        "algorithm: %d\n",
                        error);
                goto err_out;
        }

        error = ath9k_debug_create_root();
        if (error) {
                printk(KERN_ERR
                        "ath9k: Unable to create debugfs root: %d\n",
                        error);
                goto err_rate_unregister;
        }

        error = ath_pci_init();
        if (error < 0) {
                printk(KERN_ERR
                        "ath9k: No PCI devices found, driver not installed.\n");
                error = -ENODEV;
                goto err_remove_root;
        }

        error = ath_ahb_init();
        if (error < 0) {
                error = -ENODEV;
                goto err_pci_exit;
        }

        return 0;

err_pci_exit:
        ath_pci_exit();
err_remove_root:
        ath9k_debug_remove_root();
err_rate_unregister:
        ath_rate_control_unregister();
err_out:
        return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
        ath_ahb_exit();
        ath_pci_exit();
        ath9k_debug_remove_root();
        ath_rate_control_unregister();
        printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);