init.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
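
/*
 * Note: CHAN2G() leaves .band at its zero default, which is
 * IEEE80211_BAND_2GHZ, so only the 5 GHz macro needs to set the
 * band explicitly.
 */
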
/*
 * Some 2 GHz radios are actually tunable from 2312-2732 MHz in 5 MHz
 * steps; to keep this table static we only list the channels we know
 * we have calibration data for on all cards.
 */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/*
 * Some 5 GHz radios are actually tunable from XXXX-YYYY MHz in 5 MHz
 * steps; to keep this table static we only list the channels we know
 * we have calibration data for on all cards.
 */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}
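
/*
 * For the CCK rates that support it, the short-preamble variant of a
 * rate is selected in hardware by OR-ing 0x04 into the rate code; that
 * is what SHPCHECK() stores in hw_value_short. Rates without the
 * short-preamble flag get 0 there.
 */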
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Read and write, they both share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is required
 * as the FIFO on these devices can only sanely accept 2 requests.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};
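
/*
 * Count the number of active chains in a chainmask, capped at max.
 * "chainmask & (chainmask - 1)" clears the lowest set bit on each
 * iteration, so the loop effectively counts set bits (one per chain).
 */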
static int count_streams(unsigned int chainmask, int max)
{
	int streams = 0;

	do {
		if (++streams == max)
			break;
	} while ((chainmask = chainmask & (chainmask - 1)));

	return streams;
}

/**************************/
/*     Initialization     */
/**************************/
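
/*
 * Advertise the HT capabilities of the hardware to mac80211. Each 0xff
 * byte written into mcs.rx_mask below enables MCS 0-7 for one receive
 * chain, so rx_streams bytes cover MCS 0 through (8 * rx_streams - 1).
 */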
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_10_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = count_streams(common->tx_chainmask, max_streams);
	rx_streams = count_streams(common->rx_chainmask, max_streams);

	ath_print(common, ATH_DBG_CONFIG,
		  "TX streams %d, RX streams: %d\n",
		  tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
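
/*
 * DS2PHYS() maps a descriptor's virtual address to its DMA (bus) address
 * by adding its byte offset within the coherent block to dd_desc_paddr.
 * ATH_DESC_4KB_BOUND_CHECK() flags addresses whose low 12 bits exceed
 * 0xF7F, i.e. descriptors that would cross a 4 KB boundary during a
 * 32-dword (128-byte) descriptor fetch.
 */
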
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		  name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		  name, ds, (u32) dd->dd_desc_len,
		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

static void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = sc->sc_ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath9k_hw_keyreset(sc->sc_ah, (u16) i);

	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		common->splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
					     1, 1, NULL);
}

static int ath9k_init_btcoex(struct ath_softc *sc)
{
	int r, qnum;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
		ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	if (sc->beacon.beaconq == -1) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup a beacon xmit queue\n");
		goto err;
	}

	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup CAB xmit queue\n");
		goto err;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BK traffic\n");
		goto err;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BE traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VI traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VO traffic\n");
		goto err;
	}

	return 0;

err:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	return -EIO;
}
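
/*
 * The first four entries of ath9k_legacy_rates are the 11b (CCK) rates,
 * which are not valid on 5 GHz; that is why the 5 GHz band below points
 * at ath9k_legacy_rates + 4 and drops 4 from the rate count.
 */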
static void ath9k_init_channels_rates(struct ath_softc *sc)
{
	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
}

static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */
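	/*
	 * ath_read_cachesize() is expected to report the size in 32-bit
	 * words (on PCI this should come from the PCI_CACHE_LINE_SIZE
	 * config register), hence the shift by 2 above to get bytes.
	 */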

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	ret = ath9k_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_channels_rates(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->rate_control_algorithm = "ath9k_rate_control";

	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
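
/*
 * Device bring-up: initialize the soft state and hardware, publish the
 * capabilities to mac80211, set up regulatory, TX/RX DMA, and finally
 * register the device. The error labels below unwind these steps in
 * reverse order.
 */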
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_exit_debug(sc->sc_ah);
	ath9k_hw_deinit(sc->sc_ah);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}
	kfree(sc->sec_wiphy);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/
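
/*
 * Module init: the rate control algorithm and the debugfs root are
 * registered before the PCI and AHB drivers, since bus probing can
 * create devices (and their debugfs entries) as soon as the bus driver
 * registers. The error paths below tear things down in reverse order.
 */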
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath9k_debug_create_root();
	if (error) {
		printk(KERN_ERR
			"ath9k: Unable to create debugfs root: %d\n",
			error);
		goto err_rate_unregister;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_remove_root;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();
err_remove_root:
	ath9k_debug_remove_root();
err_rate_unregister:
	ath_rate_control_unregister();
err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);