init.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps; to keep this table static we only support
 * the channels we know we have calibration data for on all
 * cards. */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable from XXXX-YYYY
 * in 5 MHz steps; to keep this table static we only support
 * the channels we know we have calibration data for on all
 * cards. */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};
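
/*
 * Note (illustrative): bitrates above are in units of 100 kbps, so
 * RATE(55, ...) is the 5.5 Mbps CCK rate. For entries flagged
 * IEEE80211_RATE_SHORT_PREAMBLE, SHPCHECK() derives the short-preamble
 * hardware code by OR-ing in 0x04, e.g. 2 Mbps: 0x1a | 0x04 = 0x1e.
 */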

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Read and write: they both share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is required
 * as the FIFO on these devices can only sanely accept two requests.
 */
static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);

	return val;
}

static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};

/**************************/
/*     Initialization     */
/**************************/

static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_10_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

	ath_print(common, ATH_DBG_CONFIG,
		  "TX streams %d, RX streams: %d\n",
		  tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
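
	/*
	 * Each rx_mask byte advertises eight MCS indices, so setting one
	 * 0xff byte per receive chain below announces MCS 0-7 for one
	 * chain, MCS 0-15 for two chains, and MCS 0-23 for three.
	 */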
	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains. These are used to hold the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		  name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
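	/*
	 * Illustrative example: a 16 KB descriptor block spans four 4 KB
	 * pages, so four extra descriptors' worth of memory is added on the
	 * first pass; the loop below then re-applies the check to the amount
	 * just added until no further padding is needed (usually right away,
	 * since a descriptor is far smaller than 4 KB).
	 */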
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		  name, ds, (u32) dd->dd_desc_len,
		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
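
/*
 * Typical usage of ath_descdma_setup() above (illustrative; the variable
 * names here are placeholders): a queue setup path would call
 *	ath_descdma_setup(sc, &descdma, &buf_list, "tx", nbufs, ndescs, true);
 * and later release the memory with
 *	ath_descdma_cleanup(sc, &descdma, &buf_list);
 */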

static void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = sc->sc_ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath9k_hw_keyreset(sc->sc_ah, (u16) i);

	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		common->splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
					     1, 1, NULL);
}

static int ath9k_init_btcoex(struct ath_softc *sc)
{
	int r, qnum;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		qnum = sc->tx.hwq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	if (sc->beacon.beaconq == -1) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup a beacon xmit queue\n");
		goto err;
	}

	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup CAB xmit queue\n");
		goto err;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	if (!ath_tx_setup(sc, WME_AC_BK)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BK traffic\n");
		goto err;
	}

	if (!ath_tx_setup(sc, WME_AC_BE)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BE traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, WME_AC_VI)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VI traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, WME_AC_VO)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VO traffic\n");
		goto err;
	}

	return 0;

err:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	return -EIO;
}

static void ath9k_init_channels_rates(struct ath_softc *sc)
{
	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}
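
	/*
	 * The 5 GHz band has no CCK rates, so it starts four entries into
	 * ath9k_legacy_rates (skipping 1, 2, 5.5 and 11 Mbps); hence the
	 * "+ 4" / "- 4" adjustments below.
	 */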
	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
}

static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	ret = ath9k_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_channels_rates(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->rate_control_algorithm = "ath9k_rate_control";

	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_exit_debug(sc->sc_ah);
	ath9k_hw_deinit(sc->sc_ah);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}
	kfree(sc->sec_wiphy);

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath9k_debug_create_root();
	if (error) {
		printk(KERN_ERR
			"ath9k: Unable to create debugfs root: %d\n",
			error);
		goto err_rate_unregister;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_remove_root;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();

err_remove_root:
	ath9k_debug_remove_root();
err_rate_unregister:
	ath_rate_control_unregister();
err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);