init.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/pm_qos_params.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
/*
 * Some 2 GHz radios are actually tunable from 2312-2732 MHz in 5 MHz
 * steps; we only support the channels we know we have calibration data
 * for on all cards, so this table can stay static.
 */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};
/*
 * Some 5 GHz radios are actually tunable from XXXX-YYYY MHz in 5 MHz
 * steps; we only support the channels we know we have calibration data
 * for on all cards, so this table can stay static.
 */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};
/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

static void ath9k_deinit_softc(struct ath_softc *sc);
/*
 * Reads and writes share the same lock. We do this to serialize register
 * accesses on Atheros 802.11n PCI devices only, as the FIFO on these
 * devices can only sanely accept two outstanding requests.
 */
static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};
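
/*
 * CPU DMA latency PM QoS request for the driver; added in
 * ath9k_init_device() and removed in ath9k_deinit_device().
 */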
struct pm_qos_request_list ath9k_pm_qos_req;

/**************************/
/*     Initialization     */
/**************************/
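
/*
 * Fill in the station HT capabilities advertised to mac80211 (channel
 * width, short GI, STBC/LDPC, A-MPDU parameters and the MCS set) based
 * on what this chip and its chainmasks support.
 */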
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

	ath_print(common, ATH_DBG_CONFIG,
		  "TX streams %d, RX streams: %d\n",
		  tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
					   IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
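
/*
 * cfg80211 regulatory notifier: hand the request to the shared ath
 * regulatory helper, ath_reg_notifier_apply().
 */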
static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}
/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains. These are used to hold the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		  name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		  name, ds, (u32) dd->dd_desc_len,
		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
static void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = sc->sc_ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
}
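
/*
 * Set up Bluetooth coexistence according to the scheme supported by the
 * hardware: none, 2-wire, or 3-wire (which also needs the btcoex timer
 * and the BE queue for stomping).
 */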
static int ath9k_init_btcoex(struct ath_softc *sc)
{
	int r, qnum;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		qnum = sc->tx.hwq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}
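
/*
 * Set up the beacon, CAB and the four WME (BK/BE/VI/VO) transmit queues;
 * on failure, tear down whatever queues were already created.
 */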
static int ath9k_init_queues(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	if (sc->beacon.beaconq == -1) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup a beacon xmit queue\n");
		goto err;
	}

	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup CAB xmit queue\n");
		goto err;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	if (!ath_tx_setup(sc, WME_AC_BK)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BK traffic\n");
		goto err;
	}

	if (!ath_tx_setup(sc, WME_AC_BE)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BE traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, WME_AC_VI)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VI traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, WME_AC_VO)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VO traffic\n");
		goto err;
	}

	return 0;

err:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
	return -EIO;
}
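
/*
 * Duplicate the static channel tables and attach the legacy bitrate table
 * for each band the hardware supports; the 5 GHz band skips the first
 * four (CCK-only) entries of ath9k_legacy_rates.
 */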
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = kmemdup(ath9k_2ghz_chantable,
				   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = kmemdup(ath9k_5ghz_chantable,
				   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels) {
			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
			return -ENOMEM;
		}

		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}
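
/*
 * Miscellaneous software defaults: ANI calibration timer, TX power limit,
 * aggregation flags, chainmasks, antenna diversity and beacon slots.
 */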
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}
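
/*
 * Allocate and initialize the hardware state (ath_hw), the locks and
 * tasklets, and the software subsystems: debugfs, TX queues, btcoex,
 * channel/rate tables, crypto and the misc defaults above.
 */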
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	ret = ath9k_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}
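
/*
 * Advertise driver and hardware capabilities to mac80211: feature flags,
 * supported interface modes, queue counts, and the per-band channel,
 * rate and HT information.
 */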
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
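
/*
 * Top-level device bring-up: initialize the softc, advertise capabilities,
 * set up regulatory, TX/RX DMA and mac80211 registration, then the LEDs,
 * rfkill polling and the PM QoS request.
 */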
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}
/*****************************/
/*     De-Initialization     */
/*****************************/
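
/*
 * Undo ath9k_init_softc(): free the channel tables, the btcoex timer,
 * the TX queues, debugfs and the hardware state.
 */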
static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_exit_debug(sc->sc_ah);
	ath9k_hw_deinit(sc->sc_ah);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}
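
/*
 * Undo ath9k_init_device(): unregister the secondary wiphys and the primary
 * hw from mac80211, remove the PM QoS request and tear down the RX/TX paths.
 */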
void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}
	ieee80211_unregister_hw(hw);
	pm_qos_remove_request(&ath9k_pm_qos_req);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
	kfree(sc->sec_wiphy);
}
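
/*
 * Counterpart to ath_descdma_setup(): release the coherent descriptor
 * memory and the buffer array, and reset the descdma state.
 */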
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}
/************************/
/*     Module Hooks     */
/************************/
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath9k_debug_create_root();
	if (error) {
		printk(KERN_ERR
			"ath9k: Unable to create debugfs root: %d\n",
			error);
		goto err_rate_unregister;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_remove_root;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

err_pci_exit:
	ath_pci_exit();
err_remove_root:
	ath9k_debug_remove_root();
err_rate_unregister:
	ath_rate_control_unregister();
err_out:
	return error;
}
module_init(ath9k_init);
static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);