init.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
        .band = IEEE80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

/*
 * Some 2 GHz radios are actually tunable from 2312-2732 MHz in 5 MHz steps;
 * we support only the channels for which we know we have calibration data
 * on all cards, which keeps this table static.
 */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
        CHAN2G(2412, 0), /* Channel 1 */
        CHAN2G(2417, 1), /* Channel 2 */
        CHAN2G(2422, 2), /* Channel 3 */
        CHAN2G(2427, 3), /* Channel 4 */
        CHAN2G(2432, 4), /* Channel 5 */
        CHAN2G(2437, 5), /* Channel 6 */
        CHAN2G(2442, 6), /* Channel 7 */
        CHAN2G(2447, 7), /* Channel 8 */
        CHAN2G(2452, 8), /* Channel 9 */
        CHAN2G(2457, 9), /* Channel 10 */
        CHAN2G(2462, 10), /* Channel 11 */
        CHAN2G(2467, 11), /* Channel 12 */
        CHAN2G(2472, 12), /* Channel 13 */
        CHAN2G(2484, 13), /* Channel 14 */
};

/*
 * Some 5 GHz radios are actually tunable from XXXX-YYYY MHz in 5 MHz steps;
 * we support only the channels for which we know we have calibration data
 * on all cards, which keeps this table static.
 */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
        /* _We_ call this UNII 1 */
        CHAN5G(5180, 14), /* Channel 36 */
        CHAN5G(5200, 15), /* Channel 40 */
        CHAN5G(5220, 16), /* Channel 44 */
        CHAN5G(5240, 17), /* Channel 48 */
        /* _We_ call this UNII 2 */
        CHAN5G(5260, 18), /* Channel 52 */
        CHAN5G(5280, 19), /* Channel 56 */
        CHAN5G(5300, 20), /* Channel 60 */
        CHAN5G(5320, 21), /* Channel 64 */
        /* _We_ call this "Middle band" */
        CHAN5G(5500, 22), /* Channel 100 */
        CHAN5G(5520, 23), /* Channel 104 */
        CHAN5G(5540, 24), /* Channel 108 */
        CHAN5G(5560, 25), /* Channel 112 */
        CHAN5G(5580, 26), /* Channel 116 */
        CHAN5G(5600, 27), /* Channel 120 */
        CHAN5G(5620, 28), /* Channel 124 */
        CHAN5G(5640, 29), /* Channel 128 */
        CHAN5G(5660, 30), /* Channel 132 */
        CHAN5G(5680, 31), /* Channel 136 */
        CHAN5G(5700, 32), /* Channel 140 */
        /* _We_ call this UNII 3 */
        CHAN5G(5745, 33), /* Channel 149 */
        CHAN5G(5765, 34), /* Channel 153 */
        CHAN5G(5785, 35), /* Channel 157 */
        CHAN5G(5805, 36), /* Channel 161 */
        CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
        ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
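
/*
 * e.g. the 2 Mbps CCK rate below (hw code 0x1a) gets hw_value_short
 * 0x1e (0x1a | 0x04) for short-preamble transmissions.
 */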

#define RATE(_bitrate, _hw_rate, _flags) { \
        .bitrate = (_bitrate), \
        .flags = (_flags), \
        .hw_value = (_hw_rate), \
        .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(10, 0x1b, 0),
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(60, 0x0b, 0),
        RATE(90, 0x0f, 0),
        RATE(120, 0x0a, 0),
        RATE(180, 0x0e, 0),
        RATE(240, 0x09, 0),
        RATE(360, 0x0d, 0),
        RATE(480, 0x08, 0),
        RATE(540, 0x0c, 0),
};
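
/*
 * The first four entries above are the 11b (CCK) rates;
 * ath9k_init_channels_rates() points the 5 GHz band at
 * ath9k_legacy_rates + 4 so that only the OFDM rates are advertised there.
 */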

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Read and write: both share the same lock. We do this to serialize reads
 * and writes on Atheros 802.11n PCI devices only. This is required because
 * the FIFO on these devices can sanely accept only two requests.
 */
static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                iowrite32(val, sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        u32 val;

        if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = ioread32(sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = ioread32(sc->mem + reg_offset);
        return val;
}

static const struct ath_ops ath9k_common_ops = {
        .read = ath9k_ioread32,
        .write = ath9k_iowrite32,
};
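
/*
 * ath9k_init_softc() installs this table as common->ops, so the hardware
 * layer's register reads and writes go through the (optionally serialized)
 * helpers above.
 */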

/**************************/
/*     Initialization     */
/**************************/

static void setup_ht_cap(struct ath_softc *sc,
                         struct ieee80211_sta_ht_cap *ht_info)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        u8 tx_streams, rx_streams;
        int i, max_streams;

        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                       IEEE80211_HT_CAP_SM_PS |
                       IEEE80211_HT_CAP_SGI_40 |
                       IEEE80211_HT_CAP_DSSSCCK40;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
                ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
                ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

        if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
                max_streams = 2;

        if (AR_SREV_9280_20_OR_LATER(ah)) {
                if (max_streams >= 2)
                        ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
                ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
        }

        /* set up supported mcs set */
        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
        tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
        rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

        ath_print(common, ATH_DBG_CONFIG,
                  "TX streams %d, RX streams: %d\n",
                  tx_streams, rx_streams);

        if (tx_streams != rx_streams) {
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
                                IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
        }
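
        /*
         * Each rx_mask byte covers eight MCS indices (0-7, 8-15, ...),
         * so enable one full byte per supported RX stream.
         */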
        for (i = 0; i < rx_streams; i++)
                ht_info->mcs.rx_mask[i] = 0xff;

        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

        return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds) \
        ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
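
        /*
         * DS2PHYS translates a descriptor's CPU address into its DMA address
         * via its offset from the start of the coherent block.
         * ATH_DESC_4KB_BOUND_CHECK flags a DMA address whose low 12 bits
         * exceed 0xF7F, i.e. one within 128 bytes (a 32 dword fetch) of the
         * next 4 KB boundary.
         */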

        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 *ds;
        struct ath_buf *bf;
        int i, bsize, error, desc_len;

        ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
                  name, nbuf, ndesc);

        INIT_LIST_HEAD(head);

        if (is_tx)
                desc_len = sc->sc_ah->caps.tx_desc_len;
        else
                desc_len = sizeof(struct ath_desc);

        /* ath_desc must be a multiple of DWORDs */
        if ((desc_len % 4) != 0) {
                ath_print(common, ATH_DBG_FATAL,
                          "ath_desc not DWORD aligned\n");
                BUG_ON((desc_len % 4) != 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_desc_len = desc_len * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
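        /*
         * Illustrative example (hypothetical sizes): a 16 KB request with
         * 64-byte descriptors spans four pages, so four extra descriptors
         * (256 bytes) are added; 256 / 4096 == 0, so the loop below stops
         * after a single pass.
         */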
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * desc_len;
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = (u8 *) dd->dd_desc;
        ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
                  name, ds, (u32) dd->dd_desc_len,
                  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kzalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        dd->dd_bufptr = bf;

        for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                BUG_ON((caddr_t) bf->bf_desc >=
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += (desc_len * ndesc);
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

static void ath9k_init_crypto(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        /* Get the hardware key cache size. */
        common->keymax = sc->sc_ah->caps.keycache_size;
        if (common->keymax > ATH_KEYMAX) {
                ath_print(common, ATH_DBG_ANY,
                          "Warning, using only %u entries in %u key cache\n",
                          ATH_KEYMAX, common->keymax);
                common->keymax = ATH_KEYMAX;
        }

        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < common->keymax; i++)
                ath_hw_keyreset(common, (u16) i);

        /*
         * Check whether separate key cache entries are required to
         * handle both tx and rx MIC keys. With split MIC keys the
         * number of stations is limited to 27; otherwise it is 59.
         */
        if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
                common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
}

static int ath9k_init_btcoex(struct ath_softc *sc)
{
        int r, qnum;

        switch (sc->sc_ah->btcoex_hw.scheme) {
        case ATH_BTCOEX_CFG_NONE:
                break;
        case ATH_BTCOEX_CFG_2WIRE:
                ath9k_hw_btcoex_init_2wire(sc->sc_ah);
                break;
        case ATH_BTCOEX_CFG_3WIRE:
                ath9k_hw_btcoex_init_3wire(sc->sc_ah);
                r = ath_init_btcoex_timer(sc);
                if (r)
                        return -1;
                qnum = sc->tx.hwq_map[WME_AC_BE];
                ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                break;
        default:
                WARN_ON(1);
                break;
        }

        return 0;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
                sc->tx.hwq_map[i] = -1;

        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        if (sc->beacon.beaconq == -1) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup a beacon xmit queue\n");
                goto err;
        }

        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
        if (sc->beacon.cabq == NULL) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup CAB xmit queue\n");
                goto err;
        }

        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        if (!ath_tx_setup(sc, WME_AC_BK)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BK traffic\n");
                goto err;
        }

        if (!ath_tx_setup(sc, WME_AC_BE)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for BE traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, WME_AC_VI)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VI traffic\n");
                goto err;
        }
        if (!ath_tx_setup(sc, WME_AC_VO)) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to setup xmit queue for VO traffic\n");
                goto err;
        }

        return 0;

err:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        return -EIO;
}

static int ath9k_init_channels_rates(struct ath_softc *sc)
{
        void *channels;

        BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
                     ARRAY_SIZE(ath9k_5ghz_chantable) !=
                     ATH9K_NUM_CHANNELS);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
                channels = kmemdup(ath9k_2ghz_chantable,
                                   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
                if (!channels)
                        return -ENOMEM;

                sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
                sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
                sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
                sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
                channels = kmemdup(ath9k_5ghz_chantable,
                                   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
                if (!channels) {
                        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
                        return -ENOMEM;
                }

                sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
                sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
                sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
        return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->config.txpowlimit = ATH_TXPOWER_MAX;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                sc->sc_flags |= SC_OP_TXAGGR;
                sc->sc_flags |= SC_OP_RXAGGR;
        }

        common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
        common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

        ath9k_hw_set_diversity(sc->sc_ah, true);
        sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

        memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

        sc->beacon.slottime = ATH9K_SLOT_TIME_9;

        for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
                sc->beacon.bslot[i] = NULL;
                sc->beacon.bslot_aphy[i] = NULL;
        }

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
                            const struct ath_bus_ops *bus_ops)
{
        struct ath_hw *ah = NULL;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;

        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;

        ah->hw_version.devid = devid;
        ah->hw_version.subsysid = subsysid;
        sc->sc_ah = ah;

        common = ath9k_hw_common(ah);
        common->ops = &ath9k_common_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;

        spin_lock_init(&common->cc_lock);

        spin_lock_init(&sc->wiphy_lock);
        spin_lock_init(&sc->sc_resetlock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
                     (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        ath_read_cachesize(common, &csz);
        common->cachelsz = csz << 2; /* convert to bytes */

        /* Initializes the hardware for all supported chipsets */
        ret = ath9k_hw_init(ah);
        if (ret)
                goto err_hw;

        ret = ath9k_init_debug(ah);
        if (ret) {
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to create debugfs files\n");
                goto err_debug;
        }

        ret = ath9k_init_queues(sc);
        if (ret)
                goto err_queues;

        ret = ath9k_init_btcoex(sc);
        if (ret)
                goto err_btcoex;

        ret = ath9k_init_channels_rates(sc);
        if (ret)
                goto err_btcoex;

        ath9k_init_crypto(sc);
        ath9k_init_misc(sc);

        return 0;

err_btcoex:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
        ath9k_exit_debug(ah);
err_debug:
        ath9k_hw_deinit(ah);
err_hw:
        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(ah);
        sc->sc_ah = NULL;

        return ret;
}

void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_PS_NULLFUNC_STACK |
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;

        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_P2P_GO) |
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_WDS) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);

        if (AR_SREV_5416(sc->sc_ah))
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
        hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
        hw->rate_control_algorithm = "ath9k_rate_control";
#endif

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &sc->sbands[IEEE80211_BAND_2GHZ];
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &sc->sbands[IEEE80211_BAND_5GHZ];

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                        setup_ht_cap(sc,
                                     &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
                if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                        setup_ht_cap(sc,
                                     &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
        }

        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
                      const struct ath_bus_ops *bus_ops)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;

        /* Bring up device */
        error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
        if (error != 0)
                goto error_init;

        ah = sc->sc_ah;
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);

        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
        if (error)
                goto error_regd;

        reg = &common->regulatory;

        /* Setup TX DMA */
        error = ath_tx_init(sc, ATH_TXBUF);
        if (error != 0)
                goto error_tx;

        /* Setup RX DMA */
        error = ath_rx_init(sc, ATH_RXBUF);
        if (error != 0)
                goto error_rx;

        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
                goto error_register;

        /* Handle world regulatory */
        if (!ath_is_world_regd(reg)) {
                error = regulatory_hint(hw->wiphy, reg->alpha2);
                if (error)
                        goto error_world;
        }

        INIT_WORK(&sc->hw_check_work, ath_hw_check);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
        INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
        INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
        sc->wiphy_scheduler_int = msecs_to_jiffies(500);

        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);

        pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);

        return 0;

error_world:
        ieee80211_unregister_hw(hw);
error_register:
        ath_rx_cleanup(sc);
error_rx:
        ath_tx_cleanup(sc);
error_tx:
        /* Nothing */
error_regd:
        ath9k_deinit_softc(sc);
error_init:
        return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
        int i = 0;

        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

        if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

        if ((sc->btcoex.no_stomp_timer) &&
            sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        ath9k_exit_debug(sc->sc_ah);
        ath9k_hw_deinit(sc->sc_ah);

        tasklet_kill(&sc->intr_tq);
        tasklet_kill(&sc->bcon_tasklet);

        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
        struct ieee80211_hw *hw = sc->hw;
        int i = 0;

        ath9k_ps_wakeup(sc);

        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);

        for (i = 0; i < sc->num_sec_wiphy; i++) {
                struct ath_wiphy *aphy = sc->sec_wiphy[i];
                if (aphy == NULL)
                        continue;
                sc->sec_wiphy[i] = NULL;
                ieee80211_unregister_hw(aphy->hw);
                ieee80211_free_hw(aphy->hw);
        }

        ieee80211_unregister_hw(hw);
        pm_qos_remove_request(&sc->pm_qos_req);

        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
        kfree(sc->sec_wiphy);
}

void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

static int __init ath9k_init(void)
{
        int error;

        /* Register rate control algorithm */
        error = ath_rate_control_register();
        if (error != 0) {
                printk(KERN_ERR
                        "ath9k: Unable to register rate control "
                        "algorithm: %d\n",
                        error);
                goto err_out;
        }

        error = ath9k_debug_create_root();
        if (error) {
                printk(KERN_ERR
                        "ath9k: Unable to create debugfs root: %d\n",
                        error);
                goto err_rate_unregister;
        }

        error = ath_pci_init();
        if (error < 0) {
                printk(KERN_ERR
                        "ath9k: No PCI devices found, driver not installed.\n");
                error = -ENODEV;
                goto err_remove_root;
        }

        error = ath_ahb_init();
        if (error < 0) {
                error = -ENODEV;
                goto err_pci_exit;
        }

        return 0;

err_pci_exit:
        ath_pci_exit();
err_remove_root:
        ath9k_debug_remove_root();
err_rate_unregister:
        ath_rate_control_unregister();
err_out:
        return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
        ath_ahb_exit();
        ath_pci_exit();
        ath9k_debug_remove_root();
        ath_rate_control_unregister();
        printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);