/* rt2800mmio.c - rt2800 MMIO device library for the rt2x00 driver family. */
  1. /* Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
  2. * Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
  3. * Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
  4. * Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
  5. * Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
  6. * Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
  7. * Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
  8. * Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
  9. * <http://rt2x00.serialmonkey.com>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2 of the License, or
  14. * (at your option) any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; if not, write to the
  23. * Free Software Foundation, Inc.,
  24. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  25. */
  26. /* Module: rt2800mmio
  27. * Abstract: rt2800 MMIO device routines.
  28. */
  29. #include <linux/kernel.h>
  30. #include <linux/module.h>
  31. #include <linux/export.h>
  32. #include "rt2x00.h"
  33. #include "rt2x00mmio.h"
  34. #include "rt2800.h"
  35. #include "rt2800lib.h"
  36. #include "rt2800mmio.h"
  37. /*
  38. * TX descriptor initialization
  39. */
  40. __le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
  41. {
  42. return (__le32 *) entry->skb->data;
  43. }
  44. EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
/*
 * Fill the four-word TX DMA descriptor for @entry from @txdesc, and
 * record the descriptor location/size in the skb's frame descriptor.
 */
void rt2800mmio_write_tx_desc(struct queue_entry *entry,
			      struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *txd = entry_priv->desc;
	u32 word;
	const unsigned int txwi_size = entry->queue->winfo_size;

	/*
	 * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
	 * must contain a TXWI structure + 802.11 header + padding + 802.11
	 * data. We choose to have SD_PTR0/SD_LEN0 only contain the TXWI
	 * and SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
	 * data. It means that LAST_SEC0 is always 0.
	 */

	/*
	 * Initialize TX descriptor
	 */
	/* Word 0: DMA address of the TXWI (start of the skb data). */
	word = 0;
	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 0, word);

	/* Word 1: segment lengths and segment/DMA control flags. */
	word = 0;
	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
	/* Segment 1 is the last one unless more fragments follow. */
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_BURST,
			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
	/* DMA_DONE is cleared; hardware sets it when the transfer ends. */
	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
	rt2x00_desc_write(txd, 1, word);

	/* Word 2: DMA address of the frame (right behind the TXWI). */
	word = 0;
	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
			   skbdesc->skb_dma + txwi_size);
	rt2x00_desc_write(txd, 2, word);

	/*
	 * Word 3: WIV tells the hardware to insert the IV itself, so it
	 * is set only when the driver did NOT already provide the IV.
	 * QSEL 2 selects the EDCA queue set.
	 */
	word = 0;
	rt2x00_set_field32(&word, TXD_W3_WIV,
			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
	rt2x00_desc_write(txd, 3, word);

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->desc = txd;
	skbdesc->desc_len = TXD_DESC_SIZE;
}
EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
  92. /*
  93. * RX control handlers
  94. */
/*
 * Translate the hardware RX descriptor (word 3) of @entry into the
 * generic rxdone descriptor flags, then hand off to the common RXWI
 * parser for rate/RSSI/length information.
 */
void rt2800mmio_fill_rxdone(struct queue_entry *entry,
			    struct rxdone_entry_desc *rxdesc)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	__le32 *rxd = entry_priv->desc;
	u32 word;

	rt2x00_desc_read(rxd, 3, &word);

	if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

	/*
	 * Unfortunately we don't know the cipher type used during
	 * decryption. This prevents us from correctly providing
	 * statistics through debugfs.
	 */
	rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);

	if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
		/*
		 * Hardware has stripped IV/EIV data from 802.11 frame during
		 * decryption. Unfortunately the descriptor doesn't contain
		 * any fields with the EIV/IV data either, so they can't
		 * be restored by rt2x00lib.
		 */
		rxdesc->flags |= RX_FLAG_IV_STRIPPED;

		/*
		 * The hardware has already checked the Michael Mic and has
		 * stripped it from the frame. Signal this to mac80211.
		 */
		rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
	}

	if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	if (rt2x00_get_field32(word, RXD_W3_L2PAD))
		rxdesc->dev_flags |= RXDONE_L2PAD;

	/*
	 * Process the RXWI structure that is at the start of the buffer.
	 */
	rt2800_process_rxwi(entry, rxdesc);
}
EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
  138. /*
  139. * Interrupt functions.
  140. */
  141. static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
  142. {
  143. struct ieee80211_conf conf = { .flags = 0 };
  144. struct rt2x00lib_conf libconf = { .conf = &conf };
  145. rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
  146. }
  147. static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
  148. {
  149. __le32 *txwi;
  150. u32 word;
  151. int wcid, tx_wcid;
  152. wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
  153. txwi = rt2800_drv_get_txwi(entry);
  154. rt2x00_desc_read(txwi, 1, &word);
  155. tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
  156. return (tx_wcid == wcid);
  157. }
  158. static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
  159. {
  160. u32 status = *(u32 *)data;
  161. /*
  162. * rt2800pci hardware might reorder frames when exchanging traffic
  163. * with multiple BA enabled STAs.
  164. *
  165. * For example, a tx queue
  166. * [ STA1 | STA2 | STA1 | STA2 ]
  167. * can result in tx status reports
  168. * [ STA1 | STA1 | STA2 | STA2 ]
  169. * when the hw decides to aggregate the frames for STA1 into one AMPDU.
  170. *
  171. * To mitigate this effect, associate the tx status to the first frame
  172. * in the tx queue with a matching wcid.
  173. */
  174. if (rt2800mmio_txdone_entry_check(entry, status) &&
  175. !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
  176. /*
  177. * Got a matching frame, associate the tx status with
  178. * the frame
  179. */
  180. entry->status = status;
  181. set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
  182. return true;
  183. }
  184. /* Check the next frame */
  185. return false;
  186. }
  187. static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
  188. {
  189. u32 status = *(u32 *)data;
  190. /*
  191. * Find the first frame without tx status and assign this status to it
  192. * regardless if it matches or not.
  193. */
  194. if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
  195. /*
  196. * Got a matching frame, associate the tx status with
  197. * the frame
  198. */
  199. entry->status = status;
  200. set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
  201. return true;
  202. }
  203. /* Check the next frame */
  204. return false;
  205. }
  206. static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
  207. void *data)
  208. {
  209. if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
  210. rt2800_txdone_entry(entry, entry->status,
  211. rt2800mmio_get_txwi(entry));
  212. return false;
  213. }
  214. /* No more frames to release */
  215. return true;
  216. }
/*
 * Drain TX status reports from the kernel FIFO (filled by the interrupt
 * handler) and complete the corresponding queue entries.
 *
 * Returns true when the processing budget (16 reports) was exhausted
 * while reports may remain, so the caller can reschedule itself.
 */
static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	u32 status;
	u8 qid;
	int max_tx_done = 16;	/* budget: limit work done per invocation */

	while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
		qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
		if (unlikely(qid >= QID_RX)) {
			/*
			 * Unknown queue, this shouldn't happen. Just drop
			 * this tx status.
			 */
			rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
				    qid);
			break;
		}

		queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
		if (unlikely(queue == NULL)) {
			/*
			 * The queue is NULL, this shouldn't happen. Stop
			 * processing here and drop the tx status
			 */
			rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
				    qid);
			break;
		}

		if (unlikely(rt2x00queue_empty(queue))) {
			/*
			 * The queue is empty. Stop processing here
			 * and drop the tx status.
			 */
			rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
				    qid);
			break;
		}

		/*
		 * Let's associate this tx status with the first
		 * matching frame.
		 */
		if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
						Q_INDEX, &status,
						rt2800mmio_txdone_find_entry)) {
			/*
			 * We cannot match the tx status to any frame, so just
			 * use the first one.
			 */
			if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
							Q_INDEX, &status,
							rt2800mmio_txdone_match_first)) {
				rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
					    qid);
				break;
			}
		}

		/*
		 * Release all frames with a valid tx status.
		 */
		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
					   Q_INDEX, NULL,
					   rt2800mmio_txdone_release_entries);

		if (--max_tx_done == 0)
			break;
	}

	/* Non-zero budget left means the FIFO ran dry: no reschedule. */
	return !max_tx_done;
}
  283. static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
  284. struct rt2x00_field32 irq_field)
  285. {
  286. u32 reg;
  287. /*
  288. * Enable a single interrupt. The interrupt mask register
  289. * access needs locking.
  290. */
  291. spin_lock_irq(&rt2x00dev->irqmask_lock);
  292. rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
  293. rt2x00_set_field32(&reg, irq_field, 1);
  294. rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
  295. spin_unlock_irq(&rt2x00dev->irqmask_lock);
  296. }
  297. void rt2800mmio_txstatus_tasklet(unsigned long data)
  298. {
  299. struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
  300. if (rt2800mmio_txdone(rt2x00dev))
  301. tasklet_schedule(&rt2x00dev->txstatus_tasklet);
  302. /*
  303. * No need to enable the tx status interrupt here as we always
  304. * leave it enabled to minimize the possibility of a tx status
  305. * register overflow. See comment in interrupt handler.
  306. */
  307. }
  308. EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
  309. void rt2800mmio_pretbtt_tasklet(unsigned long data)
  310. {
  311. struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
  312. rt2x00lib_pretbtt(rt2x00dev);
  313. if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
  314. rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
  315. }
  316. EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
/*
 * Tasklet handler for the TBTT (beacon) timer: signal beacondone to
 * rt2x00lib, compensate the hardware beacon-timer drift in AP mode,
 * and re-arm the TBTT interrupt while the radio stays enabled.
 */
void rt2800mmio_tbtt_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
	struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
	u32 reg;

	rt2x00lib_beacondone(rt2x00dev);

	if (rt2x00dev->intf_ap_count) {
		/*
		 * The rt2800pci hardware tbtt timer is off by 1us per tbtt
		 * causing beacon skew and as a result causing problems with
		 * some powersaving clients over time. Shorten the beacon
		 * interval every 64 beacons by 64us to mitigate this effect.
		 */
		if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
			/* One tick before wrap: program a beacon interval
			 * shortened by one unit (interval is written in
			 * units of interval * 16 - presumably 1/16 TU;
			 * verify against BCN_TIME_CFG docs). */
			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16) - 1);
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		} else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
			/* At wrap: restore the nominal beacon interval. */
			rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
					   (rt2x00dev->beacon_int * 16));
			rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		}
		/* Advance the per-beacon tick, wrapping at BCN_TBTT_OFFSET. */
		drv_data->tbtt_tick++;
		drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
	}

	if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
  348. void rt2800mmio_rxdone_tasklet(unsigned long data)
  349. {
  350. struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
  351. if (rt2x00mmio_rxdone(rt2x00dev))
  352. tasklet_schedule(&rt2x00dev->rxdone_tasklet);
  353. else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
  354. rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
  355. }
  356. EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
  357. void rt2800mmio_autowake_tasklet(unsigned long data)
  358. {
  359. struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
  360. rt2800mmio_wakeup(rt2x00dev);
  361. if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
  362. rt2800mmio_enable_interrupt(rt2x00dev,
  363. INT_MASK_CSR_AUTO_WAKEUP);
  364. }
  365. EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
/*
 * Drain TX_STA_FIFO into the kernel FIFO from hard-irq context, then
 * schedule the txstatus tasklet to process the reports.
 */
static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
{
	u32 status;
	int i;

	/*
	 * The TX_FIFO_STATUS interrupt needs special care. We should
	 * read TX_STA_FIFO but we should do it immediately as otherwise
	 * the register can overflow and we would lose status reports.
	 *
	 * Hence, read the TX_STA_FIFO register and copy all tx status
	 * reports into a kernel FIFO which is handled in the txstatus
	 * tasklet. We use a tasklet to process the tx status reports
	 * because we can schedule the tasklet multiple times (when the
	 * interrupt fires again during tx status processing).
	 *
	 * Furthermore we don't disable the TX_FIFO_STATUS
	 * interrupt here but leave it enabled so that the TX_STA_FIFO
	 * can also be read while the tx status tasklet gets executed.
	 *
	 * Since we have only one producer and one consumer we don't
	 * need to lock the kfifo.
	 */
	/* Bound the drain loop by the TX queue depth to guarantee the
	 * interrupt handler terminates even if VALID never clears. */
	for (i = 0; i < rt2x00dev->tx->limit; i++) {
		rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);

		/* VALID cleared means the hardware FIFO is empty. */
		if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
			break;

		if (!kfifo_put(&rt2x00dev->txstatus_fifo, status)) {
			rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
			break;
		}
	}

	/* Schedule the tasklet for processing the tx status. */
	tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
/*
 * Top-half interrupt handler: acknowledge all pending sources, hand
 * the urgent TX status work off immediately, schedule tasklets for the
 * rest, and mask the sources those tasklets will handle.
 */
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
{
	struct rt2x00_dev *rt2x00dev = dev_instance;
	u32 reg, mask;

	/* Read status and ACK all interrupts (write-1-to-clear). */
	rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
	rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);

	/* No source bits set: the interrupt was not ours. */
	if (!reg)
		return IRQ_NONE;

	/* Radio already down: interrupt acknowledged, nothing to do. */
	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return IRQ_HANDLED;

	/*
	 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
	 * for interrupts and interrupt masks we can just use the value of
	 * INT_SOURCE_CSR to create the interrupt mask.
	 */
	mask = ~reg;

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
		/* Drain TX_STA_FIFO right now to avoid losing reports. */
		rt2800mmio_txstatus_interrupt(rt2x00dev);
		/*
		 * Never disable the TX_FIFO_STATUS interrupt.
		 */
		rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
	}

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
		tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
		tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);

	if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
		tasklet_schedule(&rt2x00dev->autowake_tasklet);

	/*
	 * Disable all interrupts for which a tasklet was scheduled right now,
	 * the tasklet will reenable the appropriate interrupts.
	 */
	spin_lock(&rt2x00dev->irqmask_lock);
	rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
	reg &= mask;
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock(&rt2x00dev->irqmask_lock);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
/*
 * Enable or disable all device interrupts according to @state
 * (STATE_RADIO_IRQ_ON / STATE_RADIO_IRQ_OFF). On disable, also wait
 * for all tasklets to finish so none can run after the mask is cleared.
 */
void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
			   enum dev_state state)
{
	u32 reg;
	unsigned long flags;

	/*
	 * When interrupts are being enabled, the interrupt registers
	 * should clear the register to assure a clean state.
	 */
	if (state == STATE_RADIO_IRQ_ON) {
		/* Ack any stale pending sources (write-1-to-clear). */
		rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
		rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
	}

	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
	/* Start from an all-masked value; for IRQ_OFF it stays zero. */
	reg = 0;
	if (state == STATE_RADIO_IRQ_ON) {
		rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
		rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
	}
	rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

	if (state == STATE_RADIO_IRQ_OFF) {
		/*
		 * Wait for possibly running tasklets to finish.
		 */
		tasklet_kill(&rt2x00dev->txstatus_tasklet);
		tasklet_kill(&rt2x00dev->rxdone_tasklet);
		tasklet_kill(&rt2x00dev->autowake_tasklet);
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
  480. /*
  481. * Queue handlers.
  482. */
/*
 * Start a data queue: enable the RX path for QID_RX, or enable TSF
 * ticking, TBTT, beacon generation and the pre-TBTT timer for
 * QID_BEACON. TX queues need no explicit start and are ignored.
 */
void rt2800mmio_start_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		/* Arm the pre-TBTT timer used for beacon preparation. */
		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
  508. void rt2800mmio_kick_queue(struct data_queue *queue)
  509. {
  510. struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
  511. struct queue_entry *entry;
  512. switch (queue->qid) {
  513. case QID_AC_VO:
  514. case QID_AC_VI:
  515. case QID_AC_BE:
  516. case QID_AC_BK:
  517. entry = rt2x00queue_get_entry(queue, Q_INDEX);
  518. rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
  519. entry->entry_idx);
  520. break;
  521. case QID_MGMT:
  522. entry = rt2x00queue_get_entry(queue, Q_INDEX);
  523. rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
  524. entry->entry_idx);
  525. break;
  526. default:
  527. break;
  528. }
  529. }
  530. EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
/*
 * Stop a data queue: disable the RX path for QID_RX, or disable beacon
 * generation and its timers for QID_BEACON (then wait for the beacon
 * tasklets). TX queues are ignored.
 */
void rt2800mmio_stop_queue(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	u32 reg;

	switch (queue->qid) {
	case QID_RX:
		rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
		rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
		rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
		break;
	case QID_BEACON:
		rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
		rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);

		rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
		rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
		rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);

		/*
		 * Wait for current invocation to finish. The tasklet
		 * won't be scheduled anymore afterwards since we disabled
		 * the TBTT and PRE TBTT timer.
		 */
		tasklet_kill(&rt2x00dev->tbtt_tasklet);
		tasklet_kill(&rt2x00dev->pretbtt_tasklet);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
  563. void rt2800mmio_queue_init(struct data_queue *queue)
  564. {
  565. struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
  566. unsigned short txwi_size, rxwi_size;
  567. rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
  568. switch (queue->qid) {
  569. case QID_RX:
  570. queue->limit = 128;
  571. queue->data_size = AGGREGATION_SIZE;
  572. queue->desc_size = RXD_DESC_SIZE;
  573. queue->winfo_size = rxwi_size;
  574. queue->priv_size = sizeof(struct queue_entry_priv_mmio);
  575. break;
  576. case QID_AC_VO:
  577. case QID_AC_VI:
  578. case QID_AC_BE:
  579. case QID_AC_BK:
  580. queue->limit = 64;
  581. queue->data_size = AGGREGATION_SIZE;
  582. queue->desc_size = TXD_DESC_SIZE;
  583. queue->winfo_size = txwi_size;
  584. queue->priv_size = sizeof(struct queue_entry_priv_mmio);
  585. break;
  586. case QID_BEACON:
  587. queue->limit = 8;
  588. queue->data_size = 0; /* No DMA required for beacons */
  589. queue->desc_size = TXD_DESC_SIZE;
  590. queue->winfo_size = txwi_size;
  591. queue->priv_size = sizeof(struct queue_entry_priv_mmio);
  592. break;
  593. case QID_ATIM:
  594. /* fallthrough */
  595. default:
  596. BUG();
  597. break;
  598. }
  599. }
  600. EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
  601. /*
  602. * Initialization functions.
  603. */
  604. bool rt2800mmio_get_entry_state(struct queue_entry *entry)
  605. {
  606. struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
  607. u32 word;
  608. if (entry->queue->qid == QID_RX) {
  609. rt2x00_desc_read(entry_priv->desc, 1, &word);
  610. return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
  611. } else {
  612. rt2x00_desc_read(entry_priv->desc, 1, &word);
  613. return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
  614. }
  615. }
  616. EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
/*
 * Return @entry to the hardware for reuse: re-arm an RX descriptor
 * (refresh buffer address, clear DMA_DONE, advance RX_CRX_IDX) or
 * mark a TX descriptor as done so it is skipped by the DMA engine.
 */
void rt2800mmio_clear_entry(struct queue_entry *entry)
{
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	u32 word;

	if (entry->queue->qid == QID_RX) {
		/* Point the descriptor at the (possibly new) skb buffer. */
		rt2x00_desc_read(entry_priv->desc, 0, &word);
		rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
		rt2x00_desc_write(entry_priv->desc, 0, word);

		/* Hand ownership back to the hardware. */
		rt2x00_desc_read(entry_priv->desc, 1, &word);
		rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
		rt2x00_desc_write(entry_priv->desc, 1, word);

		/*
		 * Set RX IDX in register to inform hardware that we have
		 * handled this entry and it is available for reuse again.
		 */
		rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
					  entry->entry_idx);
	} else {
		/* TX: flag the descriptor as done so DMA won't resend it. */
		rt2x00_desc_read(entry_priv->desc, 1, &word);
		rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
		rt2x00_desc_write(entry_priv->desc, 1, word);
	}
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
  643. int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
  644. {
  645. struct queue_entry_priv_mmio *entry_priv;
  646. /*
  647. * Initialize registers.
  648. */
  649. entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
  650. rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
  651. entry_priv->desc_dma);
  652. rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
  653. rt2x00dev->tx[0].limit);
  654. rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
  655. rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
  656. entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
  657. rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
  658. entry_priv->desc_dma);
  659. rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
  660. rt2x00dev->tx[1].limit);
  661. rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
  662. rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
  663. entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
  664. rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
  665. entry_priv->desc_dma);
  666. rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
  667. rt2x00dev->tx[2].limit);
  668. rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
  669. rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
  670. entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
  671. rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
  672. entry_priv->desc_dma);
  673. rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
  674. rt2x00dev->tx[3].limit);
  675. rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
  676. rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
  677. rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
  678. rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
  679. rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
  680. rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
  681. rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
  682. rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
  683. rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
  684. rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
  685. entry_priv = rt2x00dev->rx->entries[0].priv_data;
  686. rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
  687. entry_priv->desc_dma);
  688. rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
  689. rt2x00dev->rx[0].limit);
  690. rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
  691. rt2x00dev->rx[0].limit - 1);
  692. rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
  693. rt2800_disable_wpdma(rt2x00dev);
  694. rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
  695. return 0;
  696. }
  697. EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
/*
 * Bring the MMIO register state into a known condition: reset all DMA
 * indexes, reset the packet buffer, apply PCIe clock workarounds for
 * affected chipsets, and reset MAC/BBP. Always returns 0.
 */
int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/*
	 * Reset DMA indexes
	 */
	rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	/* Reset the packet buffer; magic values taken from the vendor
	 * driver - presumably toggle the PBF reset bits. */
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	/* PCIe-specific clock/wake workaround for these chipsets. */
	if (rt2x00_is_pcie(rt2x00dev) &&
	    (rt2x00_rt(rt2x00dev, RT3090) ||
	     rt2x00_rt(rt2x00dev, RT3390) ||
	     rt2x00_rt(rt2x00dev, RT3572) ||
	     rt2x00_rt(rt2x00dev, RT3593) ||
	     rt2x00_rt(rt2x00dev, RT5390) ||
	     rt2x00_rt(rt2x00dev, RT5392) ||
	     rt2x00_rt(rt2x00dev, RT5592))) {
		rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
		rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
		rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
		rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
	}

	rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/* Pulse MAC/BBP reset, then release it. */
	reg = 0;
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
	rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
  737. /*
  738. * Device state switch handlers.
  739. */
  740. int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
  741. {
  742. /* Wait for DMA, ignore error until we initialize queues. */
  743. rt2800_wait_wpdma_ready(rt2x00dev);
  744. if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
  745. return -EIO;
  746. return rt2800_enable_radio(rt2x00dev);
  747. }
  748. EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
  749. MODULE_AUTHOR(DRV_PROJECT);
  750. MODULE_VERSION(DRV_VERSION);
  751. MODULE_DESCRIPTION("rt2800 MMIO library");
  752. MODULE_LICENSE("GPL");