/* rt2x00pci.c */
/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*
	Module: rt2x00pci
	Abstract: rt2x00 generic pci device routines.
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rt2x00.h"
#include "rt2x00pci.h"
  27. /*
  28. * TX data handlers.
  29. */
  30. int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
  31. struct data_queue *queue, struct sk_buff *skb)
  32. {
  33. struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
  34. struct queue_entry_priv_pci *entry_priv = entry->priv_data;
  35. struct skb_frame_desc *skbdesc;
  36. struct txentry_desc txdesc;
  37. u32 word;
  38. if (rt2x00queue_full(queue))
  39. return -EINVAL;
  40. rt2x00_desc_read(entry_priv->desc, 0, &word);
  41. if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
  42. rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
  43. ERROR(rt2x00dev,
  44. "Arrived at non-free entry in the non-full queue %d.\n"
  45. "Please file bug report to %s.\n",
  46. entry->queue->qid, DRV_PROJECT);
  47. return -EINVAL;
  48. }
  49. /*
  50. * Copy all TX descriptor information into txdesc,
  51. * after that we are free to use the skb->cb array
  52. * for our information.
  53. */
  54. entry->skb = skb;
  55. rt2x00queue_create_tx_descriptor(entry, &txdesc);
  56. /*
  57. * Fill in skb descriptor
  58. */
  59. skbdesc = get_skb_frame_desc(skb);
  60. memset(skbdesc, 0, sizeof(*skbdesc));
  61. skbdesc->data = skb->data;
  62. skbdesc->data_len = skb->len;
  63. skbdesc->desc = entry_priv->desc;
  64. skbdesc->desc_len = queue->desc_size;
  65. skbdesc->entry = entry;
  66. memcpy(entry_priv->data, skb->data, skb->len);
  67. rt2x00queue_write_tx_descriptor(entry, &txdesc);
  68. rt2x00queue_index_inc(queue, Q_INDEX);
  69. return 0;
  70. }
  71. EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
  72. /*
  73. * TX/RX data handlers.
  74. */
  75. void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
  76. {
  77. struct data_queue *queue = rt2x00dev->rx;
  78. struct queue_entry *entry;
  79. struct queue_entry_priv_pci *entry_priv;
  80. struct ieee80211_hdr *hdr;
  81. struct skb_frame_desc *skbdesc;
  82. struct rxdone_entry_desc rxdesc;
  83. int header_size;
  84. int align;
  85. u32 word;
  86. while (1) {
  87. entry = rt2x00queue_get_entry(queue, Q_INDEX);
  88. entry_priv = entry->priv_data;
  89. rt2x00_desc_read(entry_priv->desc, 0, &word);
  90. if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
  91. break;
  92. memset(&rxdesc, 0, sizeof(rxdesc));
  93. rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
  94. hdr = (struct ieee80211_hdr *)entry_priv->data;
  95. header_size =
  96. ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
  97. /*
  98. * The data behind the ieee80211 header must be
  99. * aligned on a 4 byte boundary.
  100. */
  101. align = header_size % 4;
  102. /*
  103. * Allocate the sk_buffer, initialize it and copy
  104. * all data into it.
  105. */
  106. entry->skb = dev_alloc_skb(rxdesc.size + align);
  107. if (!entry->skb)
  108. return;
  109. skb_reserve(entry->skb, align);
  110. memcpy(skb_put(entry->skb, rxdesc.size),
  111. entry_priv->data, rxdesc.size);
  112. /*
  113. * Fill in skb descriptor
  114. */
  115. skbdesc = get_skb_frame_desc(entry->skb);
  116. memset(skbdesc, 0, sizeof(*skbdesc));
  117. skbdesc->data = entry->skb->data;
  118. skbdesc->data_len = entry->skb->len;
  119. skbdesc->desc = entry_priv->desc;
  120. skbdesc->desc_len = queue->desc_size;
  121. skbdesc->entry = entry;
  122. /*
  123. * Send the frame to rt2x00lib for further processing.
  124. */
  125. rt2x00lib_rxdone(entry, &rxdesc);
  126. if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
  127. rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
  128. rt2x00_desc_write(entry_priv->desc, 0, word);
  129. }
  130. rt2x00queue_index_inc(queue, Q_INDEX);
  131. }
  132. }
  133. EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
  134. void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
  135. struct txdone_entry_desc *txdesc)
  136. {
  137. struct queue_entry_priv_pci *entry_priv = entry->priv_data;
  138. enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
  139. u32 word;
  140. rt2x00lib_txdone(entry, txdesc);
  141. /*
  142. * Make this entry available for reuse.
  143. */
  144. entry->flags = 0;
  145. rt2x00_desc_read(entry_priv->desc, 0, &word);
  146. rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
  147. rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
  148. rt2x00_desc_write(entry_priv->desc, 0, word);
  149. rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
  150. /*
  151. * If the data queue was full before the txdone handler
  152. * we must make sure the packet queue in the mac80211 stack
  153. * is reenabled when the txdone handler has finished.
  154. */
  155. if (!rt2x00queue_full(entry->queue))
  156. ieee80211_wake_queue(rt2x00dev->hw, qid);
  157. }
  158. EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
  159. /*
  160. * Device initialization handlers.
  161. */
  162. #define desc_size(__queue) \
  163. ({ \
  164. ((__queue)->limit * (__queue)->desc_size);\
  165. })
  166. #define data_size(__queue) \
  167. ({ \
  168. ((__queue)->limit * (__queue)->data_size);\
  169. })
  170. #define dma_size(__queue) \
  171. ({ \
  172. data_size(__queue) + desc_size(__queue);\
  173. })
  174. #define desc_offset(__queue, __base, __i) \
  175. ({ \
  176. (__base) + data_size(__queue) + \
  177. ((__i) * (__queue)->desc_size); \
  178. })
  179. #define data_offset(__queue, __base, __i) \
  180. ({ \
  181. (__base) + \
  182. ((__i) * (__queue)->data_size); \
  183. })
  184. static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
  185. struct data_queue *queue)
  186. {
  187. struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
  188. struct queue_entry_priv_pci *entry_priv;
  189. void *addr;
  190. dma_addr_t dma;
  191. unsigned int i;
  192. /*
  193. * Allocate DMA memory for descriptor and buffer.
  194. */
  195. addr = pci_alloc_consistent(pci_dev, dma_size(queue), &dma);
  196. if (!addr)
  197. return -ENOMEM;
  198. memset(addr, 0, dma_size(queue));
  199. /*
  200. * Initialize all queue entries to contain valid addresses.
  201. */
  202. for (i = 0; i < queue->limit; i++) {
  203. entry_priv = queue->entries[i].priv_data;
  204. entry_priv->desc = desc_offset(queue, addr, i);
  205. entry_priv->desc_dma = desc_offset(queue, dma, i);
  206. entry_priv->data = data_offset(queue, addr, i);
  207. entry_priv->data_dma = data_offset(queue, dma, i);
  208. }
  209. return 0;
  210. }
  211. static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
  212. struct data_queue *queue)
  213. {
  214. struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
  215. struct queue_entry_priv_pci *entry_priv =
  216. queue->entries[0].priv_data;
  217. if (entry_priv->data)
  218. pci_free_consistent(pci_dev, dma_size(queue),
  219. entry_priv->data, entry_priv->data_dma);
  220. entry_priv->data = NULL;
  221. }
  222. int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
  223. {
  224. struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
  225. struct data_queue *queue;
  226. int status;
  227. /*
  228. * Allocate DMA
  229. */
  230. queue_for_each(rt2x00dev, queue) {
  231. status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
  232. if (status)
  233. goto exit;
  234. }
  235. /*
  236. * Register interrupt handler.
  237. */
  238. status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
  239. IRQF_SHARED, pci_name(pci_dev), rt2x00dev);
  240. if (status) {
  241. ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
  242. pci_dev->irq, status);
  243. goto exit;
  244. }
  245. return 0;
  246. exit:
  247. queue_for_each(rt2x00dev, queue)
  248. rt2x00pci_free_queue_dma(rt2x00dev, queue);
  249. return status;
  250. }
  251. EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
  252. void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
  253. {
  254. struct data_queue *queue;
  255. /*
  256. * Free irq line.
  257. */
  258. free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev);
  259. /*
  260. * Free DMA
  261. */
  262. queue_for_each(rt2x00dev, queue)
  263. rt2x00pci_free_queue_dma(rt2x00dev, queue);
  264. }
  265. EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
  266. /*
  267. * PCI driver handlers.
  268. */
  269. static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
  270. {
  271. kfree(rt2x00dev->rf);
  272. rt2x00dev->rf = NULL;
  273. kfree(rt2x00dev->eeprom);
  274. rt2x00dev->eeprom = NULL;
  275. if (rt2x00dev->csr.base) {
  276. iounmap(rt2x00dev->csr.base);
  277. rt2x00dev->csr.base = NULL;
  278. }
  279. }
  280. static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
  281. {
  282. struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
  283. rt2x00dev->csr.base = ioremap(pci_resource_start(pci_dev, 0),
  284. pci_resource_len(pci_dev, 0));
  285. if (!rt2x00dev->csr.base)
  286. goto exit;
  287. rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
  288. if (!rt2x00dev->eeprom)
  289. goto exit;
  290. rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
  291. if (!rt2x00dev->rf)
  292. goto exit;
  293. return 0;
  294. exit:
  295. ERROR_PROBE("Failed to allocate registers.\n");
  296. rt2x00pci_free_reg(rt2x00dev);
  297. return -ENOMEM;
  298. }
  299. int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
  300. {
  301. struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
  302. struct ieee80211_hw *hw;
  303. struct rt2x00_dev *rt2x00dev;
  304. int retval;
  305. retval = pci_request_regions(pci_dev, pci_name(pci_dev));
  306. if (retval) {
  307. ERROR_PROBE("PCI request regions failed.\n");
  308. return retval;
  309. }
  310. retval = pci_enable_device(pci_dev);
  311. if (retval) {
  312. ERROR_PROBE("Enable device failed.\n");
  313. goto exit_release_regions;
  314. }
  315. pci_set_master(pci_dev);
  316. if (pci_set_mwi(pci_dev))
  317. ERROR_PROBE("MWI not available.\n");
  318. if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) &&
  319. pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
  320. ERROR_PROBE("PCI DMA not supported.\n");
  321. retval = -EIO;
  322. goto exit_disable_device;
  323. }
  324. hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
  325. if (!hw) {
  326. ERROR_PROBE("Failed to allocate hardware.\n");
  327. retval = -ENOMEM;
  328. goto exit_disable_device;
  329. }
  330. pci_set_drvdata(pci_dev, hw);
  331. rt2x00dev = hw->priv;
  332. rt2x00dev->dev = pci_dev;
  333. rt2x00dev->ops = ops;
  334. rt2x00dev->hw = hw;
  335. retval = rt2x00pci_alloc_reg(rt2x00dev);
  336. if (retval)
  337. goto exit_free_device;
  338. retval = rt2x00lib_probe_dev(rt2x00dev);
  339. if (retval)
  340. goto exit_free_reg;
  341. return 0;
  342. exit_free_reg:
  343. rt2x00pci_free_reg(rt2x00dev);
  344. exit_free_device:
  345. ieee80211_free_hw(hw);
  346. exit_disable_device:
  347. if (retval != -EBUSY)
  348. pci_disable_device(pci_dev);
  349. exit_release_regions:
  350. pci_release_regions(pci_dev);
  351. pci_set_drvdata(pci_dev, NULL);
  352. return retval;
  353. }
  354. EXPORT_SYMBOL_GPL(rt2x00pci_probe);
  355. void rt2x00pci_remove(struct pci_dev *pci_dev)
  356. {
  357. struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
  358. struct rt2x00_dev *rt2x00dev = hw->priv;
  359. /*
  360. * Free all allocated data.
  361. */
  362. rt2x00lib_remove_dev(rt2x00dev);
  363. rt2x00pci_free_reg(rt2x00dev);
  364. ieee80211_free_hw(hw);
  365. /*
  366. * Free the PCI device data.
  367. */
  368. pci_set_drvdata(pci_dev, NULL);
  369. pci_disable_device(pci_dev);
  370. pci_release_regions(pci_dev);
  371. }
  372. EXPORT_SYMBOL_GPL(rt2x00pci_remove);
#ifdef CONFIG_PM
  374. int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
  375. {
  376. struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
  377. struct rt2x00_dev *rt2x00dev = hw->priv;
  378. int retval;
  379. retval = rt2x00lib_suspend(rt2x00dev, state);
  380. if (retval)
  381. return retval;
  382. rt2x00pci_free_reg(rt2x00dev);
  383. pci_save_state(pci_dev);
  384. pci_disable_device(pci_dev);
  385. return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
  386. }
  387. EXPORT_SYMBOL_GPL(rt2x00pci_suspend);
  388. int rt2x00pci_resume(struct pci_dev *pci_dev)
  389. {
  390. struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
  391. struct rt2x00_dev *rt2x00dev = hw->priv;
  392. int retval;
  393. if (pci_set_power_state(pci_dev, PCI_D0) ||
  394. pci_enable_device(pci_dev) ||
  395. pci_restore_state(pci_dev)) {
  396. ERROR(rt2x00dev, "Failed to resume device.\n");
  397. return -EIO;
  398. }
  399. retval = rt2x00pci_alloc_reg(rt2x00dev);
  400. if (retval)
  401. return retval;
  402. retval = rt2x00lib_resume(rt2x00dev);
  403. if (retval)
  404. goto exit_free_reg;
  405. return 0;
  406. exit_free_reg:
  407. rt2x00pci_free_reg(rt2x00dev);
  408. return retval;
  409. }
  410. EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */
  412. /*
  413. * rt2x00pci module information.
  414. */
  415. MODULE_AUTHOR(DRV_PROJECT);
  416. MODULE_VERSION(DRV_VERSION);
  417. MODULE_DESCRIPTION("rt2x00 pci library");
  418. MODULE_LICENSE("GPL");