bcm43xx_dma.c

/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
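
/* The descriptor ring is used as a circular buffer.
 * free_slots() returns the number of unused descriptor slots,
 * next_slot()/prev_slot() step through the ring with wrap-around.
 */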
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

/* Request a free slot for use. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->suspended);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	/* Check the number of available slots and suspend TX,
	 * if we are running low on free slots.
	 */
	if (unlikely(free_slots(ring) < ring->suspend_mark)) {
		netif_stop_queue(ring->bcm->net_dev);
		ring->suspended = 1;
	}
#ifdef CONFIG_BCM43XX_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

	return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(ring->tx);

	ring->used_slots--;

	/* Check if TX is suspended and check if we have
	 * enough free slots to resume it again.
	 */
	if (unlikely(ring->suspended)) {
		if (free_slots(ring) >= ring->resume_mark) {
			ring->suspended = 0;
			netif_wake_queue(ring->bcm->net_dev);
		}
	}
}
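
/* Map a DMA controller index to the MMIO offset of its register block. */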
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		BCM43xx_MMIO_DMA64_BASE0,
		BCM43xx_MMIO_DMA64_BASE1,
		BCM43xx_MMIO_DMA64_BASE2,
		BCM43xx_MMIO_DMA64_BASE3,
		BCM43xx_MMIO_DMA64_BASE4,
		BCM43xx_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		BCM43xx_MMIO_DMA32_BASE0,
		BCM43xx_MMIO_DMA32_BASE1,
		BCM43xx_MMIO_DMA32_BASE2,
		BCM43xx_MMIO_DMA32_BASE3,
		BCM43xx_MMIO_DMA32_BASE4,
		BCM43xx_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		assert(controller_idx >= 0 &&
		       controller_idx < ARRAY_SIZE(map64));
		return map64[controller_idx];
	}
	assert(controller_idx >= 0 &&
	       controller_idx < ARRAY_SIZE(map32));
	return map32[controller_idx];
}
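
/* Helpers that DMA-map, unmap and cache-sync a single descriptor buffer
 * through the PCI device, choosing the DMA direction from TX/RX use.
 */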
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
					 buf, len,
					 DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx) {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_TO_DEVICE);
	} else {
		dma_unmap_single(&ring->bcm->pci_dev->dev,
				 addr, len,
				 DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	assert(!ring->tx);

	dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	assert(meta->skb);
	if (irq_context)
		dev_kfree_skb_irq(meta->skb);
	else
		dev_kfree_skb(meta->skb);
	meta->skb = NULL;
}
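
/* Allocate/free the coherent DMA memory that holds the descriptor ring itself. */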
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase) {
		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_RXSTAT;
			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_RXSTATE;
			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
				break;
		}
		udelay(10);
	}
	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	udelay(300);

	return 0;
}
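
/* Fill one hardware descriptor: write the (possibly extended) DMA address,
 * the byte count and the start/end/IRQ control flags, using either the
 * 32-bit or the 64-bit descriptor layout depending on the ring type.
 */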
static void fill_descriptor(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_generic *desc,
			    dma_addr_t dmaaddr,
			    u16 bufsize,
			    int start, int end, int irq)
{
	int slot;

	slot = bcm43xx_dma_desc2idx(ring, desc);
	assert(slot >= 0 && slot < ring->nr_slots);

	if (ring->dma64) {
		u32 ctl0 = 0, ctl1 = 0;
		u32 addrlo, addrhi;
		u32 addrext;

		addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
		addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
		addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
		addrhi |= ring->routing;
		if (slot == ring->nr_slots - 1)
			ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
		if (start)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
		if (end)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
		if (irq)
			ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
		ctl1 |= (bufsize - ring->frameoffset)
			& BCM43xx_DMA64_DCTL1_BYTECNT;
		ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
			& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

		desc->dma64.control0 = cpu_to_le32(ctl0);
		desc->dma64.control1 = cpu_to_le32(ctl1);
		desc->dma64.address_low = cpu_to_le32(addrlo);
		desc->dma64.address_high = cpu_to_le32(addrhi);
	} else {
		u32 ctl;
		u32 addr;
		u32 addrext;

		addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
		addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
			  >> BCM43xx_DMA32_ROUTING_SHIFT;
		addr |= ring->routing;
		ctl = (bufsize - ring->frameoffset)
		      & BCM43xx_DMA32_DCTL_BYTECNT;
		if (slot == ring->nr_slots - 1)
			ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
		if (start)
			ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
		if (end)
			ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
		if (irq)
			ctl |= BCM43xx_DMA32_DCTL_IRQ;
		ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
		       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

		desc->dma32.control = cpu_to_le32(ctl);
		desc->dma32.address = cpu_to_le32(addr);
	}
}
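
/* Allocate an RX skb for one slot, DMA-map it, write the descriptor and
 * clear the header fields that the hardware will later fill in.
 */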
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc_generic *desc,
			       struct bcm43xx_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr *rxhdr;
	struct bcm43xx_hwxmitstatus *xmitstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	skb->dev = ring->bcm->net_dev;
	fill_descriptor(ring, desc, dmaaddr,
			ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
	rxhdr->frame_length = 0;
	rxhdr->flags1 = 0;
	xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
	xmitstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err)
			goto err_unwind;
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
			value = BCM43xx_DMA64_TXENABLE;
			value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
				 & BCM43xx_DMA64_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
					  | ring->routing);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
			value = BCM43xx_DMA32_TXENABLE;
			value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
				 & BCM43xx_DMA32_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
					  (ringbase & ~BCM43xx_DMA32_ROUTING)
					  | ring->routing);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
			value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
			value |= BCM43xx_DMA64_RXENABLE;
			value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
				 & BCM43xx_DMA64_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
					  | ring->routing);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
			value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
			value |= BCM43xx_DMA32_RXENABLE;
			value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
				 & BCM43xx_DMA32_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
					  (ringbase & ~BCM43xx_DMA32_ROUTING)
					  | ring->routing);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
	}
}
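
/* Unmap and free all descriptor buffers that are still attached to the ring. */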
static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			assert(ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta, 0);
	}
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
					       int controller_index,
					       int for_tx,
					       int dma64)
{
	struct bcm43xx_dmaring *ring;
	int err;
	int nr_slots;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = BCM43xx_RXRING_SLOTS;
	if (for_tx)
		nr_slots = BCM43xx_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->routing = BCM43xx_DMA32_CLIENTTRANS;
	if (dma64)
		ring->routing = BCM43xx_DMA64_CLIENTTRANS;
#ifdef CONFIG_BCM947XX
	if (bcm->pci_dev->bus->number == 0)
		ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
#endif

	ring->bcm = bcm;
	ring->nr_slots = nr_slots;
	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
	ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
	assert(ring->suspend_mark < ring->resume_mark);
	ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
		} else
			assert(0);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_meta;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
	if (!ring)
		return;

	dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
		(ring->dma64) ? "64" : "32",
		ring->mmio_base,
		(ring->tx) ? "TX" : "RX",
		ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->meta);
	kfree(ring);
}
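
/* Tear down all TX and RX rings of the current core. */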
void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma;

	if (bcm43xx_using_pio(bcm))
		return;
	dma = bcm43xx_current_dma(bcm);

	bcm43xx_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}
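
/* Set up the six TX rings and the RX ring(s), using 64-bit DMA if the
 * core advertises it in SBTMSTATEHIGH.
 */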
int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring;
	int err = -ENOMEM;
	int dma64 = 0;
	u32 sbtmstatehi;

	sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
	if (sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT)
		dma64 = 1;

	/* setup TX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (bcm->current_core->rev < 5) {
		ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	dprintk(KERN_INFO PFX "%s DMA initialized\n",
		dma64 ? "64-bit" : "32-bit");
	err = 0;
out:
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
				      u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(*slot >= 0 && *slot < ring->nr_slots);

	return ring;
}
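
/* Tell the hardware about new TX descriptors by advancing the TX index
 * register to the slot after the last descriptor of the frame.
 */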
static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
				  int slot)
{
	u16 offset;
	int descsize;

	/* Everything is ready to start. Buffers are DMA mapped and
	 * associated with slots.
	 * "slot" is the last slot of the new frame we want to transmit.
	 * Close your seat belts now, please.
	 */
	wmb();
	slot = next_slot(ring, slot);
	offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
	descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
				 : sizeof(struct bcm43xx_dmadesc32);
	bcm43xx_dma_write(ring, offset,
			  (u32)(slot * descsize));
}
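
/* Map one fragment: claim a slot, prepend the device TX header (which
 * carries the PLCP header and the slot cookie), DMA-map the buffer and
 * hand the descriptor to the controller.
 */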
static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
			    struct sk_buff *skb,
			    u8 cur_frag)
{
	int slot;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	dma_addr_t dmaaddr;

	assert(skb_shinfo(skb)->nr_frags == 0);

	slot = request_slot(ring);
	desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

	/* Add a device specific TX header. */
	assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
	/* Reserve enough headroom for the device tx header. */
	__skb_push(skb, sizeof(struct bcm43xx_txhdr));
	/* Now calculate and add the tx header.
	 * The tx header includes the PLCP header.
	 */
	bcm43xx_generate_txhdr(ring->bcm,
			       (struct bcm43xx_txhdr *)skb->data,
			       skb->data + sizeof(struct bcm43xx_txhdr),
			       skb->len - sizeof(struct bcm43xx_txhdr),
			       (cur_frag == 0),
			       generate_cookie(ring, slot));

	meta->skb = skb;
	dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	meta->dmaaddr = dmaaddr;

	fill_descriptor(ring, desc, dmaaddr,
			skb->len, 1, 1, 1);

	/* Now transfer the whole frame. */
	dmacontroller_poke_tx(ring, slot);
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb)
{
	/* We just received a packet from the kernel network subsystem.
	 * Add headers and DMA map the memory. Poke
	 * the device to send the stuff.
	 * Note that this is called from atomic context.
	 */
	struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
	u8 i;
	struct sk_buff *skb;

	assert(ring->tx);
	if (unlikely(free_slots(ring) < txb->nr_frags)) {
		/* The queue should be stopped,
		 * if we are low on free slots.
		 * If this ever triggers, we have to lower the suspend_mark.
		 */
		dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
		return -ENOMEM;
	}

	for (i = 0; i < txb->nr_frags; i++) {
		skb = txb->fragments[i];
		/* Take skb from ieee80211_txb_free */
		txb->fragments[i] = NULL;
		dma_tx_fragment(ring, skb, i);
	}
	ieee80211_txb_free(txb);

	return 0;
}
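
/* Handle a TX status report: walk the descriptors of the reported frame,
 * unmap and free each buffer and return its slot to the ring.
 */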
void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
				   struct bcm43xx_xmitstatus *status)
{
	struct bcm43xx_dmaring *ring;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int is_last_fragment;
	int slot;
	u32 tmp;

	ring = parse_cookie(bcm, status->cookie, &slot);
	assert(ring);
	assert(ring->tx);
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
		desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

		if (ring->dma64) {
			tmp = le32_to_cpu(desc->dma64.control0);
			is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
		} else {
			tmp = le32_to_cpu(desc->dma32.control);
			is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
		}
		unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		free_descriptor_buffer(ring, meta, 1);
		/* Everything belonging to the slot is unmapped
		 * and freed, so we can return it.
		 */
		return_slot(ring, slot);

		if (is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	bcm->stats.last_tx = jiffies;
}
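
/* Process one received descriptor: either consume a hardware TX status
 * report (RX ring 3) or pass a received frame up to the 802.11 stack and
 * re-arm the slot with a fresh buffer.
 */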
static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
		struct bcm43xx_xmitstatus stat;
		int i = 0;

		stat.cookie = le16_to_cpu(hw->cookie);
		while (stat.cookie == 0) {
			if (unlikely(++i >= 10000)) {
				assert(0);
				break;
			}
			udelay(2);
			barrier();
			stat.cookie = le16_to_cpu(hw->cookie);
		}
		stat.flags = hw->flags;
		stat.cnt1 = hw->cnt1;
		stat.cnt2 = hw->cnt2;
		stat.seq = le16_to_cpu(hw->seq);
		stat.unknown = le16_to_cpu(hw->unknown);

		bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
		bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr *)skb->data;
	len = le16_to_cpu(rxhdr->frame_length);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_length);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
			"(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}
	len -= IEEE80211_FCS_LEN;

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	err = bcm43xx_rx(ring->bcm, skb, rxhdr);
	if (err) {
		dev_kfree_skb_irq(skb);
		goto drop;
	}

drop:
	return;
}
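
/* RX handler for a ring: read the hardware descriptor pointer, process
 * every filled slot up to it and write back the new RX index.
 */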
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	u32 status;
	u16 descptr;
	int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
	int used_slots = 0;
#endif

	assert(!ring->tx);
	if (ring->dma64) {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
		descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
	} else {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
		descptr = (status & BCM43xx_DMA32_RXDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
	}
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
		if (++used_slots > ring->max_used_slots)
			ring->max_used_slots = used_slots;
#endif
	}
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
				  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
				  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
	}
	ring->current_slot = slot;
}
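
/* Suspend/resume DMA TX on a ring by toggling the TXSUSPEND bit in the
 * TX control register; the power-saving state is adjusted around the
 * suspend/resume via bcm43xx_power_saving_ctl_bits().
 */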
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				  | BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				  | BCM43xx_DMA32_TXSUSPEND);
	}
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				  & ~BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				  & ~BCM43xx_DMA32_TXSUSPEND);
	}
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}