bcm43xx_dma.c

/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>

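/* Ring slot bookkeeping helpers.
 * A ring has nr_slots descriptor slots; used_slots counts how many are
 * currently owned by pending buffers, and next_slot()/prev_slot() walk
 * the ring with wrap-around.
 */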
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= -1 && slot <= ring->nr_slots - 1);
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= 0 && slot <= ring->nr_slots - 1);
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
        int slot;

        assert(ring->tx);
        assert(!ring->suspended);
        assert(free_slots(ring) != 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        /* Check the number of available slots and suspend TX,
         * if we are running low on free slots.
         */
        if (unlikely(free_slots(ring) < ring->suspend_mark)) {
                netif_stop_queue(ring->bcm->net_dev);
                ring->suspended = 1;
        }
#ifdef CONFIG_BCM43XX_DEBUG
        if (ring->used_slots > ring->max_used_slots)
                ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

        return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(ring->tx);

        ring->used_slots--;

        /* Check if TX is suspended and check if we have
         * enough free slots to resume it again.
         */
        if (unlikely(ring->suspended)) {
                if (free_slots(ring) >= ring->resume_mark) {
                        ring->suspended = 0;
                        netif_wake_queue(ring->bcm->net_dev);
                }
        }
}

u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
        static const u16 map64[] = {
                BCM43xx_MMIO_DMA64_BASE0,
                BCM43xx_MMIO_DMA64_BASE1,
                BCM43xx_MMIO_DMA64_BASE2,
                BCM43xx_MMIO_DMA64_BASE3,
                BCM43xx_MMIO_DMA64_BASE4,
                BCM43xx_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                BCM43xx_MMIO_DMA32_BASE0,
                BCM43xx_MMIO_DMA32_BASE1,
                BCM43xx_MMIO_DMA32_BASE2,
                BCM43xx_MMIO_DMA32_BASE3,
                BCM43xx_MMIO_DMA32_BASE4,
                BCM43xx_MMIO_DMA32_BASE5,
        };

        if (dma64bit) {
                assert(controller_idx >= 0 &&
                       controller_idx < ARRAY_SIZE(map64));
                return map64[controller_idx];
        }
        assert(controller_idx >= 0 &&
               controller_idx < ARRAY_SIZE(map32));
        return map32[controller_idx];
}

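/* Small wrappers around the PCI streaming-DMA API.
 * The mapping direction is chosen from the tx flag:
 * PCI_DMA_TODEVICE for TX buffers, PCI_DMA_FROMDEVICE for RX buffers.
 */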
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
                          unsigned char *buf,
                          size_t len,
                          int tx)
{
        dma_addr_t dmaaddr;
        int direction = PCI_DMA_FROMDEVICE;

        if (tx)
                direction = PCI_DMA_TODEVICE;

        dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                 buf, len,
                                 direction);

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
                      dma_addr_t addr,
                      size_t len,
                      int tx)
{
        if (tx) {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_TODEVICE);
        } else {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_FROMDEVICE);
        }
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
                             dma_addr_t addr,
                             size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
                                    addr, len, PCI_DMA_FROMDEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
                                dma_addr_t addr,
                                size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_device(ring->bcm->pci_dev,
                                       addr, len, PCI_DMA_FROMDEVICE);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_meta *meta,
                            int irq_context)
{
        assert(meta->skb);
        if (irq_context)
                dev_kfree_skb_irq(meta->skb);
        else
                dev_kfree_skb(meta->skb);
        meta->skb = NULL;
}

static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
        ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
                                              &(ring->dmabase));
        if (!ring->descbase) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary... */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
                                             BCM43xx_DMA_RINGMEMSIZE,
                                             PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(rx_ring_dma) ||
                    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                        /* Sigh... */
                        if (!pci_dma_mapping_error(rx_ring_dma))
                                pci_unmap_single(ring->bcm->pci_dev,
                                                 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                 PCI_DMA_BIDIRECTIONAL);
                        rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
                                                     rx_ring, BCM43xx_DMA_RINGMEMSIZE,
                                                     PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(rx_ring_dma) ||
                            rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                                assert(0);
                                if (!pci_dma_mapping_error(rx_ring_dma))
                                        pci_unmap_single(ring->bcm->pci_dev,
                                                         rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                         PCI_DMA_BIDIRECTIONAL);
                                goto out_err;
                        }
                }

                ring->descbase = rx_ring;
                ring->dmabase = rx_ring_dma;
        }
        memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

        return 0;
out_err:
        printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
        return -ENOMEM;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_RXSTAT;
                        if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_RXSTATE;
                        if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA32_TXSTAT_STOPPED)
                                break;
                }
                udelay(10);
        }
        offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        udelay(300);

        return 0;
}

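/* Build one hardware descriptor for the given buffer.
 * The 64-bit and 32-bit engines use different descriptor layouts; in both
 * cases the routing/addrext bits are split out of the DMA address, the
 * ring's routing value is merged back in, and the frame-start, frame-end,
 * IRQ and table-end flags are set from the arguments.
 */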
static void fill_descriptor(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_generic *desc,
                            dma_addr_t dmaaddr,
                            u16 bufsize,
                            int start, int end, int irq)
{
        int slot;

        slot = bcm43xx_dma_desc2idx(ring, desc);
        assert(slot >= 0 && slot < ring->nr_slots);

        if (ring->dma64) {
                u32 ctl0 = 0, ctl1 = 0;
                u32 addrlo, addrhi;
                u32 addrext;

                addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
                addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
                addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                addrhi |= ring->routing;
                if (slot == ring->nr_slots - 1)
                        ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
                if (start)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
                if (end)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
                if (irq)
                        ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
                ctl1 |= (bufsize - ring->frameoffset)
                        & BCM43xx_DMA64_DCTL1_BYTECNT;
                ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
                        & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

                desc->dma64.control0 = cpu_to_le32(ctl0);
                desc->dma64.control1 = cpu_to_le32(ctl1);
                desc->dma64.address_low = cpu_to_le32(addrlo);
                desc->dma64.address_high = cpu_to_le32(addrhi);
        } else {
                u32 ctl;
                u32 addr;
                u32 addrext;

                addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
                addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
                          >> BCM43xx_DMA32_ROUTING_SHIFT;
                addr |= ring->routing;
                ctl = (bufsize - ring->frameoffset)
                      & BCM43xx_DMA32_DCTL_BYTECNT;
                if (slot == ring->nr_slots - 1)
                        ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
                if (start)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
                if (end)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
                if (irq)
                        ctl |= BCM43xx_DMA32_DCTL_IRQ;
                ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
                       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

                desc->dma32.control = cpu_to_le32(ctl);
                desc->dma32.address = cpu_to_le32(addr);
        }
}

static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                               struct bcm43xx_dmadesc_generic *desc,
                               struct bcm43xx_dmadesc_meta *meta,
                               gfp_t gfp_flags)
{
        struct bcm43xx_rxhdr *rxhdr;
        struct bcm43xx_hwxmitstatus *xmitstat;
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        assert(!ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        /* This hardware bug work-around adapted from the b44 driver.
           The chip may be unable to do PCI DMA to/from anything above 1GB */
        if (pci_dma_mapping_error(dmaaddr) ||
            dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                /* This one has 30-bit addressing... */
                if (!pci_dma_mapping_error(dmaaddr))
                        pci_unmap_single(ring->bcm->pci_dev,
                                         dmaaddr, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(ring->rx_buffersize, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                         skb->data, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(dmaaddr) ||
                    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                        assert(0);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        skb->dev = ring->bcm->net_dev;
        fill_descriptor(ring, desc, dmaaddr,
                        ring->rx_buffersize, 0, 0, 0);

        rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
        rxhdr->frame_length = 0;
        rxhdr->flags1 = 0;
        xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
        xmitstat->cookie = 0;

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err)
                        goto err_unwind;
        }
        mb();
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;

        if (ring->tx) {
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = BCM43xx_DMA64_TXENABLE;
                        value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
                                & BCM43xx_DMA64_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
                                          (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
                                          ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                          | ring->routing);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = BCM43xx_DMA32_TXENABLE;
                        value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
                                & BCM43xx_DMA32_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
                                          (ringbase & ~BCM43xx_DMA32_ROUTING)
                                          | ring->routing);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA64_RXENABLE;
                        value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
                                & BCM43xx_DMA64_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
                                          (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
                                          ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                          | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA32_RXENABLE;
                        value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
                                & BCM43xx_DMA32_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
                                          (ringbase & ~BCM43xx_DMA32_ROUTING)
                                          | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
        if (ring->tx) {
                bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
        } else {
                bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
        }
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        assert(ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta, 0);
        }
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
                                               int controller_index,
                                               int for_tx,
                                               int dma64)
{
        struct bcm43xx_dmaring *ring;
        int err;
        int nr_slots;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        nr_slots = BCM43xx_RXRING_SLOTS;
        if (for_tx)
                nr_slots = BCM43xx_TXRING_SLOTS;

        ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;

        ring->routing = BCM43xx_DMA32_CLIENTTRANS;
        if (dma64)
                ring->routing = BCM43xx_DMA64_CLIENTTRANS;
#ifdef CONFIG_BCM947XX
        if (bcm->pci_dev->bus->number == 0)
                ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
#endif

        ring->bcm = bcm;
        ring->nr_slots = nr_slots;
        ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
        ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
        assert(ring->suspend_mark < ring->resume_mark);
        ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
        ring->index = controller_index;
        ring->dma64 = !!dma64;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
                } else if (ring->index == 3) {
                        ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
                } else
                        assert(0);
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_meta;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;

        return ring;

out:
        printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
        return ring;

err_free_ringmemory:
        free_ringmemory(ring);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
        if (!ring)
                return;

        dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
                (ring->dma64) ? "64" : "32",
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX",
                ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so no need to take care of concurrency with rx handler stuff.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->meta);
        kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma;

        if (bcm43xx_using_pio(bcm))
                return;
        dma = bcm43xx_current_dma(bcm);

        bcm43xx_destroy_dmaring(dma->rx_ring3);
        dma->rx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
}

int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring;
        int err = -ENOMEM;
        int dma64 = 0;

        bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
        if (bcm->dma_mask == DMA_64BIT_MASK)
                dma64 = 1;
        err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;
        err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;

        /* setup TX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
        if (!ring)
                goto err_destroy_tx3;
        dma->tx_ring4 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
        if (!ring)
                goto err_destroy_tx4;
        dma->tx_ring5 = ring;

        /* setup RX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
        if (!ring)
                goto err_destroy_tx5;
        dma->rx_ring0 = ring;

        if (bcm->current_core->rev < 5) {
                ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring3 = ring;
        }

        dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
                (bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
                (bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
        err = 0;
out:
        return err;

err_destroy_rx0:
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
err_destroy_tx5:
        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
err_destroy_tx4:
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
err_destroy_tx3:
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
err_destroy_tx2:
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
err_destroy_tx1:
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
err_destroy_tx0:
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
no_dma:
#ifdef CONFIG_BCM43XX_PIO
        printk(KERN_WARNING PFX "DMA not supported on this device."
                                " Falling back to PIO.\n");
        bcm->__using_pio = 1;
        return -ENOSYS;
#else
        printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
                            "Please recompile the driver with PIO support.\n");
        return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
                           int slot)
{
        u16 cookie = 0x1000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path.
         */
        switch (ring->index) {
        case 0:
                cookie = 0xA000;
                break;
        case 1:
                cookie = 0xB000;
                break;
        case 2:
                cookie = 0xC000;
                break;
        case 3:
                cookie = 0xD000;
                break;
        case 4:
                cookie = 0xE000;
                break;
        case 5:
                cookie = 0xF000;
                break;
        }
        assert(((u16)slot & 0xF000) == 0x0000);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                      u16 cookie, int *slot)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0xA000:
                ring = dma->tx_ring0;
                break;
        case 0xB000:
                ring = dma->tx_ring1;
                break;
        case 0xC000:
                ring = dma->tx_ring2;
                break;
        case 0xD000:
                ring = dma->tx_ring3;
                break;
        case 0xE000:
                ring = dma->tx_ring4;
                break;
        case 0xF000:
                ring = dma->tx_ring5;
                break;
        default:
                assert(0);
        }
        *slot = (cookie & 0x0FFF);
        assert(*slot >= 0 && *slot < ring->nr_slots);

        return ring;
}

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                  int slot)
{
        u16 offset;
        int descsize;

        /* Everything is ready to start. Buffers are DMA mapped and
         * associated with slots.
         * "slot" is the last slot of the new frame we want to transmit.
         * Close your seat belts now, please.
         */
        wmb();
        slot = next_slot(ring, slot);
        offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
        descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
                                 : sizeof(struct bcm43xx_dmadesc32);
        bcm43xx_dma_write(ring, offset,
                          (u32)(slot * descsize));
}

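/* Map one fragment, prepend the device TX header and hand it to the
 * hardware. If the buffer cannot be DMA-mapped within the device's DMA
 * mask, the data is copied into a GFP_DMA bounce buffer first (the same
 * work-around used in setup_rx_descbuffer()).
 */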
static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
                            struct sk_buff *skb,
                            u8 cur_frag)
{
        int slot;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        dma_addr_t dmaaddr;
        struct sk_buff *bounce_skb;

        assert(skb_shinfo(skb)->nr_frags == 0);

        slot = request_slot(ring);
        desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

        /* Add a device specific TX header. */
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
        /* Reserve enough headroom for the device tx header. */
        __skb_push(skb, sizeof(struct bcm43xx_txhdr));
        /* Now calculate and add the tx header.
         * The tx header includes the PLCP header.
         */
        bcm43xx_generate_txhdr(ring->bcm,
                               (struct bcm43xx_txhdr *)skb->data,
                               skb->data + sizeof(struct bcm43xx_txhdr),
                               skb->len - sizeof(struct bcm43xx_txhdr),
                               (cur_frag == 0),
                               generate_cookie(ring, slot));

        dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                /* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
                if (!dma_mapping_error(dmaaddr))
                        unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        return;
                dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
                if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                        if (!dma_mapping_error(dmaaddr))
                                unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                        dev_kfree_skb_any(bounce_skb);
                        assert(0);
                        return;
                }
                skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
                                          skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;

        fill_descriptor(ring, desc, dmaaddr,
                        skb->len, 1, 1, 1);

        /* Now transfer the whole frame. */
        dmacontroller_poke_tx(ring, slot);
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                   struct ieee80211_txb *txb)
{
        /* We just received a packet from the kernel network subsystem.
         * Add headers and DMA map the memory. Poke
         * the device to send the stuff.
         * Note that this is called from atomic context.
         */
        struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
        u8 i;
        struct sk_buff *skb;

        assert(ring->tx);
        if (unlikely(free_slots(ring) < txb->nr_frags)) {
                /* The queue should be stopped,
                 * if we are low on free slots.
                 * If this ever triggers, we have to lower the suspend_mark.
                 */
                dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
                return -ENOMEM;
        }

        for (i = 0; i < txb->nr_frags; i++) {
                skb = txb->fragments[i];
                /* Take skb from ieee80211_txb_free */
                txb->fragments[i] = NULL;
                dma_tx_fragment(ring, skb, i);
        }
        ieee80211_txb_free(txb);

        return 0;
}

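/* Called when the device reports the transmit status for a frame.
 * Walk all descriptors belonging to that frame, unmap and free their
 * buffers, and give the slots back to the ring.
 */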
void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                                   struct bcm43xx_xmitstatus *status)
{
        struct bcm43xx_dmaring *ring;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int is_last_fragment;
        int slot;
        u32 tmp;

        ring = parse_cookie(bcm, status->cookie, &slot);
        assert(ring);
        assert(ring->tx);
        while (1) {
                assert(slot >= 0 && slot < ring->nr_slots);
                desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

                if (ring->dma64) {
                        tmp = le32_to_cpu(desc->dma64.control0);
                        is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
                } else {
                        tmp = le32_to_cpu(desc->dma32.control);
                        is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
                }
                unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
                free_descriptor_buffer(ring, meta, 1);
                /* Everything belonging to the slot is unmapped
                 * and freed, so we can return it.
                 */
                return_slot(ring, slot);

                if (is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        bcm->stats.last_tx = jiffies;
}

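/* Handle one received buffer. On the xmit-status ring (index 3) the
 * buffer carries a hardware transmit status rather than a frame; it is
 * decoded, passed to bcm43xx_dma_handle_xmitstatus() and the buffer is
 * recycled. For a real frame, a fresh buffer is attached to the slot
 * and the filled skb is handed up to bcm43xx_rx().
 */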
static void dma_rx(struct bcm43xx_dmaring *ring,
                   int *slot)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_rxhdr *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->index == 3) {
                /* We received an xmit status. */
                struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
                struct bcm43xx_xmitstatus stat;
                int i = 0;

                stat.cookie = le16_to_cpu(hw->cookie);
                while (stat.cookie == 0) {
                        if (unlikely(++i >= 10000)) {
                                assert(0);
                                break;
                        }
                        udelay(2);
                        barrier();
                        stat.cookie = le16_to_cpu(hw->cookie);
                }
                stat.flags = hw->flags;
                stat.cnt1 = hw->cnt1;
                stat.cnt2 = hw->cnt2;
                stat.seq = le16_to_cpu(hw->seq);
                stat.unknown = le16_to_cpu(hw->unknown);

                bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
                bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
                /* recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

                return;
        }
        rxhdr = (struct bcm43xx_rxhdr *)skb->data;
        len = le16_to_cpu(rxhdr->frame_length);
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_length);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                printkl(KERN_ERR PFX "DMA RX buffer too small "
                                     "(len: %u, buffer: %u, nr-dropped: %d)\n",
                        len, ring->rx_buffersize, cnt);
                goto drop;
        }
        len -= IEEE80211_FCS_LEN;

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
                sync_descbuffer_for_device(ring, dmaaddr,
                                           ring->rx_buffersize);
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        err = bcm43xx_rx(ring->bcm, skb, rxhdr);
        if (err) {
                dev_kfree_skb_irq(skb);
                goto drop;
        }

drop:
        return;
}

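/* Process all RX slots the hardware has filled since the last call:
 * read the current hardware descriptor index, handle every slot up to
 * it and write the new index back so the device can reuse the slots.
 */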
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
        u32 status;
        u16 descptr;
        int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
        int used_slots = 0;
#endif

        assert(!ring->tx);
        if (ring->dma64) {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
                descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
        } else {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
                descptr = (status & BCM43xx_DMA32_RXDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
        }
        assert(current_slot >= 0 && current_slot < ring->nr_slots);

        slot = ring->current_slot;
        for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
                if (++used_slots > ring->max_used_slots)
                        ring->max_used_slots = used_slots;
#endif
        }
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
                                  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
                                  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
        }
        ring->current_slot = slot;
}

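/* Suspend/resume a TX DMA channel by setting or clearing the TXSUSPEND
 * bit in the channel's control register. The power-saving control bits
 * are adjusted via bcm43xx_power_saving_ctl_bits() before suspending
 * and again after resuming.
 */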
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                  | BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                  | BCM43xx_DMA32_TXSUSPEND);
        }
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                  & ~BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                  & ~BCM43xx_DMA32_TXSUSPEND);
        }
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}