bcm43xx_dma.c

/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>


static inline int free_slots(struct bcm43xx_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= -1 && slot <= ring->nr_slots - 1);
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= 0 && slot <= ring->nr_slots - 1);
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
        int slot;

        assert(ring->tx);
        assert(!ring->suspended);
        assert(free_slots(ring) != 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        /* Check the number of available slots and suspend TX,
         * if we are running low on free slots.
         */
        if (unlikely(free_slots(ring) < ring->suspend_mark)) {
                netif_stop_queue(ring->bcm->net_dev);
                ring->suspended = 1;
        }
#ifdef CONFIG_BCM43XX_DEBUG
        if (ring->used_slots > ring->max_used_slots)
                ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

        return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(ring->tx);

        ring->used_slots--;

        /* Check if TX is suspended and check if we have
         * enough free slots to resume it again.
         */
        if (unlikely(ring->suspended)) {
                if (free_slots(ring) >= ring->resume_mark) {
                        ring->suspended = 0;
                        netif_wake_queue(ring->bcm->net_dev);
                }
        }
}

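/* Return the MMIO base register offset for the DMA controller
 * with the given index (64-bit or 32-bit DMA engine). */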
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
        static const u16 map64[] = {
                BCM43xx_MMIO_DMA64_BASE0,
                BCM43xx_MMIO_DMA64_BASE1,
                BCM43xx_MMIO_DMA64_BASE2,
                BCM43xx_MMIO_DMA64_BASE3,
                BCM43xx_MMIO_DMA64_BASE4,
                BCM43xx_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                BCM43xx_MMIO_DMA32_BASE0,
                BCM43xx_MMIO_DMA32_BASE1,
                BCM43xx_MMIO_DMA32_BASE2,
                BCM43xx_MMIO_DMA32_BASE3,
                BCM43xx_MMIO_DMA32_BASE4,
                BCM43xx_MMIO_DMA32_BASE5,
        };

        if (dma64bit) {
                assert(controller_idx >= 0 &&
                       controller_idx < ARRAY_SIZE(map64));
                return map64[controller_idx];
        }
        assert(controller_idx >= 0 &&
               controller_idx < ARRAY_SIZE(map32));
        return map32[controller_idx];
}

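/* Helpers for mapping, unmapping and syncing a single descriptor
 * buffer for streaming (PCI) DMA. */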
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
                          unsigned char *buf,
                          size_t len,
                          int tx)
{
        dma_addr_t dmaaddr;
        int direction = PCI_DMA_FROMDEVICE;

        if (tx)
                direction = PCI_DMA_TODEVICE;
        dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                 buf, len,
                                 direction);

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
                      dma_addr_t addr,
                      size_t len,
                      int tx)
{
        if (tx) {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_TODEVICE);
        } else {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_FROMDEVICE);
        }
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
                             dma_addr_t addr,
                             size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
                                    addr, len, PCI_DMA_FROMDEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
                                dma_addr_t addr,
                                size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_device(ring->bcm->pci_dev,
                                       addr, len, PCI_DMA_FROMDEVICE);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_meta *meta,
                            int irq_context)
{
        assert(meta->skb);
        if (irq_context)
                dev_kfree_skb_irq(meta->skb);
        else
                dev_kfree_skb(meta->skb);
        meta->skb = NULL;
}

static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
        ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
                                              &(ring->dmabase));
        if (!ring->descbase) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary... */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
                                             BCM43xx_DMA_RINGMEMSIZE,
                                             PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(rx_ring_dma) ||
                    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                        /* Sigh... */
                        if (!pci_dma_mapping_error(rx_ring_dma))
                                pci_unmap_single(ring->bcm->pci_dev,
                                                 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                 PCI_DMA_BIDIRECTIONAL);
                        rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
                                                     rx_ring, BCM43xx_DMA_RINGMEMSIZE,
                                                     PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(rx_ring_dma) ||
                            rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                                assert(0);
                                if (!pci_dma_mapping_error(rx_ring_dma))
                                        pci_unmap_single(ring->bcm->pci_dev,
                                                         rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                         PCI_DMA_BIDIRECTIONAL);
                                goto out_err;
                        }
                }

                ring->descbase = rx_ring;
                ring->dmabase = rx_ring_dma;
        }
        memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

        return 0;
out_err:
        printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
        return -ENOMEM;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_RXSTAT;
                        if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_RXSTATE;
                        if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA32_TXSTAT_STOPPED)
                                break;
                }
                udelay(10);
        }
        offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        udelay(300);

        return 0;
}

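/* Fill a hardware descriptor (32-bit or 64-bit layout) with the
 * DMA address and control flags for one buffer in the ring. */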
static void fill_descriptor(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_generic *desc,
                            dma_addr_t dmaaddr,
                            u16 bufsize,
                            int start, int end, int irq)
{
        int slot;

        slot = bcm43xx_dma_desc2idx(ring, desc);
        assert(slot >= 0 && slot < ring->nr_slots);

        if (ring->dma64) {
                u32 ctl0 = 0, ctl1 = 0;
                u32 addrlo, addrhi;
                u32 addrext;

                addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
                addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
                addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                addrhi |= ring->routing;
                if (slot == ring->nr_slots - 1)
                        ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
                if (start)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
                if (end)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
                if (irq)
                        ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
                ctl1 |= (bufsize - ring->frameoffset)
                        & BCM43xx_DMA64_DCTL1_BYTECNT;
                ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
                        & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

                desc->dma64.control0 = cpu_to_le32(ctl0);
                desc->dma64.control1 = cpu_to_le32(ctl1);
                desc->dma64.address_low = cpu_to_le32(addrlo);
                desc->dma64.address_high = cpu_to_le32(addrhi);
        } else {
                u32 ctl;
                u32 addr;
                u32 addrext;

                addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
                addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
                          >> BCM43xx_DMA32_ROUTING_SHIFT;
                addr |= ring->routing;
                ctl = (bufsize - ring->frameoffset)
                      & BCM43xx_DMA32_DCTL_BYTECNT;
                if (slot == ring->nr_slots - 1)
                        ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
                if (start)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
                if (end)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
                if (irq)
                        ctl |= BCM43xx_DMA32_DCTL_IRQ;
                ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
                       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

                desc->dma32.control = cpu_to_le32(ctl);
                desc->dma32.address = cpu_to_le32(addr);
        }
}

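/* Allocate and DMA-map an skb for an RX slot and write the
 * corresponding hardware descriptor. */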
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                               struct bcm43xx_dmadesc_generic *desc,
                               struct bcm43xx_dmadesc_meta *meta,
                               gfp_t gfp_flags)
{
        struct bcm43xx_rxhdr *rxhdr;
        struct bcm43xx_hwxmitstatus *xmitstat;
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        assert(!ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        /* This hardware bug work-around adapted from the b44 driver.
           The chip may be unable to do PCI DMA to/from anything above 1GB */
        if (pci_dma_mapping_error(dmaaddr) ||
            dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                /* This one has 30-bit addressing... */
                if (!pci_dma_mapping_error(dmaaddr))
                        pci_unmap_single(ring->bcm->pci_dev,
                                         dmaaddr, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(ring->rx_buffersize, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                         skb->data, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(dmaaddr) ||
                    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                        assert(0);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        skb->dev = ring->bcm->net_dev;
        fill_descriptor(ring, desc, dmaaddr,
                        ring->rx_buffersize, 0, 0, 0);

        rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
        rxhdr->frame_length = 0;
        rxhdr->flags1 = 0;
        xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
        xmitstat->cookie = 0;

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err)
                        goto err_unwind;
        }
        mb();
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;

        if (ring->tx) {
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = BCM43xx_DMA64_TXENABLE;
                        value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
                                 & BCM43xx_DMA64_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
                                          (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
                                          ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                          | ring->routing);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = BCM43xx_DMA32_TXENABLE;
                        value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
                                 & BCM43xx_DMA32_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
                                          (ringbase & ~BCM43xx_DMA32_ROUTING)
                                          | ring->routing);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA64_RXENABLE;
                        value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
                                 & BCM43xx_DMA64_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
                                          (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
                                          ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                          | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA32_RXENABLE;
                        value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
                                 & BCM43xx_DMA32_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
                                          (ringbase & ~BCM43xx_DMA32_ROUTING)
                                          | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
        if (ring->tx) {
                bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
        } else {
                bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
        }
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        assert(ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta, 0);
        }
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
                                               int controller_index,
                                               int for_tx,
                                               int dma64)
{
        struct bcm43xx_dmaring *ring;
        int err;
        int nr_slots;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        nr_slots = BCM43xx_RXRING_SLOTS;
        if (for_tx)
                nr_slots = BCM43xx_TXRING_SLOTS;

        ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;

        ring->routing = BCM43xx_DMA32_CLIENTTRANS;
        if (dma64)
                ring->routing = BCM43xx_DMA64_CLIENTTRANS;
#ifdef CONFIG_BCM947XX
        if (bcm->pci_dev->bus->number == 0)
                ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
#endif

        ring->bcm = bcm;
        ring->nr_slots = nr_slots;
        ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
        ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
        assert(ring->suspend_mark < ring->resume_mark);
        ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
        ring->index = controller_index;
        ring->dma64 = !!dma64;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
                } else if (ring->index == 3) {
                        ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
                } else
                        assert(0);
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_meta;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;

        return ring;

out:
        printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
        return ring;

err_free_ringmemory:
        free_ringmemory(ring);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
        if (!ring)
                return;

        dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
                (ring->dma64) ? "64" : "32",
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX",
                ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so no need to take care of concurrency with rx handler stuff.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->meta);
        kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma;

        if (bcm43xx_using_pio(bcm))
                return;
        dma = bcm43xx_current_dma(bcm);

        bcm43xx_destroy_dmaring(dma->rx_ring3);
        dma->rx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
}

int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring;
        int err = -ENOMEM;
        int dma64 = 0;

        bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
        if (bcm->dma_mask == DMA_64BIT_MASK)
                dma64 = 1;
        err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;
        err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;

        /* setup TX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
        if (!ring)
                goto err_destroy_tx3;
        dma->tx_ring4 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
        if (!ring)
                goto err_destroy_tx4;
        dma->tx_ring5 = ring;

        /* setup RX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
        if (!ring)
                goto err_destroy_tx5;
        dma->rx_ring0 = ring;

        if (bcm->current_core->rev < 5) {
                ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring3 = ring;
        }

        dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
                (bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
                (bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
        err = 0;
out:
        return err;

err_destroy_rx0:
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
err_destroy_tx5:
        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
err_destroy_tx4:
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
err_destroy_tx3:
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
err_destroy_tx2:
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
err_destroy_tx1:
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
err_destroy_tx0:
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
no_dma:
#ifdef CONFIG_BCM43XX_PIO
        printk(KERN_WARNING PFX "DMA not supported on this device."
                                " Falling back to PIO.\n");
        bcm->__using_pio = 1;
        return -ENOSYS;
#else
        printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
                            "Please recompile the driver with PIO support.\n");
        return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
                           int slot)
{
        u16 cookie = 0x1000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path.
         */
        switch (ring->index) {
        case 0:
                cookie = 0xA000;
                break;
        case 1:
                cookie = 0xB000;
                break;
        case 2:
                cookie = 0xC000;
                break;
        case 3:
                cookie = 0xD000;
                break;
        case 4:
                cookie = 0xE000;
                break;
        case 5:
                cookie = 0xF000;
                break;
        }
        assert(((u16)slot & 0xF000) == 0x0000);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                      u16 cookie, int *slot)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0xA000:
                ring = dma->tx_ring0;
                break;
        case 0xB000:
                ring = dma->tx_ring1;
                break;
        case 0xC000:
                ring = dma->tx_ring2;
                break;
        case 0xD000:
                ring = dma->tx_ring3;
                break;
        case 0xE000:
                ring = dma->tx_ring4;
                break;
        case 0xF000:
                ring = dma->tx_ring5;
                break;
        default:
                assert(0);
        }
        *slot = (cookie & 0x0FFF);
        assert(*slot >= 0 && *slot < ring->nr_slots);

        return ring;
}

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                  int slot)
{
        u16 offset;
        int descsize;

        /* Everything is ready to start. Buffers are DMA mapped and
         * associated with slots.
         * "slot" is the last slot of the new frame we want to transmit.
         * Close your seat belts now, please.
         */
        wmb();
        slot = next_slot(ring, slot);
        offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
        descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
                                 : sizeof(struct bcm43xx_dmadesc32);
        bcm43xx_dma_write(ring, offset,
                          (u32)(slot * descsize));
}

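/* DMA-map one 802.11 fragment, prepend the device TX header,
 * fill a hardware descriptor and poke the TX engine. */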
static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
                            struct sk_buff *skb,
                            u8 cur_frag)
{
        int slot;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        dma_addr_t dmaaddr;
        struct sk_buff *bounce_skb;

        assert(skb_shinfo(skb)->nr_frags == 0);

        slot = request_slot(ring);
        desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

        /* Add a device specific TX header. */
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
        /* Reserve enough headroom for the device tx header. */
        __skb_push(skb, sizeof(struct bcm43xx_txhdr));
        /* Now calculate and add the tx header.
         * The tx header includes the PLCP header.
         */
        bcm43xx_generate_txhdr(ring->bcm,
                               (struct bcm43xx_txhdr *)skb->data,
                               skb->data + sizeof(struct bcm43xx_txhdr),
                               skb->len - sizeof(struct bcm43xx_txhdr),
                               (cur_frag == 0),
                               generate_cookie(ring, slot));

        dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                /* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
                if (!dma_mapping_error(dmaaddr))
                        unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        return;
                dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
                if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                        if (!dma_mapping_error(dmaaddr))
                                unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                        dev_kfree_skb_any(bounce_skb);
                        assert(0);
                        return;
                }
                memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;

        fill_descriptor(ring, desc, dmaaddr,
                        skb->len, 1, 1, 1);

        /* Now transfer the whole frame. */
        dmacontroller_poke_tx(ring, slot);
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                   struct ieee80211_txb *txb)
{
        /* We just received a packet from the kernel network subsystem.
         * Add headers and DMA map the memory. Poke
         * the device to send the stuff.
         * Note that this is called from atomic context.
         */
        struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
        u8 i;
        struct sk_buff *skb;

        assert(ring->tx);
        if (unlikely(free_slots(ring) < txb->nr_frags)) {
                /* The queue should be stopped,
                 * if we are low on free slots.
                 * If this ever triggers, we have to lower the suspend_mark.
                 */
                dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
                return -ENOMEM;
        }

        for (i = 0; i < txb->nr_frags; i++) {
                skb = txb->fragments[i];
                /* Take skb from ieee80211_txb_free */
                txb->fragments[i] = NULL;
                dma_tx_fragment(ring, skb, i);
        }
        ieee80211_txb_free(txb);

        return 0;
}

void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                                   struct bcm43xx_xmitstatus *status)
{
        struct bcm43xx_dmaring *ring;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int is_last_fragment;
        int slot;
        u32 tmp;

        ring = parse_cookie(bcm, status->cookie, &slot);
        assert(ring);
        assert(ring->tx);
        while (1) {
                assert(slot >= 0 && slot < ring->nr_slots);
                desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

                if (ring->dma64) {
                        tmp = le32_to_cpu(desc->dma64.control0);
                        is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
                } else {
                        tmp = le32_to_cpu(desc->dma32.control);
                        is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
                }
                unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
                free_descriptor_buffer(ring, meta, 1);
                /* Everything belonging to the slot is unmapped
                 * and freed, so we can return it.
                 */
                return_slot(ring, slot);

                if (is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        bcm->stats.last_tx = jiffies;
}

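/* Handle one filled RX buffer. Ring 3 carries hardware xmit status
 * reports; the other RX ring carries received 802.11 frames. */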
static void dma_rx(struct bcm43xx_dmaring *ring,
                   int *slot)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_rxhdr *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->index == 3) {
                /* We received an xmit status. */
                struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
                struct bcm43xx_xmitstatus stat;
                int i = 0;

                stat.cookie = le16_to_cpu(hw->cookie);
                while (stat.cookie == 0) {
                        if (unlikely(++i >= 10000)) {
                                assert(0);
                                break;
                        }
                        udelay(2);
                        barrier();
                        stat.cookie = le16_to_cpu(hw->cookie);
                }
                stat.flags = hw->flags;
                stat.cnt1 = hw->cnt1;
                stat.cnt2 = hw->cnt2;
                stat.seq = le16_to_cpu(hw->seq);
                stat.unknown = le16_to_cpu(hw->unknown);

                bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
                bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
                /* recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

                return;
        }
        rxhdr = (struct bcm43xx_rxhdr *)skb->data;
        len = le16_to_cpu(rxhdr->frame_length);
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_length);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                printkl(KERN_ERR PFX "DMA RX buffer too small "
                                     "(len: %u, buffer: %u, nr-dropped: %d)\n",
                        len, ring->rx_buffersize, cnt);
                goto drop;
        }
        len -= IEEE80211_FCS_LEN;

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
                sync_descbuffer_for_device(ring, dmaaddr,
                                           ring->rx_buffersize);
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        err = bcm43xx_rx(ring->bcm, skb, rxhdr);
        if (err) {
                dev_kfree_skb_irq(skb);
                goto drop;
        }

drop:
        return;
}

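/* Service an RX ring: process every filled slot up to the hardware's
 * current descriptor pointer, then hand the slots back to the device. */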
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
        u32 status;
        u16 descptr;
        int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
        int used_slots = 0;
#endif

        assert(!ring->tx);
        if (ring->dma64) {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
                descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
        } else {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
                descptr = (status & BCM43xx_DMA32_RXDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
        }
        assert(current_slot >= 0 && current_slot < ring->nr_slots);

        slot = ring->current_slot;
        for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
                if (++used_slots > ring->max_used_slots)
                        ring->max_used_slots = used_slots;
#endif
        }
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
                                  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
                                  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
        }
        ring->current_slot = slot;
}

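/* Suspend/resume a TX DMA channel by toggling the SUSPEND bit
 * in its TX control register. */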
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                  | BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                  | BCM43xx_DMA32_TXSUSPEND);
        }
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                  & ~BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                  & ~BCM43xx_DMA32_TXSUSPEND);
        }
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}