bcm43xx_dma.c

/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING. If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>

static inline int free_slots(struct bcm43xx_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= -1 && slot <= ring->nr_slots - 1);
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(slot >= 0 && slot <= ring->nr_slots - 1);
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
	int slot;

	assert(ring->tx);
	assert(!ring->suspended);
	assert(free_slots(ring) != 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	/* Check the number of available slots and suspend TX,
	 * if we are running low on free slots.
	 */
	if (unlikely(free_slots(ring) < ring->suspend_mark)) {
		netif_stop_queue(ring->bcm->net_dev);
		ring->suspended = 1;
	}
#ifdef CONFIG_BCM43XX_DEBUG
	if (ring->used_slots > ring->max_used_slots)
		ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

	return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
	assert(ring->tx);

	ring->used_slots--;

	/* Check if TX is suspended and check if we have
	 * enough free slots to resume it again.
	 */
	if (unlikely(ring->suspended)) {
		if (free_slots(ring) >= ring->resume_mark) {
			ring->suspended = 0;
			netif_wake_queue(ring->bcm->net_dev);
		}
	}
}
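
/* Map a DMA controller index (0-5) to the MMIO register base of that
 * engine, for either the 64-bit or the 32-bit DMA register layout. */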
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
	static const u16 map64[] = {
		BCM43xx_MMIO_DMA64_BASE0,
		BCM43xx_MMIO_DMA64_BASE1,
		BCM43xx_MMIO_DMA64_BASE2,
		BCM43xx_MMIO_DMA64_BASE3,
		BCM43xx_MMIO_DMA64_BASE4,
		BCM43xx_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		BCM43xx_MMIO_DMA32_BASE0,
		BCM43xx_MMIO_DMA32_BASE1,
		BCM43xx_MMIO_DMA32_BASE2,
		BCM43xx_MMIO_DMA32_BASE3,
		BCM43xx_MMIO_DMA32_BASE4,
		BCM43xx_MMIO_DMA32_BASE5,
	};

	if (dma64bit) {
		assert(controller_idx >= 0 &&
		       controller_idx < ARRAY_SIZE(map64));
		return map64[controller_idx];
	}
	assert(controller_idx >= 0 &&
	       controller_idx < ARRAY_SIZE(map32));
	return map32[controller_idx];
}
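
/* Helpers wrapping the PCI DMA API: map/unmap a descriptor buffer for
 * streaming DMA and sync it between CPU and device ownership. */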
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;
	int direction = PCI_DMA_FROMDEVICE;

	if (tx)
		direction = PCI_DMA_TODEVICE;

	dmaaddr = pci_map_single(ring->bcm->pci_dev,
				 buf, len,
				 direction);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx) {
		pci_unmap_single(ring->bcm->pci_dev,
				 addr, len,
				 PCI_DMA_TODEVICE);
	} else {
		pci_unmap_single(ring->bcm->pci_dev,
				 addr, len,
				 PCI_DMA_FROMDEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	assert(!ring->tx);

	pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
				    addr, len, PCI_DMA_FROMDEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	assert(!ring->tx);

	pci_dma_sync_single_for_device(ring->bcm->pci_dev,
				       addr, len, PCI_DMA_FROMDEVICE);
}

/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_meta *meta,
			    int irq_context)
{
	assert(meta->skb);
	if (irq_context)
		dev_kfree_skb_irq(meta->skb);
	else
		dev_kfree_skb(meta->skb);
	meta->skb = NULL;
}
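
/* Allocate the DMA-coherent memory that holds the descriptor ring itself.
 * If the coherent allocation fails, fall back to a streaming mapping of a
 * kzalloc'd buffer, retrying the mapping once if it lands above dma_mask. */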
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
	ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
					      &(ring->dmabase));
	if (!ring->descbase) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary... */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
					     BCM43xx_DMA_RINGMEMSIZE,
					     PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(rx_ring_dma) ||
		    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
			/* Sigh... */
			if (!pci_dma_mapping_error(rx_ring_dma))
				pci_unmap_single(ring->bcm->pci_dev,
						 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
						 PCI_DMA_BIDIRECTIONAL);
			rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
						     rx_ring, BCM43xx_DMA_RINGMEMSIZE,
						     PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(rx_ring_dma) ||
			    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
				assert(0);
				if (!pci_dma_mapping_error(rx_ring_dma))
					pci_unmap_single(ring->bcm->pci_dev,
							 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
							 PCI_DMA_BIDIRECTIONAL);
				goto out_err;
			}
		}

		ring->descbase = rx_ring;
		ring->dmabase = rx_ring_dma;
	}
	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

	return 0;
out_err:
	printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
	return -ENOMEM;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
	struct device *dev = &(ring->bcm->pci_dev->dev);

	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_RXSTAT;
			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_RXSTATE;
			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
				   u16 mmio_base, int dma64)
{
	int i;
	u32 value;
	u16 offset;

	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
				break;
		}
		udelay(10);
	}
	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
	bcm43xx_write32(bcm, mmio_base + offset, 0);
	for (i = 0; i < 1000; i++) {
		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
		value = bcm43xx_read32(bcm, mmio_base + offset);
		if (dma64) {
			value &= BCM43xx_DMA64_TXSTAT;
			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= BCM43xx_DMA32_TXSTATE;
			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		udelay(10);
	}
	if (i != -1) {
		printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	udelay(300);

	return 0;
}
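
/* Write one hardware descriptor for the given buffer. The DMA address is
 * split into address/addrext parts with the ring's routing value merged in,
 * and the control word(s) carry the byte count plus frame-start, frame-end,
 * IRQ and table-end flags, in the 64-bit or 32-bit descriptor layout. */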
static void fill_descriptor(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_generic *desc,
			    dma_addr_t dmaaddr,
			    u16 bufsize,
			    int start, int end, int irq)
{
	int slot;

	slot = bcm43xx_dma_desc2idx(ring, desc);
	assert(slot >= 0 && slot < ring->nr_slots);

	if (ring->dma64) {
		u32 ctl0 = 0, ctl1 = 0;
		u32 addrlo, addrhi;
		u32 addrext;

		addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
		addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
		addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
		addrhi |= ring->routing;
		if (slot == ring->nr_slots - 1)
			ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
		if (start)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
		if (end)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
		if (irq)
			ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
		ctl1 |= (bufsize - ring->frameoffset)
			& BCM43xx_DMA64_DCTL1_BYTECNT;
		ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
			& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

		desc->dma64.control0 = cpu_to_le32(ctl0);
		desc->dma64.control1 = cpu_to_le32(ctl1);
		desc->dma64.address_low = cpu_to_le32(addrlo);
		desc->dma64.address_high = cpu_to_le32(addrhi);
	} else {
		u32 ctl;
		u32 addr;
		u32 addrext;

		addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
		addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
			  >> BCM43xx_DMA32_ROUTING_SHIFT;
		addr |= ring->routing;
		ctl = (bufsize - ring->frameoffset)
		      & BCM43xx_DMA32_DCTL_BYTECNT;
		if (slot == ring->nr_slots - 1)
			ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
		if (start)
			ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
		if (end)
			ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
		if (irq)
			ctl |= BCM43xx_DMA32_DCTL_IRQ;
		ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
		       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

		desc->dma32.control = cpu_to_le32(ctl);
		desc->dma32.address = cpu_to_le32(addr);
	}
}
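
/* Allocate and DMA-map a fresh receive skb for one RX slot and write the
 * matching descriptor. A GFP_DMA bounce allocation is used when the first
 * mapping fails or lies above the device's DMA mask (the >1GB hardware
 * limitation mentioned below). */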
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
			       struct bcm43xx_dmadesc_generic *desc,
			       struct bcm43xx_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct bcm43xx_rxhdr *rxhdr;
	struct bcm43xx_hwxmitstatus *xmitstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	assert(!ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	/* This hardware bug work-around adapted from the b44 driver.
	   The chip may be unable to do PCI DMA to/from anything above 1GB */
	if (pci_dma_mapping_error(dmaaddr) ||
	    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
		/* This one has 30-bit addressing... */
		if (!pci_dma_mapping_error(dmaaddr))
			pci_unmap_single(ring->bcm->pci_dev,
					 dmaaddr, ring->rx_buffersize,
					 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		skb = __dev_alloc_skb(ring->rx_buffersize, GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		dmaaddr = pci_map_single(ring->bcm->pci_dev,
					 skb->data, ring->rx_buffersize,
					 PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(dmaaddr) ||
		    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
			assert(0);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
	}
	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	skb->dev = ring->bcm->net_dev;
	fill_descriptor(ring, desc, dmaaddr,
			ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
	rxhdr->frame_length = 0;
	rxhdr->flags1 = 0;
	xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
	xmitstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err)
			goto err_unwind;
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;

	if (ring->tx) {
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
			value = BCM43xx_DMA64_TXENABLE;
			value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
				 & BCM43xx_DMA64_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
					  | ring->routing);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
			value = BCM43xx_DMA32_TXENABLE;
			value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
				 & BCM43xx_DMA32_TXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
					  (ringbase & ~BCM43xx_DMA32_ROUTING)
					  | ring->routing);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->dma64) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
			value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
			value |= BCM43xx_DMA64_RXENABLE;
			value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
				 & BCM43xx_DMA64_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
					  (ringbase & 0xFFFFFFFF));
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
					  | ring->routing);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
			value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
			value |= BCM43xx_DMA32_RXENABLE;
			value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
				 & BCM43xx_DMA32_RXADDREXT_MASK;
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
					  (ringbase & ~BCM43xx_DMA32_ROUTING)
					  | ring->routing);
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
	if (ring->tx) {
		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
	} else {
		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
		if (ring->dma64) {
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
		} else
			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = bcm43xx_dma_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			assert(ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta, 0);
	}
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
					       int controller_index,
					       int for_tx,
					       int dma64)
{
	struct bcm43xx_dmaring *ring;
	int err;
	int nr_slots;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	nr_slots = BCM43xx_RXRING_SLOTS;
	if (for_tx)
		nr_slots = BCM43xx_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->routing = BCM43xx_DMA32_CLIENTTRANS;
	if (dma64)
		ring->routing = BCM43xx_DMA64_CLIENTTRANS;

	ring->bcm = bcm;
	ring->nr_slots = nr_slots;
	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
	ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
	assert(ring->suspend_mark < ring->resume_mark);
	ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
	ring->index = controller_index;
	ring->dma64 = !!dma64;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
		} else
			assert(0);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_meta;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

	return ring;

out:
	printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
	if (!ring)
		return;

	dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
		(ring->dma64) ? "64" : "32",
		ring->mmio_base,
		(ring->tx) ? "TX" : "RX",
		ring->max_used_slots, ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->meta);
	kfree(ring);
}

void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma;

	if (bcm43xx_using_pio(bcm))
		return;
	dma = bcm43xx_current_dma(bcm);

	bcm43xx_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}
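
/* Set up DMA for the device: pick the widest supported DMA mask, then
 * allocate the six TX rings and the RX ring(s). On failure everything is
 * torn down again and, if built in, the driver falls back to PIO. */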
int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring;
	int err = -ENOMEM;
	int dma64 = 0;

	bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
	if (bcm->dma_mask == DMA_64BIT_MASK)
		dma64 = 1;
	err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
	if (err)
		goto no_dma;
	err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
	if (err)
		goto no_dma;

	/* setup TX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (bcm->current_core->rev < 5) {
		ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
		(bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
		(bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
	err = 0;
out:
	return err;

err_destroy_rx0:
	bcm43xx_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	bcm43xx_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	bcm43xx_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	bcm43xx_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	bcm43xx_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	bcm43xx_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	bcm43xx_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
no_dma:
#ifdef CONFIG_BCM43XX_PIO
	printk(KERN_WARNING PFX "DMA not supported on this device."
			" Falling back to PIO.\n");
	bcm->__using_pio = 1;
	return -ENOSYS;
#else
	printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
			"Please recompile the driver with PIO support.\n");
	return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	assert(((u16)slot & 0xF000) == 0x0000);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
				      u16 cookie, int *slot)
{
	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
	struct bcm43xx_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		assert(0);
	}
	*slot = (cookie & 0x0FFF);
	assert(*slot >= 0 && *slot < ring->nr_slots);

	return ring;
}

static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
				  int slot)
{
	u16 offset;
	int descsize;

	/* Everything is ready to start. Buffers are DMA mapped and
	 * associated with slots.
	 * "slot" is the last slot of the new frame we want to transmit.
	 * Close your seat belts now, please.
	 */
	wmb();
	slot = next_slot(ring, slot);
	offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
	descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
				 : sizeof(struct bcm43xx_dmadesc32);
	bcm43xx_dma_write(ring, offset,
			  (u32)(slot * descsize));
}
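
/* Map one 802.11 fragment for TX: grab a slot, prepend the device TX
 * header (including the PLCP header), DMA-map the frame (bouncing it
 * through a GFP_DMA copy if the mapping lies above the DMA mask), fill
 * the descriptor and finally poke the hardware TX index. */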
static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
			    struct sk_buff *skb,
			    u8 cur_frag)
{
	int slot;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	dma_addr_t dmaaddr;
	struct sk_buff *bounce_skb;

	assert(skb_shinfo(skb)->nr_frags == 0);

	slot = request_slot(ring);
	desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

	/* Add a device specific TX header. */
	assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
	/* Reserve enough headroom for the device tx header. */
	__skb_push(skb, sizeof(struct bcm43xx_txhdr));
	/* Now calculate and add the tx header.
	 * The tx header includes the PLCP header.
	 */
	bcm43xx_generate_txhdr(ring->bcm,
			       (struct bcm43xx_txhdr *)skb->data,
			       skb->data + sizeof(struct bcm43xx_txhdr),
			       skb->len - sizeof(struct bcm43xx_txhdr),
			       (cur_frag == 0),
			       generate_cookie(ring, slot));

	dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
		/* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
		if (!dma_mapping_error(dmaaddr))
			unmap_descbuffer(ring, dmaaddr, skb->len, 1);
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
		if (!bounce_skb)
			return;
		dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
		if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
			if (!dma_mapping_error(dmaaddr))
				unmap_descbuffer(ring, dmaaddr, skb->len, 1);
			dev_kfree_skb_any(bounce_skb);
			assert(0);
			return;
		}
		skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;

	fill_descriptor(ring, desc, dmaaddr,
			skb->len, 1, 1, 1);

	/* Now transfer the whole frame. */
	dmacontroller_poke_tx(ring, slot);
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
		   struct ieee80211_txb *txb)
{
	/* We just received a packet from the kernel network subsystem.
	 * Add headers and DMA map the memory. Poke
	 * the device to send the stuff.
	 * Note that this is called from atomic context.
	 */
	struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
	u8 i;
	struct sk_buff *skb;

	assert(ring->tx);
	if (unlikely(free_slots(ring) < txb->nr_frags)) {
		/* The queue should be stopped,
		 * if we are low on free slots.
		 * If this ever triggers, we have to lower the suspend_mark.
		 */
		dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
		return -ENOMEM;
	}

	for (i = 0; i < txb->nr_frags; i++) {
		skb = txb->fragments[i];
		/* Take skb from ieee80211_txb_free */
		txb->fragments[i] = NULL;
		dma_tx_fragment(ring, skb, i);
	}
	ieee80211_txb_free(txb);

	return 0;
}
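
/* A TX status arrived for a previously queued frame. Resolve the cookie
 * to its ring and first slot, then walk the frame's descriptors, unmapping
 * and freeing each buffer and returning the slots until the frame-end
 * descriptor is reached. */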
void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
				   struct bcm43xx_xmitstatus *status)
{
	struct bcm43xx_dmaring *ring;
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int is_last_fragment;
	int slot;
	u32 tmp;

	ring = parse_cookie(bcm, status->cookie, &slot);
	assert(ring);
	assert(ring->tx);
	while (1) {
		assert(slot >= 0 && slot < ring->nr_slots);
		desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

		if (ring->dma64) {
			tmp = le32_to_cpu(desc->dma64.control0);
			is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
		} else {
			tmp = le32_to_cpu(desc->dma32.control);
			is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
		}
		unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		free_descriptor_buffer(ring, meta, 1);
		/* Everything belonging to the slot is unmapped
		 * and freed, so we can return it.
		 */
		return_slot(ring, slot);

		if (is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	bcm->stats.last_tx = jiffies;
}
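
/* Handle a single received descriptor. On RX ring 3 the buffer carries a
 * hardware TX status report, which is decoded and fed back into the TX
 * path; on the other rings it carries an actual frame, which is unmapped,
 * replaced by a fresh buffer and passed up to bcm43xx_rx(). */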
static void dma_rx(struct bcm43xx_dmaring *ring,
		   int *slot)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	struct bcm43xx_rxhdr *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
		struct bcm43xx_xmitstatus stat;
		int i = 0;

		stat.cookie = le16_to_cpu(hw->cookie);
		while (stat.cookie == 0) {
			if (unlikely(++i >= 10000)) {
				assert(0);
				break;
			}
			udelay(2);
			barrier();
			stat.cookie = le16_to_cpu(hw->cookie);
		}
		stat.flags = hw->flags;
		stat.cnt1 = hw->cnt1;
		stat.cnt2 = hw->cnt2;
		stat.seq = le16_to_cpu(hw->seq);
		stat.unknown = le16_to_cpu(hw->unknown);

		bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
		bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

		return;
	}
	rxhdr = (struct bcm43xx_rxhdr *)skb->data;
	len = le16_to_cpu(rxhdr->frame_length);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_length);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		printkl(KERN_ERR PFX "DMA RX buffer too small "
				     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			len, ring->rx_buffersize, cnt);
		goto drop;
	}
	len -= IEEE80211_FCS_LEN;

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	err = bcm43xx_rx(ring->bcm, skb, rxhdr);
	if (err) {
		dev_kfree_skb_irq(skb);
		goto drop;
	}

drop:
	return;
}
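
/* RX interrupt handler for one ring: read the hardware's current
 * descriptor pointer, process every filled slot up to it via dma_rx(),
 * then write the new RX index back to the device. */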
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
	u32 status;
	u16 descptr;
	int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
	int used_slots = 0;
#endif

	assert(!ring->tx);
	if (ring->dma64) {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
		descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
	} else {
		status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
		descptr = (status & BCM43xx_DMA32_RXDPTR);
		current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
	}
	assert(current_slot >= 0 && current_slot < ring->nr_slots);

	slot = ring->current_slot;
	for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
		if (++used_slots > ring->max_used_slots)
			ring->max_used_slots = used_slots;
#endif
	}
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
				  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
				  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
	}
	ring->current_slot = slot;
}
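
/* Suspend or resume a TX ring by toggling the SUSPEND bit in the engine's
 * TX control register. The power-saving control bits are adjusted around
 * the suspend and restored again when the ring is resumed. */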
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				  | BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				  | BCM43xx_DMA32_TXSUSPEND);
	}
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
	assert(ring->tx);
	if (ring->dma64) {
		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
				  & ~BCM43xx_DMA64_TXSUSPEND);
	} else {
		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
				  & ~BCM43xx_DMA32_TXSUSPEND);
	}
	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}