dma.c

/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

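/* Map a (DMA engine type, controller index) pair to the MMIO base offset
 * of that engine's register block. */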
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	struct device *dma_dev = ring->dev->dev->dma_dev;
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	struct device *dma_dev = ring->dev->dev->dma_dev;

	dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

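/* Probe the widest DMA mask the core supports: 64-bit if the core flags
 * advertise DMA64, otherwise 32-bit if the address-extension bits in the
 * TX control register are writable, otherwise the legacy 30-bit engine. */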
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_30BIT_MASK)
		return B43_DMA_30BIT;
	if (dmamask == DMA_32BIT_MASK)
		return B43_DMA_32BIT;
	if (dmamask == DMA_64BIT_MASK)
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;

	nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

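/* do_div() divides a 64-bit dividend in place and returns the remainder,
 * so these wrappers yield the quotient and the remainder, respectively,
 * without needing native 64-bit division on 32-bit hosts. */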
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = ssb_dma_set_mask(dev->dev, mask);
		if (!err)
			break;
		if (mask == DMA_64BIT_MASK) {
			mask = DMA_32BIT_MASK;
			fallback = 1;
			continue;
		}
		if (mask == DMA_32BIT_MASK) {
			mask = DMA_30BIT_MASK;
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

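/* Queue one frame for transmission. Each packet occupies two descriptor
 * slots: the first carries the device TX header generated into the
 * txhdr_cache, the second the frame payload. If the payload maps outside
 * the engine's addressable range, it is copied into a GFP_DMA bounce
 * buffer and remapped. */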
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43_dma_ops *ops = ring->ops;
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET  2

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, ctl, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (b43_modparam_qos) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(dev, ctl->queue);
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = ctl->queue;

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, ctl->queue);
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	spin_lock(&ring->lock); /* IRQs are already disabled. */

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			B43_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			frame_succeed = b43_fill_txstatus_report(
					&(meta->txstat), status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = select_ring_by_priority(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

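/* Process one received frame at *slot: read the length from the hardware
 * RX header, retry briefly while the firmware is still writing it, drop
 * frames that do not fit the buffer, then hand the skb to b43_rx() and
 * put a fresh buffer into the descriptor. */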
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

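/* Walk the RX ring from the driver's current slot up to the slot the
 * hardware reports as current, handling each completed frame, then tell
 * the hardware how far we got. */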
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));
	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}
#endif /* CONFIG_B43_PIO */