/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>

/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2

/* 32bit DMA ops. */
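/* Resolve a ring slot index to its hardware descriptor and to the driver's
 * per-slot metadata. The descriptor table is one contiguous, coherently
 * mapped array, so this is plain pointer arithmetic. */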
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
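
/* Hand filled descriptors to the hardware. The TX index register takes a
 * byte offset into the descriptor table; callers pass the slot after the
 * last filled one (see dma_tx_fragment), i.e. the first slot not yet
 * handed to the hardware. */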
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ring->dev->dma.translation << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
		& B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
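
/* Ring bookkeeping helpers. The ring is a fixed-size circular buffer:
 * current_slot is the most recently used slot (-1 on a freshly set up
 * TX ring) and used_slots counts the slots currently in flight. */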
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}
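
/* Map a DMA controller index to the MMIO base address of its register
 * block. Up to six engines exist for each descriptor format; the 32-bit
 * and 64-bit engines live at different offsets. */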
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}
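
/* Streaming-DMA helpers for the packet buffers. TX buffers are mapped
 * to-device and RX buffers from-device; the sync helpers pass an RX
 * buffer back and forth between CPU and device ownership. */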
static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 *
	 * The flags here must match the flags in free_ringmemory below!
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
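
/* A freshly set up RX buffer carries an all-0xFF poison pattern in its
 * frame area (see b43_poison_rx_buffer below). If that pattern is still
 * intact when a receive is signalled, the device never wrote to the
 * buffer and the "frame" must be discarded. */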
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset +
		    sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}
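
/* Allocate and map one RX buffer and point the given descriptor at it.
 * If the first mapping lands outside the engine's addressable range,
 * retry once with GFP_DMA to get a buffer from low memory. */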
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
				 & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
				 & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
				 & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
				 & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}
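
/* Probe the widest DMA mask the core supports: a 64-bit capable core
 * advertises itself in SSB_TMSHIGH; otherwise, if the 32-bit address
 * extension bits stick when written, the core can do 32-bit DMA, and
 * 30-bit addressing is the fallback. */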
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {
				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
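
/* 64-bit division helpers built on do_div(), which divides the 64-bit
 * dividend in place and returns the remainder: divide() yields the
 * quotient, modulo() the remainder. */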
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to take care of concurrency with the
	 * RX handler here.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;
	dma->translation = ssb_dma_translation(dev->sdev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}
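
/* Queue one frame for transmission. Each frame occupies TX_SLOTS_PER_FRAME
 * (two) consecutive slots: the first carries the device TX header from
 * txhdr_cache, the second the actual 802.11 frame. On any header or
 * mapping failure the slot bookkeeping is rolled back. */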
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}

out:
	return err;
}
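
/* Process a TX status report from the firmware: walk the slots of the
 * reported frame in order, unmap the header and payload buffers, report
 * the skb to mac80211 and wake the queue again if we had stopped it. */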
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			    b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}
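
/* Receive one frame from the given RX slot. The firmware writes the frame
 * length into the buffer header, so poll it briefly in case that write is
 * still in flight; then detach the filled buffer, attach a fresh one and
 * pass the frame up the stack. On any problem the old buffer is
 * re-poisoned and recycled instead. */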
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
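
/* Drain the RX ring: process every slot between our last position and the
 * hardware's current write position, then tell the hardware how far we
 * got so the slots can be reused. */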
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}
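
/* Set or clear the direct-FIFO-receive bit in one RX engine's control
 * register; in this mode received data is fetched by PIO instead of
 * through the descriptor ring (see b43_dma_direct_fifo_rx below). */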
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));
	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}