dma.c

  1. /*
  2. Broadcom B43 wireless driver
  3. DMA ringbuffer and descriptor allocation/management
  4. Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
  5. Some code in this file is derived from the b44.c driver
  6. Copyright (C) 2002 David S. Miller
  7. Copyright (C) Pekka Pietikainen
  8. This program is free software; you can redistribute it and/or modify
  9. it under the terms of the GNU General Public License as published by
  10. the Free Software Foundation; either version 2 of the License, or
  11. (at your option) any later version.
  12. This program is distributed in the hope that it will be useful,
  13. but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. GNU General Public License for more details.
  16. You should have received a copy of the GNU General Public License
  17. along with this program; see the file COPYING. If not, write to
  18. the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  19. Boston, MA 02110-1301, USA.
  20. */
  21. #include "b43.h"
  22. #include "dma.h"
  23. #include "main.h"
  24. #include "debugfs.h"
  25. #include "xmit.h"
  26. #include <linux/dma-mapping.h>
  27. #include <linux/pci.h>
  28. #include <linux/delay.h>
  29. #include <linux/skbuff.h>
  30. #include <linux/etherdevice.h>
  31. #include <linux/slab.h>
  32. #include <asm/div64.h>
  33. /* Required number of TX DMA slots per TX frame.
  34. * This currently is 2, because we put the header and the ieee80211 frame
  35. * into separate slots. */
  36. #define TX_SLOTS_PER_FRAME 2
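/* Illustration (not part of the driver): a single TX frame therefore consumes
 * two consecutive ring slots, one for the b43-specific TX header and one for
 * the ieee80211 payload skb. A rough sketch of the slot usage:
 *
 *   slot N   : fill_descriptor(..., txhdr_dma, hdrsize,  start=1, end=0, irq=0)
 *   slot N+1 : fill_descriptor(..., skb_dma,   skb->len, start=0, end=1, irq=1)
 *
 * dma_tx_fragment() below follows exactly this pattern, which is why
 * free_slots() is always compared against TX_SLOTS_PER_FRAME. */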
  37. static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
  38. enum b43_addrtype addrtype)
  39. {
  40. u32 uninitialized_var(addr);
  41. switch (addrtype) {
  42. case B43_DMA_ADDR_LOW:
  43. addr = lower_32_bits(dmaaddr);
  44. if (dma->translation_in_low) {
  45. addr &= ~SSB_DMA_TRANSLATION_MASK;
  46. addr |= dma->translation;
  47. }
  48. break;
  49. case B43_DMA_ADDR_HIGH:
  50. addr = upper_32_bits(dmaaddr);
  51. if (!dma->translation_in_low) {
  52. addr &= ~SSB_DMA_TRANSLATION_MASK;
  53. addr |= dma->translation;
  54. }
  55. break;
  56. case B43_DMA_ADDR_EXT:
  57. if (dma->translation_in_low)
  58. addr = lower_32_bits(dmaaddr);
  59. else
  60. addr = upper_32_bits(dmaaddr);
  61. addr &= SSB_DMA_TRANSLATION_MASK;
  62. addr >>= SSB_DMA_TRANSLATION_SHIFT;
  63. break;
  64. }
  65. return addr;
  66. }
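/* Worked example (illustrative values, not taken from real hardware): assume
 * SSB_DMA_TRANSLATION_MASK covers the top two address bits (0xC0000000,
 * shift 30) and the bus translation value returned by ssb/bcma is 0x40000000.
 * For a 32-bit engine with dma->translation_in_low set and
 * dmaaddr == 0x1A2B3C40:
 *
 *   B43_DMA_ADDR_LOW : (0x1A2B3C40 & ~0xC0000000) | 0x40000000 = 0x5A2B3C40
 *   B43_DMA_ADDR_EXT : (0x1A2B3C40 &  0xC0000000) >> 30        = 0x0
 *
 * i.e. the bits masked out of the address word are handed to the controller
 * separately via the ADDREXT field of the descriptor control word, while the
 * translation bits select the host bus address window. */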
  67. /* 32bit DMA ops. */
  68. static
  69. struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
  70. int slot,
  71. struct b43_dmadesc_meta **meta)
  72. {
  73. struct b43_dmadesc32 *desc;
  74. *meta = &(ring->meta[slot]);
  75. desc = ring->descbase;
  76. desc = &(desc[slot]);
  77. return (struct b43_dmadesc_generic *)desc;
  78. }
  79. static void op32_fill_descriptor(struct b43_dmaring *ring,
  80. struct b43_dmadesc_generic *desc,
  81. dma_addr_t dmaaddr, u16 bufsize,
  82. int start, int end, int irq)
  83. {
  84. struct b43_dmadesc32 *descbase = ring->descbase;
  85. int slot;
  86. u32 ctl;
  87. u32 addr;
  88. u32 addrext;
  89. slot = (int)(&(desc->dma32) - descbase);
  90. B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
  91. addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
  92. addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
  93. ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
  94. if (slot == ring->nr_slots - 1)
  95. ctl |= B43_DMA32_DCTL_DTABLEEND;
  96. if (start)
  97. ctl |= B43_DMA32_DCTL_FRAMESTART;
  98. if (end)
  99. ctl |= B43_DMA32_DCTL_FRAMEEND;
  100. if (irq)
  101. ctl |= B43_DMA32_DCTL_IRQ;
  102. ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
  103. & B43_DMA32_DCTL_ADDREXT_MASK;
  104. desc->dma32.control = cpu_to_le32(ctl);
  105. desc->dma32.address = cpu_to_le32(addr);
  106. }
  107. static void op32_poke_tx(struct b43_dmaring *ring, int slot)
  108. {
  109. b43_dma_write(ring, B43_DMA32_TXINDEX,
  110. (u32) (slot * sizeof(struct b43_dmadesc32)));
  111. }
  112. static void op32_tx_suspend(struct b43_dmaring *ring)
  113. {
  114. b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
  115. | B43_DMA32_TXSUSPEND);
  116. }
  117. static void op32_tx_resume(struct b43_dmaring *ring)
  118. {
  119. b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
  120. & ~B43_DMA32_TXSUSPEND);
  121. }
  122. static int op32_get_current_rxslot(struct b43_dmaring *ring)
  123. {
  124. u32 val;
  125. val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
  126. val &= B43_DMA32_RXDPTR;
  127. return (val / sizeof(struct b43_dmadesc32));
  128. }
  129. static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
  130. {
  131. b43_dma_write(ring, B43_DMA32_RXINDEX,
  132. (u32) (slot * sizeof(struct b43_dmadesc32)));
  133. }
  134. static const struct b43_dma_ops dma32_ops = {
  135. .idx2desc = op32_idx2desc,
  136. .fill_descriptor = op32_fill_descriptor,
  137. .poke_tx = op32_poke_tx,
  138. .tx_suspend = op32_tx_suspend,
  139. .tx_resume = op32_tx_resume,
  140. .get_current_rxslot = op32_get_current_rxslot,
  141. .set_current_rxslot = op32_set_current_rxslot,
  142. };
  143. /* 64bit DMA ops. */
  144. static
  145. struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
  146. int slot,
  147. struct b43_dmadesc_meta **meta)
  148. {
  149. struct b43_dmadesc64 *desc;
  150. *meta = &(ring->meta[slot]);
  151. desc = ring->descbase;
  152. desc = &(desc[slot]);
  153. return (struct b43_dmadesc_generic *)desc;
  154. }
  155. static void op64_fill_descriptor(struct b43_dmaring *ring,
  156. struct b43_dmadesc_generic *desc,
  157. dma_addr_t dmaaddr, u16 bufsize,
  158. int start, int end, int irq)
  159. {
  160. struct b43_dmadesc64 *descbase = ring->descbase;
  161. int slot;
  162. u32 ctl0 = 0, ctl1 = 0;
  163. u32 addrlo, addrhi;
  164. u32 addrext;
  165. slot = (int)(&(desc->dma64) - descbase);
  166. B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
  167. addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
  168. addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
  169. addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
  170. if (slot == ring->nr_slots - 1)
  171. ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
  172. if (start)
  173. ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
  174. if (end)
  175. ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
  176. if (irq)
  177. ctl0 |= B43_DMA64_DCTL0_IRQ;
  178. ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
  179. ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
  180. & B43_DMA64_DCTL1_ADDREXT_MASK;
  181. desc->dma64.control0 = cpu_to_le32(ctl0);
  182. desc->dma64.control1 = cpu_to_le32(ctl1);
  183. desc->dma64.address_low = cpu_to_le32(addrlo);
  184. desc->dma64.address_high = cpu_to_le32(addrhi);
  185. }
  186. static void op64_poke_tx(struct b43_dmaring *ring, int slot)
  187. {
  188. b43_dma_write(ring, B43_DMA64_TXINDEX,
  189. (u32) (slot * sizeof(struct b43_dmadesc64)));
  190. }
  191. static void op64_tx_suspend(struct b43_dmaring *ring)
  192. {
  193. b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
  194. | B43_DMA64_TXSUSPEND);
  195. }
  196. static void op64_tx_resume(struct b43_dmaring *ring)
  197. {
  198. b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
  199. & ~B43_DMA64_TXSUSPEND);
  200. }
  201. static int op64_get_current_rxslot(struct b43_dmaring *ring)
  202. {
  203. u32 val;
  204. val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
  205. val &= B43_DMA64_RXSTATDPTR;
  206. return (val / sizeof(struct b43_dmadesc64));
  207. }
  208. static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
  209. {
  210. b43_dma_write(ring, B43_DMA64_RXINDEX,
  211. (u32) (slot * sizeof(struct b43_dmadesc64)));
  212. }
  213. static const struct b43_dma_ops dma64_ops = {
  214. .idx2desc = op64_idx2desc,
  215. .fill_descriptor = op64_fill_descriptor,
  216. .poke_tx = op64_poke_tx,
  217. .tx_suspend = op64_tx_suspend,
  218. .tx_resume = op64_tx_resume,
  219. .get_current_rxslot = op64_get_current_rxslot,
  220. .set_current_rxslot = op64_set_current_rxslot,
  221. };
  222. static inline int free_slots(struct b43_dmaring *ring)
  223. {
  224. return (ring->nr_slots - ring->used_slots);
  225. }
  226. static inline int next_slot(struct b43_dmaring *ring, int slot)
  227. {
  228. B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
  229. if (slot == ring->nr_slots - 1)
  230. return 0;
  231. return slot + 1;
  232. }
  233. static inline int prev_slot(struct b43_dmaring *ring, int slot)
  234. {
  235. B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
  236. if (slot == 0)
  237. return ring->nr_slots - 1;
  238. return slot - 1;
  239. }
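/* Example of the wrap-around behaviour (illustrative slot count): on a ring
 * with nr_slots == 256,
 *
 *   next_slot(ring, 255) == 0      (wraps to the start)
 *   next_slot(ring, -1)  == 0      (-1 is the initial current_slot of an
 *                                   empty TX ring, see b43_setup_dmaring())
 *   prev_slot(ring, 0)   == 255    (wraps to the end)
 *
 * so slot indices can simply be advanced in a loop without modulo
 * arithmetic. */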
  240. #ifdef CONFIG_B43_DEBUG
  241. static void update_max_used_slots(struct b43_dmaring *ring,
  242. int current_used_slots)
  243. {
  244. if (current_used_slots <= ring->max_used_slots)
  245. return;
  246. ring->max_used_slots = current_used_slots;
  247. if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
  248. b43dbg(ring->dev->wl,
  249. "max_used_slots increased to %d on %s ring %d\n",
  250. ring->max_used_slots,
  251. ring->tx ? "TX" : "RX", ring->index);
  252. }
  253. }
  254. #else
  255. static inline
  256. void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
  257. {
  258. }
  259. #endif /* DEBUG */
  260. /* Request a slot for usage. */
  261. static inline int request_slot(struct b43_dmaring *ring)
  262. {
  263. int slot;
  264. B43_WARN_ON(!ring->tx);
  265. B43_WARN_ON(ring->stopped);
  266. B43_WARN_ON(free_slots(ring) == 0);
  267. slot = next_slot(ring, ring->current_slot);
  268. ring->current_slot = slot;
  269. ring->used_slots++;
  270. update_max_used_slots(ring, ring->used_slots);
  271. return slot;
  272. }
  273. static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
  274. {
  275. static const u16 map64[] = {
  276. B43_MMIO_DMA64_BASE0,
  277. B43_MMIO_DMA64_BASE1,
  278. B43_MMIO_DMA64_BASE2,
  279. B43_MMIO_DMA64_BASE3,
  280. B43_MMIO_DMA64_BASE4,
  281. B43_MMIO_DMA64_BASE5,
  282. };
  283. static const u16 map32[] = {
  284. B43_MMIO_DMA32_BASE0,
  285. B43_MMIO_DMA32_BASE1,
  286. B43_MMIO_DMA32_BASE2,
  287. B43_MMIO_DMA32_BASE3,
  288. B43_MMIO_DMA32_BASE4,
  289. B43_MMIO_DMA32_BASE5,
  290. };
  291. if (type == B43_DMA_64BIT) {
  292. B43_WARN_ON(!(controller_idx >= 0 &&
  293. controller_idx < ARRAY_SIZE(map64)));
  294. return map64[controller_idx];
  295. }
  296. B43_WARN_ON(!(controller_idx >= 0 &&
  297. controller_idx < ARRAY_SIZE(map32)));
  298. return map32[controller_idx];
  299. }
  300. static inline
  301. dma_addr_t map_descbuffer(struct b43_dmaring *ring,
  302. unsigned char *buf, size_t len, int tx)
  303. {
  304. dma_addr_t dmaaddr;
  305. if (tx) {
  306. dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
  307. buf, len, DMA_TO_DEVICE);
  308. } else {
  309. dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
  310. buf, len, DMA_FROM_DEVICE);
  311. }
  312. return dmaaddr;
  313. }
  314. static inline
  315. void unmap_descbuffer(struct b43_dmaring *ring,
  316. dma_addr_t addr, size_t len, int tx)
  317. {
  318. if (tx) {
  319. dma_unmap_single(ring->dev->dev->dma_dev,
  320. addr, len, DMA_TO_DEVICE);
  321. } else {
  322. dma_unmap_single(ring->dev->dev->dma_dev,
  323. addr, len, DMA_FROM_DEVICE);
  324. }
  325. }
  326. static inline
  327. void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
  328. dma_addr_t addr, size_t len)
  329. {
  330. B43_WARN_ON(ring->tx);
  331. dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
  332. addr, len, DMA_FROM_DEVICE);
  333. }
  334. static inline
  335. void sync_descbuffer_for_device(struct b43_dmaring *ring,
  336. dma_addr_t addr, size_t len)
  337. {
  338. B43_WARN_ON(ring->tx);
  339. dma_sync_single_for_device(ring->dev->dev->dma_dev,
  340. addr, len, DMA_FROM_DEVICE);
  341. }
  342. static inline
  343. void free_descriptor_buffer(struct b43_dmaring *ring,
  344. struct b43_dmadesc_meta *meta)
  345. {
  346. if (meta->skb) {
  347. dev_kfree_skb_any(meta->skb);
  348. meta->skb = NULL;
  349. }
  350. }
  351. static int alloc_ringmemory(struct b43_dmaring *ring)
  352. {
  353. gfp_t flags = GFP_KERNEL;
  354. /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
  355. * alignment and 8K buffers for 64-bit DMA with 8K alignment.
  356. * In practice we could use smaller buffers for the latter, but the
  357. * alignment is really important because of the hardware bug. If bit
  358. * 0x00001000 is used in DMA address, some hardware (like BCM4331)
  359. * copies that bit into B43_DMA64_RXSTATUS and we get false values from
  360. * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
  362. more than 256 slots for the ring.
  362. */
  363. u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
  364. B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
  365. ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
  366. ring_mem_size, &(ring->dmabase),
  367. flags);
  368. if (!ring->descbase) {
  369. b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
  370. return -ENOMEM;
  371. }
  372. memset(ring->descbase, 0, ring_mem_size);
  373. return 0;
  374. }
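/* Sketch of why the 8K allocation matters (assuming dma_alloc_coherent()
 * returns a buffer aligned to the smallest page order covering the size,
 * as the DMA API documents): an 8K-aligned ring base has its low 13 bits
 * clear and therefore can never have bit 0x00001000 set, e.g.
 *
 *   ring->dmabase = 0x3FFE2000  ->  0x3FFE2000 & 0x1000 == 0
 *
 * whereas a 4K allocation could legitimately land on 0x...1000 and trigger
 * the BCM4331 bug described above, corrupting the value read back from
 * B43_DMA64_RXSTATDPTR. */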
  375. static void free_ringmemory(struct b43_dmaring *ring)
  376. {
  377. u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
  378. B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
  379. dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
  380. ring->descbase, ring->dmabase);
  381. }
  382. /* Reset the RX DMA channel */
  383. static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
  384. enum b43_dmatype type)
  385. {
  386. int i;
  387. u32 value;
  388. u16 offset;
  389. might_sleep();
  390. offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
  391. b43_write32(dev, mmio_base + offset, 0);
  392. for (i = 0; i < 10; i++) {
  393. offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
  394. B43_DMA32_RXSTATUS;
  395. value = b43_read32(dev, mmio_base + offset);
  396. if (type == B43_DMA_64BIT) {
  397. value &= B43_DMA64_RXSTAT;
  398. if (value == B43_DMA64_RXSTAT_DISABLED) {
  399. i = -1;
  400. break;
  401. }
  402. } else {
  403. value &= B43_DMA32_RXSTATE;
  404. if (value == B43_DMA32_RXSTAT_DISABLED) {
  405. i = -1;
  406. break;
  407. }
  408. }
  409. msleep(1);
  410. }
  411. if (i != -1) {
  412. b43err(dev->wl, "DMA RX reset timed out\n");
  413. return -ENODEV;
  414. }
  415. return 0;
  416. }
  417. /* Reset the TX DMA channel */
  418. static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
  419. enum b43_dmatype type)
  420. {
  421. int i;
  422. u32 value;
  423. u16 offset;
  424. might_sleep();
  425. for (i = 0; i < 10; i++) {
  426. offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
  427. B43_DMA32_TXSTATUS;
  428. value = b43_read32(dev, mmio_base + offset);
  429. if (type == B43_DMA_64BIT) {
  430. value &= B43_DMA64_TXSTAT;
  431. if (value == B43_DMA64_TXSTAT_DISABLED ||
  432. value == B43_DMA64_TXSTAT_IDLEWAIT ||
  433. value == B43_DMA64_TXSTAT_STOPPED)
  434. break;
  435. } else {
  436. value &= B43_DMA32_TXSTATE;
  437. if (value == B43_DMA32_TXSTAT_DISABLED ||
  438. value == B43_DMA32_TXSTAT_IDLEWAIT ||
  439. value == B43_DMA32_TXSTAT_STOPPED)
  440. break;
  441. }
  442. msleep(1);
  443. }
  444. offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
  445. b43_write32(dev, mmio_base + offset, 0);
  446. for (i = 0; i < 10; i++) {
  447. offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
  448. B43_DMA32_TXSTATUS;
  449. value = b43_read32(dev, mmio_base + offset);
  450. if (type == B43_DMA_64BIT) {
  451. value &= B43_DMA64_TXSTAT;
  452. if (value == B43_DMA64_TXSTAT_DISABLED) {
  453. i = -1;
  454. break;
  455. }
  456. } else {
  457. value &= B43_DMA32_TXSTATE;
  458. if (value == B43_DMA32_TXSTAT_DISABLED) {
  459. i = -1;
  460. break;
  461. }
  462. }
  463. msleep(1);
  464. }
  465. if (i != -1) {
  466. b43err(dev->wl, "DMA TX reset timed out\n");
  467. return -ENODEV;
  468. }
  469. /* ensure the reset is completed. */
  470. msleep(1);
  471. return 0;
  472. }
  473. /* Check if a DMA mapping address is invalid. */
  474. static bool b43_dma_mapping_error(struct b43_dmaring *ring,
  475. dma_addr_t addr,
  476. size_t buffersize, bool dma_to_device)
  477. {
  478. if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
  479. return 1;
  480. switch (ring->type) {
  481. case B43_DMA_30BIT:
  482. if ((u64)addr + buffersize > (1ULL << 30))
  483. goto address_error;
  484. break;
  485. case B43_DMA_32BIT:
  486. if ((u64)addr + buffersize > (1ULL << 32))
  487. goto address_error;
  488. break;
  489. case B43_DMA_64BIT:
  490. /* Currently we can't have addresses beyond
  491. * 64bit in the kernel. */
  492. break;
  493. }
  494. /* The address is OK. */
  495. return 0;
  496. address_error:
  497. /* We can't support this address. Unmap it again. */
  498. unmap_descbuffer(ring, addr, buffersize, dma_to_device);
  499. return 1;
  500. }
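/* Worked example of the range check (illustrative): a 30-bit engine can only
 * address the first 1 GiB of bus space, so
 *
 *   addr = 0x3FFFF000, buffersize = 0x2000
 *   0x3FFFF000 + 0x2000 = 0x40001000 > (1ULL << 30)  ->  address_error
 *
 * The buffer is unmapped again and the caller falls back to GFP_DMA or a
 * bounce buffer (see setup_rx_descbuffer() and dma_tx_fragment()). */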
  501. static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
  502. {
  503. unsigned char *f = skb->data + ring->frameoffset;
  504. return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
  505. }
  506. static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
  507. {
  508. struct b43_rxhdr_fw4 *rxhdr;
  509. unsigned char *frame;
  510. /* This poisons the RX buffer to detect DMA failures. */
  511. rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
  512. rxhdr->frame_len = 0;
  513. B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
  514. frame = skb->data + ring->frameoffset;
  515. memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
  516. }
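/* The resulting poison layout, roughly (offsets depend on the firmware
 * header format; this is an illustration, not a memory map):
 *
 *   skb->data
 *   |-- b43_rxhdr_fw4 with frame_len forced to 0
 *   |-- ... up to ring->frameoffset ...
 *   |-- 8 bytes of 0xFF (PLCP header + 2 bytes padding)
 *
 * b43_rx_buffer_is_poisoned() above checks exactly those 8 bytes: if they are
 * still all 0xFF after the device signalled a completed RX, the hardware never
 * wrote into the buffer and the frame is dropped. */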
  517. static int setup_rx_descbuffer(struct b43_dmaring *ring,
  518. struct b43_dmadesc_generic *desc,
  519. struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
  520. {
  521. dma_addr_t dmaaddr;
  522. struct sk_buff *skb;
  523. B43_WARN_ON(ring->tx);
  524. skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
  525. if (unlikely(!skb))
  526. return -ENOMEM;
  527. b43_poison_rx_buffer(ring, skb);
  528. dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
  529. if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
  530. /* ugh. try to realloc in zone_dma */
  531. gfp_flags |= GFP_DMA;
  532. dev_kfree_skb_any(skb);
  533. skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
  534. if (unlikely(!skb))
  535. return -ENOMEM;
  536. b43_poison_rx_buffer(ring, skb);
  537. dmaaddr = map_descbuffer(ring, skb->data,
  538. ring->rx_buffersize, 0);
  539. if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
  540. b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
  541. dev_kfree_skb_any(skb);
  542. return -EIO;
  543. }
  544. }
  545. meta->skb = skb;
  546. meta->dmaaddr = dmaaddr;
  547. ring->ops->fill_descriptor(ring, desc, dmaaddr,
  548. ring->rx_buffersize, 0, 0, 0);
  549. return 0;
  550. }
  551. /* Allocate the initial descbuffers.
  552. * This is used for an RX ring only.
  553. */
  554. static int alloc_initial_descbuffers(struct b43_dmaring *ring)
  555. {
  556. int i, err = -ENOMEM;
  557. struct b43_dmadesc_generic *desc;
  558. struct b43_dmadesc_meta *meta;
  559. for (i = 0; i < ring->nr_slots; i++) {
  560. desc = ring->ops->idx2desc(ring, i, &meta);
  561. err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
  562. if (err) {
  563. b43err(ring->dev->wl,
  564. "Failed to allocate initial descbuffers\n");
  565. goto err_unwind;
  566. }
  567. }
  568. mb();
  569. ring->used_slots = ring->nr_slots;
  570. err = 0;
  571. out:
  572. return err;
  573. err_unwind:
  574. for (i--; i >= 0; i--) {
  575. desc = ring->ops->idx2desc(ring, i, &meta);
  576. unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
  577. dev_kfree_skb(meta->skb);
  578. }
  579. goto out;
  580. }
  581. /* Do initial setup of the DMA controller.
  582. * Reset the controller, write the ring busaddress
  583. * and switch the "enable" bit on.
  584. */
  585. static int dmacontroller_setup(struct b43_dmaring *ring)
  586. {
  587. int err = 0;
  588. u32 value;
  589. u32 addrext;
  590. bool parity = ring->dev->dma.parity;
  591. u32 addrlo;
  592. u32 addrhi;
  593. if (ring->tx) {
  594. if (ring->type == B43_DMA_64BIT) {
  595. u64 ringbase = (u64) (ring->dmabase);
  596. addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
  597. addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
  598. addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
  599. value = B43_DMA64_TXENABLE;
  600. value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
  601. & B43_DMA64_TXADDREXT_MASK;
  602. if (!parity)
  603. value |= B43_DMA64_TXPARITYDISABLE;
  604. b43_dma_write(ring, B43_DMA64_TXCTL, value);
  605. b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
  606. b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
  607. } else {
  608. u32 ringbase = (u32) (ring->dmabase);
  609. addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
  610. addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
  611. value = B43_DMA32_TXENABLE;
  612. value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
  613. & B43_DMA32_TXADDREXT_MASK;
  614. if (!parity)
  615. value |= B43_DMA32_TXPARITYDISABLE;
  616. b43_dma_write(ring, B43_DMA32_TXCTL, value);
  617. b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
  618. }
  619. } else {
  620. err = alloc_initial_descbuffers(ring);
  621. if (err)
  622. goto out;
  623. if (ring->type == B43_DMA_64BIT) {
  624. u64 ringbase = (u64) (ring->dmabase);
  625. addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
  626. addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
  627. addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
  628. value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
  629. value |= B43_DMA64_RXENABLE;
  630. value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
  631. & B43_DMA64_RXADDREXT_MASK;
  632. if (!parity)
  633. value |= B43_DMA64_RXPARITYDISABLE;
  634. b43_dma_write(ring, B43_DMA64_RXCTL, value);
  635. b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
  636. b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
  637. b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
  638. sizeof(struct b43_dmadesc64));
  639. } else {
  640. u32 ringbase = (u32) (ring->dmabase);
  641. addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
  642. addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
  643. value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
  644. value |= B43_DMA32_RXENABLE;
  645. value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
  646. & B43_DMA32_RXADDREXT_MASK;
  647. if (!parity)
  648. value |= B43_DMA32_RXPARITYDISABLE;
  649. b43_dma_write(ring, B43_DMA32_RXCTL, value);
  650. b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
  651. b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
  652. sizeof(struct b43_dmadesc32));
  653. }
  654. }
  655. out:
  656. return err;
  657. }
  658. /* Shutdown the DMA controller. */
  659. static void dmacontroller_cleanup(struct b43_dmaring *ring)
  660. {
  661. if (ring->tx) {
  662. b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
  663. ring->type);
  664. if (ring->type == B43_DMA_64BIT) {
  665. b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
  666. b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
  667. } else
  668. b43_dma_write(ring, B43_DMA32_TXRING, 0);
  669. } else {
  670. b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
  671. ring->type);
  672. if (ring->type == B43_DMA_64BIT) {
  673. b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
  674. b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
  675. } else
  676. b43_dma_write(ring, B43_DMA32_RXRING, 0);
  677. }
  678. }
  679. static void free_all_descbuffers(struct b43_dmaring *ring)
  680. {
  681. struct b43_dmadesc_meta *meta;
  682. int i;
  683. if (!ring->used_slots)
  684. return;
  685. for (i = 0; i < ring->nr_slots; i++) {
  686. /* get meta - ignore returned value */
  687. ring->ops->idx2desc(ring, i, &meta);
  688. if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
  689. B43_WARN_ON(!ring->tx);
  690. continue;
  691. }
  692. if (ring->tx) {
  693. unmap_descbuffer(ring, meta->dmaaddr,
  694. meta->skb->len, 1);
  695. } else {
  696. unmap_descbuffer(ring, meta->dmaaddr,
  697. ring->rx_buffersize, 0);
  698. }
  699. free_descriptor_buffer(ring, meta);
  700. }
  701. }
  702. static u64 supported_dma_mask(struct b43_wldev *dev)
  703. {
  704. u32 tmp;
  705. u16 mmio_base;
  706. switch (dev->dev->bus_type) {
  707. #ifdef CONFIG_B43_BCMA
  708. case B43_BUS_BCMA:
  709. tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
  710. if (tmp & BCMA_IOST_DMA64)
  711. return DMA_BIT_MASK(64);
  712. break;
  713. #endif
  714. #ifdef CONFIG_B43_SSB
  715. case B43_BUS_SSB:
  716. tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
  717. if (tmp & SSB_TMSHIGH_DMA64)
  718. return DMA_BIT_MASK(64);
  719. break;
  720. #endif
  721. }
  722. mmio_base = b43_dmacontroller_base(0, 0);
  723. b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
  724. tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
  725. if (tmp & B43_DMA32_TXADDREXT_MASK)
  726. return DMA_BIT_MASK(32);
  727. return DMA_BIT_MASK(30);
  728. }
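/* The probe above works by trying to set the address-extension bits in the
 * first 32-bit TX control register: if they stick, the core has a 32-bit
 * engine, otherwise only a 30-bit one. Condensed view (same logic as above,
 * written as pseudocode):
 *
 *   write32(TXCTL, B43_DMA32_TXADDREXT_MASK);
 *   if (read32(TXCTL) & B43_DMA32_TXADDREXT_MASK)
 *           mask = DMA_BIT_MASK(32);
 *   else
 *           mask = DMA_BIT_MASK(30);
 *
 * 64-bit capability is detected earlier from the core flags
 * (BCMA_IOST_DMA64 / SSB_TMSHIGH_DMA64) without touching the engine. */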
  729. static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
  730. {
  731. if (dmamask == DMA_BIT_MASK(30))
  732. return B43_DMA_30BIT;
  733. if (dmamask == DMA_BIT_MASK(32))
  734. return B43_DMA_32BIT;
  735. if (dmamask == DMA_BIT_MASK(64))
  736. return B43_DMA_64BIT;
  737. B43_WARN_ON(1);
  738. return B43_DMA_30BIT;
  739. }
  740. /* Main initialization function. */
  741. static
  742. struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
  743. int controller_index,
  744. int for_tx,
  745. enum b43_dmatype type)
  746. {
  747. struct b43_dmaring *ring;
  748. int i, err;
  749. dma_addr_t dma_test;
  750. ring = kzalloc(sizeof(*ring), GFP_KERNEL);
  751. if (!ring)
  752. goto out;
  753. ring->nr_slots = B43_RXRING_SLOTS;
  754. if (for_tx)
  755. ring->nr_slots = B43_TXRING_SLOTS;
  756. ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
  757. GFP_KERNEL);
  758. if (!ring->meta)
  759. goto err_kfree_ring;
  760. for (i = 0; i < ring->nr_slots; i++)
  761. ring->meta[i].skb = B43_DMA_PTR_POISON;
  762. ring->type = type;
  763. ring->dev = dev;
  764. ring->mmio_base = b43_dmacontroller_base(type, controller_index);
  765. ring->index = controller_index;
  766. if (type == B43_DMA_64BIT)
  767. ring->ops = &dma64_ops;
  768. else
  769. ring->ops = &dma32_ops;
  770. if (for_tx) {
  771. ring->tx = true;
  772. ring->current_slot = -1;
  773. } else {
  774. if (ring->index == 0) {
  775. switch (dev->fw.hdr_format) {
  776. case B43_FW_HDR_598:
  777. ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
  778. ring->frameoffset = B43_DMA0_RX_FW598_FO;
  779. break;
  780. case B43_FW_HDR_410:
  781. case B43_FW_HDR_351:
  782. ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
  783. ring->frameoffset = B43_DMA0_RX_FW351_FO;
  784. break;
  785. }
  786. } else
  787. B43_WARN_ON(1);
  788. }
  789. #ifdef CONFIG_B43_DEBUG
  790. ring->last_injected_overflow = jiffies;
  791. #endif
  792. if (for_tx) {
  793. /* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
  794. BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);
  795. ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
  796. b43_txhdr_size(dev),
  797. GFP_KERNEL);
  798. if (!ring->txhdr_cache)
  799. goto err_kfree_meta;
  800. /* test for ability to dma to txhdr_cache */
  801. dma_test = dma_map_single(dev->dev->dma_dev,
  802. ring->txhdr_cache,
  803. b43_txhdr_size(dev),
  804. DMA_TO_DEVICE);
  805. if (b43_dma_mapping_error(ring, dma_test,
  806. b43_txhdr_size(dev), 1)) {
  807. /* ugh realloc */
  808. kfree(ring->txhdr_cache);
  809. ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
  810. b43_txhdr_size(dev),
  811. GFP_KERNEL | GFP_DMA);
  812. if (!ring->txhdr_cache)
  813. goto err_kfree_meta;
  814. dma_test = dma_map_single(dev->dev->dma_dev,
  815. ring->txhdr_cache,
  816. b43_txhdr_size(dev),
  817. DMA_TO_DEVICE);
  818. if (b43_dma_mapping_error(ring, dma_test,
  819. b43_txhdr_size(dev), 1)) {
  820. b43err(dev->wl,
  821. "TXHDR DMA allocation failed\n");
  822. goto err_kfree_txhdr_cache;
  823. }
  824. }
  825. dma_unmap_single(dev->dev->dma_dev,
  826. dma_test, b43_txhdr_size(dev),
  827. DMA_TO_DEVICE);
  828. }
  829. err = alloc_ringmemory(ring);
  830. if (err)
  831. goto err_kfree_txhdr_cache;
  832. err = dmacontroller_setup(ring);
  833. if (err)
  834. goto err_free_ringmemory;
  835. out:
  836. return ring;
  837. err_free_ringmemory:
  838. free_ringmemory(ring);
  839. err_kfree_txhdr_cache:
  840. kfree(ring->txhdr_cache);
  841. err_kfree_meta:
  842. kfree(ring->meta);
  843. err_kfree_ring:
  844. kfree(ring);
  845. ring = NULL;
  846. goto out;
  847. }
  848. #define divide(a, b) ({ \
  849. typeof(a) __a = a; \
  850. do_div(__a, b); \
  851. __a; \
  852. })
  853. #define modulo(a, b) ({ \
  854. typeof(a) __a = a; \
  855. do_div(__a, b); \
  856. })
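/* Both helpers exist because do_div() is destructive: it divides the 64-bit
 * dividend in place and returns the 32-bit remainder. Sketch of the
 * behaviour (illustrative values):
 *
 *   u64 a = 1005;
 *   u32 r = do_div(a, 10);   // afterwards a == 100 (quotient), r == 5
 *
 * divide() therefore evaluates to the modified copy (the quotient), while
 * modulo() evaluates to do_div()'s return value (the remainder), without
 * clobbering the caller's variable. */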
  857. /* Main cleanup function. */
  858. static void b43_destroy_dmaring(struct b43_dmaring *ring,
  859. const char *ringname)
  860. {
  861. if (!ring)
  862. return;
  863. #ifdef CONFIG_B43_DEBUG
  864. {
  865. /* Print some statistics. */
  866. u64 failed_packets = ring->nr_failed_tx_packets;
  867. u64 succeed_packets = ring->nr_succeed_tx_packets;
  868. u64 nr_packets = failed_packets + succeed_packets;
  869. u64 permille_failed = 0, average_tries = 0;
  870. if (nr_packets)
  871. permille_failed = divide(failed_packets * 1000, nr_packets);
  872. if (nr_packets)
  873. average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);
  874. b43dbg(ring->dev->wl, "DMA-%u %s: "
  875. "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
  876. "Average tries %llu.%02llu\n",
  877. (unsigned int)(ring->type), ringname,
  878. ring->max_used_slots,
  879. ring->nr_slots,
  880. (unsigned long long)failed_packets,
  881. (unsigned long long)nr_packets,
  882. (unsigned long long)divide(permille_failed, 10),
  883. (unsigned long long)modulo(permille_failed, 10),
  884. (unsigned long long)divide(average_tries, 100),
  885. (unsigned long long)modulo(average_tries, 100));
  886. }
  887. #endif /* DEBUG */
  888. /* Device IRQs are disabled prior to entering this function,
  889. * so no need to take care of concurrency with rx handler stuff.
  890. */
  891. dmacontroller_cleanup(ring);
  892. free_all_descbuffers(ring);
  893. free_ringmemory(ring);
  894. kfree(ring->txhdr_cache);
  895. kfree(ring->meta);
  896. kfree(ring);
  897. }
  898. #define destroy_ring(dma, ring) do { \
  899. b43_destroy_dmaring((dma)->ring, __stringify(ring)); \
  900. (dma)->ring = NULL; \
  901. } while (0)
  902. void b43_dma_free(struct b43_wldev *dev)
  903. {
  904. struct b43_dma *dma;
  905. if (b43_using_pio_transfers(dev))
  906. return;
  907. dma = &dev->dma;
  908. destroy_ring(dma, rx_ring);
  909. destroy_ring(dma, tx_ring_AC_BK);
  910. destroy_ring(dma, tx_ring_AC_BE);
  911. destroy_ring(dma, tx_ring_AC_VI);
  912. destroy_ring(dma, tx_ring_AC_VO);
  913. destroy_ring(dma, tx_ring_mcast);
  914. }
  915. static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
  916. {
  917. u64 orig_mask = mask;
  918. bool fallback = false;
  919. int err;
  920. /* Try to set the DMA mask. If it fails, try falling back to a
  921. * lower mask, as we can always also support a lower one. */
  922. while (1) {
  923. err = dma_set_mask(dev->dev->dma_dev, mask);
  924. if (!err) {
  925. err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
  926. if (!err)
  927. break;
  928. }
  929. if (mask == DMA_BIT_MASK(64)) {
  930. mask = DMA_BIT_MASK(32);
  931. fallback = true;
  932. continue;
  933. }
  934. if (mask == DMA_BIT_MASK(32)) {
  935. mask = DMA_BIT_MASK(30);
  936. fallback = true;
  937. continue;
  938. }
  939. b43err(dev->wl, "The machine/kernel does not support "
  940. "the required %u-bit DMA mask\n",
  941. (unsigned int)dma_mask_to_engine_type(orig_mask));
  942. return -EOPNOTSUPP;
  943. }
  944. if (fallback) {
  945. b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
  946. (unsigned int)dma_mask_to_engine_type(orig_mask),
  947. (unsigned int)dma_mask_to_engine_type(mask));
  948. }
  949. return 0;
  950. }
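/* Fallback chain, spelled out (illustrative): on a platform that cannot
 * satisfy a 64-bit mask the loop above degrades step by step,
 *
 *   dma_set_mask(64-bit) -> fails
 *   dma_set_mask(32-bit) -> succeeds -> "DMA mask fallback from 64-bit to 32-bit"
 *
 * and only gives up with -EOPNOTSUPP once even the 30-bit mask is
 * rejected. */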
  951. /* Some hardware with 64-bit DMA seems to be bugged and looks for the translation
  952. * bit in the low address word instead of the high one.
  953. */
  954. static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
  955. enum b43_dmatype type)
  956. {
  957. if (type != B43_DMA_64BIT)
  958. return 1;
  959. #ifdef CONFIG_B43_SSB
  960. if (dev->dev->bus_type == B43_BUS_SSB &&
  961. dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
  962. !(dev->dev->sdev->bus->host_pci->is_pcie &&
  963. ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
  964. return 1;
  965. #endif
  966. return 0;
  967. }
  968. int b43_dma_init(struct b43_wldev *dev)
  969. {
  970. struct b43_dma *dma = &dev->dma;
  971. int err;
  972. u64 dmamask;
  973. enum b43_dmatype type;
  974. dmamask = supported_dma_mask(dev);
  975. type = dma_mask_to_engine_type(dmamask);
  976. err = b43_dma_set_mask(dev, dmamask);
  977. if (err)
  978. return err;
  979. switch (dev->dev->bus_type) {
  980. #ifdef CONFIG_B43_BCMA
  981. case B43_BUS_BCMA:
  982. dma->translation = bcma_core_dma_translation(dev->dev->bdev);
  983. break;
  984. #endif
  985. #ifdef CONFIG_B43_SSB
  986. case B43_BUS_SSB:
  987. dma->translation = ssb_dma_translation(dev->dev->sdev);
  988. break;
  989. #endif
  990. }
  991. dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);
  992. dma->parity = true;
  993. #ifdef CONFIG_B43_BCMA
  994. /* TODO: find out which SSB devices need disabling parity */
  995. if (dev->dev->bus_type == B43_BUS_BCMA)
  996. dma->parity = false;
  997. #endif
  998. err = -ENOMEM;
  999. /* setup TX DMA channels. */
  1000. dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
  1001. if (!dma->tx_ring_AC_BK)
  1002. goto out;
  1003. dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
  1004. if (!dma->tx_ring_AC_BE)
  1005. goto err_destroy_bk;
  1006. dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
  1007. if (!dma->tx_ring_AC_VI)
  1008. goto err_destroy_be;
  1009. dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
  1010. if (!dma->tx_ring_AC_VO)
  1011. goto err_destroy_vi;
  1012. dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
  1013. if (!dma->tx_ring_mcast)
  1014. goto err_destroy_vo;
  1015. /* setup RX DMA channel. */
  1016. dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
  1017. if (!dma->rx_ring)
  1018. goto err_destroy_mcast;
  1019. /* No support for the TX status DMA ring. */
  1020. B43_WARN_ON(dev->dev->core_rev < 5);
  1021. b43dbg(dev->wl, "%u-bit DMA initialized\n",
  1022. (unsigned int)type);
  1023. err = 0;
  1024. out:
  1025. return err;
  1026. err_destroy_mcast:
  1027. destroy_ring(dma, tx_ring_mcast);
  1028. err_destroy_vo:
  1029. destroy_ring(dma, tx_ring_AC_VO);
  1030. err_destroy_vi:
  1031. destroy_ring(dma, tx_ring_AC_VI);
  1032. err_destroy_be:
  1033. destroy_ring(dma, tx_ring_AC_BE);
  1034. err_destroy_bk:
  1035. destroy_ring(dma, tx_ring_AC_BK);
  1036. return err;
  1037. }
  1038. /* Generate a cookie for the TX header. */
  1039. static u16 generate_cookie(struct b43_dmaring *ring, int slot)
  1040. {
  1041. u16 cookie;
  1042. /* Use the upper 4 bits of the cookie as
  1043. * DMA controller ID and store the slot number
  1044. * in the lower 12 bits.
  1045. * Note that the cookie must never be 0, as this
  1046. * is a special value used in RX path.
  1047. It also must not be 0xFFFF, because that value is reserved
  1048. for multicast frames.
  1049. */
  1050. cookie = (((u16)ring->index + 1) << 12);
  1051. B43_WARN_ON(slot & ~0x0FFF);
  1052. cookie |= (u16)slot;
  1053. return cookie;
  1054. }
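/* Worked example (illustrative): for the AC_VI ring (ring->index == 2) and
 * slot 10,
 *
 *   cookie = ((2 + 1) << 12) | 10 = 0x300A
 *
 * parse_cookie() below reverses this: 0x300A & 0xF000 == 0x3000 selects
 * tx_ring_AC_VI, and 0x300A & 0x0FFF == 10 recovers the slot. */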
  1055. /* Inspect a cookie and find out to which controller/slot it belongs. */
  1056. static
  1057. struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
  1058. {
  1059. struct b43_dma *dma = &dev->dma;
  1060. struct b43_dmaring *ring = NULL;
  1061. switch (cookie & 0xF000) {
  1062. case 0x1000:
  1063. ring = dma->tx_ring_AC_BK;
  1064. break;
  1065. case 0x2000:
  1066. ring = dma->tx_ring_AC_BE;
  1067. break;
  1068. case 0x3000:
  1069. ring = dma->tx_ring_AC_VI;
  1070. break;
  1071. case 0x4000:
  1072. ring = dma->tx_ring_AC_VO;
  1073. break;
  1074. case 0x5000:
  1075. ring = dma->tx_ring_mcast;
  1076. break;
  1077. }
  1078. *slot = (cookie & 0x0FFF);
  1079. if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
  1080. b43dbg(dev->wl, "TX-status contains "
  1081. "invalid cookie: 0x%04X\n", cookie);
  1082. return NULL;
  1083. }
  1084. return ring;
  1085. }
  1086. static int dma_tx_fragment(struct b43_dmaring *ring,
  1087. struct sk_buff *skb)
  1088. {
  1089. const struct b43_dma_ops *ops = ring->ops;
  1090. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1091. struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
  1092. u8 *header;
  1093. int slot, old_top_slot, old_used_slots;
  1094. int err;
  1095. struct b43_dmadesc_generic *desc;
  1096. struct b43_dmadesc_meta *meta;
  1097. struct b43_dmadesc_meta *meta_hdr;
  1098. u16 cookie;
  1099. size_t hdrsize = b43_txhdr_size(ring->dev);
  1100. /* Important note: If the number of used DMA slots per TX frame
  1101. * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
  1102. * the file has to be updated, too!
  1103. */
  1104. old_top_slot = ring->current_slot;
  1105. old_used_slots = ring->used_slots;
  1106. /* Get a slot for the header. */
  1107. slot = request_slot(ring);
  1108. desc = ops->idx2desc(ring, slot, &meta_hdr);
  1109. memset(meta_hdr, 0, sizeof(*meta_hdr));
  1110. header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
  1111. cookie = generate_cookie(ring, slot);
  1112. err = b43_generate_txhdr(ring->dev, header,
  1113. skb, info, cookie);
  1114. if (unlikely(err)) {
  1115. ring->current_slot = old_top_slot;
  1116. ring->used_slots = old_used_slots;
  1117. return err;
  1118. }
  1119. meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
  1120. hdrsize, 1);
  1121. if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
  1122. ring->current_slot = old_top_slot;
  1123. ring->used_slots = old_used_slots;
  1124. return -EIO;
  1125. }
  1126. ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
  1127. hdrsize, 1, 0, 0);
  1128. /* Get a slot for the payload. */
  1129. slot = request_slot(ring);
  1130. desc = ops->idx2desc(ring, slot, &meta);
  1131. memset(meta, 0, sizeof(*meta));
  1132. meta->skb = skb;
  1133. meta->is_last_fragment = true;
  1134. priv_info->bouncebuffer = NULL;
  1135. meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
  1136. /* create a bounce buffer in zone_dma on mapping failure. */
  1137. if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
  1138. priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
  1139. GFP_ATOMIC | GFP_DMA);
  1140. if (!priv_info->bouncebuffer) {
  1141. ring->current_slot = old_top_slot;
  1142. ring->used_slots = old_used_slots;
  1143. err = -ENOMEM;
  1144. goto out_unmap_hdr;
  1145. }
  1146. meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
  1147. if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
  1148. kfree(priv_info->bouncebuffer);
  1149. priv_info->bouncebuffer = NULL;
  1150. ring->current_slot = old_top_slot;
  1151. ring->used_slots = old_used_slots;
  1152. err = -EIO;
  1153. goto out_unmap_hdr;
  1154. }
  1155. }
  1156. ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
  1157. if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
  1158. /* Tell the firmware about the cookie of the last
  1159. * mcast frame, so it can clear the more-data bit in it. */
  1160. b43_shm_write16(ring->dev, B43_SHM_SHARED,
  1161. B43_SHM_SH_MCASTCOOKIE, cookie);
  1162. }
  1163. /* Now transfer the whole frame. */
  1164. wmb();
  1165. ops->poke_tx(ring, next_slot(ring, slot));
  1166. return 0;
  1167. out_unmap_hdr:
  1168. unmap_descbuffer(ring, meta_hdr->dmaaddr,
  1169. hdrsize, 1);
  1170. return err;
  1171. }
  1172. static inline int should_inject_overflow(struct b43_dmaring *ring)
  1173. {
  1174. #ifdef CONFIG_B43_DEBUG
  1175. if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
  1176. /* Check if we should inject another ringbuffer overflow
  1177. * to test handling of this situation in the stack. */
  1178. unsigned long next_overflow;
  1179. next_overflow = ring->last_injected_overflow + HZ;
  1180. if (time_after(jiffies, next_overflow)) {
  1181. ring->last_injected_overflow = jiffies;
  1182. b43dbg(ring->dev->wl,
  1183. "Injecting TX ring overflow on "
  1184. "DMA controller %d\n", ring->index);
  1185. return 1;
  1186. }
  1187. }
  1188. #endif /* CONFIG_B43_DEBUG */
  1189. return 0;
  1190. }
  1191. /* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
  1192. static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
  1193. u8 queue_prio)
  1194. {
  1195. struct b43_dmaring *ring;
  1196. if (dev->qos_enabled) {
  1197. /* 0 = highest priority */
  1198. switch (queue_prio) {
  1199. default:
  1200. B43_WARN_ON(1);
  1201. /* fallthrough */
  1202. case 0:
  1203. ring = dev->dma.tx_ring_AC_VO;
  1204. break;
  1205. case 1:
  1206. ring = dev->dma.tx_ring_AC_VI;
  1207. break;
  1208. case 2:
  1209. ring = dev->dma.tx_ring_AC_BE;
  1210. break;
  1211. case 3:
  1212. ring = dev->dma.tx_ring_AC_BK;
  1213. break;
  1214. }
  1215. } else
  1216. ring = dev->dma.tx_ring_AC_BE;
  1217. return ring;
  1218. }
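/* Resulting static mapping (with QoS enabled):
 *
 *   mac80211 queue 0 (highest) -> tx_ring_AC_VO
 *   mac80211 queue 1           -> tx_ring_AC_VI
 *   mac80211 queue 2           -> tx_ring_AC_BE
 *   mac80211 queue 3           -> tx_ring_AC_BK
 *
 * With QoS disabled everything is funnelled through tx_ring_AC_BE. */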
  1219. int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
  1220. {
  1221. struct b43_dmaring *ring;
  1222. struct ieee80211_hdr *hdr;
  1223. int err = 0;
  1224. struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  1225. hdr = (struct ieee80211_hdr *)skb->data;
  1226. if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
  1227. /* The multicast ring will be sent after the DTIM */
  1228. ring = dev->dma.tx_ring_mcast;
  1229. /* Set the more-data bit. Ucode will clear it on
  1230. * the last frame for us. */
  1231. hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
  1232. } else {
  1233. /* Decide by priority where to put this frame. */
  1234. ring = select_ring_by_priority(
  1235. dev, skb_get_queue_mapping(skb));
  1236. }
  1237. B43_WARN_ON(!ring->tx);
  1238. if (unlikely(ring->stopped)) {
  1239. /* We get here only because of a bug in mac80211.
  1240. * Because of a race, one packet may be queued after
  1241. * the queue is stopped, thus we got called when we shouldn't.
  1242. * For now, just refuse the transmit. */
  1243. if (b43_debug(dev, B43_DBG_DMAVERBOSE))
  1244. b43err(dev->wl, "Packet after queue stopped\n");
  1245. err = -ENOSPC;
  1246. goto out;
  1247. }
  1248. if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
  1249. /* If we get here, we have a real error with the queue
  1250. * full, but queues not stopped. */
  1251. b43err(dev->wl, "DMA queue overflow\n");
  1252. err = -ENOSPC;
  1253. goto out;
  1254. }
  1255. /* Assign the queue number to the ring (if not already done before)
  1256. * so TX status handling can use it. The queue to ring mapping is
  1257. * static, so we don't need to store it per frame. */
  1258. ring->queue_prio = skb_get_queue_mapping(skb);
  1259. err = dma_tx_fragment(ring, skb);
  1260. if (unlikely(err == -ENOKEY)) {
  1261. /* Drop this packet, as we don't have the encryption key
  1262. * anymore and must not transmit it unencrypted. */
  1263. dev_kfree_skb_any(skb);
  1264. err = 0;
  1265. goto out;
  1266. }
  1267. if (unlikely(err)) {
  1268. b43err(dev->wl, "DMA tx mapping failure\n");
  1269. goto out;
  1270. }
  1271. if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
  1272. should_inject_overflow(ring)) {
  1273. /* This TX ring is full. */
  1274. unsigned int skb_mapping = skb_get_queue_mapping(skb);
  1275. ieee80211_stop_queue(dev->wl->hw, skb_mapping);
  1276. dev->wl->tx_queue_stopped[skb_mapping] = 1;
  1277. ring->stopped = true;
  1278. if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
  1279. b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
  1280. }
  1281. }
  1282. out:
  1283. return err;
  1284. }
  1285. void b43_dma_handle_txstatus(struct b43_wldev *dev,
  1286. const struct b43_txstatus *status)
  1287. {
  1288. const struct b43_dma_ops *ops;
  1289. struct b43_dmaring *ring;
  1290. struct b43_dmadesc_meta *meta;
  1291. int slot, firstused;
  1292. bool frame_succeed;
  1293. ring = parse_cookie(dev, status->cookie, &slot);
  1294. if (unlikely(!ring))
  1295. return;
  1296. B43_WARN_ON(!ring->tx);
  1297. /* Sanity check: TX packets are processed in-order on one ring.
  1298. * Check if the slot deduced from the cookie really is the first
  1299. * used slot. */
  1300. firstused = ring->current_slot - ring->used_slots + 1;
  1301. if (firstused < 0)
  1302. firstused = ring->nr_slots + firstused;
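/* Example of the wrap-around above (illustrative): with nr_slots == 256,
 * current_slot == 2 and used_slots == 5, the in-flight slots are
 * 254, 255, 0, 1, 2, so firstused = 2 - 5 + 1 = -2 -> 256 + (-2) = 254. */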
  1303. if (unlikely(slot != firstused)) {
  1304. /* This possibly is a firmware bug and will result in
  1305. * malfunction, memory leaks and/or stall of DMA functionality. */
  1306. b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
  1307. "Expected %d, but got %d\n",
  1308. ring->index, firstused, slot);
  1309. return;
  1310. }
  1311. ops = ring->ops;
  1312. while (1) {
  1313. B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
  1314. /* get meta - ignore returned value */
  1315. ops->idx2desc(ring, slot, &meta);
  1316. if (b43_dma_ptr_is_poisoned(meta->skb)) {
  1317. b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
  1318. "on ring %d\n",
  1319. slot, firstused, ring->index);
  1320. break;
  1321. }
  1322. if (meta->skb) {
  1323. struct b43_private_tx_info *priv_info =
  1324. b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
  1325. unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
  1326. kfree(priv_info->bouncebuffer);
  1327. priv_info->bouncebuffer = NULL;
  1328. } else {
  1329. unmap_descbuffer(ring, meta->dmaaddr,
  1330. b43_txhdr_size(dev), 1);
  1331. }
  1332. if (meta->is_last_fragment) {
  1333. struct ieee80211_tx_info *info;
  1334. if (unlikely(!meta->skb)) {
  1335. /* This is a scatter-gather fragment of a frame, so
  1336. * the skb pointer must not be NULL. */
  1337. b43dbg(dev->wl, "TX status unexpected NULL skb "
  1338. "at slot %d (first=%d) on ring %d\n",
  1339. slot, firstused, ring->index);
  1340. break;
  1341. }
  1342. info = IEEE80211_SKB_CB(meta->skb);
  1343. /*
  1344. * Call back to inform the ieee80211 subsystem about
  1345. * the status of the transmission.
  1346. */
  1347. frame_succeed = b43_fill_txstatus_report(dev, info, status);
  1348. #ifdef CONFIG_B43_DEBUG
  1349. if (frame_succeed)
  1350. ring->nr_succeed_tx_packets++;
  1351. else
  1352. ring->nr_failed_tx_packets++;
  1353. ring->nr_total_packet_tries += status->frame_count;
  1354. #endif /* DEBUG */
  1355. ieee80211_tx_status(dev->wl->hw, meta->skb);
  1356. /* skb will be freed by ieee80211_tx_status().
  1357. * Poison our pointer. */
  1358. meta->skb = B43_DMA_PTR_POISON;
  1359. } else {
  1360. /* No need to call free_descriptor_buffer here, as
  1361. * this is only the txhdr, which is not allocated.
  1362. */
  1363. if (unlikely(meta->skb)) {
  1364. b43dbg(dev->wl, "TX status unexpected non-NULL skb "
  1365. "at slot %d (first=%d) on ring %d\n",
  1366. slot, firstused, ring->index);
  1367. break;
  1368. }
  1369. }
  1370. /* Everything unmapped and free'd. So it's not used anymore. */
  1371. ring->used_slots--;
  1372. if (meta->is_last_fragment) {
  1373. /* This is the last scatter-gather
  1374. * fragment of the frame. We are done. */
  1375. break;
  1376. }
  1377. slot = next_slot(ring, slot);
  1378. }
  1379. if (ring->stopped) {
  1380. B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
  1381. ring->stopped = false;
  1382. }
  1383. if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
  1384. dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
  1385. } else {
  1386. /* If the driver queue is running wake the corresponding
  1387. * mac80211 queue. */
  1388. ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
  1389. if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
  1390. b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
  1391. }
  1392. }
  1393. /* Add work to the queue. */
  1394. ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
  1395. }
  1396. static void dma_rx(struct b43_dmaring *ring, int *slot)
  1397. {
  1398. const struct b43_dma_ops *ops = ring->ops;
  1399. struct b43_dmadesc_generic *desc;
  1400. struct b43_dmadesc_meta *meta;
  1401. struct b43_rxhdr_fw4 *rxhdr;
  1402. struct sk_buff *skb;
  1403. u16 len;
  1404. int err;
  1405. dma_addr_t dmaaddr;
  1406. desc = ops->idx2desc(ring, *slot, &meta);
  1407. sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
  1408. skb = meta->skb;
  1409. rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
  1410. len = le16_to_cpu(rxhdr->frame_len);
  1411. if (len == 0) {
  1412. int i = 0;
  1413. do {
  1414. udelay(2);
  1415. barrier();
  1416. len = le16_to_cpu(rxhdr->frame_len);
  1417. } while (len == 0 && i++ < 5);
  1418. if (unlikely(len == 0)) {
  1419. dmaaddr = meta->dmaaddr;
  1420. goto drop_recycle_buffer;
  1421. }
  1422. }
  1423. if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
  1424. /* Something went wrong with the DMA.
  1425. * The device did not touch the buffer and did not overwrite the poison. */
  1426. b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
  1427. dmaaddr = meta->dmaaddr;
  1428. goto drop_recycle_buffer;
  1429. }
  1430. if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
  1431. /* The data did not fit into one descriptor buffer
  1432. * and is split over multiple buffers.
  1433. * This should never happen, as we try to allocate buffers
  1434. * big enough. So simply ignore this packet.
  1435. */
  1436. int cnt = 0;
  1437. s32 tmp = len;
  1438. while (1) {
  1439. desc = ops->idx2desc(ring, *slot, &meta);
  1440. /* recycle the descriptor buffer. */
  1441. b43_poison_rx_buffer(ring, meta->skb);
  1442. sync_descbuffer_for_device(ring, meta->dmaaddr,
  1443. ring->rx_buffersize);
  1444. *slot = next_slot(ring, *slot);
  1445. cnt++;
  1446. tmp -= ring->rx_buffersize;
  1447. if (tmp <= 0)
  1448. break;
  1449. }
  1450. b43err(ring->dev->wl, "DMA RX buffer too small "
  1451. "(len: %u, buffer: %u, nr-dropped: %d)\n",
  1452. len, ring->rx_buffersize, cnt);
  1453. goto drop;
  1454. }
  1455. dmaaddr = meta->dmaaddr;
  1456. err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
  1457. if (unlikely(err)) {
  1458. b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
  1459. goto drop_recycle_buffer;
  1460. }
  1461. unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
  1462. skb_put(skb, len + ring->frameoffset);
  1463. skb_pull(skb, ring->frameoffset);
  1464. b43_rx(ring->dev, skb, rxhdr);
  1465. drop:
  1466. return;
  1467. drop_recycle_buffer:
  1468. /* Poison and recycle the RX buffer. */
  1469. b43_poison_rx_buffer(ring, skb);
  1470. sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
  1471. }
  1472. void b43_dma_rx(struct b43_dmaring *ring)
  1473. {
  1474. const struct b43_dma_ops *ops = ring->ops;
  1475. int slot, current_slot;
  1476. int used_slots = 0;
  1477. B43_WARN_ON(ring->tx);
  1478. current_slot = ops->get_current_rxslot(ring);
  1479. B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));
  1480. slot = ring->current_slot;
  1481. for (; slot != current_slot; slot = next_slot(ring, slot)) {
  1482. dma_rx(ring, &slot);
  1483. update_max_used_slots(ring, ++used_slots);
  1484. }
  1485. wmb();
  1486. ops->set_current_rxslot(ring, slot);
  1487. ring->current_slot = slot;
  1488. }
  1489. static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
  1490. {
  1491. B43_WARN_ON(!ring->tx);
  1492. ring->ops->tx_suspend(ring);
  1493. }
  1494. static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
  1495. {
  1496. B43_WARN_ON(!ring->tx);
  1497. ring->ops->tx_resume(ring);
  1498. }
  1499. void b43_dma_tx_suspend(struct b43_wldev *dev)
  1500. {
  1501. b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
  1502. b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
  1503. b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
  1504. b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
  1505. b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
  1506. b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
  1507. }
  1508. void b43_dma_tx_resume(struct b43_wldev *dev)
  1509. {
  1510. b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
  1511. b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
  1512. b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
  1513. b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
  1514. b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
  1515. b43_power_saving_ctl_bits(dev, 0);
  1516. }
  1517. static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
  1518. u16 mmio_base, bool enable)
  1519. {
  1520. u32 ctl;
  1521. if (type == B43_DMA_64BIT) {
  1522. ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
  1523. ctl &= ~B43_DMA64_RXDIRECTFIFO;
  1524. if (enable)
  1525. ctl |= B43_DMA64_RXDIRECTFIFO;
  1526. b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
  1527. } else {
  1528. ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
  1529. ctl &= ~B43_DMA32_RXDIRECTFIFO;
  1530. if (enable)
  1531. ctl |= B43_DMA32_RXDIRECTFIFO;
  1532. b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
  1533. }
  1534. }
  1535. /* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
  1536. * This is called from PIO code, so DMA structures are not available. */
  1537. void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
  1538. unsigned int engine_index, bool enable)
  1539. {
  1540. enum b43_dmatype type;
  1541. u16 mmio_base;
  1542. type = dma_mask_to_engine_type(supported_dma_mask(dev));
  1543. mmio_base = b43_dmacontroller_base(type, engine_index);
  1544. direct_fifo_rx(dev, type, mmio_base, enable);
  1545. }