/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

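/* Program one 32-bit hardware descriptor: the buffer address (with the
 * SSB address-translation bits folded in), the byte count and the
 * frame-start, frame-end, table-end and IRQ control flags. */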
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

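/* Like the 32-bit variant above, but the buffer address is split into
 * low and high words, with the address-extension bits kept in the
 * high word. */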
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

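/* Thin wrappers around the streaming DMA API: TX buffers are mapped
 * towards the device, RX buffers from the device. */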
static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	struct device *dev = ring->dev->dev->dev;

	dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize)
{
	if (unlikely(dma_mapping_error(addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			return 1;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			return 1;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	struct b43_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

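/* Probe the DMA width the core supports: 64-bit if the TMSHIGH flag is
 * set, 32-bit if the address-extension bits of TXCTL are writable,
 * 30-bit otherwise. */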
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;

	nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev)))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	ring->dev = dev;
	ring->nr_slots = nr_slots;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

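/* do_div() divides the 64-bit value in place and returns the remainder,
 * hence these two wrapper macros for quotient and remainder. */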
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	switch (dmamask) {
	default:
		B43_WARN_ON(1);
	case DMA_30BIT_MASK:
		type = B43_DMA_30BIT;
		break;
	case DMA_32BIT_MASK:
		type = B43_DMA_32BIT;
		break;
	case DMA_64BIT_MASK:
		type = B43_DMA_64BIT;
		break;
	}
	err = ssb_dma_set_mask(dev->dev, dmamask);
	if (err) {
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required DMA mask (0x%08X%08X)\n",
		       (unsigned int)((dmamask & 0xFFFFFFFF00000000ULL) >> 32),
		       (unsigned int)(dmamask & 0x00000000FFFFFFFFULL));
		return -EOPNOTSUPP;
	}

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

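/* Queue one frame for transmission. Each frame occupies two descriptor
 * slots: the first carries the device TX header from txhdr_cache, the
 * second the 802.11 frame payload itself. */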
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb,
			   struct ieee80211_tx_control *ctl)
{
	const struct b43_dma_ops *ops = ring->ops;
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET  2
	B43_WARN_ON(skb_shinfo(skb)->nr_frags);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, ctl, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (b43_modparam_qos) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;

	if (unlikely(skb->len < 2 + 2 + 6)) {
		/* Too short, this can't be a valid frame. */
		return -EINVAL;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(dev, ctl->queue);
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = ctl->queue;

	err = dma_tx_fragment(ring, skb, ctl);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, ctl->queue);
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

static void b43_fill_txstatus_report(struct b43_dmaring *ring,
				     struct ieee80211_tx_status *report,
				     const struct b43_txstatus *status)
{
	bool frame_failed = 0;

	if (status->acked) {
		/* The frame was ACKed. */
		report->flags |= IEEE80211_TX_STATUS_ACK;
	} else {
		/* The frame was not ACKed... */
		if (!(report->control.flags & IEEE80211_TXCTL_NO_ACK)) {
			/* ...but we expected an ACK. */
			frame_failed = 1;
			report->excessive_retries = 1;
		}
	}
	if (status->frame_count == 0) {
		/* The frame was not transmitted at all. */
		report->retry_count = 0;
	} else {
		report->retry_count = status->frame_count - 1;
#ifdef CONFIG_B43_DEBUG
		if (frame_failed)
			ring->nr_failed_tx_packets++;
		else
			ring->nr_succeed_tx_packets++;
		ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
	}
}

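/* Handle a TX status report from the firmware: walk the descriptors that
 * belong to the reported cookie, unmap their buffers, report the last
 * fragment's skb to mac80211 and wake the queue if it was stopped
 * because the ring was full. */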
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!irqs_disabled());
	spin_lock(&ring->lock);

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			B43_WARN_ON(!meta->skb);
			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			b43_fill_txstatus_report(ring, &(meta->txstat), status);
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
						    &(meta->txstat));
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	struct ieee80211_tx_queue_stats_data *data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		data = &(stats->data[i]);
		ring = select_ring_by_priority(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		data->len = ring->used_slots / SLOTS_PER_PACKET;
		data->limit = ring->nr_slots / SLOTS_PER_PACKET;
		data->count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

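/* Process one received frame. The firmware writes the frame length into
 * the RX header inside the buffer; a length of zero means the DMA
 * transfer has not completed yet, so it is polled a few times before
 * the buffer is recycled. */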
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

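/* Service the RX ring: process every slot between our last position and
 * the hardware's current descriptor pointer. */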
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}